├── .gitignore ├── 2fa ├── README.md ├── authy-user-qr-show.py ├── authy-user-register.py ├── authy-user-status.py └── authy-user-verify.py ├── DictionaryUtils.py ├── README.md ├── airflow ├── bash_templating.py ├── branch_dag.py ├── cleaning_tweet.py ├── data │ └── data.csv ├── data_api_call.py ├── docker_celery_dag.py ├── dynamic_dag.py ├── hello_world.py ├── hook_dag.py ├── http_call.py ├── plugin_hook_dag.py ├── plugin_operator_dag.py ├── plugins │ └── elasticsearch_plugin │ │ ├── __init__.py │ │ ├── blueprints │ │ ├── __init__.py │ │ └── elasticsearch_blueprint.py │ │ ├── hooks │ │ ├── __init__.py │ │ └── elasticsearch_hook.py │ │ ├── menu_links │ │ ├── __init__.py │ │ └── elasticsearch_link.py │ │ ├── operators │ │ ├── __init__.py │ │ └── elasticsearch_operator.py │ │ ├── templates │ │ └── elasticsearch_plugin.html │ │ └── views │ │ ├── __init__.py │ │ └── elasticsearch_view.py ├── simple_dag_backfill.py ├── sla_dag.py ├── subdag_dag.py ├── subdag_factory.py ├── twitter_dag_final.py ├── twitter_dag_v_1.py ├── twitter_dag_v_2.py └── xcom_dag.py ├── ansible └── libraries │ └── openshift-wait │ └── openshift-wait-for-start.py ├── arguments.py ├── array-slicing.py ├── authy.py.md ├── aws ├── README.md ├── dynamodb.py ├── lambda │ ├── back2ussr_users.py │ ├── lambda-code.py │ ├── lambda-http-input-request.json │ ├── lambda-http-input-request.py │ ├── lambda-s3-processing.py │ ├── lambda-sns.py │ └── requirements.txt ├── s3-external.py ├── s3-html │ ├── CORS_CONFIG.json │ └── error.html ├── s3-read-files.py ├── s3-snapshot.py ├── s3.py └── ssm.py ├── barcode └── barcode-generator.py ├── base64.py ├── batch-executor-with-http-status ├── db-query.bat ├── notifier.py ├── query.sql ├── slack.bat └── slack.json ├── browser-open.py ├── builder.py ├── cherrypy └── cherry.py ├── class-abstract.py ├── class-additional-extension └── class-extends.py ├── cli-ui-input-output ├── README.md └── pyinquirer.py ├── clipboard.py ├── confluence ├── confluence-api-token.py └── 
confluence-api.py ├── console ├── autocomplete-example.py ├── autocomplete-example.sh ├── cut-text-file.py ├── download-file.py ├── download-zip.py ├── envvar.py ├── find-in-string-with-option.py ├── input-argument-selector.py ├── input_arguments.py ├── md5-duplication-remover.py ├── pipe-example.py ├── remove_left.py ├── remove_right.py ├── return_result_emulator.py ├── return_result_emulator.settings ├── return_result_emulator.sh ├── return_result_waiting.py ├── split-text-file.py ├── string-in-list.py ├── tail_reader.py ├── trim_strings.py ├── unique-file-lines.py ├── unique.py └── zip-two-files.py ├── country-phone ├── country-phone.csv └── generate-insert.py ├── csv ├── csv-reader.py ├── csv2sql.py └── sample.csv ├── datetime ├── datetime-log-parser.py ├── t-time2timestamp.py └── timestamp.py ├── db-key-value └── shelve-example.py ├── decorator-with-parameters.py ├── decorator.py ├── decorator └── custom-decorator.py ├── default_parameter_list.py ├── deployment.yaml ├── dict-comprehension.py ├── dict-comprehensive.py ├── difference-between-liquibase-db ├── db-diff.py ├── downloadutils.py ├── filesystemutils.py ├── jirawrapper.py ├── sql-scripts │ ├── mvn_create_user.sql │ └── update_folder_name.sql └── zipwrapper.py ├── doc └── DOC.md ├── docker └── docker_list.py ├── dynamic-class.py ├── dynamic-vars.py ├── elastic.py ├── else-for.py ├── email ├── quoted-printable.py ├── send-cli │ ├── email-sender.properties │ ├── email-sender.sh │ ├── main.py │ └── message-template.j2 └── sendgrid │ └── send_message.py ├── enum └── enumeration.py ├── envvar-set.py ├── excel └── README.md ├── exceptions ├── raise-exception.py ├── read_file.py └── simple-extension.py ├── execute-another-script.py ├── expand_dictionary.py ├── fastapi ├── .s2i │ └── environment ├── .s2iignore ├── Dockerfile ├── README.md ├── fastapi-snippets.md ├── requirements.txt ├── simple-fastapi.py └── unicorn-fastapi.py ├── file-extension.py ├── filesystem ├── archiver │ └── batch_archiver.py ├── 
brand-xml-clear.py ├── calculate-files.py ├── copy-list-of-file-to-dest.py ├── cp.py ├── create-remove-folder.py ├── detect-filesystem-changes.py ├── find-delete-by-mask.py ├── memory-file.py ├── old-file-list.py ├── print_current_path.py ├── remover │ └── remove_old_files.py └── temp-file.py ├── find-file.py ├── flask ├── README.md ├── app-with-doc │ └── flask-app.py ├── app.py ├── app_error.py ├── echo.py ├── flask-cors-manual.py ├── flask-restful-cors-authorize.py ├── global-context.md ├── inline-parameter.py ├── test_app.py └── var-printer.py ├── flatmap.py ├── for-else.py ├── format-output └── format.py ├── formats.md ├── formats.sh ├── formatted-file ├── example.properties ├── free_text │ ├── filter_brackets.py │ ├── test-data-01.txt │ ├── test-data-02.txt │ ├── test-data-03.txt │ ├── test-data-04.txt │ ├── test-data-05.txt │ ├── test-data-06.txt │ └── test_filter_brackets.py ├── ini │ ├── example.ini │ └── ini.py ├── json │ ├── example.json │ └── json.py ├── properties │ ├── example.properties │ ├── properties.py │ └── properties2.py ├── txt │ ├── read-txt-file.py │ └── sample.txt ├── xml │ ├── example.xml │ ├── xml-dom-read.py │ ├── xml-read.py │ └── xml-sax-read.py └── yaml │ ├── example.yaml │ ├── yaml-read.py │ └── yaml_with_comments │ ├── .gitignore │ ├── pyyaml_include.py │ ├── ruamel_comments.py │ └── test-data-01.yaml ├── ftp └── ftp-server-start.sh ├── function-by-name.py ├── functions-built-in ├── iter.py └── iterable.py ├── generator.py ├── generator ├── generator_wrapper.py └── read-lines-from-filelist.py ├── generic-function.py ├── git-check-branches └── git-check-merged-branches.py ├── git-log-parser ├── history.md └── log-parser.py ├── git-print-all-diff └── git-print-all-diffs.py ├── git └── git-collaboration.py ├── global_module.py ├── google-captcha └── reCaptcha.md ├── graphics ├── README.md ├── data.psv └── graphics-creator.py ├── hash256.py ├── hashmd5.py ├── html-automation └── anki │ └── anki-web.py ├── html-scraping ├── README.md ├── 
advert-check-publishing.py ├── binary-html │ └── beautifulsoup.py ├── lxml │ ├── curl-output-html-parser-example.sh │ ├── curl-output-html-parser.py │ ├── parse_page.py │ └── parse_page_broken_html.py ├── request-html │ └── parse-with-js.py └── selenium │ ├── read-element.py │ ├── read.me │ ├── xing-read-cookies.py │ └── xing-save-cookies.py ├── http-current-date-time.python ├── http-executor ├── README.md └── http-executor.py ├── http ├── check_mock.py ├── current-date-time.python ├── download-zip.py ├── download.py ├── download2.py ├── http-request.py ├── http-server-start.md ├── http-server.py ├── openshift-api.py ├── request-debug.py ├── request-get.py ├── request-grain-control.py ├── request-post.py ├── tornado-get-params.py ├── tornado-upload.py └── web.py ├── images └── image-operations.py ├── import ├── import-module │ ├── import_module.py │ └── some_module │ │ ├── __init__.py │ │ ├── file1.py │ │ └── file2.py └── remote-import.py ├── input-parameters ├── custom-parser.py ├── input_args.py ├── input_params.py └── parameters_extractor.py ├── ista ├── Labels_ISTA4.15 ├── create-mongo-container.sh ├── demo.md ├── mongo-docker │ └── docker-compose.yaml ├── signal-collector.archimate └── soft │ ├── json2mongo.py │ ├── md5-duplication-remover.py │ ├── xml2json.py │ └── zip-opener.py ├── jdbc └── jdbc-mysql.py ├── jenkins-notification ├── READ.me ├── jenkins-job-waiting-for-finish.py ├── jenkins-pipeline-info.py └── windows_notification.py ├── jira ├── connect-to-jira.py ├── jira-comments-with-my-name.py └── jira-open-issues.py ├── json ├── compare-json.py ├── json-example.py ├── json-walk-print-leafs.py └── json2csv.py ├── jupyter_utils.py ├── kafka ├── consumer-single-message.py ├── consumer.py ├── kafka-producer-consumer.py └── producer.py ├── keyboard ├── keyboard-automation.py ├── keypress-getkey.py └── keypress-read.py ├── ldap └── ldap-search.py ├── linkedin.py.snipped ├── list ├── list-sort.py ├── list-substraction-via-set.py ├── list-substraction.py └── 
list_unpacking.py ├── log-remover ├── READ.me ├── log-remover └── log-remover.py ├── logging └── log-example.py ├── mail-send.py ├── mail ├── imap-client-xing-remover.py └── imap-client.py ├── map-reduce-graph ├── short-path-breadth-mapper.py ├── short-path-breadth-mapper.readme ├── short-path-breadth-mapper.txt ├── short-path-breadth-reducer.py ├── short-path-breadth-reducer.readme ├── short-path-breadth-reducer.txt ├── short-path-deykstra.py ├── short-path-deykstra.readme └── short-path-deykstra.txt ├── map-reduce-improvements ├── input-data-v1.txt ├── input-data-v2.txt ├── map-reduce-combining-v1.py ├── map-reduce-combining-v1.sh ├── map-reduce-combining-v2.py ├── map-reduce-combining-v2.sh ├── page-rank │ ├── page-rank-mapper.py │ ├── page-rank-mapper.txt │ ├── page-rank-reducer.py │ └── page-rank-reducer.txt ├── specific │ ├── complex-MapR.sh │ ├── complex-mapper.1.py │ ├── complex-mapper.1.txt │ ├── complex-mapper.2.py │ ├── complex-mapper.2.txt │ ├── complex-mapper.3.py │ ├── complex-mapper.py │ ├── complex-mapper.txt │ ├── complex-reducer.1.py │ ├── complex-reducer.1.txt │ ├── complex-reducer.py │ ├── mapper-filter.1.py │ ├── mapper-filter.py │ ├── mapper-filter.txt │ ├── reducer-join-inner.py │ ├── reducer-join-outer.py │ ├── reducer-join.txt │ ├── reducer-merge.py │ ├── reducer-merge.txt │ ├── reducer-substract-left.1.py │ ├── reducer-substract-left.py │ └── reducer-substract-outer.py └── tf-idf │ ├── mapper.1.py │ ├── mapper.1.sh │ ├── mapper.1.txt │ ├── mapper.2.py │ ├── mapper.2.txt │ ├── reducer.1.py │ ├── reducer.1.txt │ ├── reducer.2.py │ └── reducer.2.txt ├── map-reduce-streaming ├── MrAppManager.sh ├── input.data ├── mapper.py └── reducer.py ├── map-reduce ├── UserSessionCounter.py ├── UserSessionCounterRunner.py ├── combiner.py ├── input-data-combiner.txt ├── input-data-reducer.txt └── reducer.py ├── mapr-stream ├── kafka-consumer.py └── kafka-producer.py ├── mapr ├── README.md └── mapr_connection.py ├── maven-dependencies ├── 
dependency-finder.py ├── dependency-splitter.py └── dependency.md ├── maven-duplication ├── duplication.py └── maven-check.sh ├── mctrek ├── advicer.1.py └── advicer.py ├── md5-example.py ├── md5.py ├── method_by_name.py ├── mlflow.py ├── module-attributes-check.py ├── mongo └── json2mongo.py ├── mouse ├── mouse-automation.py ├── mouse-move.py └── mouse-move2.py ├── multi-argument-for.py ├── multiply-inheritance.py ├── mysqldb ├── mariadb-batch.py ├── mariadb-connect.py ├── mariadb-insert.py ├── multi-cursor └── select-big-query.py ├── named-tuple.py ├── numpy └── numpy-example.py ├── oauth └── oauth2.md ├── object-copy.py ├── object-with-properties ├── get_attr.py ├── index-object.py ├── object.py └── set_attr.py ├── object_class └── object-class.py ├── object_enter_exit_with ├── with_enter-exit.txt └── with_yield.py ├── ocr └── image_recognition.py ├── open-shift ├── README.md ├── oc-login.py ├── ocp-check-pvc.py └── ocp-check-running.py ├── operator-override.py ├── oracle ├── README.md ├── compare-two-oracle-connections.py ├── simple-operations │ ├── connection-to-oracle.py │ ├── credential.example │ └── read-data.py └── utils │ ├── db_utils.py │ ├── is-table-exists.py │ └── test.bat ├── os-package.py ├── os-signal.py ├── pandas ├── data.csv ├── df-manipulation.py ├── fetching_tweet.py └── pandas-example.py ├── parameter-for-app ├── parameter-reader.py └── path-to-file.settings ├── parameters └── parameters.py ├── partial.py ├── password_read.py ├── pex ├── .gitignore ├── README.md ├── magic_name │ └── __main__.py ├── naked_example │ ├── __init__.py │ ├── main.py │ └── setup.py ├── requirements.txt ├── samplepkg.pex └── simple.json ├── pip-install-package ├── README.md ├── airflow_shopify │ ├── __init__.py │ └── shopify │ │ ├── __init__.py │ │ └── collection │ │ └── __init__.py └── setup.py ├── pip-install-without-admin-rights.py ├── port-check.py ├── print-to-error ├── private-memeber.py ├── process-kill.py ├── proxy └── foxyproxy-generator.py ├── pulumi ├── 
vpc-create.py └── vpc-create │ ├── .gitignore │ ├── Pulumi.yaml │ ├── __main__.py │ └── requirements.txt ├── qrcode └── qrcode-with-segno.py ├── quotas-reader └── book-quotas-reader.py ├── random.py ├── range.py ├── redis ├── connect.py └── redis-labs.zip ├── rest-files-read ├── readfilesrest.ini ├── readfilesrest │ ├── __init__.py │ └── main.py └── setup.py ├── rest-files-write └── file_uploader.py ├── return_None_or_str.py ├── script-exec └── execute-string.py ├── selenium ├── .gitignore ├── selenium_headless.py ├── selenium_utils.md ├── selenium_utils.py └── test_selenium_utils.py ├── sendgrid.py.md ├── serialization └── pickle-serialization.py ├── singleton.py ├── slack-send-message.py ├── sort └── sort-file-by-datetime.py ├── spark-command-line-document-here.py ├── spark-command-line-wrapper.py ├── spark-csv-panda └── spark-csv-matplot.py ├── spark-kafka └── spark-kafka-json.py ├── spark ├── .gitignore ├── README.md ├── pyspark-sql.py ├── spark-test-connection.py ├── spark-words-count.py ├── spark_avro_to_parquet.py ├── spark_csv_to_avro.py ├── spark_csv_to_parquet.py ├── spark_json_to_avro.py ├── spark_json_to_parquet.py ├── spark_parquet_to_avro.py └── words.txt ├── speach-recognition └── README.md ├── speech-recognition └── sphinx.md ├── sql-file-processor.python ├── sql ├── postgre-tool.py └── postgre.py ├── sqlalchemy ├── README.md ├── in-memory.py ├── job_status.py ├── schema.py ├── session_scope.py ├── sessionmaker_engine_creator.py └── test_create_tables.py ├── sqlite ├── sqlite-connection.py ├── sqlite-sqlalchemy-orm.py └── sqlite-sqlalchemy.py ├── ssh-operations ├── brand-server-dev-is-process-alive.py ├── brand-server-dev-tomcat-process-count.py ├── ssh-brandserver-version-checker.py ├── ssh-clear-logs-bs-on-machine.py ├── ssh-clear-logs-on-machine.py ├── ssh-command-executor.py └── ssh-command-repeater.py ├── standart-build-tool ├── build.bat ├── check.bat ├── graphical-installer.bat ├── install.bat ├── rpm-package.bat ├── scrabber │ └── 
__init__.py └── setup.py ├── str-repr.py ├── string-regexp.py ├── string-validation.py ├── string2byte.py ├── string_remove_after_last.py ├── subprocess ├── check-file-existence.py ├── check-ip.py ├── curl.py ├── execute-batch-job.py ├── execute-jar-file.py ├── execute_line.py ├── stdout-to-null.py └── subprocess-example.py ├── subtitiles-translator ├── subtitles-translator-disney.py ├── ttml-translator.py └── ttml-translator2.py ├── teams └── send-message.py ├── telegram ├── .gitignore ├── README.md ├── telegram-send-message.py └── telethon-send-message.py ├── telnet └── telnet.py ├── template └── jinja │ ├── main.py │ └── message-template.j2 ├── test ├── README.md ├── doctest-examples.txt ├── doctest.py ├── pytest.py └── unittest.md ├── text ├── clear-prefix.py ├── find-occurences │ └── find-text-occurences.py ├── read-file-fix-line.py └── unique.py ├── thread ├── function-in-thread.py ├── thread-group.py └── thread.py ├── token └── jwt.py ├── tor ├── change-tor-ip.py └── tor-signal.py ├── torch.py ├── tornado-ssh-command-executer ├── READ.me ├── log-remover └── log-remover.py ├── twilio └── twilio.md ├── web-echo ├── READ.md ├── echo-ssl.py ├── echo.py └── treesheets-automation.py ├── web-file-response └── file-server-tornado.py ├── web-socket ├── socket-client.py ├── socket-server.py └── vars.py ├── xgboost.py ├── xmind-todo-parser ├── education-advicer.py ├── skip.txt ├── todo.sh └── xmind-python.md ├── xml-archimate2svg ├── .gitignore ├── README.md ├── svg-dynamic-tooltip.js-svg ├── svg-tooltip │ └── tooltip-example.html ├── test │ ├── doc-test.archimate │ ├── doc-test.sh │ └── doc-test.svg ├── text_parser.html └── update-archimate.py ├── xml-minidom └── read-xml2.py ├── xml-xpath ├── column_finder.py ├── column_mapper.py ├── red-line.svg ├── svg-parse.py └── xml2json.py ├── youtube └── youtube-snippet.md ├── zip-codes-us ├── README.md └── csv-to-sql.py ├── zip-opener └── zip-opener.py └── zip ├── 1.txt ├── 2.txt ├── 3.txt ├── out.zip ├── 
zip-archive-unarchive.py ├── zip-read.py └── zip-write.py /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | *.iml 3 | .vscode 4 | __pycache__ 5 | *.sublime-project 6 | *.sublime-workspace 7 | *.sublime-build 8 | *.pyc 9 | **/venv 10 | -------------------------------------------------------------------------------- /2fa/authy-user-qr-show.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | from authy.api import AuthyApiClient 4 | 5 | authy_api = AuthyApiClient(os.environ.get("AUTHY_API_KEY")) 6 | 7 | # Available in version 2.2.4+ 8 | response = authy_api.users.generate_qr(os.environ.get("AUTHY_USER_ID"), size=200, label="QR for cherkavi-test") 9 | if response.ok(): 10 | print(response.content['qr_code']) 11 | else: 12 | print(response.content["message"]) -------------------------------------------------------------------------------- /2fa/authy-user-register.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from authy.api import AuthyApiClient 4 | 5 | # Your API key from twilio.com/console/authy/applications 6 | # DANGER! This is insecure. 
See http://twil.io/secure 7 | authy_api = AuthyApiClient(os.environ.get("AUTHY_API_KEY")) 8 | 9 | email = sys.argv[1] # 'some_email@gmail.com' 10 | phone = sys.argv[2] # '135 35 35 35 35' 11 | # https://github.com/twilio/authy-form-helpers/blob/master/src/form.authy.js 12 | country_code = sys.argv[3] # 49 13 | 14 | user = authy_api.users.create(email=email,phone=phone,country_code=country_code) 15 | if user.ok(): 16 | print(user.id) # user.id is the `authy_id` needed for future requests 17 | else: 18 | print(user.errors()) 19 | -------------------------------------------------------------------------------- /2fa/authy-user-status.py: -------------------------------------------------------------------------------- 1 | from authy.api import AuthyApiClient 2 | import pprint 3 | import os 4 | 5 | # check client status 6 | authy_api = AuthyApiClient(os.environ.get("AUTHY_API_KEY")) 7 | status = authy_api.users.status(os.environ.get("AUTHY_USER_ID")) 8 | if status.ok(): 9 | print("OK") 10 | else: 11 | print("---ERROR---") 12 | pprint.PrettyPrinter(indent=4).pprint(status) 13 | 14 | -------------------------------------------------------------------------------- /2fa/authy-user-verify.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | from authy.api import AuthyApiClient 4 | 5 | authy_api = AuthyApiClient(os.environ.get("AUTHY_API_KEY")) 6 | # sms = authy_api.users.request_sms(current_user.authy_id, {'force': True}) 7 | # verification = authy_api.phones.verification_check("13535353535","49", sys.argv[1]) 8 | verification = authy_api.tokens.verify(token = sys.argv[1], device_id=os.environ.get("AUTHY_USER_ID")) 9 | if verification.ok(): 10 | print("OK") 11 | else: 12 | print("error") 13 | 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [python 
modules](https://docs.python.org/3/py-modindex.html) 2 | ![pre-processing](https://i.postimg.cc/Z5YJnS29/python-pre-processing.png) 3 | ![data analysis](https://i.postimg.cc/dQdsSnG0/python-data-analysis.png) 4 | ![explaratory data analysis](https://i.postimg.cc/CLghyD4s/python-explaratory-data-analysis.png) 5 | :todo: SciKit, statsmodels, scipy 6 | -------------------------------------------------------------------------------- /airflow/dynamic_dag.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | 3 | from airflow import DAG 4 | from airflow.operators.bash_operator import BashOperator 5 | from airflow.operators.postgres_operator import PostgresOperator 6 | 7 | default_args = { 8 | 'owner': 'airflow', 9 | 'start_date': dt.datetime(2018, 10, 25, 11, 30, 00), 10 | 'retries': 0 11 | } 12 | 13 | with DAG('dynamic_dag', 14 | default_args=default_args, 15 | schedule_interval='*/5 * * * *', 16 | catchup=False) as dag: 17 | 18 | opr_end = BashOperator(task_id='opr_end', bash_command='echo "Done"') 19 | 20 | # Dynamic Definition of your DAG!! 
21 | for counter in range(1, 4): 22 | task_id='opr_insert_' + str(counter) 23 | task_date=dt.datetime.now().strftime("%Y-%m-%d %H:%M:%S") 24 | opr_insert = PostgresOperator(task_id=task_id, 25 | sql="INSERT INTO local_executor.task (id, timestamp) VALUES ('" + task_id + "_" + task_date + "', '" + task_date + "');", 26 | postgres_conn_id='postgre_sql', 27 | autocommit=True, 28 | database='airflow_mdb') 29 | opr_insert >> opr_end 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /airflow/hello_world.py: -------------------------------------------------------------------------------- 1 | from airflow import DAG 2 | from airflow.operators.dummy_operator import DummyOperator 3 | from airflow.operators.python_operator import PythonOperator 4 | 5 | from time import sleep 6 | from datetime import datetime 7 | 8 | def print_hello(): 9 | sleep(5) 10 | return 'Hello World' 11 | 12 | with DAG('hello_world_dag', description='First DAG', schedule_interval='*/10 * * * *', start_date=datetime(2018, 11, 1), catchup=False) as dag: 13 | dummy_task = DummyOperator(task_id='dummy_task', retries=3) 14 | python_task = PythonOperator(task_id='python_task', python_callable=print_hello) 15 | 16 | dummy_task >> python_task 17 | -------------------------------------------------------------------------------- /airflow/plugin_hook_dag.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | 3 | from airflow import DAG 4 | from airflow.operators.bash_operator import BashOperator 5 | from airflow.operators.python_operator import PythonOperator 6 | from airflow.hooks.elasticsearch_plugin import ElasticsearchHook 7 | 8 | default_args = { 9 | 'owner': 'airflow', 10 | 'start_date': dt.datetime(2018, 11, 25, 11, 30, 00), 11 | 'concurrency': 1, 12 | 'retries': 0 13 | } 14 | 15 | def do_some_stuff(): 16 | es_hook = ElasticsearchHook() 17 | print(es_hook.info()) 18 | 19 | with 
DAG('plugin_hook_dag', 20 | default_args=default_args, 21 | schedule_interval='@once', 22 | catchup=False 23 | ) as dag: 24 | 25 | hook_es = PythonOperator(task_id='hook_es', python_callable=do_some_stuff) 26 | opr_end = BashOperator(task_id='opr_end', bash_command='echo "Done"') 27 | hook_es >> opr_end 28 | -------------------------------------------------------------------------------- /airflow/plugin_operator_dag.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | 3 | from airflow import DAG 4 | from airflow.operators.bash_operator import BashOperator 5 | from airflow.operators.python_operator import PythonOperator 6 | from airflow.hooks.elasticsearch_plugin import ElasticsearchHook 7 | from airflow.operators.elasticsearch_plugin import PostgresToElasticsearchTransfer 8 | 9 | default_args = { 10 | 'owner': 'airflow', 11 | 'start_date': dt.datetime(2018, 11, 25, 11, 30, 00), 12 | 'concurrency': 1, 13 | 'retries': 0 14 | } 15 | 16 | with DAG('plugin_operator_dag', 17 | default_args=default_args, 18 | schedule_interval='@once', 19 | catchup=False 20 | ) as dag: 21 | 22 | opr_transfer = PostgresToElasticsearchTransfer(task_id='postgres_to_es', sql='SELECT * FROM course.source', index='sources') 23 | opr_end = BashOperator(task_id='opr_end', bash_command='echo "Done"') 24 | opr_transfer >> opr_end 25 | -------------------------------------------------------------------------------- /airflow/plugins/elasticsearch_plugin/__init__.py: -------------------------------------------------------------------------------- 1 | from airflow.plugins_manager import AirflowPlugin 2 | 3 | from elasticsearch_plugin.hooks.elasticsearch_hook import ElasticsearchHook 4 | 5 | # Views / Blueprints / MenuLinks are instantied objects 6 | class ElasticsearchPlugin(AirflowPlugin): 7 | name = "elasticsearch_plugin" 8 | operators = [] 9 | sensors = [] 10 | hooks = [ ElasticsearchHook ] 11 | executors = [] 12 | admin_views = [] 13 | 
flask_blueprints = [] 14 | menu_links = [] 15 | 16 | -------------------------------------------------------------------------------- /airflow/plugins/elasticsearch_plugin/blueprints/__init__.py: -------------------------------------------------------------------------------- 1 | from elasticsearch_plugin.blueprints.elasticsearch_blueprint import ElasticsearchBlueprint 2 | 3 | ELASTICSEARCH_PLUGIN_BLUEPRINTS = [ 4 | ElasticsearchBlueprint 5 | ] 6 | -------------------------------------------------------------------------------- /airflow/plugins/elasticsearch_plugin/blueprints/elasticsearch_blueprint.py: -------------------------------------------------------------------------------- 1 | from flask import Blueprint 2 | 3 | # Creating a flask blueprint to integrate the templates and static folder 4 | # This creates a blueprint named "elasticsearch_plugin" defined in the file __name__. The template folder is ../templates and static_folder is static 5 | ElasticsearchBlueprint = Blueprint('elasticsearch', __name__, template_folder='../templates', static_folder='static', static_url_path='/static/') 6 | -------------------------------------------------------------------------------- /airflow/plugins/elasticsearch_plugin/hooks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cherkavi/python-utilities/110075815ac384cb800e1bcea2dadc3e79f67c0d/airflow/plugins/elasticsearch_plugin/hooks/__init__.py -------------------------------------------------------------------------------- /airflow/plugins/elasticsearch_plugin/menu_links/__init__.py: -------------------------------------------------------------------------------- 1 | from elasticsearch_plugin.menu_links.elasticsearch_link import ElasticsearchLink 2 | 3 | ELASTICSEARCH_PLUGIN_LINKS = [ 4 | ElasticsearchLink 5 | ] 6 | -------------------------------------------------------------------------------- 
/airflow/plugins/elasticsearch_plugin/menu_links/elasticsearch_link.py: -------------------------------------------------------------------------------- 1 | from flask_admin.base import MenuLink 2 | 3 | ElasticsearchLink = MenuLink(category='Elasticsearch Plugin', name='More Info', url='https://unknown.com') 4 | -------------------------------------------------------------------------------- /airflow/plugins/elasticsearch_plugin/operators/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cherkavi/python-utilities/110075815ac384cb800e1bcea2dadc3e79f67c0d/airflow/plugins/elasticsearch_plugin/operators/__init__.py -------------------------------------------------------------------------------- /airflow/plugins/elasticsearch_plugin/views/__init__.py: -------------------------------------------------------------------------------- 1 | from elasticsearch_plugin.views.elasticsearch_view import ElasticsearchView 2 | 3 | # By leaving empty the parameter "category" you will get a direct to link to your view. 
(no drop down menu) 4 | ELASTICSEARCH_PLUGIN_VIEWS = [ 5 | ElasticsearchView(category='Elasticsearch Plugin', name='Elasticsearch Dashboard') 6 | ] 7 | -------------------------------------------------------------------------------- /airflow/plugins/elasticsearch_plugin/views/elasticsearch_view.py: -------------------------------------------------------------------------------- 1 | from flask_admin import BaseView, expose 2 | from elasticsearch_plugin.hooks.elasticsearch_hook import ElasticsearchHook 3 | 4 | class ElasticsearchView(BaseView): 5 | @expose('/', methods=['GET', 'POST']) 6 | def index(self): 7 | try: 8 | es = ElasticsearchHook() 9 | data = es.info() 10 | isup = es.ping() 11 | except: 12 | data = {} 13 | isup = False 14 | return self.render("elasticsearch_plugin.html", data=data, isup=isup) 15 | -------------------------------------------------------------------------------- /airflow/simple_dag_backfill.py: -------------------------------------------------------------------------------- 1 | import datetime as dt 2 | 3 | from airflow import DAG 4 | from airflow.operators.bash_operator import BashOperator 5 | from airflow.operators.python_operator import PythonOperator 6 | 7 | default_args = { 8 | 'owner': 'airflow', 9 | 'start_date': dt.datetime(2018, 12, 15, 22, 00, 00), 10 | 'concurrency': 1, 11 | 'retries': 0 12 | } 13 | 14 | with DAG('simple_dag_backfill', 15 | default_args=default_args, 16 | schedule_interval='*/10 * * * *') as dag: 17 | task_hello = BashOperator(task_id='hello', bash_command='echo "hello!"') 18 | task_bye = BashOperator(task_id='bye', bash_command='echo "bye!"') 19 | task_hello >> task_bye 20 | -------------------------------------------------------------------------------- /airflow/sla_dag.py: -------------------------------------------------------------------------------- 1 | from airflow import DAG 2 | from airflow.operators.dummy_operator import DummyOperator 3 | from airflow.operators.bash_operator import BashOperator 4 | from 
datetime import timedelta, datetime 5 | 6 | def log_sla_miss(dag, task_list, blocking_task_list, slas, blocking_tis): 7 | print("SLA was missed on DAG {0}s by task id {1}s with task list: {2} which are " \ 8 | "blocking task id {3}s with task list: {4}".format(dag.dag_id, slas, task_list, blocking_tis, blocking_task_list)) 9 | 10 | default_args = { 11 | 'owner': 'airflow', 12 | 'depends_on_past': False, 13 | 'start_date': datetime(2019, 2, 4, 23, 15, 0), 14 | 'email': None, 15 | 'email_on_failure': False, 16 | 'email_on_retry': False, 17 | 'retries': 0 18 | } 19 | 20 | with DAG('sla_dag', default_args=default_args, sla_miss_callback=log_sla_miss, schedule_interval="*/1 * * * *", catchup=False) as dag: 21 | 22 | t0 = DummyOperator(task_id='t0') 23 | 24 | t1 = BashOperator(task_id='t1', bash_command='sleep 15', sla=timedelta(seconds=5), retries=0) 25 | 26 | t0 >> t1 27 | -------------------------------------------------------------------------------- /airflow/subdag_dag.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timedelta 2 | 3 | from airflow.models import DAG 4 | from airflow.operators.subdag_operator import SubDagOperator 5 | from airflow.operators.dummy_operator import DummyOperator 6 | from subdag_factory import subdag_factory 7 | 8 | PARENT_DAG_NAME='subdag_dag' 9 | SUBDAG_DAG_NAME='subdag' 10 | 11 | with DAG( 12 | dag_id=PARENT_DAG_NAME, 13 | schedule_interval='*/10 * * * *', 14 | start_date=datetime(2018, 11, 5, 10, 00, 00), 15 | catchup=False 16 | ) as dag: 17 | start_task = DummyOperator(task_id='start') 18 | subdag_task = SubDagOperator( 19 | subdag=subdag_factory(PARENT_DAG_NAME, SUBDAG_DAG_NAME, dag.start_date, dag.schedule_interval), 20 | task_id=SUBDAG_DAG_NAME 21 | ) 22 | end_task = DummyOperator(task_id='end') 23 | start_task >> subdag_task >> end_task 24 | -------------------------------------------------------------------------------- /airflow/subdag_factory.py: 
-------------------------------------------------------------------------------- 1 | from airflow.models import DAG 2 | from airflow.operators.dummy_operator import DummyOperator 3 | 4 | def subdag_factory(parent_dag_name, child_dag_name, start_date, schedule_interval): 5 | subdag = DAG( 6 | dag_id='{0}.{1}'.format(parent_dag_name, child_dag_name), 7 | schedule_interval=schedule_interval, 8 | start_date=start_date, 9 | catchup=False) 10 | with subdag: 11 | dop_list = [DummyOperator(task_id='subdag_task_{0}'.format(i), dag=subdag) for i in range(5)] 12 | #for i, dop in enumerate(dop_list): 13 | # if i > 0: 14 | # dop_list[i - 1] >> dop 15 | return subdag 16 | -------------------------------------------------------------------------------- /airflow/twitter_dag_v_1.py: -------------------------------------------------------------------------------- 1 | # load the dependencies 2 | from airflow import DAG 3 | from datetime import date, timedelta, datetime 4 | 5 | # default_args are the default arguments applied to the DAG and all inherited tasks 6 | DAG_DEFAULT_ARGS = { 7 | 'owner': 'airflow', 8 | 'depends_on_past': False, 9 | 'retries': 1, 10 | 'retry_delay': timedelta(minutes=1) 11 | } 12 | 13 | with DAG('twitter_dag_v1', start_date=datetime(2018, 10, 1), schedule_interval="@daily", default_args=DAG_DEFAULT_ARGS, catchup=False) as dag: 14 | None 15 | -------------------------------------------------------------------------------- /array-slicing.py: -------------------------------------------------------------------------------- 1 | pets=["dog", "cat", "bird", "pig"] 2 | print pets[0:2] 3 | print pets[:2] 4 | print pets[:-1] 5 | print pets[-3:] 6 | 7 | # another example, with third parameter - step 8 | print pets[0:4:2] 9 | # step for range 10 | print range(0,10)[::3] 11 | print range(0,10)[::-1] 12 | print range(0,10)[::-3] 13 | -------------------------------------------------------------------------------- /aws/README.md: 
import json


def lambda_handler(event, context):
    """AWS Lambda entry point: respond with a fixed JSON payload and HTTP 200."""
    payload = "data from lambda"
    return {
        "statusCode": 200,
        "body": json.dumps(payload),
        "headers": {"Content-Type": "application/json"},
    }
json 2 | from datetime import datetime 3 | import boto3 4 | 5 | def lambda_handler(event, context): 6 | # example of input message processing 7 | # message = json.loads(event['Records'][0]['Sns']['Message']) 8 | 9 | sns = boto3.client('sns') 10 | # Publish a simple message to the specified SNS topic 11 | response = sns.publish( 12 | TopicArn='arn:aws:sns:eu-central-1:85442027:cherkavi', 13 | Message='Hello from boto3', 14 | ) 15 | 16 | 17 | return { 18 | 'statusCode': 200, 19 | # str(datetime.now()) 20 | 'body': json.dumps(response) 21 | } 22 | -------------------------------------------------------------------------------- /aws/lambda/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3~=1.12.41 -------------------------------------------------------------------------------- /aws/s3-html/CORS_CONFIG.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AllowedHeaders": [ 4 | "Authorization" 5 | ], 6 | "AllowedMethods": [ 7 | "GET" 8 | ], 9 | "AllowedOrigins": [ 10 | "" 11 | ], 12 | "ExposeHeaders": [], 13 | "MaxAgeSeconds": 3000 14 | } 15 | ] -------------------------------------------------------------------------------- /aws/s3-html/error.html: -------------------------------------------------------------------------------- 1 |

internal server error

2 | -------------------------------------------------------------------------------- /aws/s3.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | 4 | BUCKET_NAME = "tk-bt-001" 5 | BUCKET_ITEM = "index.html" 6 | 7 | 8 | def lambda_handler(event, context): 9 | s3 = boto3.resource("s3") 10 | item_from_s3 = s3.Object(BUCKET_NAME, BUCKET_ITEM) 11 | data_from_s3 = item_from_s3.get()['Body'].read() 12 | 13 | # save to temporary 512Mb storage 14 | with open("/tmp/out.txt", "w") as output_file: 15 | output_file.write(data_from_s3.decode("utf-8")) 16 | 17 | return { 18 | 'statusCode': 200, 19 | 'data': json.dumps(str(data_from_s3)) 20 | } 21 | 22 | -------------------------------------------------------------------------------- /aws/ssm.py: -------------------------------------------------------------------------------- 1 | import json 2 | import boto3 3 | import os 4 | 5 | ssm = boto3.client('ssm', region_name="eu-west-3") 6 | dev_or_prod = os.environ['DEV_OR_PROD'] 7 | 8 | def lambda_handler(event, context): 9 | db_url = ssm.get_parameters(Names=["/my-app/" + dev_or_prod + "/db-url"]) 10 | print(db_url) 11 | db_password = ssm.get_parameters(Names=["/my-app/" + dev_or_prod + "/db-password"], WithDecryption=True) 12 | print(db_password) 13 | -------------------------------------------------------------------------------- /barcode/barcode-generator.py: -------------------------------------------------------------------------------- 1 | # pip install python-barcode Pillow 2 | 3 | import barcode 4 | from barcode.writer import ImageWriter 5 | from PIL import Image 6 | import sys 7 | 8 | def generate_barcode(text, filename): 9 | # Create a barcode object 10 | code = barcode.get('code128', text, writer=ImageWriter()) 11 | 12 | # Save the barcode as an image 13 | code.save(filename) 14 | 15 | # Open the generated image and resize it if needed 16 | # img = Image.open(filename) 17 | # img = img.resize((300, 150)) # 
import base64

# NOTE(review): naming a script base64.py shadows the stdlib module — consider renaming.
# base64.encodestring() was deprecated and removed in Python 3.9;
# encodebytes() is the drop-in replacement (same newline-terminated output).


def encode_credentials(credentials: str) -> bytes:
    """Return the Base64 encoding of *credentials* (newline-terminated,
    like base64.encodebytes)."""
    return base64.encodebytes(credentials.encode())


print(encode_credentials('login:password'))
print(encode_credentials('weblogic:weblogic1'))


# with open(path_to_image_file, "rb") as f:
#     encoded = base64.b64encode(f.read())
import abc
from abc import ABC


class Parent(ABC):
    """Abstract base class: concrete subclasses must implement who_am_i()."""

    @abc.abstractmethod
    def who_am_i(self):
        return "parent"


class Child(Parent):
    """Concrete implementation of Parent."""

    def who_am_i(self):
        return "child"


if __name__ == "__main__":
    child = Child()
    print(child.who_am_i())               # child
    print(issubclass(Child, Parent))      # True
    print(isinstance(child, Child))       # True
    print(isinstance(child, Parent))      # True
    print(isinstance(object(), Parent))   # False
    # Parent() - can't create abstract class
atlassian-python-api 2 | 3 | from atlassian import Confluence 4 | 5 | confluence = Confluence(url="https://asc.ubsgroup.net", username="username", password="password") 6 | print(confluence) 7 | 8 | create_result = confluence.create_page(space="AD", title="remove me", body="checking confluence api") 9 | print(create_result) -------------------------------------------------------------------------------- /console/autocomplete-example.py: -------------------------------------------------------------------------------- 1 | in/env python 2 | 3 | # pip install argcomplete 4 | # .bashrc: 5 | # ```eval "$(register-python-argcomplete autocomplete-example.py)"``` 6 | import argparse 7 | from argcomplete.completers import EnvironCompleter 8 | 9 | def main(**args): 10 | pass 11 | 12 | if __name__ == '__main__': 13 | parser = argparse.ArgumentParser() 14 | parser.add_argument('operation', choices=['create', 'delete']).completer = ChoicesCompleter(('create', 'delete')) 15 | parser.add_argument('--timeout', choices=['5', '10', '15'], ).completer = EnvironCompleter 16 | argcomplete.autocomplete() 17 | 18 | args = parser.parse_args() 19 | main(**vars(args)) 20 | -------------------------------------------------------------------------------- /console/cut-text-file.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import subprocess 4 | import sys 5 | 6 | # print part of the file [from line] [to line], 7 | # bite part of text file 8 | 9 | def print_strings_from_file(path_to_file: str, line_begin: int, line_end: int): 10 | with open(path_to_file) as text_file: 11 | line_counter = 0 12 | for each_line in text_file: 13 | line_counter = line_counter + 1 14 | if line_begin <= line_counter <= line_end: 15 | print(each_line, end='') 16 | 17 | 18 | if __name__ == "__main__": 19 | if len(sys.argv) < 4: 20 | print("expected input parameters: ") 21 | sys.exit(1) 22 | if len(sys.argv) == 4: 23 | 
print_strings_from_file(sys.argv[1], int(sys.argv[2]), int(sys.argv[3])) 24 | 25 | -------------------------------------------------------------------------------- /console/download-file.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # download file via http and save it locally 4 | 5 | import requests 6 | from tqdm import tqdm 7 | 8 | def main(): 9 | url = "http://localhost:8808/published/resources/affin.zip.md5" 10 | for i in range(1,10): 11 | response = requests.get(url, stream=True) 12 | file_name = "affin.zip.md5"+str(i) 13 | with open(file_name, "wb") as handle: 14 | for data in tqdm(response.iter_content()): 15 | handle.write(data) 16 | print( file_name + " " + response.headers["Content-MD5"]) 17 | 18 | if __name__=='__main__': 19 | main() 20 | 21 | # f0b70d245eae21e9c46bd4b94cad41cc *affin-1482164837000.zip 22 | # 91372ab88e402460f2a832ef2c44a834 *affin-1484662020000.zip 23 | 24 | #1482164837000 25 | #1484662020000 -------------------------------------------------------------------------------- /console/download-zip.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # download file via http 4 | # ( save via copyfileobj ) 5 | 6 | import requests 7 | import shutil 8 | import time 9 | 10 | def main(): 11 | host="localhost" 12 | port=8808 13 | brand_name="mywirecard2go" 14 | file_name=brand_name+".zip" 15 | url = "http://%s:%i/published/resources/%s" % (host, port, file_name) 16 | for i in range(1,3): 17 | current_time = time.time() 18 | response = requests.get(url, stream=True) 19 | local_file_name = file_name+str(i) 20 | with open(local_file_name, "wb") as output_file: 21 | response.raw.decode_content = True 22 | shutil.copyfileobj(response.raw, output_file) 23 | print( local_file_name + " " + response.headers["Content-MD5"] ) 24 | print( time.time()-current_time) 25 | if __name__=='__main__': 26 | main() 27 | 28 | # 
# print input arguments
import sys  # was missing — the original raised NameError on sys.argv

for each_param in sys.argv:
    print(each_param)
import sys
import os

# print the last lines of a file (tail); main is hard-coded to the last 5 lines


def read_tail(file_path, length):
    """Return a list with the last `length` lines (newlines included) of file_path.

    Keeps at most `length` lines in memory while streaming the file.
    """
    last_lines = []
    with open(file_path) as data_file:
        for line in data_file:
            last_lines.append(line)
            if len(last_lines) > length:
                last_lines.pop(0)
    return last_lines


if __name__ == "__main__":
    # original checked len(sys.argv) < 1, which is never true because
    # argv[0] is always the script name — require a real argument instead
    if len(sys.argv) < 2:
        print("need to specify input file")
        sys.exit(1)

    file_name = sys.argv[1]
    if not os.path.isfile(file_name):
        raise Exception("file was not found: " + file_name)

    # Python 3 print() — the original used a Python 2 print statement
    for each_line in read_tail(file_name, 5):
        print(each_line[:-1])
#!/usr/bin/env python

# fuse two files line by line:
# given one file with lines and another with the respective lines,
# join them into tab-separated output (line counts should be equal)

import sys

if __name__ == "__main__":
    if len(sys.argv) < 3:
        print("expected parameters: {first file} {second file}")
        print("lines in file should be equals")
        # original fell through here and crashed with IndexError on sys.argv[1]
        sys.exit(1)

    # context managers guarantee both files are closed even on error
    with open(sys.argv[1]) as f1, open(sys.argv[2]) as f2:
        # zip stops at the shorter file, matching the documented
        # "lines should be equal" contract
        for f1_line, f2_line in zip(f1, f2):
            print("%s\t%s" % (f1_line[:-1], f2_line[:-1]))
import csv

# read sample.csv and print the data rows, skipping the header.
# NOTE(review): sample.csv in this repo is comma-separated; the original
# passed delimiter='\t', which made every row parse as a single field.
with open("sample.csv") as csv_file:
    reader = csv.reader(csv_file, delimiter=',')
    next(reader, None)  # skip the header row (replaces the is_header flag)
    for row in reader:
        print(row)
-------------------------------------------------------------------------------- /datetime/timestamp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import datetime 3 | import sys 4 | # expected input: 1558082585167 5 | d=datetime.datetime.fromtimestamp(int(sys.argv[1])/1000);print(str(d.day)+"."+str(d.month)+"."+str(d.year)+" "+str(d.hour)+":"+str(d.minute)+":"+str(d.second)) 6 | -------------------------------------------------------------------------------- /db-key-value/shelve-example.py: -------------------------------------------------------------------------------- 1 | import shelve 2 | 3 | 4 | class Example: 5 | def __init__(self, value): 6 | self.value = value 7 | 8 | def __str__(self): 9 | return self.value 10 | 11 | database = shelve.open("my-db-file") 12 | 13 | 14 | database["simple_string"] = "this is simple string into DB" 15 | database["int_value"] = 5 16 | database["complex_value"] = Example("this is simple object") 17 | 18 | database.close() 19 | 20 | # open existing 21 | db = shelve.open("my-db-file") 22 | print(db["simple_string"]) 23 | print(db["int_value"]) 24 | print(db["complex_value"]) 25 | 26 | # delete key 27 | del db["int_value"] 28 | 29 | # print all keys 30 | print(db.keys()) -------------------------------------------------------------------------------- /decorator.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python -tt 2 | class Wrapper(object): 3 | Reestr = list() 4 | 5 | def __init__(self, external_function): 6 | self.func = external_function 7 | Wrapper.Reestr.append(external_function) 8 | 9 | def __call__(self): 10 | print ">>> call:", self.func.__name__, " of ", [x.__name__ for x in Wrapper.Reestr] 11 | self.func() 12 | print "<<< call", self.func.__name__ 13 | 14 | @Wrapper 15 | def function_one(): 16 | print "output function_one" 17 | 18 | @Wrapper 19 | def function_two(): 20 | print "output function_two" 21 
# default parameter list is working unexpectedly !!!!
from typing import List, Optional  # List was used below but never imported


class AnalyzeResult:
    """ANTI-PATTERN demo: a mutable default argument.

    The ``list()`` default is evaluated once at definition time and shared
    by every call that omits ``variant_ids``.
    """
    # !!! don't do that, don't set default parameter list !!!!
    def __init__(self, delete: bool, product_type: str = "variant", product_id: int = 0, variant_ids: List = list()):
        self._delete = delete
        self._product_type = product_type
        self._product_id = product_id
        self._variant_ids = variant_ids


class AnalyzeResult:  # noqa: F811 — deliberate redefinition: the corrected version
    """Correct version: default to None and build a fresh list per instance."""
    def __init__(self, delete: bool, product_type: str = "variant", product_id: int = 0, variant_ids: Optional[List] = None):
        self._delete = delete
        self._product_type = product_type
        self._product_id = product_id
        self._variant_ids = variant_ids if variant_ids is not None else list()
values = [9, 7, 6, 3]
# inline conversion from list to dictionary (dict comprehension):
# map every positive value to its square
squares = {number: number ** 2 for number in values if number > 0}
# build a class at runtime with the three-argument form of type():
# type(name, bases, namespace)
cls = type(
    'my_class',
    (object,),
    {'__doc__': 'example of dynamically created class'},
)

print(cls)
print(cls.__doc__)
# quoted-printable encoding / decoding examples
import quopri

str1 = 'äé'
# encode: UTF-8 bytes of str1 -> quoted-printable bytes
# (the commented original line did the same inline)
# encoded = quopri.encodestring('äé'.encode('utf-8'))
encoded = quopri.encodestring(str1.encode('utf-8'))
print(encoded)

str2 = '=C3=A4=C3=A9'
# decode: quoted-printable -> raw UTF-8 bytes -> text
decoded_string = quopri.decodestring(str2)
print(decoded_string.decode('utf-8'))
import os  # was missing — os.environ is used below
from typing import Optional  # was missing — used in the annotation below


def parse_bool_value(str_value: str) -> Optional[bool]:
    """Map a textual flag to a bool.

    true/ok/yes -> True, false/ko/no -> False (case-insensitive, trimmed),
    anything else -> None (unrecognized).
    """
    normalized = str_value.strip().lower()
    if normalized in ("true", "ok", "yes"):
        return True
    if normalized in ("false", "ko", "no"):
        return False
    return None


# example: set an environment variable for the current process
os.environ["DISPLAY"] = ":2"
/exceptions/simple-extension.py: -------------------------------------------------------------------------------- 1 | class StorageException(Exception): 2 | def __init__(self, reason: str = ""): 3 | self.message = reason 4 | 5 | 6 | class ScreenshotException(Exception): 7 | def __init__(self, message): 8 | self.message = message 9 | super().__init__(f"{message}") 10 | 11 | -------------------------------------------------------------------------------- /execute-another-script.py: -------------------------------------------------------------------------------- 1 | # execute another script from current program 2 | os.system("my_script.py first_parameter second_parameter") 3 | -------------------------------------------------------------------------------- /fastapi/.s2i/environment: -------------------------------------------------------------------------------- 1 | MY_CUSTOM_VAR=hello from s2i -------------------------------------------------------------------------------- /fastapi/.s2iignore: -------------------------------------------------------------------------------- 1 | *.md 2 | !README.md -------------------------------------------------------------------------------- /fastapi/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8-ubi8 2 | 3 | RUN pip3 install fastapi 4 | 5 | WORKDIR /usr/src/app 6 | ADD . . 
7 | RUN pip install -r requirements.txt 8 | 9 | EXPOSE 8080 10 | CMD uvicorn simple-fastapi:app --host 0.0.0.0 --port 8080 11 | -------------------------------------------------------------------------------- /fastapi/README.md: -------------------------------------------------------------------------------- 1 | # OpenShift deployment 2 | ```sh 3 | oc new-app --list 4 | oc new-app --search python 5 | ``` 6 | 7 | ```sh 8 | GIT_REPO=https://github.com/cherkavi/python-deployment 9 | # app.py, requirements.txt - must be present 10 | GIT_BRANCH=main 11 | APP_NAME=python-test 12 | oc new-app --code "${GIT_REPO}#${GIT_BRANCH}" --name $APP_NAME 13 | # OCP_BASE_IMAGE=python:3.8-ubi8 14 | # oc new-app --code "${GIT_REPO}#${GIT_BRANCH}" --name $APP_NAME --docker-image $OCP_BASE_IMAGE 15 | 16 | ## todo 17 | # source to image 18 | # https://github.com/openshift/source-to-image/releases/tag/v1.3.2 19 | # s2i build . $DOCKER_IMAGE python-test-app --pull-policy never 20 | ``` -------------------------------------------------------------------------------- /fastapi/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi -------------------------------------------------------------------------------- /fastapi/simple-fastapi.py: -------------------------------------------------------------------------------- 1 | ## #webframework 2 | # uvicorn simple-fastapi:app --host 0.0.0.0 --port 8080 3 | 4 | from datetime import datetime 5 | from fastapi import FastAPI 6 | from pydantic import BaseModel 7 | 8 | # Create a FastAPI app 9 | app = FastAPI() 10 | 11 | # Define the input schema for the endpoint 12 | class Input(BaseModel): 13 | x1: float 14 | x2: float 15 | x3: float 16 | x4: float 17 | 18 | @app.get("/time") 19 | def get_time(): 20 | return datetime.now() 21 | 22 | @app.post("/echo") 23 | def echo(input: Input): 24 | return {input.x1, input.x2, input.x3, input.x4} 25 | 
-------------------------------------------------------------------------------- /file-extension.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | path = '/home/projects/sampledoc.docx' 4 | root, extension = os.path.splitext(path) 5 | 6 | print('Root:', root) 7 | print('extension:', extension) 8 | -------------------------------------------------------------------------------- /filesystem/brand-xml-clear.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | 4 | root_dir = "C:\\temp\\brands\\" 5 | for each_dir in os.listdir(root_dir): 6 | subfolder = root_dir + each_dir 7 | for subfolder_root, subfolder_dirs, subfolder_files in os.walk(subfolder): 8 | for each_dir in subfolder_dirs: 9 | candidate = subfolder + "\\" +each_dir 10 | if os.path.isdir(candidate): 11 | shutil.rmtree(candidate) 12 | for each_file in subfolder_files: 13 | if each_file != 'brand.xml': 14 | full_file_name = subfolder_root + "\\" + each_file 15 | os.unlink(full_file_name) 16 | 17 | 18 | print ( "done" ) 19 | 20 | -------------------------------------------------------------------------------- /filesystem/calculate-files.py: -------------------------------------------------------------------------------- 1 | ''' 2 | calculate amount of files inside folder 3 | ''' 4 | import sys 5 | import os 6 | import time 7 | import datetime 8 | 9 | def main(argv): 10 | while True: 11 | dt = datetime.datetime.fromtimestamp(time.time()) 12 | print ( dt.strftime('%Y-%m-%dT%H:%M:%S'), " ", len(os.listdir(argv[0])) ) 13 | time.sleep(1) 14 | 15 | if __name__ == "__main__": 16 | main(sys.argv[1:]) -------------------------------------------------------------------------------- /filesystem/copy-list-of-file-to-dest.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import shutil 4 | 5 | 6 | def main(path_to_list, 
destination_folder): 7 | """ copy list of file into destination folder 8 | :param path_to_list: text file with list of files 9 | :param destination_folder: full path to destination folder 10 | """ 11 | with open(path_to_list) as input_file: 12 | for each_line in input_file: 13 | file_candidate = each_line.strip() 14 | # is file exists, existance of file 15 | if os.path.isfile(file_candidate): 16 | print("%s %s" % (destination_folder,file_candidate)) 17 | shutil.copy(file_candidate, destination_folder) 18 | 19 | 20 | if __name__ == '__main__': 21 | if len(sys.argv) <= 2: 22 | print("input parameters should be: ") 23 | exit(1) 24 | main(sys.argv[1], sys.argv[2]) 25 | -------------------------------------------------------------------------------- /filesystem/cp.py: -------------------------------------------------------------------------------- 1 | import shutil;shutil.copyfile('/tmp/1.txt', '/tmp/2.txt') 2 | -------------------------------------------------------------------------------- /filesystem/create-remove-folder.py: -------------------------------------------------------------------------------- 1 | import os 2 | # pip install pytest-shutil 3 | import shutil 4 | import sys 5 | 6 | 7 | output_folder = "test_folder/test1" 8 | 9 | # folder check existence 10 | if os.path.exists(output_folder): 11 | # checking is directory, isdirectory ? 
12 | # if not os.path.isdir(control_folder): 13 | 14 | # remove folder, delete folder 15 | # shutil.os.remove(output_folder) 16 | shutil.rmtree(output_folder) 17 | 18 | # create folder 19 | os.makedirs(output_folder) 20 | 21 | 22 | -------------------------------------------------------------------------------- /filesystem/find-delete-by-mask.py: -------------------------------------------------------------------------------- 1 | def delete(self, object_path_in_storage: str) -> bool: 2 | try: 3 | for each_file in glob.glob(f"/tmp/temp_folder/file_nam*"): 4 | os.remove(each_file) 5 | return True 6 | except: 7 | return False 8 | -------------------------------------------------------------------------------- /filesystem/memory-file.py: -------------------------------------------------------------------------------- 1 | import StringIO 2 | # temp file memory file 3 | memfile = StringIO.StringIO() 4 | 5 | try: 6 | memfile.write("hello string") 7 | memfile.seek(0) 8 | print(memfile.read()) 9 | finally: 10 | memfile.close() 11 | -------------------------------------------------------------------------------- /filesystem/old-file-list.py: -------------------------------------------------------------------------------- 1 | from datetime import timedelta 2 | from datetime import datetime 3 | import time 4 | import os 5 | 6 | if __name__=='__main__': 7 | folder = "c:\\temp\\" 8 | delta = timedelta(days=30) 9 | 10 | for each_file in os.listdir(folder): 11 | path_to_file = os.path.join(folder,each_file) 12 | file_timestamp = datetime.fromtimestamp(os.stat(path_to_file).st_ctime) 13 | if file_timestamp+delta", view_func=self.echo) 16 | self._logger = self._app.logger 17 | self._logger.setLevel(logging.DEBUG) 18 | 19 | def run(self): 20 | self._app.run(port=self._port) 21 | 22 | @property 23 | def app(self): 24 | return self._app 25 | 26 | def echo(self, input_message) -> str: 27 | self._logger.info("test request") 28 | return jsonify(message=f"message is: {input_message}!") 29 
| 30 | 31 | if __name__ == "__main__": 32 | FlaskApp(DEFAULT_APP_NAME).run() 33 | -------------------------------------------------------------------------------- /flask/echo.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | 3 | app = Flask(__name__) 4 | app.config.from_pyfile('/path/to/file.cfg') 5 | 6 | @app.route("/") 7 | def hello(): 8 | return "hello world!" 9 | 10 | if __name__ == "__main__": 11 | app.run(host="0.0.0.0") 12 | -------------------------------------------------------------------------------- /flask/global-context.md: -------------------------------------------------------------------------------- 1 | ```python 2 | app=Flask("new_app") 3 | app.db_configuration = create_db_configuration() 4 | ``` 5 | 6 | ```python 7 | from flask import current_app 8 | 9 | def get_job_status(job_id: str) -> JobStatus: 10 | current_app.db_configuration 11 | ... 12 | ``` 13 | -------------------------------------------------------------------------------- /flask/inline-parameter.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from flask import request 3 | from flask import jsonify 4 | import os 5 | import uuid 6 | import jsonschema 7 | import subprocess 8 | 9 | # example of triggering app 10 | # http://localhost:3000/?message=PATH 11 | 12 | app = Flask(__name__) 13 | # api = Api(app=app) 14 | 15 | 16 | @app.route("/", methods=['GET']) 17 | def get_simulation_job(job_id): 18 | return jsonify(message="job id is: {}!".format(job_id)) 19 | 20 | 21 | @app.route("/simulation", methods=['POST']) 22 | def create_simulation_job(): 23 | # post request read data 24 | job_request = request.get_json(silent=True) 25 | job_id = uuid.uuid1() 26 | return job_id 27 | 28 | 29 | @app.route("/scenario-extraction/", methods=['GET']) 30 | def get_scenario_extraction_job(job_id): 31 | # return code 32 | return jsonify(message="some message %s" % 
(str(job_id))), 404 33 | 34 | 35 | if __name__ == "__main__": 36 | app.run(host="0.0.0.0", port=3000) 37 | -------------------------------------------------------------------------------- /flask/test_app.py: -------------------------------------------------------------------------------- 1 | from flask_testing import TestCase 2 | 3 | from flask import Flask, jsonify 4 | from app import FlaskApp 5 | 6 | class MyTest(TestCase): 7 | 8 | def create_app(self) -> Flask: 9 | self.flask_app = FlaskApp("test") 10 | self.flask_app.app.testing = True 11 | return self.flask_app.app 12 | 13 | def test_server_up_and_running(self): 14 | # given 15 | control_string: str = "test1" 16 | with self.flask_app.app.test_client() as test_client: 17 | 18 | # when 19 | response = test_client.get(f"/echo/{control_string}") 20 | 21 | # then 22 | assert response.status_code == 200 23 | assert response.json["message"] is not None 24 | assert response.json["message"].find(control_string) > 0 25 | print(response) 26 | -------------------------------------------------------------------------------- /flask/var-printer.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from flask import request 3 | 4 | import subprocess 5 | 6 | # example of triggering app 7 | # http://localhost:3000/?message=PATH 8 | 9 | app = Flask(__name__) 10 | 11 | @app.route("/") 12 | def execute_application(): 13 | # job_request = request.get_json(silent=True) 14 | remote_request_argument = request.args.get("message") 15 | # result = subprocess.check_output(["tail /home/projects/current-task/results.output"], shell=True).decode("utf-8").splitlines() 16 | result = subprocess.check_output(["echo $"+remote_request_argument], shell=True).splitlines() 17 | return "
".join(result) 18 | return ("result of your request is %s " % result) 19 | 20 | if __name__ == "__main__": 21 | app.run(host="0.0.0.0", port=3000) 22 | -------------------------------------------------------------------------------- /flatmap.py: -------------------------------------------------------------------------------- 1 | data = [ [1,2,3], [4,5,6], [7,8,9] ] 2 | 3 | print( [each_element for each_line in data for each_element in each_line] ) 4 | -------------------------------------------------------------------------------- /for-else.py: -------------------------------------------------------------------------------- 1 | # for else 2 | for each in range(5): 3 | print(each) 4 | else: 5 | print("else without break") 6 | 7 | 8 | for each in range(5): 9 | if each==3: 10 | break 11 | print(each) 12 | else: 13 | print("else for break will never be") 14 | -------------------------------------------------------------------------------- /format-output/format.py: -------------------------------------------------------------------------------- 1 | # format string, print formatted values 2 | a = 10 3 | b = 20 4 | c = "hello" 5 | print("-----") 6 | print( f"{a} {{ curly braces escape example}} {b}\t{c.upper()}" ) 7 | print( "{} {}\t{}".format(a,b,c) ) 8 | 9 | print("-----") 10 | print( " {0:5d} {1:10d} \t {2:10s}".format(a,b,c) ) 11 | print( " {val2:5d} {val1:10d} \t {val3:10s}".format(val1=a,val2=b,val3=c) ) 12 | 13 | # string formatting 14 | print("-----") 15 | print(f" {a:_<5} {c:~^15} {b:0>10}") 16 | 17 | 18 | print("-----") 19 | s="hello" 20 | print(">>%20s<<" % (s)) 21 | 22 | # f-string conversation character 23 | class Example: 24 | def __str__(self) -> str: 25 | return "show str" 26 | def __repr__(self) -> str: 27 | return "show repr юникод" 28 | 29 | print(f"{Example()!r}") # __repr__ 30 | print(f"{Example()!s}") # __str__ 31 | print(f"{Example()!a}") # like ascii (escapes unicode) 32 | 
-------------------------------------------------------------------------------- /formats.md: -------------------------------------------------------------------------------- 1 | xml yaml json toml formats 2 | 3 | ```python 4 | import yaml 5 | import json 6 | import dicttoxml 7 | import toml 8 | import xml 9 | ``` 10 | 11 | 12 | ```python 13 | values=toml.load(path_to_file) 14 | values=json.load(path_to_file) 15 | 16 | yaml.safe_dump(values) 17 | dicttoxml.dicttoxml(values) 18 | json.safe_dumps(json.loads(json_dumps(json_string_value)) 19 | ``` 20 | -------------------------------------------------------------------------------- /formats.sh: -------------------------------------------------------------------------------- 1 | # json to ... 2 | git clone https://github.com/cherkavi/json-toolkit 3 | -------------------------------------------------------------------------------- /formatted-file/example.properties: -------------------------------------------------------------------------------- 1 | [default] 2 | template=/home/projects/temp/email-sender/message-template.j2 3 | cv=/home/projects/temp/email-sender/cv-cherkashyn.pdf 4 | -------------------------------------------------------------------------------- /formatted-file/free_text/test-data-01.txt: -------------------------------------------------------------------------------- 1 | just an example of 'how to use it'.[] Or, maybe, even more.[ab] 2 | this is.[ab] new file with markers.[bc] but not for all lines.[ab] 3 | that markers.[] 4 | are present.[ab] some lines[bc] still doesn't have them. 
5 | and this text.[bc][cd][cd] 6 | and here I just forget to add dot [ab] 7 | two markers in the row.[bc][ab] 8 | at the end of the file -------------------------------------------------------------------------------- /formatted-file/free_text/test-data-02.txt: -------------------------------------------------------------------------------- 1 | filtered: with precedence [ab][cd][de][ef][] 2 | this is[ab] internal data[bc] in the file. 3 | another[ab] internal data[bc] from file. [] 4 | that should be considered. 5 | [ab] this line has mistake[ab] in marker position[bc] 6 | as [bc]necessary.[ab][bc] But not a mandatory[bc] -------------------------------------------------------------------------------- /formatted-file/free_text/test-data-03.txt: -------------------------------------------------------------------------------- 1 | remove last,[ab] comma, [bc][last_comma] 2 | remove last,[ab] comma,[bc][last_comma] 3 | not a last,[ab] comma,[bc] in the line [last_comma] -------------------------------------------------------------------------------- /formatted-file/free_text/test-data-04.txt: -------------------------------------------------------------------------------- 1 | - my first data: [prefix] option one,[ab] option two,[bc] option three[cd][last_comma] 2 | - my second data: [prefix] just an information[ab] 3 | - my third data: [prefix] option three,[ab] option four 4 | - my fourth data: [prefix] just a text [] 5 | -------------------------------------------------------------------------------- /formatted-file/free_text/test-data-05.txt: -------------------------------------------------------------------------------- 1 | remove last,[ab] comma, [bc][last_comma] [control_one] 2 | remove last version two,[ab] comma,[bc][last_comma] [control_two] 3 | not a last,[ab] comma,[bc] in the line [last_comma] -------------------------------------------------------------------------------- /formatted-file/free_text/test-data-06.txt: 
-------------------------------------------------------------------------------- 1 | line should be shown[ab] if no dc tag [!dc] 2 | line should be shown[aaa] if no ab tag [!ab] -------------------------------------------------------------------------------- /formatted-file/ini/example.ini: -------------------------------------------------------------------------------- 1 | [local] 2 | property1 = this is first default properties 3 | property2 = this is second default properties 4 | 5 | [remote] 6 | another-value = another value -------------------------------------------------------------------------------- /formatted-file/ini/ini.py: -------------------------------------------------------------------------------- 1 | import configparser 2 | 3 | config = configparser.ConfigParser() 4 | # also "write" method exists 5 | config.read("example.properties") 6 | 7 | print(config.sections()) 8 | print(config["remote"]["another-value"]) 9 | print(config["without-section-property"]) -------------------------------------------------------------------------------- /formatted-file/json/example.json: -------------------------------------------------------------------------------- 1 | { 2 | "maps": [ 3 | { 4 | "id": "map1", 5 | "category": "0" 6 | }, 7 | { 8 | "id": "map2", 9 | "category": "0" 10 | } 11 | ], 12 | "om_points": "value", 13 | "parameters": { 14 | "id": "valore" 15 | } 16 | } -------------------------------------------------------------------------------- /formatted-file/json/json.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pprint import pprint 3 | 4 | with open('example.json') as f: 5 | data = json.load(f) 6 | 7 | # for unix don't do this: 8 | # data = json.load(open('example.json')) 9 | 10 | pprint(data) 11 | 12 | print(data["maps"][0]["id"]) 13 | -------------------------------------------------------------------------------- /formatted-file/properties/example.properties: 
-------------------------------------------------------------------------------- 1 | property1 = this is first default properties 2 | property2 = this is second default properties 3 | another-value = another value -------------------------------------------------------------------------------- /formatted-file/properties/properties.py: -------------------------------------------------------------------------------- 1 | from jproperties import Properties 2 | # only for Python2 3 | 4 | p = Properties() 5 | with open("example.properties", "r") as f: 6 | p.load(f, "utf-8") 7 | print(p["property1"]) 8 | print(p["property2"]) 9 | print(p["another-value"]) 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /formatted-file/properties/properties2.py: -------------------------------------------------------------------------------- 1 | from jproperties import Properties 2 | 3 | p = Properties() 4 | with open("example.properties", "r") as f: 5 | p.load(f, "utf-8") 6 | print(p["property1"]) 7 | print(p["property2"]) 8 | print(p["another-value"]) 9 | 10 | 11 | 12 | # Python3 example 13 | import configparser 14 | parser = configparser.RawConfigParser(default_section="default") 15 | parser.read(path_to_settings_file) 16 | return (parser.get("default", "template"), parser.get("default", "cv")) 17 | 18 | -------------------------------------------------------------------------------- /formatted-file/txt/read-txt-file.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | if __name__=='__main__': 4 | with open(sys.argv[1], "r") as file: 5 | for each_line in file: 6 | word1, word2 = each_line.split(" ")[:2] 7 | print(word1, word2.strip()) -------------------------------------------------------------------------------- /formatted-file/txt/sample.txt: -------------------------------------------------------------------------------- 1 | this is 2 | one test 3 | file for reading 
-------------------------------------------------------------------------------- /formatted-file/xml/example.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /formatted-file/xml/xml-dom-read.py: -------------------------------------------------------------------------------- 1 | from xml.dom import minidom 2 | 3 | xmldoc = minidom.parse('example.xml') 4 | 5 | itemlist = xmldoc.getElementsByTagName('item') 6 | print(len(itemlist)) 7 | print(itemlist[0].attributes['name'].value) 8 | 9 | for s in itemlist: 10 | print(s.attributes['name'].value) -------------------------------------------------------------------------------- /formatted-file/xml/xml-read.py: -------------------------------------------------------------------------------- 1 | import xmltodict 2 | 3 | with open('example.xml') as fd: 4 | doc = xmltodict.parse(fd.read()) 5 | 6 | print(doc["data"]["items"]["@size"]) 7 | print(doc["data"]["items"]["item"][2]) 8 | -------------------------------------------------------------------------------- /formatted-file/xml/xml-sax-read.py: -------------------------------------------------------------------------------- 1 | import xml.sax 2 | 3 | # create an XMLReader 4 | parser = xml.sax.make_parser() 5 | # turn off namepsaces 6 | parser.setFeature(xml.sax.handler.feature_namespaces, 0) 7 | 8 | # override the default ContextHandler 9 | # Handler = MovieHandler() 10 | # parser.setContentHandler( Handler ) 11 | 12 | class Handler(xml.sax.ContentHandler): 13 | def __init__(self): 14 | print("__init__") 15 | 16 | def startElement(self, tag, attributes): 17 | print("start-tag: %s attributes: %s " % (tag, attributes)) 18 | 19 | def endElement(self, tag): 20 | print(" end -tag %s " % (tag)) 21 | 22 | def characters(self, content): 23 | print(" content : " + content) 24 | 25 | 26 | parser.setContentHandler(Handler()) 27 | 
xml_object = parser.parse("example.xml") 28 | print(xml_object) -------------------------------------------------------------------------------- /formatted-file/yaml/example.yaml: -------------------------------------------------------------------------------- 1 | mysql: 2 | host: localhost 3 | user: root 4 | password: my secret password 5 | db: write-math 6 | other: 7 | preprocessing_queue: 8 | - preprocessing.scale_and_center 9 | - preprocessing.dot_reduction 10 | - preprocessing.connect_lines 11 | use_anonymous: yes -------------------------------------------------------------------------------- /formatted-file/yaml/yaml-read.py: -------------------------------------------------------------------------------- 1 | # pip search yaml 2 | # pip install pyyaml 3 | import yaml 4 | 5 | with open("example.yaml", 'r') as ymlfile: 6 | cfg = yaml.load(ymlfile) 7 | 8 | print(cfg['mysql']['password']) 9 | -------------------------------------------------------------------------------- /formatted-file/yaml/yaml_with_comments/.gitignore: -------------------------------------------------------------------------------- 1 | test-data-01-filtered.yaml 2 | -------------------------------------------------------------------------------- /formatted-file/yaml/yaml_with_comments/pyyaml_include.py: -------------------------------------------------------------------------------- 1 | # pip install PyYAML pyyaml-include 2 | # 3 | import yaml 4 | from yamlinclude import YamlIncludeConstructor 5 | 6 | 7 | def read_yaml_with_comments(file_path): 8 | YamlIncludeConstructor.add_to_loader_class(loader_class=yaml.Loader) 9 | 10 | with open(file_path, "r") as file: 11 | data = yaml.load(file, Loader=yaml.Loader) 12 | 13 | return data 14 | 15 | 16 | if __name__ == "__main__": 17 | file_path = "test-data-01.yaml" 18 | yaml_data = read_yaml_with_comments(file_path) 19 | print(yaml_data) 20 | -------------------------------------------------------------------------------- 
/formatted-file/yaml/yaml_with_comments/test-data-01.yaml: -------------------------------------------------------------------------------- 1 | key1: just a message # key1 2 | key2: # key2 3 | - one 4 | - two # key23.2 5 | - three # key23.3 6 | key3: 7 | key31: four # key31 8 | key32: 9 | key321: six 10 | key322: seven # key322.7 11 | key323: eight 12 | key33: # key33 13 | - one # key33.1 14 | - two # key33.2 15 | - three # key33.3 16 | key4: # key4 17 | - key411: one # key41.1 18 | key412: two # key41.2 19 | key413: three # key41.3 20 | - key421: one # key42.1 21 | key422: two # key42.2 22 | key423: three # key42.3 23 | - key431: one 24 | key432: two 25 | key433: three 26 | -------------------------------------------------------------------------------- /ftp/ftp-server-start.sh: -------------------------------------------------------------------------------- 1 | python3 -m pip install python-ftp-server 2 | python3 -m python_ftp_server -d "./" 3 | -------------------------------------------------------------------------------- /function-by-name.py: -------------------------------------------------------------------------------- 1 | module = __import__('name_of_module') 2 | func = getattr(module, 'function_name_inside_module') 3 | func() 4 | -------------------------------------------------------------------------------- /generator.py: -------------------------------------------------------------------------------- 1 | print("example of inline generator") 2 | inline_generator = (str(each) for each in range(1,5)) 3 | for i in inline_generator: 4 | print(i) 5 | 6 | print("example of generator with 'yield'") 7 | def my_generator(limit): 8 | values = range(1, limit+5) 9 | for x in values: 10 | if x%2==0: 11 | yield x 12 | for i in my_generator(10): 13 | print(i) -------------------------------------------------------------------------------- /generator/generator_wrapper.py: -------------------------------------------------------------------------------- 1 | class 
DataApiProxy(object): 2 | def __init__(self): 3 | pass 4 | 5 | def __iter__(self): 6 | return self 7 | 8 | def __next__(self): 9 | return self.next() 10 | 11 | def next(self): 12 | # raise StopIteration() 13 | return 1 14 | -------------------------------------------------------------------------------- /generator/read-lines-from-filelist.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | def read_lines(list_of_files): 4 | def _read_lines(): 5 | for each_file in list_of_files: 6 | with open(each_file) as current_file: 7 | yield from current_file 8 | yield from enumerate(_read_lines()) 9 | 10 | if __name__ == "__main__": 11 | for each_line in read_lines(sys.argv[1:]): 12 | print(each_line) 13 | 14 | -------------------------------------------------------------------------------- /generic-function.py: -------------------------------------------------------------------------------- 1 | T = TypeVar('T') 2 | def get_value_or_else(parse_result: ParseResult, argument_name: str, default_value: T) -> T: 3 | return parse_result[argument_name] if argument_name in parse_result and parse_result[argument_name] else default_value 4 | 5 | -------------------------------------------------------------------------------- /git/git-collaboration.py: -------------------------------------------------------------------------------- 1 | # pip install GitPython 2 | 3 | path_to_git:str="/home/projects/python-utilities" 4 | git_repo=git.Repo(path_to_git) 5 | 6 | git_repo.heads 7 | 8 | git_repo.active_branch 9 | 10 | git_repo.remotes() 11 | -------------------------------------------------------------------------------- /global_module.py: -------------------------------------------------------------------------------- 1 | import __builtin__ 2 | __builtin__.my_variable = 1 -------------------------------------------------------------------------------- /hash256.py: 
-------------------------------------------------------------------------------- 1 | import sys 2 | import hashlib 3 | my_string=f"Ashley Furniture{sys.argv[1]}" 4 | print(hashlib.sha256(my_string.encode()).hexdigest()) 5 | 6 | -------------------------------------------------------------------------------- /hashmd5.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import hashlib 3 | from typing import List 4 | 5 | parameters:List[str] = list(filter(lambda x: len(x.strip())>0, sys.argv[1:])) 6 | 7 | if len(parameters)==1: 8 | print(hashlib.md5(f"Ashley Furniture{parameters[0]}".encode("utf-8")).hexdigest()) 9 | exit(0) 10 | 11 | if len(parameters)==2: 12 | print(hashlib.md5(f"{parameters[0]}{parameters[1]}".encode("utf-8")).hexdigest()) 13 | exit(0) 14 | 15 | -------------------------------------------------------------------------------- /html-scraping/README.md: -------------------------------------------------------------------------------- 1 | # tools for reading html from remote urls, html parsing, parse html 2 | > take a look into current_project/xing 3 | ## [scrapy](https://scrapy.org/) 4 | ## [beautiful soup](https://realpython.com/beautiful-soup-web-scraper-python/) 5 | * https://www.freecodecamp.org/news/web-scraping-python-tutorial-how-to-scrape-data-from-a-website/ 6 | 7 | -------------------------------------------------------------------------------- /html-scraping/binary-html/beautifulsoup.py: -------------------------------------------------------------------------------- 1 | # print html content as a text in console 2 | # from binary html resource 3 | # 4 | import requests 5 | from bs4 import BeautifulSoup 6 | import sys 7 | 8 | def parse_html(address:str): 9 | response = requests.get(address) 10 | binary_content = response.content 11 | 12 | # Parse the binary content using Beautiful Soup 13 | soup = BeautifulSoup(binary_content, "html.parser") 14 | 15 | # soup.title.text 16 | # ??? 
elements = soup.find_all("li", xpath="/html/body/div[1]/div[1]/div/div/div/div/div/div[2]/div[8]/div/div/ul") 17 | 18 | print(soup.find("html")) 19 | 20 | if __name__=='__main__': 21 | if len(sys.argv)<2: 22 | print("provide target html") 23 | sys.exit(1) 24 | parse_html(sys.argv[1]) 25 | -------------------------------------------------------------------------------- /html-scraping/lxml/curl-output-html-parser-example.sh: -------------------------------------------------------------------------------- 1 | for each_page in $(seq 1 105) 2 | do 3 | echo $each_page 4 | echo "\n#$each_page#\n" >> strategy-book.txt 5 | curl --silent "http://loveread.ec/read_book.php?id=66258&p=$each_page" | iconv --from-code WINDOWS-1251 --to-code UTF-8 | python3 curl-output-html-parser.py "/html/body/table/tr[2]/td/table/tr/td[2]/div[3]" >> strategy-book.txt 6 | done 7 | -------------------------------------------------------------------------------- /html-scraping/lxml/parse_page.py: -------------------------------------------------------------------------------- 1 | from lxml import html 2 | 3 | page = html.parse("http://www.mctrek.de/bekleidung-unisex-herren/wintersport-skibekleidung/jacken/icepeak-kurt-wintersportjacke-herren_4047336") 4 | # list_of_size = page.xpath('//*[@id="wk_addItem"]/option/text()') 5 | # print(list_of_size) 6 | 7 | list_of_size = page.xpath('//*[@id="wk_addItem"]') 8 | print(list_of_size[0].value_options) 9 | -------------------------------------------------------------------------------- /html-scraping/lxml/parse_page_broken_html.py: -------------------------------------------------------------------------------- 1 | from lxml import html 2 | import requests 3 | 4 | page = requests.get("http://spys.one/free-proxy-list/UA/") 5 | tree = html.fromstring(page.content) 6 | # for XPath remove all "" elements 7 | addresses = tree.xpath("/html/body/table[2]/tr[4]/td/table/tr[*]/td[1]/font[2]") 8 | for each_address in addresses: 9 | print(each_address.text) 10 | 
#!/usr/bin/env python
# Minimal HTTP service: every GET on "/" returns the current server
# timestamp formatted as yyyymmddHHMMSS.
import tornado.escape
import tornado.web
import tornado.ioloop
import time
import sys


class GetCurrentTimestamp(tornado.web.RequestHandler):
    def get(self):
        # Format at request time so each call reflects "now".
        self.write(time.strftime("%Y%m%d%H%M%S"))


application = tornado.web.Application([(r"/", GetCurrentTimestamp), ])

if __name__ == "__main__":
    # Port comes from the first CLI argument; fall back to 9993.
    port = sys.argv[1] if len(sys.argv) > 1 else 9993
    application.listen(port)
    tornado.ioloop.IOLoop.instance().start()
application for Archi editor for opening files or command execution from "documentation" tab on elements 2 | 3 | ![schema](https://i.postimg.cc/4yv5NkBP/http-executor.png) 4 | -------------------------------------------------------------------------------- /http/current-date-time.python: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import tornado.escape 3 | import tornado.web 4 | import tornado.ioloop 5 | import time 6 | import sys 7 | 8 | class GetCurrentTimestamp(tornado.web.RequestHandler): 9 | def get(self): 10 | response=time.strftime("%Y%m%d%H%M%S") 11 | self.write(response) 12 | 13 | application=tornado.web.Application([(r"/",GetCurrentTimestamp),]) 14 | 15 | if __name__=="__main__": 16 | if len(sys.argv)>1: 17 | application.listen(sys.argv[1]) 18 | else: 19 | application.listen(9993) 20 | tornado.ioloop.IOLoop.instance().start() 21 | 22 | -------------------------------------------------------------------------------- /http/download-zip.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import requests 3 | import shutil 4 | import time 5 | 6 | def main(): 7 | host="localhost" 8 | port=8808 9 | brand_name="mywcard2go" 10 | file_name=brand_name+".zip" 11 | url = "http://%s:%i/published/resources/%s" % (host, port, file_name) 12 | for i in range(1,3): 13 | current_time = time.time() 14 | response = requests.get(url, stream=True) 15 | local_file_name = file_name+str(i) 16 | with open(local_file_name, "wb") as output_file: 17 | response.raw.decode_content = True 18 | shutil.copyfileobj(response.raw, output_file) 19 | print( local_file_name + " " + response.headers["Content-MD5"] ) 20 | print( time.time()-current_time) 21 | if __name__=='__main__': 22 | main() 23 | 24 | # f0b70d245eae21e9c46bd4b94cad41cc *affin-1482164837000.zip 25 | # 91372ab88e402460f2a832ef2c44a834 *affin-1484662020000.zip 26 | 27 | #1482164837000 28 | 
#1484662020000 -------------------------------------------------------------------------------- /http/download.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import requests 3 | from tqdm import tqdm 4 | 5 | def main(): 6 | url = "http://localhost:8808/published/resources/affin.zip.md5" 7 | for i in range(1,10): 8 | response:requests.Response = requests.get(url, stream=True) 9 | file_name = "affin.zip.md5"+str(i) 10 | with open(file_name, "wb") as handle: 11 | for data in tqdm(response.iter_content()): 12 | handle.write(data) 13 | print( file_name + " " + response.headers["Content-MD5"]) 14 | 15 | if __name__=='__main__': 16 | main() 17 | 18 | # f0b70d245eae21e9c46bd4b94cad41cc *affin-1482164837000.zip 19 | # 91372ab88e402460f2a832ef2c44a834 *affin-1484662020000.zip 20 | 21 | #1482164837000 22 | #1484662020000 23 | -------------------------------------------------------------------------------- /http/download2.py: -------------------------------------------------------------------------------- 1 | def download_file(url): 2 | local_filename = url.split('/')[-1] 3 | with requests.get(url, stream=True) as r: 4 | r.raise_for_status() 5 | with open(local_filename, 'wb') as f: 6 | for chunk in r.iter_content(chunk_size=8192): 7 | # If you have chunk encoded response uncomment if 8 | # and set chunk_size parameter to None. 
# Probe a WireMock admin endpoint and report whether it answers 200.
# Fixed from the original: "//" is not a Python comment (SyntaxError),
# urllib2 is Python-2-only, and Exception objects have no ".message"
# attribute in Python 3; stray ";" removed.
from urllib.request import urlopen

try:
    url_response = urlopen('http://ecsd0010072c.epam.com:9999/__admin/')
    if url_response.getcode() != 200:
        print(" return code is " + str(url_response.getcode()))
    else:
        print("ok")
        # print(dir(url_response))
except Exception as e:
    print("no connection " + str(e))
# print(url_response)
RequestException 5 | 6 | s = requests.Session() 7 | 8 | # authentication 9 | # s.auth = (USER_NAME, PASSWORD) 10 | 11 | retries = Retry(total=5, 12 | backoff_factor=0.1, 13 | status_forcelist=[ 500, 502, 503, 504 ]) 14 | 15 | s.mount('http://', HTTPAdapter(max_retries=retries)) 16 | 17 | try: 18 | s.get('http://httpstat.us/500') 19 | except RequestException as ex: 20 | print(ex) 21 | -------------------------------------------------------------------------------- /http/tornado-get-params.py: -------------------------------------------------------------------------------- 1 | app = tornado.web.Application([(r"/file/([a-zA-Z\-0-9\.:,/_]+)", FileHandler, dict(folder=folder)),]) 2 | 3 | class FileHandler(tornado.web.RequestHandler): 4 | def get(self, relative_path): 5 | print(relative_path) 6 | -------------------------------------------------------------------------------- /http/tornado-upload.py: -------------------------------------------------------------------------------- 1 | # uploading file 2 | import tornado.web 3 | import tornado.ioloop 4 | 5 | MB = 1024 * 1024 6 | GB = 1024 * MB 7 | TB = 1024 * GB 8 | 9 | MAX_STREAMED_SIZE = 1 * GB 10 | 11 | 12 | @tornado.web.stream_request_body 13 | class MainHandler(tornado.web.RequestHandler): 14 | 15 | def initialize(self): 16 | print("start upload") 17 | 18 | def prepare(self): 19 | self.f = open("test.png", "wb") 20 | self.request.connection.set_max_body_size(MAX_STREAMED_SIZE) 21 | 22 | def post(self): 23 | print("upload completed") 24 | self.f.close() 25 | 26 | def put(self): 27 | print("upload completed") 28 | self.f.close() 29 | 30 | def data_received(self, data): 31 | self.f.write(data) 32 | 33 | 34 | if __name__ == "__main__": 35 | application = tornado.web.Application([ 36 | (r"/", MainHandler), 37 | ]) 38 | application.listen(7777) 39 | tornado.ioloop.IOLoop.instance().start() 40 | -------------------------------------------------------------------------------- /http/web.py: 
-------------------------------------------------------------------------------- 1 | # pip install webb 2 | 3 | from webb import webb 4 | 5 | http_address = "http://mail.ru" 6 | 7 | webb.get_ip(http_address) 8 | webb.get_whois_data(http_address) 9 | webb.ping(http_address) 10 | webb.traceroute(http_address) 11 | webb.clean_page( webb.download_page(http_address) ) 12 | 13 | # webb.web_crawl(http_address) -------------------------------------------------------------------------------- /images/image-operations.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | 3 | image = Image.open('1.jpg') 4 | # image.show() 5 | # image.save('1.png') 6 | image.thumbnail((50,50)) 7 | image.rotate(90).save('2.jpg') 8 | -------------------------------------------------------------------------------- /import/import-module/import_module.py: -------------------------------------------------------------------------------- 1 | # import whole module ( point out to __init__.py) 2 | import some_module as ext_module 3 | 4 | if __name__=='__main__': 5 | ext_module.custom_echo() 6 | ext_module.custom_print() -------------------------------------------------------------------------------- /import/import-module/some_module/__init__.py: -------------------------------------------------------------------------------- 1 | # import some function from file1 and make it visible outside 2 | from some_module.file1 import custom_print 3 | # make visible some function from current folder 4 | from some_module.file2 import custom_echo 5 | -------------------------------------------------------------------------------- /import/import-module/some_module/file1.py: -------------------------------------------------------------------------------- 1 | def custom_print(): 2 | print("print function from module") 3 | 4 | def another_not_accessible_outside(): 5 | print("not visible function from outside") 
# Fixed from the original: argparse was used (ArgumentTypeError,
# ArgumentParser) but never imported, so the script failed with
# NameError on every run.
import argparse


def str2bool(value):
    """Parse a human-friendly boolean CLI value.

    Accepts yes/no, true/false, t/f, y/n, 1/0 in any case; raises
    argparse.ArgumentTypeError otherwise so argparse prints a clean
    usage error instead of a traceback.
    """
    if value.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif value.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value is expected.')


if __name__ == "__main__":
    # input parameters
    parser = argparse.ArgumentParser(description='common description for program')
    parser.add_argument('--rootFolder',
                        help='string argument example',
                        required=True)
    parser.add_argument('--dryRun',
                        help="boolean argument example ",
                        required=False, type=str2bool, default="true")
    args = parser.parse_args()
    # NOTE(review): main() is not defined in this snippet — it must be
    # provided by the embedding script.
    main(args.rootFolder, args.dryRun)
import jaydebeapi
import jpype
import os

# Path to the MySQL JDBC driver jar handed to jaydebeapi.
classpath = "/home/projects/temp/mysql-connector-java-8.0.22.jar"
# jpype.startJVM(jpype.getDefaultJVMPath(), f"-Djava.class.path={classpath}")

connection = jaydebeapi.connect('com.mysql.jdbc.Driver', 'jdbc:mysql://127.0.0.1:3313/xing?encoding=UTF-8', ["admin", "admin"], classpath)

# Fixed from the original: it called conn.cursor(), but the variable is
# named "connection" — NameError at runtime.
cursor = connection.cursor()
try:
    cursor.execute("show tables;")
    cursor.fetchall()
    # df = pd.DataFrame(data, columns=columns)
finally:
    # Always release JDBC resources, even if the query raises.
    cursor.close()
    connection.close()


# Drill jdbc
# classpath = "/home/projects/temp/jdbc-drill/drill-jdbc-all-1.18.0.jar"
# connection = jaydebeapi.connect('org.apache.drill.jdbc.Driver', 'jdbc:drill:drillbit=ubsdpdesp000103.vantage.zur:31010', [], classpath)
-------------------------------------------------------------------------------- /jira/jira-open-issues.py: -------------------------------------------------------------------------------- 1 | from jira import JIRA 2 | import os 3 | 4 | def remove_before_minus(s): 5 | return "jira-"+s.split('-')[1] 6 | 7 | # curl -H "Authorization: Bearer ${JIRA_TOKEN}" -X GET ${JIRA_URL}/rest/api/2/myself | jq . 8 | jira = JIRA(server=os.getenv('JIRA_URL'), 9 | token_auth=os.getenv('JIRA_TOKEN')) 10 | 11 | # ticket = jira.issue('IOO-6028') 12 | for ticket in jira.search_issues('assignee = currentUser() AND resolution = Unresolved'): 13 | print(f"{remove_before_minus(ticket.key):12} {ticket.fields.summary:<50}") 14 | -------------------------------------------------------------------------------- /json/json-example.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | example = {"a":10, "b":20} 4 | # convert to json string 5 | print(json.dumps(example)) 6 | # convert to json string and then move back to object 7 | # - read from string 8 | print(json.loads(json.dumps(example, sort_keys=True, indent=4))) 9 | 10 | 11 | class Example: 12 | def __init__(self, value): 13 | self.value = value 14 | 15 | def __str__(self): 16 | return self.value 17 | 18 | # is not JSON serializable - using pickle 19 | #print(json.dumps(Example("hello"))) 20 | -------------------------------------------------------------------------------- /json/json-walk-print-leafs.py: -------------------------------------------------------------------------------- 1 | # json document print all the elements in xpath style 2 | import json 3 | 4 | def walk_leaves(obj, path=""): 5 | if isinstance(obj, dict): 6 | for k, v in obj.items(): 7 | new_path = f"{path}.{k}" if path else k 8 | walk_leaves(v, new_path) 9 | elif isinstance(obj, list): 10 | for idx, item in enumerate(obj): 11 | new_path = f"{path}[{idx}]" 12 | walk_leaves(item, new_path) 13 | else: 14 | 
#docker image: https://hub.docker.com/r/spotify/kafka/
#docker image run: docker run -p 2181:2181 -p 9092:9092 --env ADVERTISED_HOST=172.18.0.100 --env ADVERTISED_PORT=9092 --env advertised.host.name=172.18.0.100 --net docker.local.network --ip 172.18.0.100 spotify/kafka
from kafka import KafkaConsumer, KafkaProducer
import logging

kafka_url = "172.18.0.100:9092"
kafka_topic = "my-topic"

logging.basicConfig(level=logging.DEBUG)
# Construct outside try: the original's finally clause referenced
# "producer" even when KafkaProducer() itself raised (UnboundLocalError).
producer = KafkaProducer(bootstrap_servers=kafka_url)
try:
    producer.send(kafka_topic, b"test")
    # Fixed: KafkaProducer requires bytes (or a configured value
    # serializer); sending a plain str raised an assertion error.
    producer.send(kafka_topic, b"test 2")
finally:
    producer.close()


consumer = KafkaConsumer(bootstrap_servers=kafka_url, auto_offset_reset='earliest', consumer_timeout_ms=200)
consumer.subscribe([kafka_topic])
for message in consumer:
    print(message)
producer.send(os.environ["KAFKA_TOPIC"], value=message.encode("utf-8")) 23 | producer.flush() 24 | -------------------------------------------------------------------------------- /keyboard/keyboard-automation.py: -------------------------------------------------------------------------------- 1 | import keyboard 2 | import time 3 | 4 | if __name__ == '__main__': 5 | 6 | ### record all input keys 7 | # recorded: [] = keyboard.record(until='esc') 8 | # print(recorded) 9 | 10 | 11 | keyboard.add_hotkey('alt+p', lambda : print('pressed alt + p')) 12 | ### waiting for ctrl-c, control c 13 | try: 14 | while True: 15 | time.sleep(5) 16 | except KeyboardInterrupt: 17 | pass 18 | 19 | keyboard.clear_hotkey('alt+p') -------------------------------------------------------------------------------- /keyboard/keypress-getkey.py: -------------------------------------------------------------------------------- 1 | # https://github.com/kcsaff/getkey/blob/master/tools/keys.txt 2 | # read char from keyboard 3 | from getkey import getkey, keys 4 | 5 | 6 | while True: 7 | key = getkey() 8 | 9 | if key == keys.UP: 10 | print("key up") 11 | elif key == keys.DOWN: 12 | print("key down") 13 | elif key == keys.ESCAPE: 14 | print("key escape") 15 | elif key == keys.ENTER: 16 | print("key enter") 17 | elif key == keys.DELETE: 18 | print("key delete") 19 | else: 20 | print(key, len(key)) -------------------------------------------------------------------------------- /keyboard/keypress-read.py: -------------------------------------------------------------------------------- 1 | # keyboard management: 2 | # * input: getch 3 | # * output: click 4 | # windows: msvcrt 5 | 6 | import getch 7 | 8 | def waiting_for_key()->[]: 9 | """ 10 | blocking read from keyboard 11 | get char from keyboard 12 | get keytype 13 | """ 14 | read_symbol_ord = ord(getch.getch()) 15 | if read_symbol_ord == 27: 16 | return [read_symbol_ord, ord(getch.getch()), ord(getch.getch())] 17 | else: 18 | return [read_symbol_ord, 
# Demonstrates list.sort(key=...) with a closure-built key function.
my_list = [5, 4, 2, 1]


def get_sort_reverse_function(max_element: int):
    """Build a key function that orders elements descending.

    Larger elements map to smaller keys (distance from *max_element*),
    so an ascending sort on the key is a descending sort on the values.
    """
    def sort_reverse_function(element) -> int:
        distance = max_element - element
        return distance
    return sort_reverse_function


def sort_function(element) -> int:
    """Identity key: natural ascending order."""
    return element


my_list.sort(key=get_sort_reverse_function(max(my_list)))
print("reverse sort:" + str(my_list))

my_list.sort(key=sort_function)
print("sort: " + str(my_list))
# List subtraction helpers.
#
# Filtering idiom (requires source_list / except_list in the caller's
# scope — the original executed this at module level and crashed with
# NameError on import, so it is kept as a documented snippet):
# files_with_control_date = [each for each in source_list if each not in except_list]


class SubstractAbleList(list):
    """A list subclass supporting "-" as element-wise exclusion.

    ``SubstractAbleList(1, 2, 3) - [2]`` -> ``SubstractAbleList([1, 3])``.
    """

    def __init__(self, *args):
        super(SubstractAbleList, self).__init__(args)

    def __sub__(self, another_list):
        # Fixed: the original referenced a misspelled name
        # ("antoher_list"), raising NameError on first subtraction.
        substracted_list = [each for each in self if each not in another_list]
        return self.__class__(*substracted_list)
/log-remover/log-remover: -------------------------------------------------------------------------------- 1 | SUCMD="runuser - root -c " 2 | 3 | case "${1}" in 4 | status) 5 | ;; 6 | start) 7 | $SUCMD "python /root/log-remover.py 9999 /var/lib/brand-server/cache/zip/" 8 | ;; 9 | stop) 10 | ;; 11 | restart) 12 | ;; 13 | esac 14 | -------------------------------------------------------------------------------- /log-remover/log-remover.py: -------------------------------------------------------------------------------- 1 | import tornado.ioloop 2 | import tornado.web 3 | import sys 4 | import os 5 | 6 | 7 | folder = "/var/lib/brand-server/cache/zip" 8 | 9 | class MainHandler(tornado.web.RequestHandler): 10 | 11 | def get(self): 12 | list_of_files = os.listdir(folder) 13 | counter = 0 14 | for each_file in list_of_files: 15 | real_path = os.path.join(folder, each_file) 16 | if os.path.isfile(real_path): 17 | os.remove(os.path.join(folder, each_file)) 18 | counter = counter + 1 19 | self.write("removed: " + str(counter)) 20 | 21 | 22 | def make_app(): 23 | return tornado.web.Application([ (r"/", MainHandler), ]) 24 | 25 | 26 | if __name__ == "__main__": 27 | if len(sys.argv)<3: 28 | print("please specify the and ") 29 | sys.exit(2) 30 | app = make_app() 31 | app.listen(sys.argv[1]) 32 | folder = sys.argv[2] 33 | tornado.ioloop.IOLoop.current().start() 34 | -------------------------------------------------------------------------------- /logging/log-example.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | logging.basicConfig(format='%(asctime)s.%(msecs)s:%(name)s:%(thread)d:%(levelname)s:%(process)d:%(message)s',level=logging.INFO) 4 | logging.info("standard logging") 5 | 6 | -------------------------------------------------------------------------------- /mail-send.py: -------------------------------------------------------------------------------- 1 | import smtplib 2 | from email.mime.text import MIMEText 
import sys


def expand_vertex(line: str) -> list:
    """Map step of breadth-first shortest path (Hadoop streaming).

    Input line: "<vertex>\t<dist|INF>\t{v1,v2,...}". Returns the record
    itself plus one "<dest>\t<dist+1|INF>\t{}" line per outgoing vertex.

    Fixed from the original: the adjacency set was parsed with eval(),
    which is unsafe on job input and wrong for "{}" (an empty dict, not
    an empty set); it is now parsed explicitly. Returning the lines
    (instead of printing inline) makes the mapper unit-testable.
    """
    data = line.strip().split("\t")
    dist = data[1]
    # "{2,3,4}" -> [2, 3, 4]; "{}" -> []
    inner = data[2].strip().lstrip("{").rstrip("}")
    vertex_destination = sorted(int(v) for v in inner.split(",") if v.strip())

    out = [line.strip()]
    next_dist = "INF" if dist == "INF" else str(int(dist) + 1)
    for each_destination in vertex_destination:
        out.append(str(each_destination) + "\t" + next_dist + "\t{}")
    return out


if __name__ == "__main__":
    for each_line in sys.stdin:
        for each_out in expand_vertex(each_line):
            print(each_out)
2 | 3 | Входные и выходные данные: в качестве ключа идет номер вершины, значение состоит из двух полей, разделенных табуляцией: 4 | 5 | Минимальное расстояние до данной вершины (если его еще нет, то пишется INF) 6 | Список исходящих вершин (через "," в фигурных скобках) 7 | -------------------------------------------------------------------------------- /map-reduce-graph/short-path-breadth-mapper.txt: -------------------------------------------------------------------------------- 1 | 1 0 {2,3,4} 2 | 2 1 {5,6} 3 | 3 1 {} 4 | 4 1 {7,8} 5 | 5 INF {9,10} 6 | 6 INF {} 7 | 7 INF {} 8 | 8 INF {} 9 | 9 INF {} 10 | 10 INF {} -------------------------------------------------------------------------------- /map-reduce-graph/short-path-breadth-reducer.readme: -------------------------------------------------------------------------------- 1 | Реализуйте reducer в задаче поиска кратчайшего пути с помощью Hadoop Streaming. 2 | 3 | Входные и выходные данные: в качестве ключа идет номер вершины, значение состоит из двух полей, разделенных табуляцией: 4 | 5 | Минимальное расстояние до данной вершины (если его еще нет, то пишется INF) 6 | Список исходящих вершин (через "," в фигурных скобках). -------------------------------------------------------------------------------- /map-reduce-graph/short-path-breadth-reducer.txt: -------------------------------------------------------------------------------- 1 | 1 0 {2,3,4} 2 | 10 INF {} 3 | 10 INF {} 4 | 2 1 {} 5 | 2 1 {5,6} 6 | 3 1 {} 7 | 3 1 {} 8 | 4 1 {} 9 | 4 1 {7,8} 10 | 5 2 {} 11 | 5 INF {9,10} 12 | 6 2 {} 13 | 6 INF {} 14 | 7 2 {} 15 | 7 INF {} 16 | 8 2 {} 17 | 8 INF {} 18 | 9 INF {} 19 | 9 INF {} -------------------------------------------------------------------------------- /map-reduce-graph/short-path-deykstra.readme: -------------------------------------------------------------------------------- 1 | # Реализуйте алгоритм Дейкстры поиска кратчайшего пути в графе. 
import sys


def increase_word(word_dict, next_word):
    """Record one occurrence of *next_word*; empty tokens are ignored."""
    if len(next_word) == 0:
        return
    word_dict[next_word] = word_dict.get(next_word, 0) + 1


def map_line(string_with_words):
    """Count the words of one line and print 'word<TAB>count' pairs."""
    counts = {}
    for token in string_with_words.split(" "):
        increase_word(counts, token.strip())
    for word, occurrences in counts.items():
        print(word + "\t" + str(occurrences))


def main():
    # Per-line combining mapper: counts are aggregated within each line only.
    for each_line in sys.stdin:
        map_line(each_line)


if __name__ == '__main__':
    main()
11 | print(str(each_element)+"\t"+"%.3f\t{}" % round(float(weight)/len(elements),3)) 12 | -------------------------------------------------------------------------------- /map-reduce-improvements/page-rank/page-rank-mapper.txt: -------------------------------------------------------------------------------- 1 | 1 0.200 {2,4} 2 | 2 0.200 {3,5} 3 | 3 0.200 {4} 4 | 4 0.200 {5} 5 | 5 0.200 {1,2,3} -------------------------------------------------------------------------------- /map-reduce-improvements/page-rank/page-rank-reducer.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | last_index = None 4 | weights = list() 5 | elements = list() 6 | 7 | def print_values(): 8 | found_weight = 0 9 | for index in range(len(weights)): 10 | if len(elements[index])==2: 11 | found_weight +=weights[index] 12 | else: 13 | found_element = elements[index] 14 | print("%s\t%.3f\t%s" % (last_index, found_weight,found_element) ) 15 | 16 | 17 | for text_line in sys.stdin: 18 | line = text_line.strip().split("\t") 19 | index = line[0] 20 | 21 | if last_index != index: 22 | if last_index: 23 | print_values() 24 | weights = list() 25 | elements = list() 26 | last_index = index 27 | weights.append(float(line[1])) 28 | elements.append(line[2]) 29 | 30 | print_values() -------------------------------------------------------------------------------- /map-reduce-improvements/page-rank/page-rank-reducer.txt: -------------------------------------------------------------------------------- 1 | 1 0.067 {} 2 | 1 0.200 {2,4} 3 | 2 0.067 {} 4 | 2 0.100 {} 5 | 2 0.200 {3,5} 6 | 3 0.067 {} 7 | 3 0.100 {} 8 | 3 0.200 {4} 9 | 4 0.100 {} 10 | 4 0.200 {} 11 | 4 0.200 {5} 12 | 5 0.100 {} 13 | 5 0.200 {} 14 | 5 0.200 {1,2,3} -------------------------------------------------------------------------------- /map-reduce-improvements/specific/complex-MapR.sh: -------------------------------------------------------------------------------- 1 | cat 
import sys


def print_outer_join(values):
    """Print 'a,b<TAB>1' for every ordered pair of distinct words in *values*.

    Pairs at the same position or with equal text are skipped.
    """
    for left_pos, left in enumerate(values):
        for right_pos, right in enumerate(values):
            if left_pos != right_pos and left != right:
                print(left + "," + right + "\t1")


def main():
    for each_line in sys.stdin:
        print_outer_join(each_line.strip().split(" "))


if __name__ == '__main__':
    main()
10 | return ",".join([ key+":"+str(value) for key,value in stripe.items()]) 11 | 12 | def print_stripe(letter, stripe): 13 | print(letter + "\t"+flat_stripe(stripe)) 14 | 15 | def print_outer_join(values): 16 | for i in range(len(values)): 17 | stripe = dict() 18 | for j in range(len(values)): 19 | if i!=j and values[i]!=values[j]: 20 | add_to_stripe(stripe, values[j]) 21 | print_stripe(values[i], stripe) 22 | 23 | def main(): 24 | for each_line in sys.stdin: 25 | words = each_line.strip().split(" ") 26 | print_outer_join(words) 27 | 28 | if __name__ == '__main__': 29 | main() -------------------------------------------------------------------------------- /map-reduce-improvements/specific/complex-mapper.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | def main(): 5 | for each_line in sys.stdin: 6 | words = each_line.strip().split("\t") 7 | count = words[0] 8 | for letter in words[1].strip().split(","): 9 | print(count+","+letter+"\t1") 10 | 11 | if __name__ == '__main__': 12 | main() -------------------------------------------------------------------------------- /map-reduce-improvements/specific/complex-mapper.txt: -------------------------------------------------------------------------------- 1 | 1 a,b 2 | 2 a,d,e 3 | 1 b 4 | 3 a,b -------------------------------------------------------------------------------- /map-reduce-improvements/specific/complex-reducer.1.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | def add_to_dictionary(values, letter, count): 4 | if letter in values: 5 | values[letter] = values[letter] + int(count) 6 | else: 7 | values[letter] = int(count) 8 | 9 | def print_dictionary(values): 10 | for key, value in values.items(): 11 | print(key+"\t"+str(value)) 12 | 13 | 14 | def main(): 15 | values = dict() 16 | previous_line = None 17 | for each_line in sys.stdin: 18 | if previous_line==each_line: 19 | continue 20 | words = 
import sys


def main():
    """Print each distinct key (first column) once from a key-sorted stream."""
    previous = None
    for line in sys.stdin:
        key = line.strip().split("\t")[0]
        if key != previous:
            if previous is not None:
                print(previous)
            previous = key
    # BUG FIX: the original printed unconditionally, emitting the literal
    # string "None" on empty input; only flush when a key was actually seen.
    if previous is not None:
        print(previous)


if __name__ == '__main__':
    main()
import sys


def main():
    """Inner join over a sorted 'key<TAB>letter' stream.

    Prints a key whenever it appears on adjacent rows with two different
    letters (i.e. the key exists in both input relations).
    """
    prev_key = None
    prev_letter = None
    for row in sys.stdin:
        fields = [field.strip() for field in row.split("\t")]
        key, letter = fields[0], fields[1]
        if key == prev_key and letter != prev_letter:
            print(key)
        prev_key, prev_letter = key, letter


if __name__ == '__main__':
    main()
import sys


def main():
    """Left subtraction: keep keys tagged 'A' that are never cancelled by
    a later non-'A' (i.e. 'B') row for the same key."""
    kept = []
    for row in sys.stdin:
        fields = [part.strip() for part in row.split("\t")]
        key, letter = fields[0], fields[1]
        if letter == 'A':
            if key not in kept:
                kept.append(key)
        elif key in kept:
            kept.remove(key)
    for key in kept:
        print(key)


if __name__ == '__main__':
    main()
#!/usr/bin/env python3

import sys
import re

# Compiled once at module level: the original recompiled the pattern on every
# input line, and used a non-raw string where '\w' is an invalid str escape.
WORD_RE = re.compile(r'\w+', re.UNICODE)


def main():
    """TF-IDF mapper: emit 'word#doc<TAB>1' for each word of 'doc:text' lines."""
    for line in sys.stdin:
        delimiter = line.find(":")
        if delimiter <= 0:
            continue  # skip lines without a 'doc:' prefix
        document_number = line[0:delimiter]
        for word in WORD_RE.findall(line[delimiter + 1:]):
            print(word + "#" + document_number + "\t1")


if __name__ == '__main__':
    main()
#!/usr/bin/env python3

import sys


def print_value(line, size):
    """Split a 'word#doc' key and emit 'word<TAB>doc<TAB>count'."""
    split = line.split("#")
    print(split[0] + "\t" + split[1] + "\t" + str(size))


def main():
    """Count consecutive identical 'word#doc' keys from a sorted stream."""
    previous_line = None
    counter = 0
    for each_line in sys.stdin:
        line = each_line.split("\t")[0]
        if previous_line == line:
            counter += 1
        else:
            if previous_line is not None:
                print_value(previous_line, counter)
            counter = 1
        previous_line = line
    # BUG FIX: the original called print_value unconditionally, crashing with
    # AttributeError (None.split) on an empty input stream.
    if previous_line is not None:
        print_value(previous_line, counter)


if __name__ == '__main__':
    main()
#!/usr/bin/env python3

import sys


def flush(pending, doc_count):
    """Print buffered 'word#doc<TAB>tf<TAB>' rows with the document
    frequency of the word appended to each."""
    for prefix in pending:
        print(prefix + str(doc_count))


def main():
    """Group 'word<TAB>doc;tf;1' rows by word, counting how many documents
    each word occurs in, then re-emit every row with that count."""
    current_word = None
    doc_count = 0
    pending = []
    for raw in sys.stdin:
        fields = raw.strip().split("\t")
        word = fields[0]
        elements = fields[1].split(";")
        if word != current_word:
            if current_word:
                flush(pending, doc_count)
            pending = []
            doc_count = 0
        doc_count += 1
        pending.append(word + "#" + elements[0] + "\t" + elements[1] + "\t")
        current_word = word
    flush(pending, doc_count)


if __name__ == '__main__':
    main()
import sys

if __name__ == "__main__":
    # Word-count reducer over a key-sorted stream: sum the 1s of consecutive
    # identical keys and flush a 'word<TAB>total' line on each key change.
    current_word = None
    counter = 0
    for line in sys.stdin:
        key_value = line.strip().split("\t")
        key = key_value[0]
        value = key_value[1]
        if current_word == key:
            counter = counter + 1
        else:
            if current_word:
                print(current_word + "\t" + str(counter))
            current_word = key
            counter = 1
    # flush the final group (skipped entirely on empty input)
    if counter > 0:
        print(current_word + "\t" + str(counter))
import sys


def main():
    """Average the integer sizes per key from a sorted 'key<TAB>size' stream."""
    last_key = None
    count = 0
    size = 0

    def print_values():
        # closure over the current group's accumulators
        print(last_key + "\t" + str(int(size / count)))

    for each_line in sys.stdin:
        key_value = each_line.strip().split("\t")
        if len(key_value) < 2:
            continue  # ignore malformed/blank lines
        key = key_value[0]
        value = key_value[1]
        if key != last_key:
            if last_key:
                print_values()
            count = 1
            size = int(value)
            last_key = key
        else:
            count += 1
            size = size + int(value)

    # BUG FIX: the original flushed unconditionally, raising
    # ZeroDivisionError (count == 0) on an empty input stream.
    if last_key is not None:
        print_values()


if __name__ == '__main__':
    main()
from confluent_kafka import Producer

# BUG FIX: the original referenced an undefined name `data` (NameError at
# runtime) -- define the payload explicitly before producing.
data = "extraction-start"

p = Producer({'streams.producer.default.stream': '/mapr/dp.prod.zur/vantage/streams/extraction-test'})
print("Kafka producer connected ")
p.produce('scenarioextraction-start', data.encode('utf-8'))
# BUG FIX: `p.flush` was a bare attribute access that never invoked flush(),
# so buffered messages could be silently dropped when the process exits.
p.flush()
from hashlib import md5 as md5


def generate_signature(user, password, date):
    """Return the MD5-based API signature for *user*/*password* at *date*.

    The signature is MD5(date + MD5(user + '/' + password)) with both digests
    upper-cased hex.
    """
    # BUG FIX: hashlib.md5 requires a bytes-like object on Python 3; the
    # original passed str and raised TypeError. Encode explicitly as UTF-8.
    user_password_hash = md5(("%s/%s" % (user, password)).encode("utf-8")).hexdigest().upper()
    signature = md5(("%s%s" % (date, user_password_hash)).encode("utf-8")).hexdigest().upper()
    return signature


print(generate_signature("U20005", "1234", "2016-10-22T22:25:28+03:00"))
import mlflow
import mlflow.sklearn

mlflow.start_run(experiment_id="my_experiment")

mlflow.log_param("param1", 5)
mlflow.log_param("param2", "value2")

# Train a model
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
model.fit(x_train, y_train)

# Log model as artifact
mlflow.sklearn.log_model(model, "random-forest-model")

# Log metrics
# BUG FIX: the single key/value API is mlflow.log_metric (singular);
# mlflow.log_metrics takes a dict. The second call also reused the "mse"
# key for the MAE value, overwriting the first metric -- log it as "mae".
mlflow.log_metric("mse", mean_squared_error(y_test, model.predict(x_test)))
mlflow.log_metric("mae", mean_absolute_error(y_test, model.predict(x_test)))

# Close the run opened by start_run above so it is not left dangling.
# NOTE(review): mean_squared_error/mean_absolute_error and the train/test
# splits are not defined in this snippet -- confirm against the caller.
mlflow.end_run()
from typing import List  # BUG FIX: 'List' was used in the annotation below but never imported


def check_mandatory_attributes(object_with_attributes, list_of_attributes: List[str]) -> None:
    """Exit(1) if any named attribute is missing or None on the given object.

    Used to fail fast when a mandatory setting was not provided via an ENV
    variable on the settings module.
    """
    for each_attribute in list_of_attributes:
        if getattr(object_with_attributes, each_attribute, None) is None:
            print(f"mandatory attribute should be present as ENV variable {each_attribute}")
            exit(1)


check_mandatory_attributes(settings, ["DATAAPI_BASE_URL","DATAAPI_API_KEY","DATAAPI_ACCOUNT_ID","DATAAPI_CONTEXT_ID","DATAAPI_CONTEXT","AWS_ACCESS_KEY_ID","AWS_SECRET_ACCESS_KEY","AWS_REGION","AWS_S3_BUCKET_NAME","AIRFLOW_URL","AIRFLOW_USER","AIRFLOW_PASSWORD"])
#!/usr/bin/python -tt
# BUG FIX: the original used the Python 2 print *statement*
# (`print "..."`), which is a SyntaxError under Python 3.
animals = ["cat", "dog", "pig", "rabbit"]
# enumerate() yields (index, element) pairs — no manual counter needed
for index, value in enumerate(animals):
    print("index: {} value: {}".format(index, value))
"""Shallow vs. deep copy of a dict that contains a mutable value."""

# Shallow copy: dict.copy() duplicates only the outer mapping; the
# nested list object stays shared, so a mutation shows through both names.
a = {1: [1, 2, 3]}
b = a.copy()
a[1].append(4)
print(a)
# {1: [1, 2, 3, 4]}
print(b)
# {1: [1, 2, 3, 4]}  <- the "copy" changed too


import copy

# Deep copy: copy.deepcopy() clones the nested list as well, so the
# original and the copy evolve independently afterwards.
a = {1: [1, 2, 3]}
b = copy.deepcopy(a)
a[1].append(4)
print(a)
# {1: [1, 2, 3, 4]}
print(b)
# {1: [1, 2, 3]}  <- the copy is untouched
class CustomAttribute:
    """Object that keeps every attribute inside an internal dict.

    Reading a name that was never assigned raises KeyError (not
    AttributeError), because __getattr__ delegates straight to the dict.
    """

    def __init__(self):
        # Bypass our own __setattr__ while creating the backing store,
        # otherwise assigning 'data' would recurse before it exists.
        object.__setattr__(self, 'data', dict())

    def __getattr__(self, name):
        # invoked only for names not found through normal lookup
        return self.data[name]

    def __setattr__(self, name, value):
        # every assignment lands in the backing dict, never in __dict__
        self.data[name] = value
# the same functionality as enter/exit
from contextlib import contextmanager


@contextmanager
def create_session():
    """Yield a DB session; commit on success, rollback on error, always close.

    BUG FIX: without @contextmanager a plain generator object has no
    __enter__/__exit__, so ``with create_session() as s:`` raised
    AttributeError.  The decorator turns the generator into a real
    context manager: code before ``yield`` runs on entry, the rest on exit.
    """
    session = settings.Session()
    try:
        # control is handed to the body of the caller's ``with`` block here
        yield session
        # reached only when the ``with`` body finished without raising
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()


def provide_session(func):
    # NOTE(review): ``func`` is accepted but unused in this demo body
    with create_session() as session:
        session.merge(SomeDataObject())
# example of overriding operators
# operator overriding
class Value:
    """Thin string wrapper used to demonstrate operator overloading."""

    # other overridable operators, for reference:
    # - __sub__, * __mul__, ** __pow__, / __truediv__, // __floordiv__, % __mod__
    # << __lshift__, >> __rshift__, & __and__, | __or__, ^ __xor__, ~ __invert__
    # < __lt__, <= __le__, == __eq__, != __ne__, > __gt__, >= __ge__

    def __init__(self, value):
        self.value = value

    def __add__(self, other):
        # '+' yields a new Value with both operands framed in brackets
        return Value(f" -[ {self.value} ]- -[ {other.value} ]- ")

    def __str__(self):
        return self.value
# http://www.oracle.com/technetwork/articles/dsl/python-091105.html
# pip3 install credentials
import credentials
# pip3 install cx_oracle
import cx_Oracle

"""Print the DDL of one table, using connection settings from ``credentials``."""

# NOTE(review): the original referenced bare ORACLE_HOST / ORACLE_PORT /
# ORACLE_SID / DB_USER / DB_PASS names that are defined nowhere in this
# file -> NameError at runtime.  They presumably live in the
# ``credentials`` module (imported above) -- confirm against its API.
# dsn_tns = cx_Oracle.makedsn(credentials.ORACLE_HOST, credentials.ORACLE_PORT, service_name=credentials.ORACLE_SERVICE)
dsn_tns = cx_Oracle.makedsn(credentials.ORACLE_HOST, credentials.ORACLE_PORT,
                            sid=credentials.ORACLE_SID)
conn = cx_Oracle.connect(user=credentials.DB_USER,
                         password=credentials.DB_PASS,
                         dsn=dsn_tns)
with conn as connection:
    # create the cursor BEFORE the try block so the finally clause
    # cannot hit an unbound 'cursor' name
    cursor = connection.cursor()
    try:
        # SELECT table_name FROM user_tables;
        cursor.execute("select dbms_metadata.get_ddl('TABLE', 'TOSKANA') from dual")
        for result in cursor:
            print(result[0])
    finally:
        cursor.close()
-------------------------------------------------------------------------------- /oracle/utils/test.bat: -------------------------------------------------------------------------------- 1 | python is-table-exists.py corporate_client 2 | python is-table-exists.py corporate_client_brand -------------------------------------------------------------------------------- /pandas/data.csv: -------------------------------------------------------------------------------- 1 | price,head-1,head-2,head-3 2 | 10,1,one,first 3 | 20,2,,second 4 | 30,3,three,third 5 | 15,1,one,fourth 6 | 25,2,two,fifth 7 | 35,3,three,first 8 | 40,4,four,second -------------------------------------------------------------------------------- /pandas/fetching_tweet.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from datetime import datetime as dt 3 | 4 | # Here we should fetch our data from the Twitter API but since now we have to 5 | # apply for getting API's credentials we pass this step for the sake of the tutorial. 6 | # We use data.csv as source of tweets. 
import pandas as pd

# Here we should fetch our data from the Twitter API but since now we have to
# apply for getting API's credentials we pass this step for the sake of the tutorial.
# We use data.csv as source of tweets.

LOCAL_DIR = '/tmp/'


def main():
    """Read the raw tweet CSV, normalise the Time column and export it."""
    # Create the dataframe from data.csv
    tweets = pd.read_csv('/home/airflow/airflow_files/data.csv', encoding='latin1')

    # Format time using pd.to_datetime and drop the column 'row ID'
    tweets = tweets.assign(Time=pd.to_datetime(tweets.Time)).drop('row ID', axis='columns')

    # Export the dataframe into a new csv file
    tweets.to_csv(LOCAL_DIR + 'data_fetched.csv', index=False)


# BUG FIX: the guard was '__main___' (three trailing underscores), which
# never matches, so main() was never executed when the script was run.
if __name__ == '__main__':
    main()
"""Demonstrate functools.partial: pre-bind the first three arguments."""
from functools import partial


def func(u, v, w, x):
    # weighted sum with fixed coefficients 4, 3, 2, 1
    coefficients = (4, 3, 2, 1)
    return sum(coef * arg for coef, arg in zip(coefficients, (u, v, w, x)))


# freeze u=1, v=2, w=3 — the partial needs only the final argument x
another_function = partial(func, 1, 2, 3)

print(another_function(4))
-------------------------------------------------------------------------------- /pex/magic_name/__main__.py: -------------------------------------------------------------------------------- 1 | if __name__=='__main__': 2 | print("hello from pex") 3 | -------------------------------------------------------------------------------- /pex/naked_example/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cherkavi/python-utilities/110075815ac384cb800e1bcea2dadc3e79f67c0d/pex/naked_example/__init__.py -------------------------------------------------------------------------------- /pex/naked_example/main.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | import json 3 | 4 | if __name__=='__main__': 5 | print("hello from pex") 6 | if len(sys.argv)>1: 7 | print("attempt to read json file: "+sys.argv[1]) 8 | with open(sys.argv[1], "r") as json_file: 9 | print(json.load(json_file)) 10 | -------------------------------------------------------------------------------- /pex/naked_example/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup( 4 | name='naked_example', 5 | version='0.0.1', 6 | packages=["json", "."], 7 | package_data={}, 8 | url='', 9 | license='', 10 | description='basement for creating PythonEXectuion', 11 | long_description='' 12 | ) -------------------------------------------------------------------------------- /pex/requirements.txt: -------------------------------------------------------------------------------- 1 | requests -------------------------------------------------------------------------------- /pex/samplepkg.pex: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cherkavi/python-utilities/110075815ac384cb800e1bcea2dadc3e79f67c0d/pex/samplepkg.pex 
-------------------------------------------------------------------------------- /pex/simple.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "BMW", 3 | "model": "220" 4 | } 5 | -------------------------------------------------------------------------------- /pip-install-package/README.md: -------------------------------------------------------------------------------- 1 | copy package to current folder 2 | ```sh 3 | rsync -avz -e "ssh -i /home/projects/my_project/integration-prototype-keys.pem" /home/projects/my_project/integration-prototype/airflow-dag/airflow_shopify/ ubuntu@ec2-3.compute-1.amazonaws.com:~/airflow/airflow-dag/wondersign_airflow_shopify/ 4 | ``` 5 | install current package 6 | ```sh 7 | pip3 install -U ~/path/to/this/folder 8 | ``` 9 | -------------------------------------------------------------------------------- /pip-install-package/airflow_shopify/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cherkavi/python-utilities/110075815ac384cb800e1bcea2dadc3e79f67c0d/pip-install-package/airflow_shopify/__init__.py -------------------------------------------------------------------------------- /pip-install-package/airflow_shopify/shopify/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cherkavi/python-utilities/110075815ac384cb800e1bcea2dadc3e79f67c0d/pip-install-package/airflow_shopify/shopify/__init__.py -------------------------------------------------------------------------------- /pip-install-package/airflow_shopify/shopify/collection/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cherkavi/python-utilities/110075815ac384cb800e1bcea2dadc3e79f67c0d/pip-install-package/airflow_shopify/shopify/collection/__init__.py 
import socket
import sys
from contextlib import closing


def check_port(host, port_number):
    """Attempt a 1-second TCP connect to host:port and print whether it is open."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # closing() guarantees the socket is released even if connect fails
    with closing(probe):
        probe.settimeout(1)
        reachable = probe.connect_ex((host, int(port_number))) == 0
        template = "%s : %s >>open<<" if reachable else "%s : %s NOT open"
        print(template % (host, port_number))


if __name__ == '__main__':
    check_port(sys.argv[1], sys.argv[2])
maybe pulumi doesn't work with AWS_SESSION_TOKEN 10 | 11 | ### write your code in __main__.py 12 | pulumi up 13 | ``` 14 | 15 | https://www.pulumi.com/registry/packages/aws/api-docs/ec2/vpc/ 16 | """ 17 | 18 | main = aws.ec2.Vpc("second_vpc", cidr_block="10.0.0.0/16") 19 | print(main) 20 | 21 | -------------------------------------------------------------------------------- /pulumi/vpc-create/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | venv/ 3 | -------------------------------------------------------------------------------- /pulumi/vpc-create/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: vpc-create 2 | runtime: 3 | name: python 4 | options: 5 | virtualenv: venv 6 | description: A minimal AWS Python Pulumi program 7 | -------------------------------------------------------------------------------- /pulumi/vpc-create/__main__.py: -------------------------------------------------------------------------------- 1 | """An AWS Python Pulumi program""" 2 | 3 | import pulumi 4 | import pulumi_aws as aws 5 | 6 | main = aws.ec2.Vpc("second_vpc", cidr_block="10.0.0.0/16") 7 | print(main) 8 | -------------------------------------------------------------------------------- /pulumi/vpc-create/requirements.txt: -------------------------------------------------------------------------------- 1 | pulumi>=3.0.0,<4.0.0 2 | pulumi-aws>=5.0.0,<6.0.0 3 | -------------------------------------------------------------------------------- /qrcode/qrcode-with-segno.py: -------------------------------------------------------------------------------- 1 | # pip install segno 2 | 3 | 4 | import segno 5 | 6 | price_tag = segno.make("my own text") 7 | price_tag.save("out.png", scale=5) 8 | 9 | 10 | from segno import helpers 11 | 12 | # email, geo, mecard, vcard, wifi 13 | qrcode = helpers.make_wifi(ssid='my access point', password='1234567890', security='WPA') 14 | # 
# In Python 2, range() returned a list while xrange() returned a lazy iterator.
# BUG FIX: xrange was removed in Python 3 -> NameError; range() itself is
# now the lazy sequence, so it serves both purposes.
print(sum(range(1, 100)))
# lazy-iterator variant (formerly sum(xrange(1, 100)))
print(sum(range(1, 100)))
c:\\temp\\ 3 | 4 | [remote] 5 | port = 8899 -------------------------------------------------------------------------------- /rest-files-read/readfilesrest/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cherkavi/python-utilities/110075815ac384cb800e1bcea2dadc3e79f67c0d/rest-files-read/readfilesrest/__init__.py -------------------------------------------------------------------------------- /rest-files-read/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import setup 2 | setup( 3 | name="read-files-rest", 4 | packages=["readfilesrest"], 5 | version="1.0.0", 6 | description="read files from folder and provide access to them via HTTP ", 7 | author="Vitalii Cherkashyn", 8 | author_email="technik7job@gmail.com", 9 | keywords=["REST", "Download files"], 10 | classifiers=[ 11 | "Programming Language :: Python", 12 | "Programming Language :: Python :: 3", 13 | "Development Status :: prototype", 14 | "Environment :: Other Environment", 15 | "Intended Audience :: Developers", 16 | "Operating System :: OS Independent", 17 | "Topic :: Software Development :: Components :: REST app", 18 | ], 19 | long_description="""\ 20 | provide ability for external users ( http interface ) 21 | to read files from local filesystem 22 | """ 23 | ) -------------------------------------------------------------------------------- /return_None_or_str.py: -------------------------------------------------------------------------------- 1 | from types import UnionType 2 | 3 | 4 | def my_func(a:int) -> UnionType[None, str]: 5 | if a>10: 6 | return "big" 7 | else: 8 | return None 9 | -------------------------------------------------------------------------------- /script-exec/execute-string.py: -------------------------------------------------------------------------------- 1 | exec("print('this is line inside text script')") 2 | 
# BUG FIX: datetime was used below but never imported in this file -> NameError.
from datetime import datetime


class ImportFile:
    """One timestamped import file named ``<prefix><YYYY_MM_DD_HH_MM>.<ext>``."""

    # length of the extension including the dot, e.g. ".csv" / ".xml"
    EXTENSION_SIZE = 4

    # expect to see string like: "Mobile_Tariff_2018_06_13_13_03.csv"
    def __init__(self, file_name):
        self.file_name = file_name
        # last 16 chars of the stem plus the extension, e.g. '2018_06_13_13_03.xml'
        self.suffix = file_name[-16 - ImportFile.EXTENSION_SIZE:]
        self.date = datetime.strptime(self.suffix[0:-ImportFile.EXTENSION_SIZE], "%Y_%m_%d_%H_%M")
        self.prefix = file_name[0:len(file_name) - len(self.suffix)]

    def __str__(self):
        return self.prefix + " " + self.suffix

    @staticmethod
    def buckets(list_of_files):
        """Distinct prefixes across the given ImportFile objects."""
        return {each_file.prefix for each_file in list_of_files}

    @staticmethod
    def files_in_bucket(list_of_files, bucket_name):
        """Files whose prefix equals *bucket_name*, newest first."""
        return sorted(filter(lambda x: x.prefix == bucket_name, list_of_files),
                      key=lambda f: f.date, reverse=True)
from pyspark import SparkContext

# sc = SparkContext("172.17.0.2", "test-app")
# sc = SparkContext("spark://172.17.0.2", "test-app")
# SparkConf().setAppName("App_Name").setMaster("spark://localhost:18080").set("spark.ui.port","18080");

# local connectivity smoke test: create a context, print it, always clean up
sc = None
try:
    sc = SparkContext("local", "test-app")
    print("------------")
    print(sc)
    print("------------")
finally:
    # BUG FIX: SparkContext(...) itself may raise, leaving 'sc' unbound;
    # the old 'finally: sc.stop()' then raised NameError and hid the error
    if sc is not None:
        sc.stop()
import os
import sys
from pyspark import SparkContext  # pylint: disable=wrong-import-position,import-error
from pyspark import SparkConf  # pylint: disable=wrong-import-position,import-error
from pyspark.sql import SQLContext  # pylint: disable=wrong-import-position,import-error

# BUG FIX: sys.getenv does not exist (AttributeError) --
# environment variables are read via os.getenv
json_file = os.getenv('json')
avro_dir = os.getenv('avro_dir')

conf = SparkConf().setAppName('Json => Avro')
sc = SparkContext(conf=conf)  # pylint: disable=invalid-name
sqlContext = SQLContext(sc)  # pylint: disable=invalid-name
# read JSON input and persist it as Avro via the databricks connector
df = sqlContext.read.json(json_file)
df.write.format('com.databricks.spark.avro').save(avro_dir)
import os
import sys
from pyspark import SparkContext  # pylint: disable=wrong-import-position,import-error
from pyspark import SparkConf  # pylint: disable=wrong-import-position,import-error
from pyspark.sql import SQLContext  # pylint: disable=wrong-import-position,import-error

# BUG FIX: sys.getenv does not exist (AttributeError) --
# environment variables are read via os.getenv
parquet_file = os.getenv('parquet')
avro_dir = os.getenv('avro_dir')

conf = SparkConf().setAppName('Parquet => Avro')
sc = SparkContext(conf=conf)  # pylint: disable=invalid-name

sc.setLogLevel('WARN')
sqlContext = SQLContext(sc)  # pylint: disable=invalid-name

# read Parquet input and persist it as Avro via the databricks connector
df = sqlContext.read.parquet(parquet_file)
df.write.format('com.databricks.spark.avro').save(avro_dir)
#!/usr/bin/python3 -tt
"""Strip '--' comments from a SQL file, print the joined content, then print
each column name without its table-alias prefix and trailing comma."""
import sys

if len(sys.argv) <= 1:
    print("file name should be specified")
    sys.exit(1)

parameters = []
with open(sys.argv[1]) as f:
    for each_line in f:
        # BUG FIX: find() returns -1 when no '--' is present; the old
        # unconditional slice '[:index-1]' silently chopped the last two
        # characters of comment-free lines (and one extra char otherwise)
        index = each_line.find("--")
        if index >= 0:
            each_line = each_line[:index]
        parameters.append(each_line)

# BUG FIX: string.join() was removed in Python 3 -- use str.join
print(" ".join(parameters))
print()
for parameter in parameters:
    each = parameter.strip()
    if each.endswith(","):
        each = each[:-1]
    # BUG FIX: str.index() raises ValueError when '.' is absent, so the
    # 'else' branch was unreachable; find() returns -1 as the code expects
    index = each.find(".")
    if index >= 0:
        print(each[index + 1:])
    else:
        print(each)
import os

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker


def get_engine_and_session_cls(echo=True):
    """Create an in-memory SQLite engine and a bound Session factory.

    :param echo: when True, SQLAlchemy logs every emitted SQL statement
    :return: tuple of (engine, session_class)
    """
    # engine = create_engine(f"sqlite:///{db_path}?cache=shared", echo=False, connect_args={'check_same_thread': False})
    # "mysql+pymysql://username:password@localhost/db_name"
    # "mysql://username:password@localhost:3313/xing"

    engine = create_engine("sqlite:///:memory:", echo=echo)
    session_class = sessionmaker(bind=engine)
    return engine, session_class


# BUG FIX: 'os' was used below without being imported (NameError on import).
# Prefer an on-disk database from $PATH_TO_SQLITE, fall back to in-memory.
path_to_database = os.environ.get("PATH_TO_SQLITE")
if path_to_database:
    path_to_database = f"sqlite://{path_to_database}?cache=shared"
else:
    path_to_database = "sqlite:///:memory:?cache=shared"
#!/usr/bin/env python3
import sys
import time

import paramiko

import cherkavi  # local module providing linux_login / linux_password


def get_process_by_id(id):
    """Return the 'ps -p <id>' output lines (header stripped) from the remote host."""
    ssh = None
    try:
        ssh = paramiko.SSHClient()
        # ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.load_system_host_keys()
        ssh.connect("hostname", username=cherkavi.linux_login, password=cherkavi.linux_password)
        ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command("ps -p " + str(id))
        lines = ssh_stdout.readlines()
        return lines[1:]
    finally:
        # BUG FIX: SSHClient() itself may raise, leaving 'ssh' unbound;
        # close the client only when it was actually created
        if ssh is not None:
            ssh.close()


if __name__ == "__main__":
    proc_id = sys.argv[1]
    # poll every 15s until the remote process disappears
    while len(get_process_by_id(proc_id)) > 0:
        # BUG FIX: Python-2 print statements converted to Python-3 calls
        print(time.ctime())
        time.sleep(15)
    print("process %s was not found %s " % (proc_id, time.ctime()))
#!/usr/bin/env python
import paramiko
import cherkavi


def main():
    """Query the remote host via SSH for the installed brandserver version."""
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname='hostname',
                   username=cherkavi.linux_login,
                   password=cherkavi.linux_password,
                   port=22)
    _stdin, stdout, _stderr = client.exec_command('sudo yum list | grep brandserver')
    package_name = stdout.read().decode("utf-8").strip()
    # the version sits between the 'noarch' prefix and the trailing '.noarch'
    start = package_name.find("noarch") + 8
    end = package_name.rfind(".noarch")
    print(package_name[start:end])
    client.close()


if __name__ == "__main__":
    main()
import datetime
today = datetime.datetime.now()

# str(): the "informal", human-readable representation --
# for a datetime this prints e.g. '2023-01-01 12:00:00.000000'
# (the original comments had str/repr swapped)
print(str(today))

# repr(): the "official" representation, mostly for debugging --
# for a datetime this prints the constructor-like form,
# e.g. 'datetime.datetime(2023, 1, 1, 12, 0, 0)'
print(repr(today))
'''
execute jar file
'''
import sys
import os
import subprocess


def main(argv):
    """Entry point; *argv* holds the CLI arguments (script name excluded).

    Exits with an error message when no jar file was given.
    """
    # BUG FIX: len() can never be negative, so the old 'len(argv) < 0'
    # guard could never fire -- an empty argument list must be rejected
    if len(argv) < 1:
        sys.exit("need to specify the jar file ")
    # os.spawnl(os.P_DETACH, 'java -jar c:\\temp\\long-execution.jar')
    # os.spawnl(os.P_NOWAIT, 'java -jar c:\\temp\\long-execution.jar')
    # os.system('cmd /c java -jar c:\\temp\\long-execution.jar')
    # os.system('start /b java -jar c:\\temp\\long-execution.jar')
    # subprocess.call("java -jar c:\\temp\\long-execution.jar")
    # subprocess.Popen("java -jar c:\\temp\\long-execution.jar")


if __name__ == "__main__":
    main(sys.argv[1:])
import os
import subprocess

# BUG FIX: the os.devnull handle was opened and never closed (leak);
# 'with' guarantees it is released after the call.
# stderr=subprocess.STDOUT merges stderr into the (discarded) stdout.
with open(os.devnull, 'w') as FNULL:
    retcode = subprocess.call(['echo', 'foo'], stdout=FNULL, stderr=subprocess.STDOUT)

# It is effectively the same as running this shell command:
# retcode = os.system("echo 'foo' &> /dev/null")
def summarize_digits(a: int, b: int) -> int:
    """Return the sum of two integers.

    example of using doctest
    to execute it:
    python3 -m doctest -v implementation.py
    or you can execute prepared text file:
    python3 -m doctest -v implementation.txt
    >>> summarize_digits(10,5)
    15
    """
    result = a + b
    return result
# Read a text file line by line and print each line without its trailing
# newline.
# BUG FIX: the original used the Python-2 'print value' statement (a syntax
# error under Python 3) and closed the file manually; 'with' guarantees the
# handle is released even if printing fails.
with open("array-slicing.py") as text_file:
    for each_line in text_file:
        value = each_line
        if value.endswith('\n'):
            value = value[0:-1]
        print(value)
import sys
import threading
import time


class Worker(threading.Thread):
    """Thread that prints its title followed by a counter from 1 to 9."""

    def __init__(self, title):
        super().__init__()
        self.title = title

    def run(self):
        for index in range(1, 10):
            time.sleep(0.01)
            print(self.title + " " + str(index))


def main(arguments):
    """Start two workers concurrently (they are intentionally not joined)."""
    workers = [Worker("w1"), Worker("w2")]
    for each_worker in workers:
        each_worker.start()
    print("-- end --")


if __name__ == "__main__":
    main(sys.argv)
# apt-get install tor
# tor --hash-password mypassword
#
# /etc/tor/torrc
# ControlPort 9051
# HashedControlPassword 16:872860B76453A77D60CA2BB8C1A7042072093276A3D701AD684053EC4C
#
# sudo service tor restart (or) sudo /etc/init.d/tor restart
#
# pip install -U requests
# pip install requests[socks]
#
# export TOR_PASSWORD=mypassword

import os

from stem import Signal
from stem.control import Controller
import requests


def get_current_ip():
    """Return the current external IP as reported through the local Tor SOCKS proxy."""
    proxies = {'http': 'socks5h://127.0.0.1:9050', 'https': 'socks5h://127.0.0.1:9050'}
    result = requests.get("http://api.ipify.org?format=json", proxies=proxies)
    # result = requests.get("http://api.ipify.org?format=json")
    return result.text


with Controller.from_port(port=9051) as controller:
    print(get_current_ip())
    # SECURITY FIX: the control-port password was hard-coded in the source;
    # read it from the environment, same convention as change-tor-ip.py
    controller.authenticate(password=os.environ.get("TOR_PASSWORD"))
    # print("Success!")
    # print(dir(controller))
    # request a new circuit (new exit IP)
    controller.signal(Signal.NEWNYM)
    print(get_current_ip())
import tornado.ioloop
import tornado.web
import sys
import os


# directory whose files are removed on each GET request;
# overwritten from the command line below
folder = "/var/lib/brand-server/cache/zip"


class MainHandler(tornado.web.RequestHandler):

    def get(self):
        """Delete every regular file in *folder* and report how many were removed."""
        list_of_files = os.listdir(folder)
        counter = 0
        for each_file in list_of_files:
            real_path = os.path.join(folder, each_file)
            if os.path.isfile(real_path):
                os.remove(real_path)
                counter = counter + 1
        self.write("removed: " + str(counter))


def make_app():
    return tornado.web.Application([(r"/", MainHandler), ])


if __name__ == "__main__":
    if len(sys.argv) < 3:
        # BUG FIX: the '<port>' / '<folder>' placeholders were lost from the
        # usage message (likely stripped as markup)
        print("please specify the <port> and <folder>")
        sys.exit(2)
    # BUG FIX: assign the target folder before the server starts listening,
    # so no request can ever be served with the default path
    folder = sys.argv[2]
    app = make_app()
    app.listen(sys.argv[1])
    tornado.ioloop.IOLoop.current().start()
11 | client = Client(account_sid, auth_token) 12 | # client.http_client.logger.setLevel(logging.INFO) 13 | 14 | try: 15 | message = client.messages.create( 16 | to="+1540383....", 17 | from_="+141531....", 18 | body="Hello from Python!") 19 | except TwilioRestException as e: 20 | print(e) 21 | 22 | print(message.sid) 23 | ``` 24 | 25 | ```python 26 | message.error_code==None 27 | message.error_message==None 28 | ``` 29 | 30 | -------------------------------------------------------------------------------- /web-echo/READ.md: -------------------------------------------------------------------------------- 1 | curl -X GET :8000 2 | will return current time of the "server" 3 | 4 | [tornado requests](https://www.tornadoweb.org/en/stable/web.html) 5 | -------------------------------------------------------------------------------- /web-echo/echo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import tornado.ioloop 4 | import tornado.web 5 | import datetime 6 | 7 | class MainHandler(tornado.web.RequestHandler): 8 | def get(self): 9 | # self.set_header('Content-Type', 'application/text') 10 | self.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")) 11 | self.finish() 12 | 13 | def post(self): 14 | data = self.get_argument('body', 'No data received') 15 | self.write(data) 16 | self.finish() 17 | 18 | 19 | def make_app(): 20 | return tornado.web.Application([ 21 | (r"/", MainHandler), 22 | ]) 23 | 24 | if __name__ == "__main__": 25 | app = make_app() 26 | app.listen(8000) 27 | tornado.ioloop.IOLoop.instance().start() 28 | -------------------------------------------------------------------------------- /web-file-response/file-server-tornado.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import tornado.ioloop 4 | import tornado.web 5 | 6 | class MainHandler(tornado.web.RequestHandler): 7 | def get(self): 8 | file_name = 
'get-pip.py' 9 | buf_size = 4096 10 | self.set_header('Content-Type', 'application/octet-stream') 11 | self.set_header('Content-Disposition', 'attachment; filename=' + file_name) 12 | with open(file_name, 'r') as f: 13 | while True: 14 | data = f.read(buf_size) 15 | if not data: 16 | break 17 | self.write(data) 18 | self.finish() 19 | 20 | 21 | def make_app(): 22 | return tornado.web.Application([ 23 | (r"/", MainHandler), 24 | ]) 25 | 26 | if __name__ == "__main__": 27 | app = make_app() 28 | app.listen(7777) 29 | tornado.ioloop.IOLoop.current().start() 30 | -------------------------------------------------------------------------------- /web-socket/socket-client.py: -------------------------------------------------------------------------------- 1 | import vars 2 | import socket 3 | import contextlib 4 | 5 | 6 | def start_sender(): 7 | with contextlib.closing(socket.socket()) as sock: 8 | sock.connect((vars.HOST, vars.PORT)) 9 | # convert string to bytes 10 | sock.send("this is my string".encode("utf-8")) 11 | data = sock.recv(1024) 12 | if data: 13 | print("data after sending: " + str(data)) 14 | 15 | 16 | if __name__ == "__main__": 17 | start_sender() -------------------------------------------------------------------------------- /web-socket/socket-server.py: -------------------------------------------------------------------------------- 1 | import vars 2 | import socket 3 | import contextlib 4 | 5 | 6 | def start_listener(): 7 | with contextlib.closing(socket.socket()) as sock: 8 | sock.bind((vars.HOST, vars.PORT)) 9 | sock.listen(vars.QUEUE_LEN) 10 | # need to start separate Thread 11 | while True: 12 | connection, address = sock.accept() 13 | print(address) 14 | data = connection.recv(1024) 15 | if data: 16 | print("data from socket:" + str(data)) 17 | connection.send(data[::-1]) 18 | connection.close() 19 | 20 | 21 | if __name__ == "__main__": 22 | print("hostname of server:", socket.gethostname()) 23 | start_listener() 24 | 
-------------------------------------------------------------------------------- /web-socket/vars.py: -------------------------------------------------------------------------------- 1 | HOST = "127.0.0.1" 2 | PORT = 3003 3 | QUEUE_LEN = 1 -------------------------------------------------------------------------------- /xgboost.py: -------------------------------------------------------------------------------- 1 | import xgboost as xgb 2 | from sklearn.feature_extraction.text import TfidfVectorizer 3 | 4 | # Define the data and the labels 5 | data = ["this is a sample text", "xgboost is a powerful tool", "this is another example"] 6 | labels = [0, 1, 0] 7 | 8 | # Extract features using a TF-IDF vectorizer 9 | vectorizer = TfidfVectorizer() 10 | features = vectorizer.fit_transform(data) 11 | 12 | # Convert the features to a dense matrix 13 | features = features.toarray() 14 | 15 | # Train the xgboost model 16 | model = xgb.XGBClassifier() 17 | model.fit(features, labels) -------------------------------------------------------------------------------- /xmind-todo-parser/skip.txt: -------------------------------------------------------------------------------- 1 | Intro to Hadoop and MapReduce -------------------------------------------------------------------------------- /xmind-todo-parser/todo.sh: -------------------------------------------------------------------------------- 1 | python3 education-advicer.py education.xmind 60 > out.html; firefox out.html & 2 | -------------------------------------------------------------------------------- /xmind-todo-parser/xmind-python.md: -------------------------------------------------------------------------------- 1 | git clone https://github.com/xmindltd/xmind-sdk-python.git 2 | cd xmind-sdk-python 3 | python setup.py install 4 | 5 | 6 | #-*- coding: utf-8 -*- 7 | import xmind 8 | from xmind.core import workbook,saver 9 | from xmind.core.topic import TopicElement 10 | 11 | workbook = xmind.load("central.xmind") 12 | 
sheet = workbook.getPrimarySheet() 13 | topic = sheet.getRootTopic() 14 | topic.getTitle() 15 | topic.getMarkers() 16 | topic.getSubTopics() 17 | topic.getSubTopics()[0].getNotes() 18 | topic.getSubTopics()[0].getNotes().getContent() 19 | 20 | 21 | https://github.com/xmindltd/xmind-sdk-python 22 | https://github.com/xmindltd/xmind-sdk-python/blob/master/example.py 23 | 24 | topic.getMarkers()[0].getMarkerId().name 25 | 26 | 27 | https://bitbucket.org/Mekk/mekk.xmind/ 28 | -------------------------------------------------------------------------------- /xml-archimate2svg/.gitignore: -------------------------------------------------------------------------------- 1 | test/doc-test-result.svg 2 | -------------------------------------------------------------------------------- /xml-archimate2svg/README.md: -------------------------------------------------------------------------------- 1 | ![schema](https://i.postimg.cc/gr9RvCZ6/archimate-enrichment-from-properties.png) 2 | 3 | Archimate to SVG export improvement. 4 | Update destination SVG with all links from documentation properties ( also add popup hints to elements). 5 | Documentation property should looks like ( to be depicted on tooltip ): 6 | ``` 7 | code: https://github.com/cherkavi/ 8 | doc: https://my.confluence.com/cherkavi 9 | some additional information that will be skipped 10 | ``` 11 | :todo: possible improvement here - instead of documentation to use Properties of certain element. 12 | 13 | execution example 14 | ```sh 15 | python3 update-archimate.py source.archimate exported_from_archimate.svg enriched_with_tooltips.svg 16 | ``` 17 | 18 | ## improvement. 
using jupyter as link consumer 19 | ![using jupyter](https://i.postimg.cc/wvvdfN7Y/archi2svg-with-jupyter.jpg) 20 | -------------------------------------------------------------------------------- /xml-archimate2svg/test/doc-test.sh: -------------------------------------------------------------------------------- 1 | python3 ../update-archimate.py doc-test.archimate doc-test.svg doc-test-result.svg 2 | firefox doc-test-result.svg 3 | -------------------------------------------------------------------------------- /xml-archimate2svg/text_parser.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 34 | 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /xml-minidom/read-xml2.py: -------------------------------------------------------------------------------- 1 | from xml.dom.minidom import parse, parseString 2 | 3 | dom = parse('ingest-pipeline.archimate') # parse an XML file by name 4 | components = [each_element for each_element in dom.getElementsByTagName("element") if each_element.getAttribute("xsi:type")=="archimate:ApplicationComponent" and each_element.hasChildNodes()] 5 | # filter element by classtype, java "instance of" with shortName 6 | component_description = list(map( lambda x:(x.getAttribute("name"), " ".join([each_child.firstChild.nodeValue for each_child in x.childNodes if each_child.__class__.__name__=="Element" and each_child.tagName=="documentation"])),components)) 7 | print(component_description) -------------------------------------------------------------------------------- /zip-codes-us/README.md: -------------------------------------------------------------------------------- 1 | [source of data](https://public.opendatasoft.com/explore/dataset/us-zip-code-latitude-and-longitude/export/) 2 | ```sh 3 | wget 
https://public.opendatasoft.com/explore/dataset/us-zip-code-latitude-and-longitude/download/?format=csv&timezone=Europe/Berlin&lang=en&use_labels_for_header=true&csv_separator=%3B 4 | ``` 5 | 6 | 7 | ```sql 8 | create table IF NOT EXISTS `zip_code` ( 9 | `zip_code_id` INT UNSIGNED PRIMARY KEY, 10 | `zip` VARCHAR(16) NOT NULL, 11 | `city` VARCHAR(48) NOT NULL, 12 | `state_code` VARCHAR(8) NOT NULL, 13 | `country_code` VARCHAR(8) NOT NULL, 14 | `latitude` DOUBLE NOT NULL, 15 | `longitude` DOUBLE NOT NULL, 16 | `timezone` INT, 17 | `daylight_saving` SMALLINT 18 | ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; 19 | 20 | ``` 21 | 22 | 23 | ```sh 24 | cat ~/Downloads/us-zip-code-latitude-and-longitude.csv | python3 csv-to-sql.py > out.sql 25 | ``` 26 | -------------------------------------------------------------------------------- /zip-codes-us/csv-to-sql.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | for each_line in sys.stdin: 4 | e = each_line.split(";") 5 | # escape sql escape 6 | city_name=e[1].replace("'", "''").replace('\\', '\\\\') 7 | print(f"insert into hlm_zip_code(zip, city, state_code, country_code, latitude, longitude, timezone, daylight_saving) values "\ 8 | f"('{e[0]}', '{city_name}', '{e[2]}', 'US', {e[3]}, {e[4]}, {e[5]}, {e[6]});") 9 | -------------------------------------------------------------------------------- /zip/1.txt: -------------------------------------------------------------------------------- 1 | first 2 | -------------------------------------------------------------------------------- /zip/2.txt: -------------------------------------------------------------------------------- 1 | second 2 | -------------------------------------------------------------------------------- /zip/3.txt: -------------------------------------------------------------------------------- 1 | third 2 | -------------------------------------------------------------------------------- /zip/out.zip: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/cherkavi/python-utilities/110075815ac384cb800e1bcea2dadc3e79f67c0d/zip/out.zip -------------------------------------------------------------------------------- /zip/zip-read.py: -------------------------------------------------------------------------------- 1 | import zipfile 2 | import os 3 | 4 | with zipfile.ZipFile('out.zip', 'r') as zipper: 5 | for file in zipper.namelist(): 6 | zipper.extract(file, file+".restored") 7 | # each_file = zipper.read(file) 8 | # os.write(file+".restored", each_file) 9 | # print(file, each_file) 10 | # os.remove(file) -------------------------------------------------------------------------------- /zip/zip-write.py: -------------------------------------------------------------------------------- 1 | import zipfile 2 | import os 3 | 4 | files = ["1.txt","2.txt","3.txt"] 5 | with zipfile.ZipFile('out.zip', 'w') as zipper: 6 | for file in files: 7 | zipper.write(file, compress_type=zipfile.ZIP_DEFLATED) 8 | # os.remove(file) --------------------------------------------------------------------------------