├── .coveragerc
├── .github
│   └── workflows
│       ├── autopr.yml
│       ├── cloud_pro_test.yml
│       ├── cloud_test.yml
│       ├── codecov.yml
│       ├── deploys.yml
│       ├── lint.yml
│       ├── refactor.yml
│       ├── release.yml
│       ├── release_generator.yml
│       └── sync.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── SECURITY.md
├── bump.py
├── refactor.py
├── requirements.txt
├── setup.py
├── sync.py
├── tests
│   ├── __init__.py
│   ├── test_cloud.py
│   └── test_cloud_pro.py
└── upsonic
    ├── __init__.py
    └── remote
        ├── __init__.py
        ├── helper.py
        ├── interface.py
        ├── localimport
        │   ├── LICENCE
        │   └── __init__.py
        ├── ollama_langchain.py
        └── on_prem.py

/.coveragerc:
--------------------------------------------------------------------------------
[report]
exclude_lines =
    pragma: no cover
    def __repr__
    if self.debug:
    if settings.DEBUG
    raise AssertionError
    raise NotImplementedError
    if 0:
    if __name__ == .__main__.:
    class .*\bProtocol\):
    @(abc\.)?abstractmethod
--------------------------------------------------------------------------------
/.github/workflows/autopr.yml:
--------------------------------------------------------------------------------
name: AutoPR

on:
  issues:
    types: [labeled]
  issue_comment:
    types: [created]

permissions:
  contents: write
  issues: write
  pull-requests: write

jobs:
  autopr3-5-turbo:
    if: ${{ (github.event_name == 'issues' &&
      contains( github.event.label.name, 'AutoPR-3.5-turbo')) ||
      (github.event_name == 'issue_comment' &&
      github.event.issue.pull_request &&
      contains( github.event.comment.body, 'Hey AutoPR-3.5-turbo')) }}
    runs-on: ubuntu-latest
    steps:
      - name: Install jq
        run: sudo apt-get install jq
      - name: Check if label was added by a collaborator
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          is_collaborator=$(curl -s -H "Authorization: token $GITHUB_TOKEN" -H "Accept: application/vnd.github+json" \
            "https://api.github.com/repos/${{ github.repository }}/collaborators/${{ github.event.sender.login }}" | jq -r '.message')

          if [ "$is_collaborator" == "Not Found" ]; then
            echo "Label not added by a collaborator. Skipping action."
            exit 78
          fi
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 1
      - name: AutoPR
        uses: docker://ghcr.io/irgolic/autopr:latest
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          model: gpt-3.5-turbo
          base_branch: master

  autopr4:
    if: ${{ (github.event_name == 'issues' &&
      contains( github.event.label.name, 'AutoPR-4')) ||
      (github.event_name == 'issue_comment' &&
      github.event.issue.pull_request &&
      contains( github.event.comment.body, 'Hey AutoPR-4')) }}
    runs-on: ubuntu-latest
    steps:
      - name: Install jq
        run: sudo apt-get install jq
      - name: Check if label was added by a collaborator
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          is_collaborator=$(curl -s -H "Authorization: token $GITHUB_TOKEN" -H "Accept: application/vnd.github+json" \
            "https://api.github.com/repos/${{ github.repository }}/collaborators/${{ github.event.sender.login }}" | jq -r '.message')

          if [ "$is_collaborator" == "Not Found" ]; then
            echo "Label not added by a collaborator. Skipping action."
            exit 78
          fi
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 1
      - name: AutoPR
        uses: docker://ghcr.io/irgolic/autopr:latest
        env:
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          model: gpt-4
          base_branch: master
--------------------------------------------------------------------------------
/.github/workflows/cloud_pro_test.yml:
--------------------------------------------------------------------------------
name: Cloud Pro Test - Every 15 Minutes

on:
  schedule:
    - cron: '*/15 * * * *'
  workflow_dispatch:

  workflow_run:
    workflows: ["Production", "Production Custom"]
    types:
      - completed

jobs:

  requested:
    runs-on: ubuntu-latest
    steps:
      - name: Discord Webhook Action
        if: ${{ github.event_name == 'workflow_run' }}
        uses: tsickert/discord-webhook@v5.3.0
        with:
          webhook-url: ${{ secrets.CLOUD_PRO_TEST_WEBHOOK_URL }}
          content: "Requested <@&1163402741394178162>"

  ubuntu:
    needs: requested
    runs-on: ${{ matrix.os }}
    environment: UNIT Tests
    strategy:
      matrix:
        os: [ubuntu-latest]
    env:
      OS: ${{ matrix.os }}
      PYTHON: '3.8'
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install upsonic --no-cache --upgrade
      - name: Test with pytest
        env:
          CLOUD_PRO_ACCESS_KEY: ${{ secrets.CLOUD_PRO_ACCESS_KEY }}
          CLOUD_TEST_DATABASE_NAME: ${{ secrets.CLOUD_TEST_DATABASE_NAME }}
        run: |
          cd tests
          pytest test_cloud_pro.py -v

  macos:
    needs: ubuntu
    runs-on: ${{ matrix.os }}
    environment: UNIT Tests
    strategy:
      matrix:
        os: [macos-latest]
    env:
      OS: ${{ matrix.os }}
      PYTHON: '3.8'
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install upsonic --no-cache --upgrade
      - name: Test with pytest
        env:
          CLOUD_PRO_ACCESS_KEY: ${{ secrets.CLOUD_PRO_ACCESS_KEY }}
          CLOUD_TEST_DATABASE_NAME: ${{ secrets.CLOUD_TEST_DATABASE_NAME }}
        run: |
          cd tests
          pytest test_cloud_pro.py -v

  windows:
    needs: macos
    runs-on: ${{ matrix.os }}
    environment: UNIT Tests
    strategy:
      matrix:
        os: [windows-latest]
    env:
      OS: ${{ matrix.os }}
      PYTHON: '3.8'
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install upsonic --no-cache --upgrade
      - name: Test with pytest
        env:
          CLOUD_PRO_ACCESS_KEY: ${{ secrets.CLOUD_PRO_ACCESS_KEY }}
          CLOUD_TEST_DATABASE_NAME: ${{ secrets.CLOUD_TEST_DATABASE_NAME }}
        run: |
          cd tests
          pytest test_cloud_pro.py -v
  success:
    needs: [ubuntu, macos, windows]
    runs-on: ubuntu-latest
    if: ${{ (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') && (needs.ubuntu.result == 'success' || needs.macos.result == 'success' || needs.windows.result == 'success') }}
    steps:
      - name: Discord Webhook Action
        uses: tsickert/discord-webhook@v5.3.0
        with:
          webhook-url: ${{ secrets.CLOUD_PRO_TEST_WEBHOOK_URL }}
          content: "success"

  success_important:
    runs-on: ubuntu-latest
    needs: [ubuntu, macos, windows]
    if: ${{ github.event_name == 'workflow_run' && (needs.ubuntu.result == 'success' || needs.macos.result == 'success' || needs.windows.result == 'success') }}
    steps:
      - name: Discord Webhook Action
        uses: tsickert/discord-webhook@v5.3.0
        with:
          webhook-url: ${{ secrets.CLOUD_PRO_TEST_WEBHOOK_URL }}
          content: "success <@&1163402741394178162>"

  failure:
    needs: [ubuntu, macos, windows]
    runs-on: ubuntu-latest
    if: ${{ always() && (needs.ubuntu.result == 'failure' || needs.macos.result == 'failure' || needs.windows.result == 'failure') }}
    steps:
      - name: Discord Webhook Action
        uses: tsickert/discord-webhook@v5.3.0
        with:
          webhook-url: ${{ secrets.CLOUD_PRO_TEST_WEBHOOK_URL }}
          content: "failure <@&1163402741394178162>"
--------------------------------------------------------------------------------
/.github/workflows/cloud_test.yml:
--------------------------------------------------------------------------------
name: Cloud Test - Every 15 Minutes

on:
  schedule:
    - cron: '*/15 * * * *'
  workflow_dispatch:

  workflow_run:
    workflows: ["Production", "Production Custom"]
    types:
      - completed

jobs:

  requested:
    runs-on: ubuntu-latest
    steps:
      - name: Discord Webhook Action
        if: ${{ github.event_name == 'workflow_run' }}
        uses: tsickert/discord-webhook@v5.3.0
        with:
          webhook-url: ${{ secrets.CLOUD_TEST_WEBHOOK_URL }}
          content: "Requested <@&1163402741394178162>"

  ubuntu:
    needs: requested
    runs-on: ${{ matrix.os }}
    environment: UNIT Tests
    strategy:
      matrix:
        os: [ubuntu-latest]
    env:
      OS: ${{ matrix.os }}
      PYTHON: '3.8'
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install upsonic --no-cache --upgrade
      - name: Test with pytest
        env:
          CLOUD_TEST_DATABASE_NAME: ${{ secrets.CLOUD_TEST_DATABASE_NAME }}
        run: |
          cd tests
          pytest test_cloud.py -v

  macos:
    needs: ubuntu
    runs-on: ${{ matrix.os }}
    environment: UNIT Tests
    strategy:
      matrix:
        os: [macos-latest]
    env:
      OS: ${{ matrix.os }}
      PYTHON: '3.8'
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install upsonic --no-cache --upgrade
      - name: Test with pytest
        env:
          CLOUD_TEST_DATABASE_NAME: ${{ secrets.CLOUD_TEST_DATABASE_NAME }}
        run: |
          cd tests
          pytest test_cloud.py -v
  windows:
    needs: macos
    runs-on: ${{ matrix.os }}
    environment: UNIT Tests
    strategy:
      matrix:
        os: [windows-latest]
    env:
      OS: ${{ matrix.os }}
      PYTHON: '3.8'
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install upsonic --no-cache --upgrade
      - name: Test with pytest
        env:
          CLOUD_TEST_DATABASE_NAME: ${{ secrets.CLOUD_TEST_DATABASE_NAME }}
        run: |
          cd tests
          pytest test_cloud.py -v

  success:
    needs: [ubuntu, macos, windows]
    runs-on: ubuntu-latest
    if: ${{ (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') && (needs.ubuntu.result == 'success' || needs.macos.result == 'success' || needs.windows.result == 'success') }}
    steps:
      - name: Discord Webhook Action
        uses: tsickert/discord-webhook@v5.3.0
        with:
          webhook-url: ${{ secrets.CLOUD_TEST_WEBHOOK_URL }}
          content: "success"

  success_important:
    runs-on: ubuntu-latest
    needs: [ubuntu, macos, windows]
    if: ${{ github.event_name == 'workflow_run' && (needs.ubuntu.result == 'success' || needs.macos.result == 'success' || needs.windows.result == 'success') }}
    steps:
      - name: Discord Webhook Action
        uses: tsickert/discord-webhook@v5.3.0
        with:
          webhook-url: ${{ secrets.CLOUD_TEST_WEBHOOK_URL }}
          content: "success <@&1163402741394178162>"

  failure:
    needs: [ubuntu, macos, windows]
    runs-on: ubuntu-latest
    if: ${{ always() && (needs.ubuntu.result == 'failure' || needs.macos.result == 'failure' || needs.windows.result == 'failure') }}
    steps:
      - name: Discord Webhook Action
        uses: tsickert/discord-webhook@v5.3.0
        with:
          webhook-url: ${{ secrets.CLOUD_TEST_WEBHOOK_URL }}
          content: "failure <@&1163402741394178162>"
--------------------------------------------------------------------------------
/.github/workflows/codecov.yml:
--------------------------------------------------------------------------------
name: Codecov

on:
  push:
    paths:
      - '.github/workflows/codecov.yml'
      - 'upsonic/**'
      - 'setups/**'
      - 'tests/**'
      - '.coveragerc'
  pull_request:
    paths:
      - '.github/workflows/codecov.yml'
      - 'upsonic/**'
      - 'setups/**'
      - 'tests/**'
      - '.coveragerc'
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event }}
  cancel-in-progress: true
jobs:
  run:
    runs-on: ${{ matrix.os }}
    environment: Codecov
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    env:
      OS: ${{ matrix.os }}
      PYTHON: '3.8'
    steps:
      - uses: actions/checkout@master
      - name: Setup Python
        uses: actions/setup-python@master
        with:
          python-version: 3.8
      - uses: actions/cache@v2
        with:
          path: ${{ env.pythonLocation }}
          key: ${{ runner.os }}-${{ env.pythonLocation }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('setups/api/requirements.txt') }}-${{ hashFiles('setups/web/requirements.txt') }}-${{ hashFiles('setups/gui/requirements.txt') }}-pytest-coverage
      - name: Install dependencies
        run: |
          pip3 install -r requirements.txt

          pip install pytest
          pip install coverage
      - name: Generate coverage report
        run: |
          coverage run -m pytest tests/ && coverage xml
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v2
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          directory: ./
          env_vars: OS,PYTHON
          fail_ci_if_error: true
          files: ./coverage.xml
          flags: unittests
          name: codecov-umbrella
          path_to_write_report: ./codecov_report.txt
          verbose: true
--------------------------------------------------------------------------------
/.github/workflows/deploys.yml:
--------------------------------------------------------------------------------
name: Deploys

on:
  workflow_dispatch:
  workflow_run:
    workflows: ["Release Generator"]
    types:
      - completed

permissions:
  packages: write

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event }}
  cancel-in-progress: true
jobs:

  tagext:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
    env:
      OS: ${{ matrix.os }}
      PYTHON: '3.8'
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Getting Tag
        id: tag_extractor
        run: echo "latest_tag=$(git describe --tags --abbrev=0)" >> "$GITHUB_OUTPUT"

      - name: Getting Tag 2
        id: tag_extractor_2
        run: |
          TAG=${{ steps.tag_extractor.outputs.latest_tag }}
          echo "latest_tag_2=${TAG:1}" >> "$GITHUB_OUTPUT"

      - name: Discord Webhook Action
        uses: tsickert/discord-webhook@v5.3.0
        with:
          webhook-url: ${{ secrets.DEPLOY_WEBHOOK_URL }}
          content: "Requested <@&1163402741394178162> ${{ steps.tag_extractor.outputs.latest_tag }}"

  pypi:
    needs: tagext
    runs-on: ubuntu-latest
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    environment: Deploys
    strategy:
      matrix:
        python-version: [3.8]

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}

      - name: Build and Publish Python Packages
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
        run: |
          python -m pip install --upgrade pip
          pip install setuptools wheel twine
          python setup.py sdist
          twine upload dist/*
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
name: Lint

on:
  push:
    paths:
      - '.github/workflows/lint.yml'
      - 'upsonic/**'
      - 'setups/**'
      - 'tests/**'
  pull_request:
    paths:
      - '.github/workflows/lint.yml'
      - 'upsonic/**'
      - 'setups/**'
      - 'tests/**'

permissions:
  contents: write
  pull-requests: write

jobs:
  linter_name:
    name: runner / black
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Check files using the black formatter
        uses: rickstaa/action-black@v1
        id: action_black
        with:
          black_args: "upsonic/* --force-exclude upsonic/__init__.py --include upsonic/*"
      - name: Create Pull Request
        if: steps.action_black.outputs.is_formatted == 'true'
        uses: peter-evans/create-pull-request@v3
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          title: "Format Python code with psf/black push"
          commit-message: ":art: Format Python code with psf/black"
          body: |
            There 
appear to be some python formatting errors in ${{ github.sha }}. This pull request 42 | uses the [psf/black](https://github.com/psf/black) formatter to fix these issues. 43 | base: ${{ github.head_ref }} # Creates pull request onto pull request or commit branch 44 | branch: actions/black -------------------------------------------------------------------------------- /.github/workflows/refactor.yml: -------------------------------------------------------------------------------- 1 | name: Manual Refactor 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | run-refactor: 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | - name: Checkout Repository 12 | uses: actions/checkout@v2 13 | 14 | - name: Set up Python 15 | uses: actions/setup-python@v2 16 | with: 17 | python-version: 3.8 18 | 19 | - name: setup git config 20 | run: | 21 | # setup the username and email. I tend to use 'GitHub Actions Bot' with no email by default 22 | git config user.name "Upsonic Refactor Bot" 23 | git config user.email "" 24 | 25 | - name: Run Refactor Script 26 | run: | 27 | python refactor.py -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | release_type: 7 | required: true 8 | type: choice 9 | options: 10 | - patch 11 | - minor 12 | - major 13 | 14 | permissions: 15 | contents: write 16 | jobs: 17 | run: 18 | runs-on: ubuntu-latest 19 | if: contains('["onuratakan"]', github.actor) 20 | 21 | steps: 22 | - name: Checkout Repository 23 | uses: actions/checkout@v2 24 | 25 | - name: Set Up Python 26 | uses: actions/setup-python@v2 27 | with: 28 | python-version: 3.8 29 | 30 | 31 | - name: setup git config 32 | run: | 33 | # setup the username and email. 
I tend to use 'GitHub Actions Bot' with no email by default
          git config user.name "GitHub Actions Bot"
          git config user.email "<>"

      - name: Run Version Bump Script
        run: python bump.py ${{ github.event.inputs.release_type }}
--------------------------------------------------------------------------------
/.github/workflows/release_generator.yml:
--------------------------------------------------------------------------------
name: Release Generator

on:
  workflow_run:
    workflows: ["Release", "Sync"]
    types:
      - completed

permissions:
  contents: write

jobs:
  build:
    runs-on: ubuntu-latest
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    environment: Release
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Getting Tag
        id: tag_extractor
        run: echo "latest_tag=$(git describe --tags --abbrev=0)" >> "$GITHUB_OUTPUT"

      - uses: ncipollo/release-action@v1
        with:
          name: Upsonic ${{ steps.tag_extractor.outputs.latest_tag }}
          generateReleaseNotes: true
          tag: ${{ steps.tag_extractor.outputs.latest_tag }}

  success:
    needs: build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Getting Tag
        id: tag_extractor
        run: echo "latest_tag=$(git describe --tags --abbrev=0)" >> "$GITHUB_OUTPUT"

      - name: Discord Webhook Action
        if: ${{ (needs.build.result == 'success') }}
        uses: tsickert/discord-webhook@v5.3.0
        with:
          webhook-url: ${{ secrets.RELEASE_WEBHOOK_URL }}
          content: "success <@&1163402741394178162> ${{ steps.tag_extractor.outputs.latest_tag }}"

      - name: Discord Webhook Action 2
        if: ${{ (needs.build.result == 'failure') }}
        uses: tsickert/discord-webhook@v5.3.0
        with:
          webhook-url: ${{ secrets.RELEASE_WEBHOOK_URL }}
          content: "failure <@&1163402741394178162> ${{ steps.tag_extractor.outputs.latest_tag }}"
--------------------------------------------------------------------------------
/.github/workflows/sync.yml:
--------------------------------------------------------------------------------
name: Sync

on:
  workflow_dispatch:
    inputs:
      release:
        required: true

permissions:
  contents: write
jobs:
  run:
    runs-on: ubuntu-latest
    if: contains('["onuratakan"]', github.actor)

    steps:
      - name: Checkout Repository
        uses: actions/checkout@v2

      - name: Set Up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8

      - name: setup git config
        run: |
          # setup the username and email. 
I tend to use 'GitHub Actions Bot' with no email by default 30 | git config user.name "GitHub Actions Bot" 31 | git config user.email "<>" 32 | 33 | - name: Run Version Sync Script 34 | run: python sync.py ${{ github.event.inputs.release }} 35 | 36 | 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | Upsonic-*/ 2 | upsonic_cache/ 3 | .vscode 4 | 5 | KOT-* 6 | 7 | test.py 8 | test.ipynb 9 | 10 | test_backup 11 | *.Upsonic 12 | *.zip 13 | test_file.txt 14 | *.db 15 | *.pickle 16 | 17 | # Byte-compiled / optimized / DLL files 18 | __pycache__/ 19 | *.py[cod] 20 | *$py.class 21 | 22 | # C extensions 23 | *.so 24 | 25 | # Distribution / packaging 26 | .Python 27 | build/ 28 | develop-eggs/ 29 | dist/ 30 | downloads/ 31 | eggs/ 32 | .eggs/ 33 | lib/ 34 | lib64/ 35 | parts/ 36 | sdist/ 37 | var/ 38 | wheels/ 39 | pip-wheel-metadata/ 40 | share/python-wheels/ 41 | *.egg-info/ 42 | .installed.cfg 43 | *.egg 44 | MANIFEST 45 | 46 | # PyInstaller 47 | # Usually these files are written by a python script from a template 48 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 49 | *.manifest 50 | *.spec 51 | 52 | # Installer logs 53 | pip-log.txt 54 | pip-delete-this-directory.txt 55 | 56 | # Unit test / coverage reports 57 | htmlcov/ 58 | .tox/ 59 | .nox/ 60 | .coverage 61 | .coverage.* 62 | .cache 63 | nosetests.xml 64 | coverage.xml 65 | *.cover 66 | *.py,cover 67 | .hypothesis/ 68 | .pytest_cache/ 69 | 70 | # Translations 71 | *.mo 72 | *.pot 73 | 74 | # Django stuff: 75 | *.log 76 | local_settings.py 77 | db.sqlite3 78 | db.sqlite3-journal 79 | 80 | # Flask stuff: 81 | instance/ 82 | .webassets-cache 83 | 84 | # Scrapy stuff: 85 | .scrapy 86 | 87 | # Sphinx documentation 88 | docs/_build/ 89 | 90 | # PyBuilder 91 | target/ 92 | 93 | # Jupyter Notebook 94 | .ipynb_checkpoints 95 | 96 | # IPython 97 | profile_default/ 98 | ipython_config.py 99 | 100 | # pyenv 101 | .python-version 102 | 103 | # pipenv 104 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 105 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 106 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 107 | # install all needed dependencies. 108 | #Pipfile.lock 109 | 110 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 111 | __pypackages__/ 112 | 113 | # Celery stuff 114 | celerybeat-schedule 115 | celerybeat.pid 116 | 117 | # SageMath parsed files 118 | *.sage.py 119 | 120 | # Environments 121 | .env 122 | .venv 123 | env/ 124 | venv/ 125 | ENV/ 126 | env.bak/ 127 | venv.bak/ 128 | 129 | # Spyder project settings 130 | .spyderproject 131 | .spyproject 132 | 133 | # Rope project settings 134 | .ropeproject 135 | 136 | # mkdocs documentation 137 | /site 138 | 139 | # mypy 140 | .mypy_cache/ 141 | .dmypy.json 142 | dmypy.json 143 | 144 | # Pyre type checker 145 | .pyre/ 146 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 
58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | info@upsonic.co. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Upsonic Teknoloji A.Ş. 
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
recursive-include . requirements.txt
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Upsonic | Self-Driven Autonomous Python Libraries

Upsonic is designed to help data scientists and ML engineers efficiently manage and automate maintenance-free utility library creation. It provides a simple, easy-to-use Python interface for interacting with the Upsonic platform.

[Website](https://upsonic.co/) | [Discord](https://discord.gg/) | [Twitter](https://twitter.com/upsonicco)



## Features

- Easy serialization of functions and classes, making them readily available for reuse across different projects.
- Automatic documentation generation for effortless maintenance and readability.
- Support for both direct and modular function importation from the library.
- Streamlined version control and collaboration features, allowing teams to work together seamlessly.

### Easiest Library View

Upsonic provides a dashboard for your team members. Everyone can access the dashboard according to their [user status](https://docs.upsonic.co/on-prem/using/users). Once they have access, they can easily view the top libraries and the automatically generated connection code.

![image](https://github.com/Upsonic/Upsonic/assets/41792982/aa67f1f9-e510-4c5f-98fd-6876016157e7)


### Automatic Documentation

The Upsonic On-Prem dashboard automatically generates documentation for each of your functions, classes, objects, and variables. For this you can use the OpenAI GPT integration or a self-hosted Google Gemma model in your installation. They generate your documentation automatically, and you can easily search your content.

- Documentation
- Time Complexity
- Mistakes
- Required Test Types
- Security Analyses
- Tags

![image](https://github.com/Upsonic/Upsonic/assets/41792982/031678af-f0a4-43e9-976b-81707060e85e)


## Installation

You need to install the Upsonic container. 

[Installing and Running On-Prem Container](https://docs.upsonic.co/on-prem/getting_started/install_on_prem)

Once the container is up and running, you can install the Upsonic Python Client Library on your local system using the pip package manager:

```console
# pip install upsonic
```



## Usage

Here's a quickstart guide to get you up and running with your container:

```python
from upsonic import UpsonicOnPrem
upsonic = UpsonicOnPrem('https://your-server-address:5000', 'ACK_****************')



def sum(a, b):
    return a + b

upsonic.dump("math.basics.sum", sum)



math = upsonic.load_module("math")

math.basics.sum(5, 2)
```
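
Classes can be shared the same way. The sketch below reuses the `UpsonicOnPrem` client from the quickstart above; the `Calculator` class and the `math.tools` path are illustrative names for this example, not part of the library itself:

```python
from upsonic import UpsonicOnPrem

upsonic = UpsonicOnPrem('https://your-server-address:5000', 'ACK_****************')


class Calculator:
    # Illustrative class; any serializable class can be stored the same way.
    def multiply(self, a, b):
        return a * b


# Store the class under a dotted path, exactly like a function.
upsonic.dump("math.tools.Calculator", Calculator)

# Later, on any machine that can reach the same container:
math = upsonic.load_module("math")

calc = math.tools.Calculator()
calc.multiply(6, 7)  # returns 42
```

Anything you `dump` under a dotted path becomes an attribute of the module returned by `load_module`.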


## Documentation

You can find detailed documentation, including advanced usage and an API reference, in the official [Upsonic Documentation](https://docs.upsonic.co/home).



## Contributing

We welcome contributions to the Upsonic Python Client Library!



## Support & Questions

For any questions or if you encounter an issue, please reach out to our support team at info@upsonic.co or open an issue on the project's GitHub page.




## Supporters

| Name | Role | Company |
| ---- | ---- | ------- |
| Buğra Kocatürk | AWS Solution Architect | AWS Netherlands |
| Lemi Orhan Engin | CTO | Craftgate |
| Mehmet Emin Öztürk | Data Team Lead & Kaggle Master | Trendyol Group |
| Fırat Gönen | Chief Data Officer & Kaggle Grandmaster 3X | Figopara |
| Arda Batuhan Demir | Senior DevOps Engineer | Lyrebird Studio |
| Hasan Ramazan Yurt | ML Engineer & Technical Founder | Nicky ai |
| Sezer Yavuzer Bozkır | Sr. Python Developer | Petleo |
| Ozan Günceler | CTO | BSM Consultancy Limited |
| Mustafa Namoğlu | Co-Founder | İkas |
| Bünyamin Ergen | AI Engineer & Python Developer & Top AI Voice | eTaşın |
| Serdar İlarslan | Sr. Python Developer | Easysize |
| Burak Emre Kabakçı | Sr. Staff Software Engineer & Maker | LiveRamp |
| Ozge Oz | Partner | QNBEYOND Ventures |
| Emre Keskin | Staff Software Engineer | Oplog |
| Emrah Şamdan | Senior Product Manager | |
| Halil İbrahim Yıldırım | Head of Data Science | |


## Advisors

| Name | Role | Company |
| ---- | ---- | ------- |
| Talha Kılıç | Tech Lead, Big Data | |
| Emre Doğaner | Fractional CMO for B2B SaaS | Funnelepic |
| Enes Akar | CEO | Upstash |


## Customers

- wearsexar
144 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | Use `info@upsonic.co` 6 | -------------------------------------------------------------------------------- /bump.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import re 4 | 5 | 6 | def read_version(): 7 | with open("upsonic/__init__.py", "r") as file: 8 | for line in file: 9 | match = re.search(r"__version__ = '(.*)'", line) # fmt: skip 10 | if match: 11 | return match.group(1) 12 | 13 | 14 | def increment_version(part, version): 15 | major, minor, patch = map(int, version.split(".")) 16 | if part == "major": 17 | major += 1 18 | minor = 0 19 | patch = 0 20 | elif part == "minor": 21 | minor += 1 22 | patch = 0 23 | elif part == "patch": 24 | patch += 1 25 | return f"{major}.{minor}.{patch}" 26 | 27 | 28 | def write_version(version): 29 | with open("upsonic/__init__.py", "r+") as file: 30 | content = file.read() 31 | content = re.sub(r"__version__ = '.*'", f"__version__ = '{version}'", content) # fmt: skip 32 | file.seek(0) 33 | file.write(content) 34 | 35 | 36 | def update_version(version): 37 | files = ["setup.py"] 38 | for file in files: 39 | with open(file, "r+") as f: 40 | content = f.read() 41 | content = re.sub(r' version=".*"', f' version="{version}"', content) # fmt: skip 42 | f.seek(0) 43 | f.write(content) 44 | 45 | 46 | def create_tag(version): 47 | os.system(f"git tag v{version}") 48 | 49 | 50 | def create_commit(version): 51 | os.system("git add .") 52 | os.system(f"git commit -m 'Changed version number with v{version}'") 53 | 54 | 55 | def push(): 56 | os.system("git push") 57 | os.system("git push --tag") 58 | 59 | 60 | def main(): 61 | part = sys.argv[1] 62 | version = read_version() 63 | new_version = increment_version(part, version) 64 | write_version(new_version) 65 | update_version(new_version) 66 | create_commit(new_version) 67 | create_tag(new_version) 68 | push() 69 | 70 | 71 | if __name__ == "__main__": 72 | main() 73 | -------------------------------------------------------------------------------- /refactor.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def install_refactor_tool(): 5 | os.system("pip install ruff==0.6.0") 6 | 7 | 8 | def refactor(): 9 | os.system("ruff check --fix") 10 | os.system("ruff format") 11 | 12 | 13 | def create_commit(): 14 | os.system("git add .") 15 | os.system("git commit -m 'refactor: Scheduled refactoring'") 16 | 17 | 18 | def push(): 19 | os.system("git push") 20 | 21 | 22 | if __name__ == "__main__": 23 | install_refactor_tool() 24 | refactor() 25 | create_commit() 26 | push() 27 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | fire==0.5.0 2 | mgzip==0.2.1 3 | cryptography==40.0.2 4 | dill 5 | requests 6 | python-dotenv 7 | cloudpickle 8 | memory-profiler==0.61.0 9 | colorama 10 | termcolor -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | from setuptools import setup 5 | 6 | with open("requirements.txt") as fp: 7 | install_requires = 
fp.read()
setup(
    name="upsonic",
    version="0.34.3",
    description="""Magic Cloud Layer""",
    long_description="".join(open("README.md", encoding="utf-8").readlines()),
    long_description_content_type="text/markdown",
    url="https://github.com/Upsonic/Upsonic",
    author="Upsonic",
    author_email="onur.atakan.ulusoy@upsonic.co",
    license="MIT",
    packages=["upsonic", "upsonic.remote", "upsonic.remote.localimport"],
    install_requires=install_requires,
    entry_points={
        "console_scripts": ["upsonic=upsonic.remote.interface:Upsonic_CLI"],
    },
    python_requires=">=3.6",
    zip_safe=False,
)
--------------------------------------------------------------------------------
/sync.py:
--------------------------------------------------------------------------------
import os
import re
import sys

def write_version(version):
    with open("upsonic/__init__.py", "r+") as file:
        content = file.read()
        content = re.sub(r"__version__ = '.*'", f"__version__ = '{version}'", content)  # fmt: skip
        file.seek(0)
        file.write(content)

def update_version_in_setup(version):
    with open("setup.py", "r+") as file:
        content = file.read()
        content = re.sub(r' version=".*"', f' version="{version}"', content)  # fmt: skip
        file.seek(0)
        file.write(content)

def create_tag(version):
    os.system(f"git tag v{version}")

def create_commit(version):
    os.system("git add .")
    os.system(f"git commit -m 'Changed version number to v{version}'")

def push():
    os.system("git push")
    os.system("git push --tags")

def main():
    if len(sys.argv) != 2:
        print("Usage: python sync.py <version>")
        sys.exit(1)

    new_version = sys.argv[1]
    write_version(new_version)
    update_version_in_setup(new_version)
    create_commit(new_version)
    create_tag(new_version)
    push()

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Upsonic/Client/5e99d636374e5ae137a4a3a1e85260eb6894c2da/tests/__init__.py
--------------------------------------------------------------------------------
/tests/test_cloud.py:
--------------------------------------------------------------------------------
import time
import unittest
import os
import sys


from upsonic import Upsonic_Cloud


class test_object:
    def exp(self):
        return {"test": "test"}


def my_function():
    return 123


class TestCloud(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.remote = Upsonic_Cloud(
            os.environ.get("CLOUD_TEST_DATABASE_NAME", "cloud-workflow")
        )

    def test_remote_api_set_get_deletestring(self):
        the = time.time()
        value = f"Value{the}"

        self.remote.set("key", value)
        time.sleep(1)

        self.assertEqual(
            self.remote.get(
                "key",
            ),
            value,
        )

        self.remote.delete("key")
        time.sleep(1)

        self.assertNotEqual(self.remote.get("key"), value)

    def test_remote_api_active(self):
        self.remote.active(my_function)
        time.sleep(1)
        self.assertEqual(self.remote.get("my_function")(), 123)
        self.remote.delete("my_function")


backup = sys.argv
sys.argv = [sys.argv[0]] 
54 | unittest.main(exit=False) 55 | sys.argv = backup 56 | -------------------------------------------------------------------------------- /tests/test_cloud_pro.py: -------------------------------------------------------------------------------- 1 | import time 2 | import unittest 3 | import os 4 | import sys 5 | 6 | 7 | from upsonic import Upsonic_Cloud_Pro 8 | 9 | 10 | class ptest_object: 11 | def exp(self): 12 | return {"test": "test"} 13 | 14 | 15 | def pmy_function(): 16 | return 123 17 | 18 | 19 | class TestCloudPro(unittest.TestCase): 20 | @classmethod 21 | def setUpClass(cls): 22 | cls.remote = Upsonic_Cloud_Pro( 23 | os.environ.get("CLOUD_TEST_DATABASE_NAME", "cloud-workflow"), 24 | os.environ.get("CLOUD_PRO_ACCESS_KEY"), 25 | ) 26 | 27 | def test_remote_api_set_get_deletestring(self): 28 | the = time.time() 29 | value = f"Value{the}" 30 | 31 | self.remote.set("key", value) 32 | time.sleep(1) 33 | 34 | self.assertEqual( 35 | self.remote.get( 36 | "key", 37 | ), 38 | value, 39 | ) 40 | 41 | self.remote.delete("key") 42 | time.sleep(1) 43 | 44 | self.assertNotEqual(self.remote.get("key"), value) 45 | 46 | def test_remote_api_active(self): 47 | self.remote.active(pmy_function) 48 | time.sleep(1) 49 | self.assertEqual(self.remote.get("pmy_function")(), 123) 50 | self.remote.delete("pmy_function") 51 | 52 | 53 | backup = sys.argv 54 | sys.argv = [sys.argv[0]] 55 | unittest.main(exit=False) 56 | sys.argv = backup 57 | -------------------------------------------------------------------------------- /upsonic/__init__.py: -------------------------------------------------------------------------------- 1 | import traceback 2 | 3 | 4 | 5 | import warnings 6 | # Suppress the deprecation warning from the cryptography module. 7 | with warnings.catch_warnings(): 8 | warnings.simplefilter("ignore") 9 | import cryptography 10 | 11 | 12 | try: 13 | from .core import Upsonic 14 | from .core import start_location 15 | from .core import HASHES 16 | from .core import Upsonic_Serial 17 | 18 | except: 19 | pass 20 | 21 | from .remote import localimport 22 | from .remote import Upsonic_On_Prem, Tiger, Tiger_Admin, UpsonicOnPrem 23 | from .remote import no_exception 24 | from .remote import requires 25 | from .remote import encrypt 26 | from .remote import decrypt 27 | from .remote import upsonic_serializer 28 | from .remote import interface 29 | 30 | 31 | open_databases = {} 32 | 33 | __version__ = '0.34.3' # fmt: skip 34 | -------------------------------------------------------------------------------- /upsonic/remote/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .on_prem import Upsonic_On_Prem, Tiger, Tiger_Admin, UpsonicOnPrem 3 | 4 | from .helper import no_exception 5 | from .helper import requires 6 | from .interface import encrypt 7 | from .interface import decrypt 8 | from .interface import upsonic_serializer 9 | from .localimport import localimport 10 | from . 
import interface
--------------------------------------------------------------------------------
/upsonic/remote/helper.py:
--------------------------------------------------------------------------------
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from functools import wraps


def no_exception(func):
    """Decorator that swallows any exception raised by *func* and prints it instead."""

    @wraps(func)
    def runner(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
            return result
        except Exception as e:
            print(f"Exception occurred in function: {e}")

    return runner


@no_exception
def requires(name, custom_import=None):
    """Decorator factory that installs the pip package *name* on demand.

    Before the wrapped function runs, it tries to import *name* (or
    *custom_import*, when the import name differs from the pip name) and
    falls back to installing the package with pip if the import fails.
    """

    def decorator(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            try:
                import_name = name if custom_import is None else custom_import
                exec(f"import {import_name}")
            except:
                from pip._internal import main as pip

                pip(["install", name])
            retval = function(*args, **kwargs)
            return retval

        return wrapper

    return decorator


"""

def durable(func):
    @wraps(func)
    def runner(*args, **kwargs):
        import random
        result = None
        run_id = random.randint(10000, 99999)
        while result is None:
            try:
                result = func(*args, **kwargs)

            except Exception as e:
                import time
                print(f"Exception occurred in function and froze the statement, waiting for update: {e}")
                cloud.set(func.__name__+"_upsonic_durable"+str(run_id), str(e))
                time.sleep(5)
        return result
    return runner

"""
--------------------------------------------------------------------------------
/upsonic/remote/interface.py:
--------------------------------------------------------------------------------
#!/usr/bin/python3
# -*- coding: utf-8 -*-


from dotenv import load_dotenv

load_dotenv(dotenv_path=".env")
import os


import cloudpickle
import dill
import pickle
import importlib.util


def upsonic_serializer(func):
    # Collect the top-level import lines of the module that defines *func*,
    # so the function's source can be rebuilt and executed elsewhere.
    the_source = dill.source.findsource(func)
    the_full_string = ""
    for each in the_source[0]:
        the_full_string += each
    imports = [
        line + "\n"
        for line in the_full_string.split("\n")
        if line.startswith("import ") or line.startswith("from ")
    ]

    the_import_string = ""
    for each in imports:
        the_import_string += each

    the_function_string = dill.source.getsource(func)

    return the_import_string + "\n" + the_function_string


def encrypt(key, message, engine, byref, recurse, protocol, source, builtin):
    from cryptography.fernet import Fernet
    import base64
    import hashlib

    fernet_key = base64.urlsafe_b64encode(hashlib.sha256(key.encode()).digest())
    fernet = Fernet(fernet_key)

    dumped = None
    if engine == "cloudpickle":
        the_module = dill.detect.getmodule(message)
        if the_module is not None:
            cloudpickle.register_pickle_by_value(the_module)
        dumped = cloudpickle.dumps(message, protocol=protocol)
    elif engine == "dill":
        dumped = dill.dumps(message, protocol=protocol, byref=byref, recurse=recurse)
    elif engine == "upsonic_serializer":
        name_of_object = dill.source.getname(message)

        if name_of_object is None:
            try:
                name_of_object = message.__name__
            except:
                pass

        dumped = {
            "name": name_of_object,
            "upsonic_serializer": upsonic_serializer(message),
        }
        dumped = pickle.dumps(dumped, 
protocol=1) 67 | 68 | encrypted_message = fernet.encrypt(dumped) 69 | return encrypted_message 70 | 71 | 72 | def decrypt(key, message, engine, try_to_extract_importable=False): 73 | from cryptography.fernet import Fernet 74 | import base64 75 | import hashlib 76 | 77 | fernet = Fernet(base64.urlsafe_b64encode(hashlib.sha256(key.encode()).digest())) 78 | 79 | loaded = None 80 | if engine == "cloudpickle": 81 | loaded = cloudpickle.loads(fernet.decrypt(message)) 82 | elif engine == "dill": 83 | loaded = dill.loads(fernet.decrypt(message)) 84 | elif engine == "upsonic_serializer": 85 | loaded = pickle.loads(fernet.decrypt(message)) 86 | 87 | if try_to_extract_importable: 88 | return loaded["upsonic_serializer"] 89 | 90 | def extract(code_string, function_name): 91 | tmp_dir = os.path.dirname(os.path.abspath(__file__)) 92 | tmp_file = os.path.join(tmp_dir, function_name + "_upsonic" + ".py") 93 | with open(tmp_file, "w") as f: 94 | f.write(code_string) 95 | 96 | spec = importlib.util.spec_from_file_location( 97 | function_name + "_upsonic", tmp_file 98 | ) 99 | module = importlib.util.module_from_spec(spec) 100 | spec.loader.exec_module(module) 101 | 102 | os.remove(tmp_file) # Clean up the temporary file 103 | 104 | return getattr(module, function_name) 105 | 106 | loaded = extract(loaded["upsonic_serializer"], loaded["name"]) 107 | 108 | return loaded 109 | 110 | -------------------------------------------------------------------------------- /upsonic/remote/localimport/LICENCE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2022 Niklas Rosenstein 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /upsonic/remote/localimport/__init__.py: -------------------------------------------------------------------------------- 1 | __author__ = "Niklas Rosenstein " 2 | __version__ = "1.7.6" 3 | 4 | import copy 5 | import glob 6 | import os 7 | import pkgutil 8 | import sys 9 | import traceback 10 | import typing as t 11 | import zipfile 12 | 13 | if t.TYPE_CHECKING: 14 | from sys import _MetaPathFinder 15 | 16 | 17 | def is_local(filename: str, pathlist: t.List[str]) -> bool: 18 | """Returns True if *filename* is a subpath of any of the paths in *pathlist*.""" 19 | 20 | filename = os.path.abspath(filename) 21 | for path_name in pathlist: 22 | path_name = os.path.abspath(path_name) 23 | if is_subpath(filename, path_name): 24 | return True 25 | return False 26 | 27 | 28 | def is_subpath(path: str, parent: str) -> bool: 29 | """Returns True if *path* points to the same or a subpath of *parent*.""" 30 | 31 | try: 32 | relpath = os.path.relpath(path, parent) 33 | except ValueError: 34 | return False # happens on Windows if drive letters don't match 35 | return relpath == os.curdir or not relpath.startswith(os.pardir) 36 | 37 | 38 | def eval_pth( 39 | filename: str, 40 | sitedir: str, 41 | dest: t.Optional[t.List[str]] = None, 42 | imports: t.Optional[t.List[t.Tuple[str, int, str]]] = None, 43 | ) -> t.List[str]: 44 | """Evaluates a `.pth` file (including support for `import` statements), and appends the result to the list 45 | *dest*. If *dest* is #None, it will fall back to `sys.path`. 46 | 47 | If *imports* is specified, it must be a list. `import` statements will not executed but instead appended to 48 | that list in tuples of (*filename*, *line*, *stmt*). 49 | """ 50 | 51 | if dest is None: 52 | dest = sys.path 53 | 54 | if not os.path.isfile(filename): 55 | return [] 56 | 57 | with open(filename, "r") as fp: 58 | for index, line in enumerate(fp): 59 | if line.startswith("import"): 60 | if imports is None: 61 | exec_pth_import(filename, index + 1, line) 62 | else: 63 | imports.append((filename, index + 1, line)) 64 | else: 65 | index = line.find("#") 66 | if index > 0: 67 | line = line[:index] 68 | line = line.strip() 69 | if not os.path.isabs(line): 70 | line = os.path.join(os.path.dirname(filename), line) 71 | line = os.path.normpath(line) 72 | if line and line not in dest: 73 | dest.insert(0, line) 74 | 75 | return dest 76 | 77 | 78 | def exec_pth_import(filename: str, lineno: int, line: str) -> None: 79 | line = "\n" * (lineno - 1) + line.strip() 80 | try: 81 | exec(compile(line, filename, "exec")) 82 | except BaseException: 83 | traceback.print_exc() 84 | 85 | 86 | def extend_path(pth: t.List[str], name: str) -> t.List[str]: 87 | """Better implementation of #pkgutil.extend_path() which adds support for zipped Python eggs. The original 88 | #pkgutil.extend_path() gets mocked by this function inside the #localimport context. 
89 | """ 90 | 91 | def zip_isfile(z, name): 92 | name.rstrip("/") 93 | return name in z.namelist() 94 | 95 | pname = os.path.join(*name.split(".")) 96 | zname = "/".join(name.split(".")) 97 | init_py = "__init__" + os.extsep + "py" 98 | init_pyc = "__init__" + os.extsep + "pyc" 99 | init_pyo = "__init__" + os.extsep + "pyo" 100 | 101 | mod_path = list(pth) 102 | for path in sys.path: 103 | if zipfile.is_zipfile(path): 104 | try: 105 | egg = zipfile.ZipFile(path, "r") 106 | addpath = ( 107 | zip_isfile(egg, zname + "/__init__.py") 108 | or zip_isfile(egg, zname + "/__init__.pyc") 109 | or zip_isfile(egg, zname + "/__init__.pyo") 110 | ) 111 | fpath = os.path.join(path, path, zname) 112 | if addpath and fpath not in mod_path: 113 | mod_path.append(fpath) 114 | except (zipfile.BadZipfile, zipfile.LargeZipFile): 115 | pass # xxx: Show a warning at least? 116 | else: 117 | path = os.path.join(path, pname) 118 | if os.path.isdir(path) and path not in mod_path: 119 | addpath = ( 120 | os.path.isfile(os.path.join(path, init_py)) 121 | or os.path.isfile(os.path.join(path, init_pyc)) 122 | or os.path.isfile(os.path.join(path, init_pyo)) 123 | ) 124 | if addpath and path not in mod_path: 125 | mod_path.append(path) 126 | 127 | return [os.path.normpath(x) for x in mod_path] 128 | 129 | 130 | class localimport: 131 | def __init__( 132 | self, 133 | path: t.Union[t.List[str], str], 134 | parent_dir: t.Optional[str] = None, 135 | do_eggs: bool = True, 136 | do_pth: bool = True, 137 | do_autodisable: bool = True, 138 | ) -> None: 139 | if not parent_dir: 140 | frame = sys._getframe(1).f_globals 141 | if "__file__" in frame: 142 | parent_dir = os.path.dirname(os.path.abspath(frame["__file__"])) 143 | 144 | # Convert relative paths to absolute paths with parent_dir and 145 | # evaluate .egg files in the specified directories. 146 | self.path = [] 147 | if isinstance(path, str): 148 | path = [path] 149 | for path_name in path: 150 | if not os.path.isabs(path_name): 151 | if not parent_dir: 152 | raise ValueError("relative path but no parent_dir") 153 | path_name = os.path.join(parent_dir, path_name) 154 | path_name = os.path.normpath(path_name) 155 | self.path.append(path_name) 156 | if do_eggs: 157 | self.path.extend(glob.glob(os.path.join(path_name, "*.egg"))) 158 | 159 | self.meta_path: t.List[_MetaPathFinder] = [] 160 | self.modules: t.Dict[str, t.Any] = {} 161 | self.do_pth = do_pth 162 | self.in_context = False 163 | self.do_autodisable = do_autodisable 164 | self.pth_imports: t.List[t.Tuple[str, int, str]] = [] 165 | 166 | if self.do_pth: 167 | seen = set() 168 | for path_name in self.path: 169 | for fn in glob.glob(os.path.join(path_name, "*.pth")): 170 | if fn in seen: 171 | continue 172 | seen.add(fn) 173 | eval_pth(fn, path_name, dest=self.path, imports=self.pth_imports) 174 | 175 | def __enter__(self) -> "localimport": 176 | # pkg_resources comes with setuptools. 177 | try: 178 | import pkg_resources 179 | 180 | nsdict = copy.deepcopy(pkg_resources._namespace_packages) # type: ignore 181 | declare_namespace = pkg_resources.declare_namespace 182 | pkg_resources.declare_namespace = self._declare_namespace # type: ignore 183 | except ImportError: 184 | nsdict = None 185 | declare_namespace = None 186 | 187 | # Save the global importer state. 
188 | self.state = { 189 | "nsdict": nsdict, 190 | "declare_namespace": declare_namespace, 191 | "nspaths": {}, 192 | "path": sys.path[:], 193 | "meta_path": sys.meta_path[:], 194 | "disables": {}, 195 | "pkgutil.extend_path": pkgutil.extend_path, 196 | } 197 | 198 | # Update the systems meta path and apply function mocks. 199 | sys.path[:] = self.path 200 | sys.meta_path[:] = self.meta_path + sys.meta_path 201 | pkgutil.extend_path = extend_path # type: ignore 202 | 203 | # If this function is called not the first time, we need to 204 | # restore the modules that have been imported with it and 205 | # temporarily disable the ones that would be shadowed. 206 | for key, mod in list(self.modules.items()): 207 | try: 208 | self.state["disables"][key] = sys.modules.pop(key) 209 | except KeyError: 210 | pass 211 | sys.modules[key] = mod 212 | 213 | # Evaluate imports from the .pth files, if any. 214 | for fn, lineno, stmt in self.pth_imports: 215 | exec_pth_import(fn, lineno, stmt) 216 | 217 | # Add the original path to sys.path. 218 | sys.path += self.state["path"] 219 | 220 | # Update the __path__ of all namespace modules. 221 | for key, mod in list(sys.modules.items()): 222 | if mod is None: 223 | # Relative imports could have lead to None-entries in 224 | # sys.modules. Get rid of them so they can be re-evaluated. 225 | prefix = key.rpartition(".")[0] 226 | if hasattr(sys.modules.get(prefix), "__path__"): 227 | del sys.modules[key] 228 | elif hasattr(mod, "__path__"): 229 | self.state["nspaths"][key] = copy.copy(mod.__path__) 230 | mod.__path__ = pkgutil.extend_path(mod.__path__, mod.__name__) 231 | 232 | self.in_context = True 233 | if self.do_autodisable: 234 | self.autodisable() 235 | return self 236 | 237 | def __exit__(self, *__) -> None: 238 | if not self.in_context: 239 | raise RuntimeError("context not entered") 240 | 241 | # Figure the difference of the original sys.path and the 242 | # current path. The list of paths will be used to determine 243 | # what modules are local and what not. 244 | local_paths = [] 245 | for path in sys.path: 246 | if path not in self.state["path"]: 247 | local_paths.append(path) 248 | for path in self.path: 249 | if path not in local_paths: 250 | local_paths.append(path) 251 | 252 | # Move all meta path objects to self.meta_path that have not 253 | # been there before and have not been in the list before. 254 | for meta in sys.meta_path: 255 | if meta is not self and meta not in self.state["meta_path"]: 256 | if meta not in self.meta_path: 257 | self.meta_path.append(meta) 258 | 259 | # Move all modules that shadow modules of the original system 260 | # state or modules that are from any of the localimport context 261 | # paths away. 262 | modules = sys.modules.copy() 263 | for key, mod in modules.items(): 264 | force_pop = False 265 | filename = getattr(mod, "__file__", None) 266 | if not filename and key not in sys.builtin_module_names: 267 | parent = key.rsplit(".", 1)[0] 268 | if parent in modules: 269 | filename = getattr(modules[parent], "__file__", None) 270 | else: 271 | force_pop = True 272 | if force_pop or (filename and is_local(filename, local_paths)): 273 | self.modules[key] = sys.modules.pop(key) 274 | 275 | # Restore the disabled modules. 
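# Modules that were shadowed on __enter__ go back into sys.modules, and
# each one is re-attached as an attribute of its parent package so that
# dotted references resolve again.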
276 | sys.modules.update(self.state["disables"]) 277 | for key, mod in self.state["disables"].items(): 278 | try: 279 | parent_name = key.split(".")[-2] 280 | except IndexError: 281 | parent_name = None 282 | if parent_name and parent_name in sys.modules: 283 | parent_module = sys.modules[parent_name] 284 | setattr(parent_module, key.split(".")[-1], mod) 285 | 286 | # Restore the original __path__ value of namespace packages. 287 | for key, path_list in self.state["nspaths"].items(): 288 | try: 289 | sys.modules[key].__path__ = path_list 290 | except KeyError: 291 | pass 292 | 293 | # Restore the original state of the global importer. 294 | sys.path[:] = self.state["path"] 295 | sys.meta_path[:] = self.state["meta_path"] 296 | pkgutil.extend_path = self.state["pkgutil.extend_path"] 297 | try: 298 | import pkg_resources 299 | 300 | pkg_resources.declare_namespace = self.state["declare_namespace"] 301 | pkg_resources._namespace_packages.clear() # type: ignore 302 | pkg_resources._namespace_packages.update(self.state["nsdict"]) # type: ignore 303 | except ImportError: 304 | pass 305 | 306 | self.in_context = False 307 | del self.state 308 | 309 | def _declare_namespace(self, package_name: str) -> None: 310 | """ 311 | Mock for #pkg_resources.declare_namespace() which calls 312 | #pkgutil.extend_path() afterwards as the original implementation doesn't 313 | seem to properly find all available namespace paths. 314 | """ 315 | 316 | self.state["declare_namespace"](package_name) 317 | mod = sys.modules[package_name] 318 | mod.__path__ = pkgutil.extend_path(mod.__path__, package_name) # type: ignore 319 | 320 | def discover(self) -> t.Iterable[pkgutil.ModuleInfo]: 321 | return pkgutil.iter_modules(self.path) 322 | 323 | def disable(self, module: t.Union[t.List[str], str]) -> None: 324 | if not isinstance(module, str): 325 | for module_name in module: 326 | self.disable(module_name) 327 | return 328 | 329 | sub_prefix = module + "." 330 | modules = {} 331 | for key, mod in sys.modules.items(): 332 | if key == module or key.startswith(sub_prefix): 333 | try: 334 | parent_name = ".".join(key.split(".")[:-1]) 335 | except IndexError: 336 | parent_name = None 337 | 338 | # Delete the child module reference from the parent module. 
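# The module object itself is kept in *modules* so it can be stashed in
# self.state["disables"] below and restored on __exit__.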
339 | modules[key] = mod 340 | if parent_name and parent_name in sys.modules: 341 | parent = sys.modules[parent_name] 342 | try: 343 | delattr(parent, key.split(".")[-1]) 344 | except AttributeError: 345 | pass 346 | 347 | # Pop all the modules we found from sys.modules 348 | for key, mod in modules.items(): 349 | del sys.modules[key] 350 | self.state["disables"][key] = mod 351 | 352 | def autodisable(self) -> None: 353 | for loader, name, ispkg in self.discover(): 354 | self.disable(name) 355 | -------------------------------------------------------------------------------- /upsonic/remote/ollama_langchain.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, Union 3 | 4 | import aiohttp 5 | import requests 6 | from langchain_core.callbacks import ( 7 | AsyncCallbackManagerForLLMRun, 8 | CallbackManagerForLLMRun, 9 | ) 10 | from langchain_core.language_models import BaseLanguageModel 11 | from langchain_core.language_models.llms import BaseLLM 12 | from langchain_core.outputs import GenerationChunk, LLMResult 13 | from langchain_core.pydantic_v1 import Extra 14 | 15 | 16 | def _stream_response_to_generation_chunk( 17 | stream_response: str, 18 | ) -> GenerationChunk: 19 | """Convert a stream response to a generation chunk.""" 20 | parsed_response = json.loads(stream_response) 21 | generation_info = parsed_response if parsed_response.get("done") is True else None 22 | return GenerationChunk( 23 | text=parsed_response.get("response", ""), generation_info=generation_info 24 | ) 25 | 26 | 27 | class OllamaEndpointNotFoundError(Exception): 28 | """Raised when the Ollama endpoint is not found.""" 29 | 30 | 31 | class _OllamaCommon(BaseLanguageModel): 32 | base_url: str = "http://localhost:11434" 33 | """Base url the model is hosted under.""" 34 | 35 | model: str = "llama2" 36 | """Model name to use.""" 37 | 38 | mirostat: Optional[int] = None 39 | """Enable Mirostat sampling for controlling perplexity. 40 | (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)""" 41 | 42 | mirostat_eta: Optional[float] = None 43 | """Influences how quickly the algorithm responds to feedback 44 | from the generated text. A lower learning rate will result in 45 | slower adjustments, while a higher learning rate will make 46 | the algorithm more responsive. (Default: 0.1)""" 47 | 48 | mirostat_tau: Optional[float] = None 49 | """Controls the balance between coherence and diversity 50 | of the output. A lower value will result in more focused and 51 | coherent text. (Default: 5.0)""" 52 | 53 | num_ctx: Optional[int] = None 54 | """Sets the size of the context window used to generate the 55 | next token. (Default: 2048) """ 56 | 57 | num_gpu: Optional[int] = None 58 | """The number of GPUs to use. On macOS it defaults to 1 to 59 | enable metal support, 0 to disable.""" 60 | 61 | num_thread: Optional[int] = None 62 | """Sets the number of threads to use during computation. 63 | By default, Ollama will detect this for optimal performance. 64 | It is recommended to set this value to the number of physical 65 | CPU cores your system has (as opposed to the logical number of cores).""" 66 | 67 | num_predict: Optional[int] = None 68 | """Maximum number of tokens to predict when generating text. 69 | (Default: 128, -1 = infinite generation, -2 = fill context)""" 70 | 71 | repeat_last_n: Optional[int] = None 72 | """Sets how far back for the model to look back to prevent 73 | repetition. 
(Default: 64, 0 = disabled, -1 = num_ctx)""" 74 | 75 | repeat_penalty: Optional[float] = None 76 | """Sets how strongly to penalize repetitions. A higher value (e.g., 1.5) 77 | will penalize repetitions more strongly, while a lower value (e.g., 0.9) 78 | will be more lenient. (Default: 1.1)""" 79 | 80 | temperature: Optional[float] = None 81 | """The temperature of the model. Increasing the temperature will 82 | make the model answer more creatively. (Default: 0.8)""" 83 | 84 | stop: Optional[List[str]] = None 85 | """Sets the stop tokens to use.""" 86 | 87 | tfs_z: Optional[float] = None 88 | """Tail free sampling is used to reduce the impact of less probable 89 | tokens from the output. A higher value (e.g., 2.0) will reduce the 90 | impact more, while a value of 1.0 disables this setting. (default: 1)""" 91 | 92 | top_k: Optional[int] = None 93 | """Reduces the probability of generating nonsense. A higher value (e.g. 100) 94 | will give more diverse answers, while a lower value (e.g. 10) 95 | will be more conservative. (Default: 40)""" 96 | 97 | top_p: Optional[float] = None 98 | """Works together with top-k. A higher value (e.g., 0.95) will lead 99 | to more diverse text, while a lower value (e.g., 0.5) will 100 | generate more focused and conservative text. (Default: 0.9)""" 101 | 102 | system: Optional[str] = None 103 | """system prompt (overrides what is defined in the Modelfile)""" 104 | 105 | template: Optional[str] = None 106 | """full prompt or prompt template (overrides what is defined in the Modelfile)""" 107 | 108 | format: Optional[str] = None 109 | """Specify the format of the output (e.g., json)""" 110 | 111 | timeout: Optional[int] = None 112 | """Timeout for the request stream""" 113 | 114 | keep_alive: Optional[Union[int, str]] = None 115 | """How long the model will stay loaded into memory. 116 | 117 | The parameter (Default: 5 minutes) can be set to: 118 | 1. a duration string in Golang (such as "10m" or "24h"); 119 | 2. a number in seconds (such as 3600); 120 | 3. any negative number which will keep the model loaded \ 121 | in memory (e.g. -1 or "-1m"); 122 | 4. 0 which will unload the model immediately after generating a response; 123 | 124 | See the [Ollama documents](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-do-i-keep-a-model-loaded-in-memory-or-make-it-unload-immediately)""" 125 | 126 | headers: Optional[dict] = None 127 | """Additional headers to pass to endpoint (e.g. Authorization, Referer). 128 | This is useful when Ollama is hosted on cloud services that require 129 | tokens for authentication. 
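For example (illustrative value only): headers={"Authorization": "Bearer <token>"}.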
130 | """ 131 | 132 | @property 133 | def _default_params(self) -> Dict[str, Any]: 134 | """Get the default parameters for calling Ollama.""" 135 | return { 136 | "model": self.model, 137 | "format": self.format, 138 | "options": { 139 | "mirostat": self.mirostat, 140 | "mirostat_eta": self.mirostat_eta, 141 | "mirostat_tau": self.mirostat_tau, 142 | "num_ctx": self.num_ctx, 143 | "num_gpu": self.num_gpu, 144 | "num_thread": self.num_thread, 145 | "num_predict": self.num_predict, 146 | "repeat_last_n": self.repeat_last_n, 147 | "repeat_penalty": self.repeat_penalty, 148 | "temperature": self.temperature, 149 | "stop": self.stop, 150 | "tfs_z": self.tfs_z, 151 | "top_k": self.top_k, 152 | "top_p": self.top_p, 153 | }, 154 | "system": self.system, 155 | "template": self.template, 156 | "keep_alive": self.keep_alive, 157 | } 158 | 159 | @property 160 | def _identifying_params(self) -> Mapping[str, Any]: 161 | """Get the identifying parameters.""" 162 | return {**{"model": self.model, "format": self.format}, **self._default_params} 163 | 164 | def _create_generate_stream( 165 | self, 166 | prompt: str, 167 | stop: Optional[List[str]] = None, 168 | images: Optional[List[str]] = None, 169 | **kwargs: Any, 170 | ) -> Iterator[str]: 171 | payload = {"prompt": prompt, "images": images} 172 | yield from self._create_stream( 173 | payload=payload, 174 | stop=stop, 175 | api_url=f"{self.base_url}/api/generate", 176 | **kwargs, 177 | ) 178 | 179 | async def _acreate_generate_stream( 180 | self, 181 | prompt: str, 182 | stop: Optional[List[str]] = None, 183 | images: Optional[List[str]] = None, 184 | **kwargs: Any, 185 | ) -> AsyncIterator[str]: 186 | payload = {"prompt": prompt, "images": images} 187 | async for item in self._acreate_stream( 188 | payload=payload, 189 | stop=stop, 190 | api_url=f"{self.base_url}/api/generate", 191 | **kwargs, 192 | ): 193 | yield item 194 | 195 | def _create_stream( 196 | self, 197 | api_url: str, 198 | payload: Any, 199 | stop: Optional[List[str]] = None, 200 | **kwargs: Any, 201 | ) -> Iterator[str]: 202 | if self.stop is not None and stop is not None: 203 | raise ValueError("`stop` found in both the input and default params.") 204 | elif self.stop is not None: 205 | stop = self.stop 206 | 207 | params = self._default_params 208 | 209 | for key in self._default_params: 210 | if key in kwargs: 211 | params[key] = kwargs[key] 212 | 213 | if "options" in kwargs: 214 | params["options"] = kwargs["options"] 215 | else: 216 | params["options"] = { 217 | **params["options"], 218 | "stop": stop, 219 | **{k: v for k, v in kwargs.items() if k not in self._default_params}, 220 | } 221 | 222 | if payload.get("messages"): 223 | request_payload = {"messages": payload.get("messages", []), **params} 224 | else: 225 | request_payload = { 226 | "prompt": payload.get("prompt"), 227 | "images": payload.get("images", []), 228 | **params, 229 | } 230 | 231 | response = requests.post( 232 | url=api_url, 233 | headers={ 234 | "Content-Type": "application/json", 235 | **(self.headers if isinstance(self.headers, dict) else {}), 236 | }, 237 | json=request_payload, 238 | stream=True, 239 | timeout=self.timeout, 240 | verify=False, 241 | ) 242 | response.encoding = "utf-8" 243 | if response.status_code != 200: 244 | if response.status_code == 404: 245 | raise OllamaEndpointNotFoundError( 246 | "Ollama call failed with status code 404. " 247 | "Maybe your model is not found " 248 | f"and you should pull the model with `ollama pull {self.model}`." 
249 | ) 250 | else: 251 | optional_detail = response.text 252 | raise ValueError( 253 | f"Ollama call failed with status code {response.status_code}." 254 | f" Details: {optional_detail}" 255 | ) 256 | return response.iter_lines(decode_unicode=True) 257 | 258 | async def _acreate_stream( 259 | self, 260 | api_url: str, 261 | payload: Any, 262 | stop: Optional[List[str]] = None, 263 | **kwargs: Any, 264 | ) -> AsyncIterator[str]: 265 | if self.stop is not None and stop is not None: 266 | raise ValueError("`stop` found in both the input and default params.") 267 | elif self.stop is not None: 268 | stop = self.stop 269 | 270 | params = self._default_params 271 | 272 | for key in self._default_params: 273 | if key in kwargs: 274 | params[key] = kwargs[key] 275 | 276 | if "options" in kwargs: 277 | params["options"] = kwargs["options"] 278 | else: 279 | params["options"] = { 280 | **params["options"], 281 | "stop": stop, 282 | **{k: v for k, v in kwargs.items() if k not in self._default_params}, 283 | } 284 | 285 | if payload.get("messages"): 286 | request_payload = {"messages": payload.get("messages", []), **params} 287 | else: 288 | request_payload = { 289 | "prompt": payload.get("prompt"), 290 | "images": payload.get("images", []), 291 | **params, 292 | } 293 | 294 | async with aiohttp.ClientSession() as session: 295 | async with session.post( 296 | url=api_url, 297 | headers={ 298 | "Content-Type": "application/json", 299 | **(self.headers if isinstance(self.headers, dict) else {}), 300 | }, 301 | json=request_payload, 302 | timeout=self.timeout, 303 | ) as response: 304 | if response.status != 200: 305 | if response.status == 404: 306 | raise OllamaEndpointNotFoundError( 307 | "Ollama call failed with status code 404." 308 | ) 309 | else: 310 | optional_detail = response.text 311 | raise ValueError( 312 | f"Ollama call failed with status code {response.status}." 
313 | f" Details: {optional_detail}" 314 | ) 315 | async for line in response.content: 316 | yield line.decode("utf-8") 317 | 318 | def _stream_with_aggregation( 319 | self, 320 | prompt: str, 321 | stop: Optional[List[str]] = None, 322 | run_manager: Optional[CallbackManagerForLLMRun] = None, 323 | verbose: bool = False, 324 | **kwargs: Any, 325 | ) -> GenerationChunk: 326 | final_chunk: Optional[GenerationChunk] = None 327 | for stream_resp in self._create_generate_stream(prompt, stop, **kwargs): 328 | if stream_resp: 329 | chunk = _stream_response_to_generation_chunk(stream_resp) 330 | if final_chunk is None: 331 | final_chunk = chunk 332 | else: 333 | final_chunk += chunk 334 | if run_manager: 335 | run_manager.on_llm_new_token( 336 | chunk.text, 337 | verbose=verbose, 338 | ) 339 | if final_chunk is None: 340 | raise ValueError("No data received from Ollama stream.") 341 | 342 | return final_chunk 343 | 344 | async def _astream_with_aggregation( 345 | self, 346 | prompt: str, 347 | stop: Optional[List[str]] = None, 348 | run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, 349 | verbose: bool = False, 350 | **kwargs: Any, 351 | ) -> GenerationChunk: 352 | final_chunk: Optional[GenerationChunk] = None 353 | async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs): 354 | if stream_resp: 355 | chunk = _stream_response_to_generation_chunk(stream_resp) 356 | if final_chunk is None: 357 | final_chunk = chunk 358 | else: 359 | final_chunk += chunk 360 | if run_manager: 361 | await run_manager.on_llm_new_token( 362 | chunk.text, 363 | verbose=verbose, 364 | ) 365 | if final_chunk is None: 366 | raise ValueError("No data received from Ollama stream.") 367 | 368 | return final_chunk 369 | 370 | 371 | class Ollama(BaseLLM, _OllamaCommon): 372 | """Ollama locally runs large language models. 373 | 374 | To use, follow the instructions at https://ollama.ai/. 375 | 376 | Example: 377 | .. code-block:: python 378 | 379 | from langchain_community.llms import Ollama 380 | ollama = Ollama(model="llama2") 381 | """ 382 | 383 | class Config: 384 | """Configuration for this pydantic object.""" 385 | 386 | extra = Extra.forbid 387 | 388 | @property 389 | def _llm_type(self) -> str: 390 | """Return type of llm.""" 391 | return "ollama-llm" 392 | 393 | def _generate( # type: ignore[override] 394 | self, 395 | prompts: List[str], 396 | stop: Optional[List[str]] = None, 397 | images: Optional[List[str]] = None, 398 | run_manager: Optional[CallbackManagerForLLMRun] = None, 399 | **kwargs: Any, 400 | ) -> LLMResult: 401 | """Call out to Ollama's generate endpoint. 402 | 403 | Args: 404 | prompt: The prompt to pass into the model. 405 | stop: Optional list of stop words to use when generating. 406 | 407 | Returns: 408 | The string generated by the model. 409 | 410 | Example: 411 | .. code-block:: python 412 | 413 | response = ollama("Tell me a joke.") 414 | """ 415 | # TODO: add caching here. 
416 | generations = [] 417 | for prompt in prompts: 418 | final_chunk = super()._stream_with_aggregation( 419 | prompt, 420 | stop=stop, 421 | images=images, 422 | run_manager=run_manager, 423 | verbose=self.verbose, 424 | **kwargs, 425 | ) 426 | generations.append([final_chunk]) 427 | return LLMResult(generations=generations) # type: ignore[arg-type] 428 | 429 | async def _agenerate( # type: ignore[override] 430 | self, 431 | prompts: List[str], 432 | stop: Optional[List[str]] = None, 433 | images: Optional[List[str]] = None, 434 | run_manager: Optional[CallbackManagerForLLMRun] = None, 435 | **kwargs: Any, 436 | ) -> LLMResult: 437 | """Call out to Ollama's generate endpoint. 438 | 439 | Args: 440 | prompt: The prompt to pass into the model. 441 | stop: Optional list of stop words to use when generating. 442 | 443 | Returns: 444 | The string generated by the model. 445 | 446 | Example: 447 | .. code-block:: python 448 | 449 | response = ollama("Tell me a joke.") 450 | """ 451 | # TODO: add caching here. 452 | generations = [] 453 | for prompt in prompts: 454 | final_chunk = await super()._astream_with_aggregation( 455 | prompt, 456 | stop=stop, 457 | images=images, 458 | run_manager=run_manager, # type: ignore[arg-type] 459 | verbose=self.verbose, 460 | **kwargs, 461 | ) 462 | generations.append([final_chunk]) 463 | return LLMResult(generations=generations) # type: ignore[arg-type] 464 | 465 | def _stream( 466 | self, 467 | prompt: str, 468 | stop: Optional[List[str]] = None, 469 | run_manager: Optional[CallbackManagerForLLMRun] = None, 470 | **kwargs: Any, 471 | ) -> Iterator[GenerationChunk]: 472 | for stream_resp in self._create_generate_stream(prompt, stop, **kwargs): 473 | if stream_resp: 474 | chunk = _stream_response_to_generation_chunk(stream_resp) 475 | if run_manager: 476 | run_manager.on_llm_new_token( 477 | chunk.text, 478 | verbose=self.verbose, 479 | ) 480 | yield chunk 481 | 482 | async def _astream( 483 | self, 484 | prompt: str, 485 | stop: Optional[List[str]] = None, 486 | run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, 487 | **kwargs: Any, 488 | ) -> AsyncIterator[GenerationChunk]: 489 | async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs): 490 | if stream_resp: 491 | chunk = _stream_response_to_generation_chunk(stream_resp) 492 | if run_manager: 493 | await run_manager.on_llm_new_token( 494 | chunk.text, 495 | verbose=self.verbose, 496 | ) 497 | yield chunk 498 | -------------------------------------------------------------------------------- /upsonic/remote/on_prem.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # -*- coding: utf-8 -*- 3 | 4 | import json 5 | import ast 6 | from functools import wraps 7 | 8 | from typing import List 9 | 10 | import pickle 11 | import os 12 | import re 13 | 14 | import copy 15 | from cryptography.fernet import Fernet 16 | import base64 17 | import hashlib 18 | import inspect 19 | import pkgutil 20 | import threading 21 | import time 22 | import textwrap 23 | import importlib.util 24 | import cloudpickle 25 | 26 | import psutil 27 | 28 | from contextlib import contextmanager 29 | 30 | import sys 31 | 32 | 33 | from typing import List, Union, Optional 34 | import dill 35 | 36 | from pip._internal.operations import freeze 37 | 38 | import traceback 39 | import shutil 40 | from memory_profiler import memory_usage 41 | 42 | 43 | from datetime import datetime 44 | from termcolor import colored 45 | 46 | 47 | 48 | import platform 49 | 50 | 
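# A quick usage sketch for the Ollama wrapper defined in
# upsonic/remote/ollama_langchain.py above (this assumes an Ollama server
# is already running on the default http://localhost:11434):
#
#   from upsonic.remote.ollama_langchain import Ollama
#   ollama = Ollama(model="llama2")
#   response = ollama("Tell me a joke.")
#
# The constructor arguments map one-to-one onto the _OllamaCommon fields
# shown above (temperature, num_ctx, headers, and so on).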
# Initialize colorama for Windows 51 | if platform.system() == "Windows": 52 | from colorama import init 53 | init() 54 | 55 | 56 | 57 | 58 | 59 | sys.path.append(os.path.join(os.path.dirname(__file__), "..")) 60 | from remote.localimport import localimport 61 | from remote.interface import encrypt 62 | from remote.interface import decrypt 63 | 64 | from typing import Optional 65 | 66 | import platform 67 | 68 | 69 | def extract_needed_libraries(func, debug=False) -> dict: 70 | result = {} 71 | the_globals = dill.detect.globalvars(func) 72 | for each in the_globals: 73 | name = dill.source.getname(the_globals[each], fqn=True) 74 | result[each] = name.split(".")[0] 75 | print("result", result) if debug else None 76 | return result 77 | 78 | 79 | 80 | 81 | 82 | def extract_source(obj, key=None, debug=False): 83 | result = None 84 | try: 85 | result = inspect.getsource(obj) 86 | except: 87 | try: 88 | result = dill.source.getsource(obj) 89 | except: 90 | pass 91 | 92 | if result == None: 93 | try: 94 | value = None 95 | if isinstance(obj, str): 96 | value = '"'+str(obj)+'"' 97 | else: 98 | value = str(obj) 99 | result = key.split(".")[-1] + "=" + value 100 | except: 101 | traceback.print_exc() 102 | 103 | 104 | if result == None: 105 | my_source = "Error on extracting source" 106 | else: 107 | my_source = result 108 | return my_source 109 | 110 | 111 | def extract_local_files(obj, debug=False, local_directory=None): 112 | if local_directory == None: 113 | local_directory = os.getcwd() 114 | print(local_directory) if debug else None 115 | 116 | the_elements = dill.detect.globalvars(obj) 117 | print(the_elements) if debug else None 118 | the_local_elements = {} 119 | for element, value in the_elements.items(): 120 | element_file = dill.source.getfile(value) 121 | print(element_file) if debug else None 122 | if element_file.startswith(local_directory): 123 | print("Inside") if debug else None 124 | with open(element_file, "r") as f: 125 | element_content = f.read() 126 | print("element_content", element_content) if debug else None 127 | the_local_elements[os.path.basename(element_file)] = element_content 128 | 129 | print("Completed") if debug else None 130 | return the_local_elements 131 | 132 | 133 | def dump_local_files(extract, debug=False, local_directory=None): 134 | if local_directory == None: 135 | local_directory = os.getcwd() 136 | print(local_directory) if debug else None 137 | 138 | for element, value in extract.items(): 139 | # Create a directory named upsonic if it does not exist 140 | if not os.path.exists(os.path.join(local_directory, "upsonic")): 141 | os.makedirs(os.path.join(local_directory, "upsonic")) 142 | 143 | file_location = os.path.join(local_directory, "upsonic", element) 144 | print(file_location) if debug else None 145 | print(value) if debug else None 146 | with open(file_location, "w") as f: 147 | f.write(value) 148 | 149 | sys.path.insert(0, os.path.join(local_directory, "upsonic")) 150 | 151 | 152 | class Upsonic_On_Prem: 153 | prevent_enable = False 154 | quiet_startup = False 155 | 156 | @staticmethod 157 | def export_requirement(): 158 | the_list = list(freeze.freeze()) 159 | the_string = "" 160 | for item in the_list: 161 | the_string += item + ", " 162 | return the_string[:-2] 163 | 164 | def _log(self, message, color=None, bold=False): 165 | 166 | 167 | if bold: 168 | attrs = ['bold'] 169 | background = 'on_white' 170 | message = f" {message} " 171 | else: 172 | attrs = [] 173 | background = None 174 | 175 | print(colored(message, color, background,
attrs=attrs)) 176 | 177 | def __enter__(self): 178 | return self # pragma: no cover 179 | 180 | def __exit__(self, exc_type, exc_val, exc_tb): 181 | pass # pragma: no cover 182 | 183 | @property 184 | def localimport(self): 185 | return localimport 186 | 187 | @property 188 | def encrypt(self): 189 | return encrypt 190 | 191 | @property 192 | def decrypt(self): 193 | return decrypt 194 | 195 | def __init__( 196 | self, 197 | api_url, 198 | access_key, 199 | engine="cloudpickle,dill", 200 | enable_usage_analyses=True, 201 | enable_local_files=True, 202 | enable_auto_requirements=False, 203 | enable_elastic_dependency=False, 204 | cache_dir=None, 205 | pass_python_version_check=False, 206 | byref=True, 207 | recurse=True, 208 | protocol=pickle.DEFAULT_PROTOCOL, 209 | source=True, 210 | builtin=True, 211 | tester=False, 212 | ): 213 | import requests 214 | from requests.auth import HTTPBasicAuth 215 | 216 | from requests.packages.urllib3.exceptions import InsecureRequestWarning 217 | 218 | requests.packages.urllib3.disable_warnings(InsecureRequestWarning) 219 | 220 | self.requests = requests 221 | self.HTTPBasicAuth = HTTPBasicAuth 222 | 223 | self.api_url = api_url 224 | self.password = access_key 225 | self.engine = engine 226 | self.byref = byref 227 | self.recurse = recurse 228 | self.protocol = protocol 229 | self.source = source 230 | self.builtin = builtin 231 | self.enable_auto_requirements = enable_auto_requirements 232 | self.enable_elastic_dependency = enable_elastic_dependency 233 | self.enable_usage_analyses = enable_usage_analyses 234 | self.enable_local_files = enable_local_files 235 | 236 | self.tester = tester 237 | self.pass_python_version_check = pass_python_version_check 238 | 239 | self.enable_active = False 240 | 241 | self.cache_dir = ( 242 | os.path.join(os.path.dirname(os.path.realpath(__file__)), "upsonic_cache") 243 | if cache_dir == None 244 | else cache_dir 245 | ) 246 | if not os.path.exists(self.cache_dir): 247 | os.mkdir(self.cache_dir) 248 | 249 | if self.status == True: 250 | self._log( 251 | "Upsonic active", color="green", bold=True, 252 | ) 253 | self._log( 254 | f"Welcome {self.get_username()}" , color="white", bold=False, 255 | ) 256 | else: 257 | self._log( 258 | "Upsonic is down", color="red", bold=True, 259 | ) 260 | 261 | self.thread_number = 5 262 | 263 | def _send_request( 264 | self, method:str, endpoint:str, data=None, make_json=True, include_status=False 265 | ): 266 | try: 267 | response = self.requests.request( 268 | method, 269 | self.api_url + endpoint, 270 | data=data, 271 | auth=self.HTTPBasicAuth("", self.password), 272 | verify=False, 273 | ) 274 | try: 275 | result = None 276 | if not make_json: 277 | result = response.text 278 | else: 279 | result = json.loads(response.text) 280 | if result["status"] == False: 281 | self._log( 282 | f"Error: {endpoint}", color="red", bold=False, 283 | ) 284 | else: 285 | result = result["result"] if not include_status else result 286 | 287 | return result 288 | except: # pragma: no cover 289 | print( 290 | f"Error on '{self.api_url + endpoint}': ", response.text 291 | ) if self.tester else None 292 | return [None] # pragma: no cover 293 | except: 294 | print("Error: Remote is down") 295 | return [None] 296 | 297 | @property 298 | def status(self): 299 | return self._send_request(method = "GET",endpoint = "/status") 300 | 301 | def get_username(self): 302 | try: 303 | response = self._send_request(method="GET", endpoint="/my/username") 304 | return response 305 | except Exception as e: 306 | return "to 
Upsonic" 307 | 308 | def system_diagnostic(self): 309 | diagnostic_data = { 310 | "System Type": platform.system(), 311 | "User Name": self.get_username(), 312 | "Uptime (seconds)": round(time.time() - psutil.boot_time(), 2), 313 | "CPU Usage (%)": psutil.cpu_percent(interval=1), 314 | "RAM Usage (%)": psutil.virtual_memory().percent, 315 | "Client Version":self.get_client_version(), 316 | "version": self.get_version(), 317 | } 318 | return json.dumps(diagnostic_data, indent=4) 319 | 320 | def get_specific_version(self, package: str) -> int: 321 | package_name = package.split("==")[0] 322 | package_version = ( 323 | package.split("==")[1] if len(package.split("==")) > 1 else "Latest" 324 | ) 325 | backup_sys_path = sys.path 326 | backup_sys_modules = sys.modules 327 | 328 | the_dir = os.path.abspath( 329 | os.path.join(self.cache_dir, package_name, package_version) 330 | ) 331 | with self.localimport(the_dir) as _importer: 332 | return importlib.import_module(package_name) 333 | 334 | def generate_the_globals(self, needed_libraries:dict, key:str) ->dict : 335 | requirements = self.extract_the_requirements(key) 336 | 337 | total = {} 338 | for each, value in needed_libraries.items(): 339 | the_needed = None 340 | for each_r in requirements: 341 | each_r_ = each_r.split("==")[0] 342 | if each_r_.split(".")[0].lower() == value.split(".")[0].lower(): 343 | total[each] = self.get_specific_version(each_r.lower()) 344 | 345 | return total 346 | 347 | def generate_the_true_requirements(self, requirements:list, needed_libraries:dict, key:str) -> dict: 348 | total = {} 349 | for each, value in needed_libraries.items(): 350 | the_needed = None 351 | for each_r in requirements: 352 | each_r_ = each_r.split("==")[0] 353 | if each_r_.split(".")[0].lower() == value.split(".")[0].lower(): 354 | total[each] = each_r 355 | 356 | return total 357 | 358 | def install_package(self, package:str) -> None: 359 | from pip._internal import main as pip 360 | 361 | package_name = package.split("==")[0] 362 | package_version = ( 363 | package.split("==")[1] if len(package.split("==")) > 1 else "Latest" 364 | ) 365 | 366 | the_dir = os.path.abspath( 367 | os.path.join(self.cache_dir, package_name, package_version) 368 | ) 369 | if not os.path.exists(the_dir) or not self.enable_elastic_dependency: 370 | if self.enable_elastic_dependency: 371 | os.makedirs(the_dir) 372 | if self.tester: 373 | self._log(f"Installing {package} to {the_dir}") 374 | pip(["install", package, "--target", the_dir, "--no-dependencies"]) 375 | else: 376 | if self.tester: 377 | self._log(f"Installing {package} to default_dir") 378 | pip(["install", package]) 379 | 380 | def extract_the_requirements(self, key:str)-> List[str]: 381 | the_requirements = self.get_requirements(key) 382 | elements = [] 383 | for each in the_requirements.split(","): 384 | if "==" in each: 385 | the_requirement = textwrap.dedent(each) 386 | elements.append(the_requirement) 387 | return elements 388 | 389 | def install_the_requirements(self, the_requirements:list) -> None: 390 | installed_requirements = self.export_requirement() 391 | if self.tester: 392 | self._log(f"installed_requirements {installed_requirements}") 393 | try: 394 | installed_requirements = installed_requirements.lower() 395 | except: 396 | pass 397 | 398 | for each in the_requirements: 399 | try: 400 | if each not in installed_requirements or self.enable_elastic_dependency: 401 | self.install_package(each) 402 | else: 403 | if self.tester: 404 | self._log("Already installed in system") 405 | except: 406 
| if self.tester: 407 | self._log(f"Error on {each}") 408 | traceback.print_exc() 409 | 410 | def delete_cache(self) -> None: 411 | shutil.rmtree(self.cache_dir) 412 | 413 | def set_the_library_specific_locations(self, the_requirements:list) -> str: 414 | the_all_dirs = [] 415 | the_all_string = "" 416 | 417 | ordered_list = sorted(the_requirements) 418 | if self.tester: 419 | self._log(f"ordered_list {ordered_list}") 420 | 421 | for package in ordered_list: 422 | package_name = package.split("==")[0] 423 | package_version = ( 424 | package.split("==")[1] if len(package.split("==")) > 1 else "Latest" 425 | ) 426 | the_all_string += package 427 | 428 | the_dir = os.path.abspath( 429 | os.path.join(self.cache_dir, package_name, package_version) 430 | ) 431 | 432 | the_all_dirs.append(the_dir) 433 | if self.tester: 434 | self._log(f"the_all_string {the_all_string}") 435 | 436 | # Create folder with sha256 of the_all_string 437 | sha256_string = hashlib.sha256(the_all_string.encode("utf-8")).hexdigest() 438 | sha256_dir = os.path.join(self.cache_dir, sha256_string) 439 | already_exist = os.path.exists(sha256_dir) 440 | os.makedirs(sha256_dir, exist_ok=True) 441 | 442 | if not already_exist: 443 | # Copying all contents in the_all_dirs to sha256_dir 444 | for dir_path in the_all_dirs: 445 | for root, dirs, files in os.walk(dir_path): 446 | for file in files: 447 | # construct full file path 448 | full_file_name = os.path.join(root, file) 449 | # construct destination path 450 | dest_file_name = sha256_dir + full_file_name[len(dir_path) :] 451 | # create directories if not present in destination 452 | os.makedirs(os.path.dirname(dest_file_name), exist_ok=True) 453 | # copy file 454 | shutil.copy(full_file_name, dest_file_name) 455 | 456 | if self.tester: 457 | self._log(f"the sha256 of new directory {already_exist} {sha256_dir}") 458 | 459 | return sha256_dir 460 | 461 | def unset_the_library_specific_locations(self)-> None: 462 | sys.path = self.sys_path_backup 463 | 464 | @contextmanager 465 | def import_package(self, package:str) -> None: 466 | package_name = package.split("==")[0] 467 | package_version = ( 468 | package.split("==")[1] if len(package.split("==")) > 1 else "Latest" 469 | ) 470 | 471 | the_dir = os.path.abspath( 472 | os.path.join(self.cache_dir, package_name, package_version) 473 | ) 474 | 475 | if not os.path.exists(the_dir): 476 | self.install_package(package) 477 | 478 | sys_path_backup = sys.path.copy() 479 | 480 | sys.path.insert(0, the_dir) 481 | 482 | try: 483 | yield 484 | finally: 485 | sys.path = sys_path_backup 486 | 487 | def extend_global(self, name:str, value)-> None: 488 | globals()[name] = value 489 | 490 | def load_module(self, module_name:str, version:str =None) -> dict: 491 | encryption_key = "u" 492 | 493 | version_check_pass = False 494 | the_all = self.get_all() 495 | original_name = module_name 496 | sub_module_name = False 497 | if "." 
in module_name: 498 | sub_module_name = module_name.replace(".", "_") 499 | module_name = sub_module_name 500 | 501 | the_all_imports = {} 502 | for i in the_all: 503 | original_i = i 504 | if "_upsonic_" in i: 505 | continue 506 | if sub_module_name != False: 507 | i = i.replace(original_name, module_name) 508 | name = i.split(".") 509 | if module_name == name[0]: 510 | try: 511 | if not self.pass_python_version_check and not version_check_pass: 512 | key_version = self.get_python_version(original_i) 513 | currently_version = self.get_currently_version() 514 | if self.tester: 515 | self._log(f"key_version {key_version}") 516 | self._log(f"currently_version {currently_version}") 517 | if ( 518 | key_version[0] == currently_version[0] 519 | and key_version[0] == 3 520 | ): 521 | if self.tester: 522 | self._log("Versions are same and 3") 523 | if key_version[1] != currently_version[1]: 524 | if self.tester: 525 | self._log("Minor versions are different") 526 | 527 | self._log( 528 | "Warning: The Python versions are different. Are you sure you want to continue?", bold=True, 529 | ) 530 | the_input = input("Yes or no (y/n)").lower() 531 | if the_input == "n": 532 | key_version = f"{key_version[0]}.{key_version[1]}" 533 | currently_version = ( 534 | f"{currently_version[0]}.{currently_version[1]}" 535 | ) 536 | return ( 537 | "Python versions are different (Key == " 538 | + key_version 539 | + " This runtime == " 540 | + currently_version 541 | + ")" 542 | ) 543 | if the_input == "y": 544 | version_check_pass = True 545 | except: 546 | if self.tester: 547 | traceback.print_exc() 548 | 549 | if version != None: 550 | version_list_response = self.get_version_history(original_i) 551 | version_list = [] 552 | for each_v in version_list_response: 553 | version_list.append(each_v.replace(original_i + ":", "")) 554 | 555 | if version in version_list: 556 | try: 557 | the_all_imports[i] = self.get( 558 | original_i, version, pass_python_version_control=True 559 | ) 560 | except: 561 | the_all_imports[i] = self.get( 562 | original_i, pass_python_version_control=True 563 | ) 564 | else: 565 | the_all_imports[i] = self.get( 566 | original_i, pass_python_version_control=True 567 | ) 568 | 569 | import types 570 | 571 | def create_module_obj(dictionary:dict) -> dict: 572 | result = {} 573 | for key, value in dictionary.items(): 574 | modules = key.split(".") 575 | current_dict = result 576 | for module in modules[:-1]: 577 | if module not in current_dict: 578 | current_dict[module] = types.ModuleType(module) 579 | current_dict = vars(current_dict[module]) 580 | current_dict[modules[-1]] = value 581 | 582 | return result 583 | 584 | generated_library = create_module_obj(the_all_imports)[module_name] 585 | 586 | return generated_library 587 | 588 | def dump_module( 589 | self, 590 | module_name:str, 591 | module, 592 | ) -> None: 593 | # Getting Started and Encryption Preparation 594 | encryption_key = "u" 595 | top_module = module 596 | 597 | cloudpickle.register_pickle_by_value(top_module) 598 | 599 | # Collection of Sub-Modules 600 | sub_modules = [] 601 | if hasattr(top_module, "__path__"): 602 | for importer, modname, ispkg in pkgutil.walk_packages( 603 | path=top_module.__path__, 604 | prefix=top_module.__name__ + ".", 605 | onerror=lambda x: None, 606 | ): 607 | sub_modules.append(importer.find_module(modname).load_module(modname)) 608 | else: 609 | sub_modules.append(top_module) 610 | 611 | # Collecting objects in submodules 612 | threads = [] 613 | 614 | the_list = [] 615 | 616 | for sub_module in sub_modules:
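# Every attribute of every (sub)module is collected first; the raw list
# is narrowed down to functions and classes right below.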
617 | [the_list.append(obj) for name, obj in inspect.getmembers(sub_module)] 618 | 619 | # Extract just functions and classes 620 | the_list = [i for i in the_list if inspect.isfunction(i) or inspect.isclass(i)] 621 | # If the __module__ is not equal to module_name, remove it from the list 622 | 623 | the_list = [i for i in the_list if i.__module__.split(".")[0] == module_name] 624 | 625 | # Filtering Unwanted Objects 626 | my_list = [] 627 | for element in copy.copy(the_list): 628 | if inspect.isfunction(element): 629 | name = element.__module__ + "." + element.__name__ 630 | 631 | elif inspect.isclass(element): 632 | name = element.__module__ + "." + element.__name__ 633 | if ( 634 | "upsonic.remote" not in name 635 | and "upsonic_updater" not in name 636 | and name != f"{module.__name__}.threading.Thread" 637 | ): 638 | my_list.append(element) 639 | 640 | the_list = my_list 641 | 642 | # Processing Objects with Multiple Threads 643 | for element in the_list: 644 | time.sleep(0.1) 645 | if inspect.isfunction(element): 646 | name = element.__module__ + "." + element.__name__ 647 | 648 | elif inspect.isclass(element): 649 | name = element.__module__ + "." + element.__name__ 650 | else: 651 | continue 652 | 653 | first_element = name.split(".")[0] 654 | 655 | if first_element != module_name: 656 | continue 657 | 658 | try: 659 | while len(threads) >= self.thread_number: 660 | for each in threads: 661 | if not each.is_alive(): 662 | threads.remove(each) 663 | time.sleep(0.1) 664 | 665 | the_thread = threading.Thread( 666 | target=self.set, 667 | args=(name, element), 668 | ) 669 | the_thread.start() 670 | 671 | thread = the_thread 672 | threads.append(thread) 673 | 674 | except: 675 | import traceback 676 | 677 | traceback.print_exc() 678 | self._log(f"Error on '{name}'", color="red") 679 | self.delete(name) 680 | 681 | # Waiting for all threads to complete 682 | for each in threads: 683 | each.join() 684 | 685 | def dump( 686 | self, 687 | key:str, 688 | value, 689 | message:str=None, 690 | code:str=None, 691 | ) -> any: 692 | return self.set(key, value, message=message, code=code) 693 | 694 | def load(self, key:str, version:str=None) -> any: 695 | return self.get(key, version=version, print_exc=True) 696 | 697 | def get_currently_version(self)-> List[int]: 698 | total = sys.version_info 699 | the_version = [] 700 | the_version.append(total.major) 701 | the_version.append(total.minor) 702 | the_version.append(total.micro) 703 | return the_version 704 | 705 | def get_python_version(self, key:str): 706 | data = {"scope": key} 707 | total = self._send_request("POST", "/get_python_version_of_scope", data) 708 | the_version = [] 709 | the_version.append(int(total.split(".")[0])) 710 | the_version.append(int(total.split(".")[1])) 711 | the_version.append(int(total.split(".")[2])) 712 | return the_version 713 | 714 | def get_lock(self, key:str) -> any: 715 | data = {"scope": key} 716 | lock = self._send_request("POST", "/get_lock_of_scope", data) 717 | return lock 718 | 719 | def print_current_datetime(self): 720 | current_datetime = datetime.now() 721 | print("Current date and time:", current_datetime.strftime("%Y-%m-%d %H:%M:%S")) 722 | 723 | def set(self, key:str, value, message:str=None, code:str=None) -> bool: 724 | 725 | if self.tester: 726 | self.print_current_datetime() 727 | 728 | if isinstance(value, str): 729 | pass 730 | elif isinstance(value, int): 731 | pass 732 | elif isinstance(value, float): 733 | pass 734 | elif callable(value): 735 | pass 736 | else: 737 | self._log("Error: Upsonic only supports string, integer, float, and functions.") 738 | return False 739 | 740 | 741 | 742 | 743 | 744 | 745 | 746 | if key.startswith("."): 747 | self._log("Error: The key cannot start with '.'") 748 | return False 749 | if ":" in key: 750 | self._log("Error: The key cannot include ':'") 751 | return False 752 | if key.endswith("."): 753 | self._log("Error: The key cannot end with '.'") 754 | return False 755 | if "." not in key: 756 | self._log("Error: You should create a parent with '.' like math.sum") 757 | return False 758 | if ".." in key: 759 | self._log("Error: The key cannot include consecutive dots '..'") 760 | return False 761 | 762 | try: 763 | the_lock = self.get_lock(key) 764 | if the_lock and the_lock != [None]: 765 | self._log("This scope is locked right now! Someone is dumping.") 766 | return False 767 | except: 768 | pass 769 | 770 | # Type Checking and Creation of Encryption Key 771 | the_type = type(value).__name__ 772 | if the_type == "type": 773 | the_type = "class" 774 | 775 | encryption_key = "u" 776 | 777 | the_code = textwrap.dedent(extract_source(value, key=key)) if code == None else code 778 | 779 | # Preparation of Requirements 780 | the_requirements = Upsonic_On_Prem.export_requirement() 781 | the_original_requirements = "" 782 | if self.tester: 783 | self._log(f"The first original requirements {the_original_requirements}") 784 | elements = [] 785 | for each in the_requirements.split(","): 786 | if "==" in each: 787 | the_requirement = textwrap.dedent(each) 788 | elements.append(the_requirement) 789 | the_requirements = elements 790 | if self.tester: 791 | self._log(f"the_requirements {the_requirements}") 792 | 793 | # Extracting Necessary Libraries 794 | extracted_needed_libraries = None 795 | try: 796 | extracted_needed_libraries = extract_needed_libraries(value, self.tester) 797 | try: 798 | the_original_requirements = self.generate_the_true_requirements( 799 | the_requirements, extracted_needed_libraries, key 800 | ) 801 | if self.tester: 802 | self._log( 803 | f"the_original_requirements in_generation {the_original_requirements}" 804 | ) 805 | the_text = "" 806 | for each, value_ in the_original_requirements.items(): 807 | the_text += value_ + ", " 808 | the_original_requirements = the_text[:-2] 809 | 810 | except: 811 | if self.tester: 812 | self._log( 813 | f"Error on generate_the_true_requirements while dumping {key}" 814 | ) 815 | traceback.print_exc() 816 | except: 817 | if self.tester: 818 | self._log(f"Error on extract_needed_libraries while dumping {key}") 819 | traceback.print_exc() 820 | 821 | if self.tester: 822 | self._log(f"the_original_requirements {the_original_requirements}") 823 | 824 | # Python Version Information and Encryption 825 | the_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}" 826 | 827 | fernet_key = base64.urlsafe_b64encode( 828 | hashlib.sha256(encryption_key.encode()).digest() 829 | ) 830 | fernet = Fernet(fernet_key) 831 | 832 | # Preparation and Encoding of Engine Reports 833 | engine_reports_exceptions = [] 834 | the_engine_reports = {} 835 | for engine in self.engine.split(","): 836 | try: 837 | the_engine_reports[engine] = self.encrypt( 838 | encryption_key, 839 | value, 840 | engine, 841 | self.byref, 842 | self.recurse, 843 | self.protocol, 844 | self.source, 845 | self.builtin, 846 | ) 847 | except: 848 | if self.tester: 849 | self._log(f"Error on {engine} while dumping {key}") 850 | traceback.print_exc() 851 | else:
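# Outside tester mode the traceback is stored so it can be shown later if
# no engine manages to serialize the object.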
852 | engine_reports_exceptions.append(traceback.format_exc()) 853 | 854 | if len(the_engine_reports) == 0 and not self.tester: 855 | self._log("Error: No engine is able to dump the object", color="red") 856 | for error in engine_reports_exceptions: 857 | print(error) 858 | 859 | # Extracting Local Files and Source Code 860 | try: 861 | the_engine_reports["extracted_local_files"] = fernet.encrypt( 862 | pickle.dumps(extract_local_files(value, self.tester), protocol=1) 863 | ) 864 | except: 865 | if self.tester: 866 | self._log(f"Error on extracted_local_files while dumping {key}") 867 | traceback.print_exc() 868 | 869 | try: 870 | the_engine_reports["extract_source"] = fernet.encrypt( 871 | pickle.dumps(extract_source(value, debug=self.tester, key=key), protocol=1) 872 | ) 873 | except: 874 | if self.tester: 875 | self._log(f"Error on extract_source while dumping {key}") 876 | traceback.print_exc() 877 | 878 | # Encryption and Logging of Requirements 879 | if extracted_needed_libraries != None: 880 | the_engine_reports["extract_needed_libraries"] = fernet.encrypt( 881 | pickle.dumps(extracted_needed_libraries, protocol=1) 882 | ) 883 | 884 | if self.tester: 885 | self._log(f"the_engine_reports {the_engine_reports}") 886 | 887 | # Data Preparation and Request Submission 888 | dumped = pickle.dumps(the_engine_reports, protocol=1) 889 | 890 | data = { 891 | "scope": key, 892 | "code": the_code, 893 | "type": the_type, 894 | "requirements": the_original_requirements, 895 | "python_version": the_version, 896 | "data": fernet.encrypt(dumped), 897 | "commit_message": message, 898 | } 899 | 900 | response = self._send_request( 901 | "POST", "/dump_together", data, include_status=True 902 | ) 903 | 904 | # Response Processing and Output 905 | if self.tester: 906 | self.print_current_datetime() 907 | if response != [None]: 908 | if response["status"] is False: 909 | return response["result"] 910 | 911 | if response["status"] is True: 912 | if self.tester: 913 | self.print_current_datetime() 914 | print("Dumped.") 915 | return True 916 | else: 917 | return False 918 | 919 | def print_code(self, key:str, version:str=None) -> None: 920 | print(self.get(key, version=version, extract_source=True)) 921 | 922 | def get( 923 | self, 924 | key:str, 925 | version:str=None, 926 | print_exc:bool=True, 927 | pass_python_version_control:bool=False, 928 | pass_usage_analyses:bool=False, 929 | try_to_extract_importable:bool=False, 930 | extract_source:bool=False, 931 | )-> any: 932 | if self.tester: 933 | self._log(f"Process started for {key}") 934 | response = None 935 | 936 | encryption_key = "u" 937 | 938 | data = {"scope": key} 939 | 940 | versions_are_different = False 941 | if pass_python_version_control: 942 | versions_are_different = True 943 | try: 944 | if not self.pass_python_version_check and not pass_python_version_control: 945 | key_version = self.get_python_version(key) 946 | currently_version = self.get_currently_version() 947 | if self.tester: 948 | self._log(f"key_version {key_version}") 949 | self._log(f"currently_version {currently_version}") 950 | if key_version[0] == currently_version[0] and key_version[0] == 3: 951 | if self.tester: 952 | self._log("Versions are same and 3") 953 | if key_version[1] != currently_version[1]: 954 | if self.tester: 955 | self._log("Minor versions are different") 956 | if int(currently_version[1]) >= 11 or int(key_version[1]) >= 11: 957 | if ( 958 | int(currently_version[1]) < 11 959 | or int(key_version[1]) < 11 960 | ): 961 | versions_are_different = True 962 | self._log( 963 | "Warning: The Python versions are different. Are you sure you want to continue?", bold=True, 964 | ) 965 | the_input = input("Yes or no (y/n)").lower() 966 | if the_input == "n": 967 | key_version = f"{key_version[0]}.{key_version[1]}" 968 | currently_version = ( 969 | f"{currently_version[0]}.{currently_version[1]}" 970 | ) 971 | return ( 972 | "Python versions are different (Key == " 973 | + key_version 974 | + " This runtime == " 975 | + currently_version 976 | + ")" 977 | ) 978 | except: 979 | if self.tester: 980 | traceback.print_exc() 981 | 982 | the_requirements_path = None 983 | 984 | if self.enable_auto_requirements: 985 | try: 986 | the_requirements = self.extract_the_requirements(key) 987 | 988 | self.install_the_requirements(the_requirements) 989 | if self.tester: 990 | self._log(f"the_requirements {the_requirements}") 991 | if self.enable_elastic_dependency: 992 | the_requirements_path = self.set_the_library_specific_locations( 993 | the_requirements 994 | ) 995 | except: 996 | if self.tester: 997 | self._log(f"Error on requirements while loading {key}") 998 | traceback.print_exc() 999 | 1000 | if response is None: 1001 | if version != None: 1002 | response = self.get_version_data(key, version) 1003 | else: 1004 | response = self._send_request("POST", "/load", data) 1005 | try: 1006 | fernet_key = base64.urlsafe_b64encode( 1007 | hashlib.sha256(encryption_key.encode()).digest() 1008 | ) 1009 | fernet = Fernet(fernet_key) 1010 | response = pickle.loads(fernet.decrypt(response)) 1011 | if self.tester: 1012 | self._log(f"response {response}") 1013 | if "extracted_local_files" in response: 1014 | try: 1015 | if self.enable_local_files: 1016 | dump_local_files( 1017 | pickle.loads( 1018 | fernet.decrypt(response["extracted_local_files"]) 1019 | ), 1020 | self.tester, 1021 | ) 1022 | else: 1023 | if self.tester: 1024 | self._log("Local files are not enabled") 1025 | except: 1026 | if self.tester: 1027 | self._log(f"Error on extracted_local_files while loading {key}") 1028 | traceback.print_exc() 1029 | response.pop("extracted_local_files") 1030 | if extract_source: 1031 | return pickle.loads(fernet.decrypt(response["extract_source"])) 1032 | if "extract_source" in response: 1033 | response.pop("extract_source") 1034 | needed_libraries = None 1035 | if "extract_needed_libraries" in response: 1036 | needed_libraries = pickle.loads( 1037 | fernet.decrypt(response["extract_needed_libraries"]) 1038 | ) 1039 | response.pop("extract_needed_libraries") 1040 | for engine, value in response.items(): 1041 | try: 1042 | if the_requirements_path is not None: 1043 | with self.localimport(the_requirements_path) as _importer: 1044 | response = self.decrypt( 1045 | encryption_key, 1046 | value, 1047 | engine, 1048 | try_to_extract_importable=try_to_extract_importable, 1049 | ) 1050 | break 1051 | else: 1052 | response = self.decrypt( 1053 | encryption_key, 1054 | value, 1055 | engine, 1056 | try_to_extract_importable=try_to_extract_importable, 1057 | ) 1058 | break 1059 | except: 1060 | response = "Error" 1061 | self._log(f"Error on {engine} while loading {key}") 1062 | traceback.print_exc() 1063 | except: 1064 | if print_exc: 1065 | if self.tester: 1066 | traceback.print_exc() 1067 | else: 1068 | pass 1069 | 1070 | # Run analyses 1071 | if self.enable_usage_analyses and not pass_usage_analyses: 1072 | if inspect.isfunction(response) and self.is_usage_analyses_true(key): 1073 | commit = None 1074 | if version == None: 1075 | the_dump_history = self.get_dump_history(key) 1076 | if len(the_dump_history) > 0: 1077 | commit
    def os_name(self) -> str:
        system_name = platform.system()
        if system_name == "Windows":
            return "Windows"
        elif system_name == "Darwin":
            return "macOS"
        elif system_name == "Linux":
            return "Linux"
        else:
            return "Unknown OS"

    def add_run_history(
        self,
        key: str,
        version: str,
        cpu_usage_one_core: float,
        memory_usage: float,
        elapsed_time: float,
        type: str,
        params: dict,
        exception_log: str,
    ) -> None:
        data = {
            "scope": key,
            "version": version,
            "cpu_usage": cpu_usage_one_core,
            "memory_usage": memory_usage,
            "elapsed_time": elapsed_time,
            "type": type,
            "python_version": f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}",
            "os_name": self.os_name(),
            "params": json.dumps(params),
            "exception_log": exception_log,
        }

        self._send_request("POST", "/dump_run", data)

    def get_dump_history(self, key) -> dict:
        data = {"scope": key}
        return self._send_request("POST", "/get_dump_history", data)
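profile_function below wraps a loaded callable with CPU-time and memory sampling before reporting to /dump_run. As a minimal standalone sketch of the same measurement pattern (requires the memory_profiler package; names are hypothetical):

```python
import time
import traceback
from functools import wraps

from memory_profiler import memory_usage

def measure(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Sample process CPU time and resident memory around the call.
        start_time = time.process_time()
        start_memory = memory_usage(-1, interval=0.1, timeout=1)[0]
        try:
            result = func(*args, **kwargs)
        except Exception:
            traceback.print_exc()
            result = None
        elapsed = time.process_time() - start_time
        memory_delta = memory_usage(-1, interval=0.1, timeout=1)[0] - start_memory
        print(f"{func.__name__}: {elapsed:.4f}s CPU, {memory_delta:.2f} MiB delta")
        return result

    return wrapper
```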
    def profile_function(self, key: str, version: str, commit: str, func: callable) -> callable:
        @wraps(func)
        def wrapper_function(*args, **kwargs):
            # Get current process time and memory usage before function execution
            start_time = time.process_time()
            start_memory = memory_usage(-1, interval=0.1, timeout=1)[0]
            # Run the function and get its output
            succeeded = True
            try:
                output = func(*args, **kwargs)
            except:
                traceback.print_exc()
                succeeded = False
                output = traceback.format_exc()
            # Get process time and memory usage after function execution
            end_time = time.process_time()
            end_memory = memory_usage(-1, interval=0.1, timeout=1)[0]
            # Calculate time used and memory used
            time_used = end_time - start_time
            memory_used = end_memory - start_memory
            # Elapsed process time doubles as the total time reported
            total_time = time_used

            # Express process time as a percentage of one core
            cpu_usage_for_one_core = time_used * 100

            def is_basic_type(value) -> bool:
                """Check if the value is of a basic type (int, float, str, list, dict)."""
                if isinstance(value, (int, float, str)):
                    return True
                elif isinstance(value, list):
                    return all(is_basic_type(item) for item in value)
                elif isinstance(value, dict):
                    return all(
                        is_basic_type(k) and is_basic_type(v) for k, v in value.items()
                    )
                return False

            def normalize_params(*args, **kwargs) -> dict:
                """Normalize *args and **kwargs into a dictionary with only basic types."""
                normalized = {}

                # Normalize args
                for i, arg in enumerate(args):
                    if is_basic_type(arg):
                        normalized[f"arg_{i}"] = arg

                # Normalize kwargs
                for key, value in kwargs.items():
                    if is_basic_type(value):
                        normalized[key] = value

                return normalized

            the_params = normalize_params(*args, **kwargs)

            the_version = None
            if version is None:
                latest_commit = commit
                the_version = latest_commit
            else:
                the_version = version
            the_type = "Succed" if succeeded else "Failed"
            exception_log = None
            if not succeeded:
                exception_log = output
            try:
                self.add_run_history(
                    key,
                    the_version,
                    cpu_usage_for_one_core,
                    memory_used,
                    total_time,
                    the_type,
                    the_params,
                    exception_log,
                )
            except:
                self._log(
                    f"Error while adding run history; the server may not support it. {key}"
                )
            return output

        return wrapper_function

    def get_settings(self, key: str) -> dict:
        data = {"scope": key}
        return self._send_request("POST", "/get_settings_of_scope", data)

    def is_usage_analyses_true(self, key: str) -> Optional[bool]:
        settings = self.get_settings(key)

        if settings is None or settings == [None]:
            cpu_usage_analyses = False
        else:
            cpu_usage_analyses = None
            if "usage_analyses" in settings:
                try:
                    cpu_usage_analyses = settings["usage_analyses"].lower() == "true"
                except:
                    cpu_usage_analyses = None

        return cpu_usage_analyses

    def active(
        self,
        value: Optional[callable] = None,
        just_name: bool = False,  # when True, register under the bare function name
    ) -> Optional[callable]:
        def decorate(value) -> callable:
            key = value.__name__
            if (
                value.__module__ != "__main__"
                and value.__module__ is not None
                and not just_name
            ):
                key = value.__module__ + "." + key
            self.set(
                key,
                value,
            )
            return value

        if value is None:
            return decorate
        return decorate(value)

    def get_all(
        self,
    ) -> dict:
        datas = self._send_request("GET", "/get_all_scopes_user")
        return datas

    def delete(self, scope: str) -> dict:
        data = {"scope": scope}
        return self._send_request("POST", "/delete_scope", data)

    def database_list(self) -> list:
        return ast.literal_eval(self._send_request("GET", "/database/list"))

    def database_rename(self, database_name: str, new_database_name: str) -> dict:
        data = {
            "database_name": database_name,
            "new_database_name": new_database_name,
        }
        return self._send_request("POST", "/database/rename", data)

    def database_pop(self, database_name: str) -> dict:
        data = {"database_name": database_name}
        return self._send_request("POST", "/database/pop", data)

    def database_pop_all(self) -> dict:
        return self._send_request("GET", "/database/pop_all")

    def database_delete(self, database_name: str) -> dict:
        data = {"database_name": database_name}
        return self._send_request("POST", "/database/delete", data)

    def database_delete_all(self) -> dict:
        return self._send_request("GET", "/database/delete_all")

    def ai_completion(self, message: str, model: Optional[str] = None) -> dict:
        data = {"message": message}
        if model is not None:
            data["model"] = model
        return self._send_request("POST", "/ai_completion", data)

    def get_all_scopes_user(self) -> dict:
        return self._send_request("GET", "/get_all_scopes_user")
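A hedged usage sketch for the active decorator above; the endpoint, access key, and functions are placeholders, not real credentials:

```python
client = Upsonic_On_Prem("https://api.example.invalid", "ACCESS_KEY")  # hypothetical

@client.active
def add(a: int, b: int) -> int:
    return a + b  # registered via client.set under "<module>.add"

@client.active(just_name=True)
def multiply(a: int, b: int) -> int:
    return a * b  # registered under the bare name "multiply"
```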
    def get_name(self, value):
        # Try to use dill to get the name
        name = dill.source.getname(value)
        if name is not None:
            return name

        # For functions, methods, and classes
        if inspect.isfunction(value) or inspect.isclass(value) or inspect.ismethod(value):
            return value.__name__

        # For instances of a class
        # NOTE: nearly every object has __class__, so the branches below are rarely reached.
        elif hasattr(value, '__class__'):
            return value.__class__.__name__

        # For variables in the current scope (using globals)
        globals_dict = globals()
        for name, val in globals_dict.items():
            if val is value:
                return name

        # If none of the above work
        return None
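A small illustration of the resolution order get_name above is expected to follow (client is the placeholder instance from the previous sketch):

```python
def greet():
    pass

class Point:
    pass

client.get_name(greet)  # expected: "greet"
client.get_name(Point)  # expected: "Point"
# Instances without a __name__ fall through to the __class__.__name__ branch.
```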
    def auto_dump(
        self, value, ask=True, suggestion_only=False, check_function=True, print_prompts=False, model: Optional[str] = None
    ) -> None:
        if model is None:
            model = self.get_default_ai_model()

        if model == "llama3-8b":
            check_function = False

        if check_function:
            check = self.check_function(value, print_prompts=print_prompts, model=model)
            if check is not True:
                print("Check:", check)
                return

        code = textwrap.dedent(extract_source(value))
        all_scopes = self.get_all_scopes_user()
        try:
            all_scopes = "\n".join(all_scopes)
        except:
            all_scopes = ""

        prompt = f"""
You are a helpful software engineer. Help to organize library elements in a short and clear manner.


Generate a position for this:
```
{code}
```

Current Index of the Library:
```
{all_scopes}
```

Your answer should be just the suggested position. Don't say anything else.


Categories include (but are not limited to):
- database.connections (e.g., database.connections.postgre, database.connections.mysql)
- data.processing (e.g., data.processing.cleaning, data.processing.transformation)
- api.integration (e.g., api.integration.rest, api.integration.graphql)
- utils.helpers (e.g., utils.helpers.date, utils.helpers.string)
- machine_learning.models (e.g., machine_learning.models.classification, machine_learning.models.regression)
- visualization.charts (e.g., visualization.charts.bar, visualization.charts.line)


Suggested Position:

"""

        ai_answer = self.ai_completion(prompt, model=model)
        ai_answer = ai_answer.replace("`", "").replace("\n", "")
        ai_answer = ".".join(ai_answer.split(".")[:-1])
        ai_answer = ai_answer + "." + self.get_name(value)
        prompt = prompt + f"\nASSISTANT: {ai_answer}\n"

        prompt = (
            prompt + "\nQUESTION: Extract and just answer with the suggested position"
        )
        if print_prompts:
            print(
                "Prompt", prompt.replace(code, "CODE").replace(all_scopes, "ALL SCOPES")
            )
        ai_answer = self.ai_completion(prompt, model=model)
        if print_prompts:
            print(
                "AI answer",
                ai_answer.replace(code, "CODE").replace(all_scopes, "ALL SCOPES"),
            )
        ai_answer = ai_answer.replace("`", "").replace("\n", "")
        ai_answer = ai_answer.replace("ASSISTANT: ", "")
        if suggestion_only:
            print("Suggestion:", ai_answer)
            return ai_answer

        if ai_answer in all_scopes:
            print(f"Check: similarity with the {ai_answer} is detected")
            return

        if ask:
            print("Commands:\n(Y)es/(N)o\n")
            while True:
                y_n = input(f"{ai_answer} ").lower()

                if y_n == "y":
                    self.set(ai_answer, value)
                    print("\nDumped")
                    break
                if y_n == "n":
                    break

        else:
            self.set(ai_answer, value)
            print("\nDumped")

    def get_document(self, scope: str, version: Optional[str] = None) -> any:
        data = {"scope": scope}
        if version is not None:
            data["version"] = version
        return self._send_request("POST", "/get_document_of_scope", data)
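A hedged usage sketch for auto_dump above; the function is illustrative, and the suggested position depends entirely on the configured AI model:

```python
def parse_date(s: str):
    from datetime import datetime
    return datetime.strptime(s, "%Y-%m-%d")

suggested = client.auto_dump(parse_date, suggestion_only=True)
# Prints something like: Suggestion: utils.helpers.date.parse_date
```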
    def check_function(self, value, print_prompts=False, model: Optional[str] = None) -> Union[bool, str]:
        code = textwrap.dedent(extract_source(value))

        all_scopes_ = self.get_all_scopes_user()
        all_scopes = ""
        for i in all_scopes_:
            all_scopes += i + "\n"

        if print_prompts:
            print("Code", code)
            print("All scopes", all_scopes)

        prompt = f"""
Current Library Index:
```
{all_scopes}

```

Now analyze each element of the Current Library Index and look for potentially similar functionality to this:

```
{code}

```

Which one is the most similar?
"""

        ai_answer = self.ai_completion(prompt, model=model)

        ai_answer = ai_answer.replace("`", "").replace("\n", "")

        similarity_explanation = ai_answer

        prompt = prompt + f"\nASSISTANT: {ai_answer}\n"

        prompt = prompt + "\nQUESTION: Is there any duplication risk (Y/N)?"

        if print_prompts:
            print(
                "Prompt", prompt.replace(code, "CODE").replace(all_scopes, "ALL SCOPES")
            )
        ai_answer = self.ai_completion(prompt, model=model)
        ai_answer = ai_answer.replace("`", "").replace("\n", "")
        ai_answer = ai_answer.split(",")[0]
        ai_answer = ai_answer.replace("ASSISTANT: ", "")
        if print_prompts:
            print(
                "AI answer",
                ai_answer.replace(code, "CODE").replace(all_scopes, "ALL SCOPES"),
            )
        if ai_answer in ("Y", "YES", "Yes"):
            prompt = prompt + f"\nASSISTANT: {ai_answer}\n"

            prompt = (
                prompt
                + "\nQUESTION: Extract and just answer with the suggested position"
            )
            ai_answer = self.ai_completion(prompt, model=model)
            ai_answer = ai_answer.replace("`", "").replace("\n", "")
            ai_answer = ai_answer.split(",")[0]
            ai_answer = ai_answer.replace("ASSISTANT: ", "")
            return "similarity: " + ai_answer + " - " + similarity_explanation
        if ai_answer in ("N", "NO", "No"):
            return True
        return similarity_explanation

    def search_by_documentation(self, question: str) -> List[str]:
        data = {"question": question}
        response = self._send_request("POST", "/search_by_documentation", data)
        result = []
        for i in response:
            result.append(i[0])
        return result

    def search(self, question: str) -> List[str]:
        return self.search_by_documentation(question)

    def get_default_ai_model(self) -> str:
        return self._send_request("GET", "/get_default_ai_model")

    def get_version_history(self, key: str) -> any:
        data = {"scope": key}
        version = self._send_request("POST", "/get_version_history", data)
        return version

    def get_module_version_history(self, key: str) -> any:
        data = {"top_library": key}
        return self._send_request("POST", "/get_module_version_history", data)

    def delete_version(self, key: str, version: str) -> any:
        data = {"version": key + ":" + version}
        return self._send_request("POST", "/delete_version", data)

    def delete_module_version(self, module_name: str, version: str) -> any:
        data = {"top_library": module_name, "version": version}
        return self._send_request("POST", "/delete_version_prefix", data)

    def create_version(self, key: str, version: str) -> any:
        data = {"scope": key, "version": version}
        return self._send_request("POST", "/create_version", data)

    def create_module_version(self, module_name: str, version: str) -> any:
        data = {"top_library": module_name, "version": version}
        return self._send_request("POST", "/create_version_prefix", data)

    def get_version_data(self, key: str, version: str) -> any:
        data = {"version": key + ":" + version}
        return self._send_request("POST", "/load_specific_version", data)

    def get_requirements(self, key: str) -> any:
        data = {"scope": key}
        return self._send_request("POST", "/get_requirements_of_scope", data)
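A hedged sketch of the versioning endpoints above; the scope name is illustrative, and passing version to get mirrors the version handling seen earlier in this file:

```python
client.create_version("math.add", "1.0")          # snapshot the current scope
history = client.get_version_history("math.add")  # list recorded versions
old_add = client.get("math.add", version="1.0")   # served via /load_specific_version
```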
    def add_requirement(self, key: str, requirement: str) -> any:
        currently_requirements = self.get_requirements(key)
        if currently_requirements is None:
            currently_requirements = ""

        if currently_requirements != "":
            currently_requirements += ", "

        currently_requirements += requirement
        data = {
            "scope": key,
            "requirements": currently_requirements,
        }
        return self._send_request("POST", "/dump_requirements", data)

    def clear_requirements(self, key: str) -> any:
        data = {
            "scope": key,
            "requirements": "",
        }
        return self._send_request("POST", "/dump_requirements", data)

    def get_type(self, key: str, version: str = None) -> any:
        data = {"scope": key}
        if version is not None:
            data["version"] = version
        return self._send_request("POST", "/get_type_of_scope", data)

    def get_code(self, key: str, version: str = None):
        data = {"scope": key}
        if version is not None:
            data["version"] = version
        return self._send_request("POST", "/get_code_of_scope", data)

    def langchain(self, prefix: str = None, version: str = None) -> List[any]:
        from langchain.tools import tool

        all_functions = []
        for each in self.get_all():
            if each.endswith("__user"):
                continue
            # Normalize every separator in the scope name to an underscore,
            # since dots are not valid in tool names.
            the_true_name = each
            the_true_name = the_true_name.replace("_", ".")
            the_true_name = the_true_name.replace(".", "_")
            if prefix is not None:
                if not each.startswith(prefix):
                    continue
            if self.get_type(each) == "function":
                the_function = self.get(each, version=version)
                if inspect.isfunction(the_function):
                    the_document = self.get_document(each, version=version) or " "
                    the_function.__doc__ = the_document
                    try:
                        the_tool = tool(the_function)
                        original_name = the_tool.name
                        the_tool.name = the_true_name
                        the_tool.description = the_tool.description.replace(
                            original_name, the_true_name
                        )[:1000]
                        all_functions.append(the_tool)
                    except:
                        traceback.print_exc()
                        pass
        return all_functions

    def crewai(self, prefix: Optional[str] = None, version: Optional[str] = None) -> list:
        from crewai_tools import tool

        all_functions = []
        for each in self.get_all():
            if each.endswith("__user"):
                continue
            # Normalize every separator in the scope name to an underscore.
            the_true_name = each
            the_true_name = the_true_name.replace("_", ".")
            the_true_name = the_true_name.replace(".", "_")
            if prefix is not None:
                if not each.startswith(prefix):
                    continue
            if self.get_type(each) == "function":
                the_function = self.get(each, version=version)
                if inspect.isfunction(the_function):
                    the_document = self.get_document(each, version=version) or " "
                    the_function.__doc__ = the_document
                    try:
                        the_tool = tool(the_function)
                        original_name = the_tool.name
                        the_tool.name = the_true_name
                        the_tool.description = the_tool.description.replace(
                            original_name, the_true_name
                        )[:1000]
                        all_functions.append(the_tool)
                    except:
                        traceback.print_exc()
                        pass
        return all_functions
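A hedged usage sketch for the langchain adapter above (requires the langchain package; the prefix is illustrative):

```python
tools = client.langchain(prefix="utils.helpers")
for t in tools:
    # Tool names are the underscore-normalized scope names.
    print(t.name, "-", t.description[:60])
```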
    def autogen(self, caller, executor, prefix: Optional[str] = None, version: Optional[str] = None) -> None:
        import autogen

        for each in self.get_all():
            if each.endswith("__user"):
                continue
            the_true_name = each
            the_true_name = the_true_name.replace("_", ".")
            the_true_name = the_true_name.replace(".", "_")
            if prefix is not None:
                if not each.startswith(prefix):
                    continue
            if self.get_type(each) == "function":
                the_function = self.get(each, version=version)
                if inspect.isfunction(the_function):
                    the_document = self.get_document(each, version=version) or " "
                    the_function.__doc__ = the_document
                    try:
                        the_document = the_document.replace(
                            each, the_true_name
                        ).replace(each.split(".")[-1], the_true_name)[:1000]
                        autogen.agentchat.register_function(
                            the_function,
                            caller=caller,
                            executor=executor,
                            description=the_document,
                            name=the_true_name,
                        )
                    except:
                        traceback.print_exc()
                        pass

    def openinterpreter(self, agent, prefix: Optional[str] = None, version: Optional[str] = None) -> None:
        def replace_function_name(input_string, new_function_name) -> str:
            result = re.sub(
                r"^(def )(\w+)",
                rf"\g<1>{new_function_name}",
                input_string,
                flags=re.MULTILINE,
            )
            return result

        def extract_function_definition(input_string: str) -> str:
            function_name = input_string.split("def ")[1].split(")")[0]

            return function_name + ")"

        class open_interpreter_tool:
            def __init__(self, function: str, name: str, document: str):
                self.name = name
                self.description = document
                self.function = replace_function_name(function, name)

            @property
            def type_for_prompt(self) -> str:
                the_description = self.description[:200]
                the_description = the_description.replace("\n", " ")
                return (
                    extract_function_definition(self.function).replace("\n", " ")
                    + " #"
                    + the_description
                )

        all_functions = []
        for each in self.get_all():
            if each.endswith("__user"):
                continue
            the_true_name = each
            original_name = each
            the_true_name = the_true_name.replace("_", ".")
            the_true_name = the_true_name.replace(".", "_")
            if prefix is not None:
                if not each.startswith(prefix):
                    continue
            if self.get_type(each) == "function":
                the_function = self.get(
                    each, version=version, try_to_extract_importable=True
                )
                the_document = self.get_document(each, version=version) or " "
                the_document = the_document.replace(
                    original_name.split(".")[-1], the_true_name
                )[:1000]
                try:
                    the_tool = open_interpreter_tool(
                        the_function, the_true_name, the_document
                    )
                    all_functions.append(the_tool)
                except:
                    traceback.print_exc()
                    pass

        for each_f in all_functions:
            if self.tester:
                self._log(each_f.function)
            agent.computer.run("python", each_f.function)

        agent.system_message += r"""
# THE OTHER APIs

These functions are ALREADY IMPORTED and can be used for many tasks:

```python

"""

        for each_f in all_functions:
            agent.system_message += each_f.type_for_prompt + "\n\n"

        agent.system_message += r"""
```

Do not import anything; these functions are already defined.
"""
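For reference, the regex rename used by openinterpreter above, shown in isolation:

```python
import re

src = "def add(a, b):\n    return a + b"
renamed = re.sub(r"^(def )(\w+)", r"\g<1>math_add", src, flags=re.MULTILINE)
assert renamed == "def math_add(a, b):\n    return a + b"
```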
1748 | """ 1749 | 1750 | def return_openai_llm(self, model: str = None): 1751 | from langchain_openai import ChatOpenAI 1752 | import httpx 1753 | 1754 | httpx_client = httpx.Client(verify=False) 1755 | 1756 | llm = ChatOpenAI( 1757 | openai_api_key=self.password, 1758 | http_client=httpx_client, 1759 | openai_api_base=self.api_url + "/openai/", 1760 | model_name=model, 1761 | ) 1762 | return llm 1763 | 1764 | def return_ollama_llm(self,model: str = None): 1765 | from .ollama_langchain import Ollama 1766 | 1767 | llm = Ollama( 1768 | model=f"{model}**{self.password}", base_url=self.api_url + "/ollama" 1769 | ) 1770 | return llm 1771 | 1772 | def check_idea(self, idea:str)-> str: 1773 | search_result = self.search(idea) 1774 | if len(search_result) == 0: 1775 | return True 1776 | 1777 | # Get the first result of check and as ai to check the similarity 1778 | first_result = search_result[0] 1779 | 1780 | # Get the document of the first result 1781 | first_result_document = self.get_document(first_result) 1782 | 1783 | # get the code of the first result 1784 | first_result_code = self.get_code(first_result) 1785 | 1786 | # Ask to AI to check the similarity 1787 | prompt = f""" 1788 | The user planning to write a function about this: {idea} 1789 | 1790 | Currently function: {first_result} 1791 | Currently function Document: {first_result_document} 1792 | Currently function Code: {first_result_code} 1793 | 1794 | Is the idea making same thing with the first result (Y/N)? 1795 | """ 1796 | 1797 | ai_answer = self.ai_completion(prompt) 1798 | ai_answer = ai_answer.replace("`", "").replace("\n", "") 1799 | ai_answer = ai_answer.split(",")[0] 1800 | ai_answer = ai_answer.replace("ASSISTANT: ", "") 1801 | 1802 | is_not_similar = False 1803 | 1804 | if ai_answer == "N" or ai_answer == "NO" or ai_answer == "No": 1805 | is_not_similar = True 1806 | 1807 | if not is_not_similar: 1808 | # Ask ai to explain similarity in one sentence 1809 | prompt = f""" 1810 | Current Idea: {idea} 1811 | First Result: {first_result} 1812 | First Result Document: {first_result_document} 1813 | First Result Code: {first_result_code} 1814 | 1815 | Explain the similarity in one sentence. If you need to refer to First Result please use {first_result}. 1816 | """ 1817 | 1818 | ai_answer = self.ai_completion(prompt) 1819 | ai_answer = ai_answer.replace("`", "").replace("\n", "") 1820 | ai_answer = ai_answer.split(",")[0] 1821 | ai_answer = ai_answer.replace("ASSISTANT: ", "") 1822 | 1823 | return "Fail: " + ai_answer 1824 | else: 1825 | return "Pass: There is no same functionality in the library" 1826 | 1827 | 1828 | def get_client_version(self): 1829 | from .. import __version__ 1830 | return __version__ 1831 | 1832 | 1833 | class UpsonicOnPrem(Upsonic_On_Prem): 1834 | pass 1835 | 1836 | 1837 | def Tiger(): 1838 | return Upsonic_On_Prem( 1839 | "https://api_tiger.upsonic.co", 1840 | "ACK_xmxIiqsgGySvBPPd55M0Ldm5AcR2kt6r3kmL52Ptqo", 1841 | engine="upsonic_serializer", 1842 | pass_python_version_check=True, 1843 | enable_auto_requirements=True, 1844 | ) 1845 | 1846 | 1847 | def Tiger_Admin(api_url:str, access_key:str): 1848 | return Upsonic_On_Prem( 1849 | api_url, 1850 | access_key, 1851 | engine="upsonic_serializer", 1852 | pass_python_version_check=True, 1853 | enable_auto_requirements=True, 1854 | ) 1855 | --------------------------------------------------------------------------------