├── .dockerignore ├── .editorconfig ├── .github └── workflows │ ├── default.yml │ ├── flake8-matcher.json │ ├── mypy-matcher.json │ └── timeline-check.yml ├── .gitignore ├── CHANGELOG.md ├── DEPENDENCIES.md ├── Dockerfile ├── HISTORY.md ├── LICENSE ├── LOGGING.md ├── MANIFEST.in ├── MIGRATION.md ├── README.md ├── TESTING.md ├── alembic.ini.sample ├── changes └── template.md ├── codecov.yml ├── config ├── ci.toml ├── halfstack.alembic.ini ├── halfstack.toml ├── sample.etcd.config.json ├── sample.etcd.redis-sentinel.json ├── sample.etcd.redis-single.json ├── sample.etcd.volumes.json └── sample.toml ├── docs ├── 382.fix ├── Makefile ├── conf.py ├── index.rst ├── make.bat └── manager │ ├── driver.rst │ ├── instance.rst │ └── structs.rst ├── fixtures ├── example-keypairs.json ├── example-resource-presets.json └── example-session-templates.json ├── pyproject.toml ├── requirements ├── build.txt ├── dev.txt ├── lint.txt ├── main.txt ├── test.txt └── typecheck.txt ├── scripts ├── create-users-batch.py ├── create-users-from-csv.py ├── migrate-vfolder-structure.py ├── query-test.py ├── set-psql-superuser.sql └── switch-redis.sh ├── setup.cfg ├── setup.py ├── src └── ai │ └── backend │ └── manager │ ├── __init__.py │ ├── api │ ├── __init__.py │ ├── admin.py │ ├── auth.py │ ├── cluster_template.py │ ├── context.py │ ├── domainconfig.py │ ├── etcd.py │ ├── events.py │ ├── exceptions.py │ ├── groupconfig.py │ ├── image.py │ ├── logs.py │ ├── manager.py │ ├── py.typed │ ├── ratelimit.py │ ├── resource.py │ ├── scaling_group.py │ ├── session.py │ ├── session_template.py │ ├── stream.py │ ├── types.py │ ├── userconfig.py │ ├── utils.py │ ├── vfolder.py │ └── wsproxy.py │ ├── cli │ ├── __init__.py │ ├── __main__.py │ ├── context.py │ ├── dbschema.py │ ├── etcd.py │ ├── fixture.py │ ├── gql.py │ ├── image.py │ └── image_impl.py │ ├── config.py │ ├── container_registry │ ├── __init__.py │ ├── base.py │ ├── docker.py │ └── harbor.py │ ├── defs.py │ ├── exceptions.py │ ├── idle.py │ ├── models │ ├── __init__.py │ ├── agent.py │ ├── alembic │ │ ├── README │ │ ├── env.py │ │ ├── script.py.mako │ │ └── versions │ │ │ ├── 01456c812164_add_idle_timeout_to_keypair_resource_.py │ │ │ ├── 015d84d5a5ef_add_image_table.py │ │ │ ├── 0262e50e90e0_add_ssh_keypair_into_keypair.py │ │ │ ├── 02950808ca3d_add_agent_version.py │ │ │ ├── 06184d82a211_add_session_creation_id.py │ │ │ ├── 0c5733f80e4d_index_kernel_timestamps.py │ │ │ ├── 0d553d59f369_users_replace_is_active_to_status_and_its_info.py │ │ │ ├── 0e558d06e0e3_add_service_ports.py │ │ │ ├── 0f3bc98edaa0_more_status.py │ │ │ ├── 0f7a4b643940_.py │ │ │ ├── 10e39a34eed5_enlarge_kernels_lang_column_length.py │ │ │ ├── 11146ba02235_change_char_col_to_str.py │ │ │ ├── 185852ff9872_add_vfolder_permissions_table.py │ │ │ ├── 1e673659b283_add_clusterized_column_to_agents_table.py │ │ │ ├── 1e8531583e20_add_dotfile_column_to_keypairs.py │ │ │ ├── 1fa6a31ea8e3_add_inviter_field_for_vfolder_.py │ │ │ ├── 202b6dcbc159_add_internal_data_to_kernels.py │ │ │ ├── 22964745c12b_add_total_resource_slots_to_group.py │ │ │ ├── 22e52d03fc61_add_allowed_docker_registries_in_domains.py │ │ │ ├── 250e8656cf45_add_status_data.py │ │ │ ├── 25e903510fa1_add_dotfiles_to_domains_and_groups.py │ │ │ ├── 26d0c387e764_create_vfolder_invitations_table.py │ │ │ ├── 2a82340fa30e_add_mounts_info_in_kernel_db.py │ │ │ ├── 2b0931e4a059_convert_lang_to_image_and_registry.py │ │ │ ├── 352fa4f88f61_add_tpu_slot_on_kernel_model.py │ │ │ ├── 3bb80d1887d6_add_preopen_ports.py │ │ │ ├── 3cf19d906e71_.py │ │ │ ├── 
3f1dafab60b2_merge.py │ │ │ ├── 405aa2c39458_job_queue.py │ │ │ ├── 4545f5c948b3_add_io_scratch_size_stats.py │ │ │ ├── 48ab2dfefba9_reindex_kernel_updated_order.py │ │ │ ├── 4b7b650bc30e_add_creator_in_vfolders.py │ │ │ ├── 4b8a66fb8d82_revamp_keypairs.py │ │ │ ├── 4cc87e7fbfdf_stats_refactor.py │ │ │ ├── 513164749de4_add_cancelled_to_kernelstatus.py │ │ │ ├── 518ecf41f567_add_index_for_cluster_role.py │ │ │ ├── 51dddd79aa21_add_logs_column_on_kernel_table.py │ │ │ ├── 529113b08c2c_add_vfolder_type_column.py │ │ │ ├── 548cc8aa49c8_update_cluster_columns_in_kernels.py │ │ │ ├── 57b523dec0e8_add_tpu_slots.py │ │ │ ├── 57e717103287_rename_clone_allowed_to_cloneable.py │ │ │ ├── 5b45f28d2cac_add_resource_opts_in_kernels.py │ │ │ ├── 5d8e6043455e_add_user_group_ids_in_vfolder.py │ │ │ ├── 5de06da3c2b5_init.py │ │ │ ├── 5e88398bc340_add_unmanaged_path_column_to_vfolders.py │ │ │ ├── 60a1effa77d2_add_coordinator_address_column_on_.py │ │ │ ├── 65c4a109bbc7_.py │ │ │ ├── 6f1c1b83870a_merge_user_s_first__last_name_into_full_.py │ │ │ ├── 6f5fe19894b7_vfolder_invitation_state_to_enum_type.py │ │ │ ├── 7a82e0c70122_add_group_model.py │ │ │ ├── 7dd1d81c3204_add_vfolder_mounts_to_kernels.py │ │ │ ├── 7ea324d0535b_vfolder_and_kernel.py │ │ │ ├── 80176413d8aa_keypairs_get_is_admin.py │ │ │ ├── 819c2b3830a9_add_user_model.py │ │ │ ├── 81c264528f20_add_max_session_lifetime.py │ │ │ ├── 854bd902b1bc_change_kernel_identification.py │ │ │ ├── 8679d0a7e22b_add_scheduled_to_kernelstatus.py │ │ │ ├── 8e660aa31fe3_add_resource_presets.py │ │ │ ├── 911023380bc9_add_architecture_column_on_agents.py │ │ │ ├── 93e9d31d40bf_agent_add_region.py │ │ │ ├── 97f6c80c8aa5_merge.py │ │ │ ├── 9a91532c8534_add_scaling_group.py │ │ │ ├── 9bd986a75a2a_allow_kernels_scaling_group_nullable.py │ │ │ ├── 9c89b9011872_add_attached_devices_field_in_kernels.py │ │ │ ├── 9cd61b1ae70d_add_scheduable_field_to_agents.py │ │ │ ├── a1fd4e7b7782_enumerate_vfolder_perms.py │ │ │ ├── a7ca9f175d5f_merge.py │ │ │ ├── babc74594aa6_add_partial_index_to_kernels.py │ │ │ ├── bae1a7326e8a_add_domain_model.py │ │ │ ├── bf4bae8f942e_add_kernel_host.py │ │ │ ├── c092dabf3ee5_add_batch_session.py │ │ │ ├── c1409ad0e8da_.py │ │ │ ├── c3e74dcf1808_add_environ_to_kernels.py │ │ │ ├── c401d78cc7b9_add_allowed_vfolder_hosts_to_domain_and_.py │ │ │ ├── c481d3dc6c7d_add_shared_memory_to_resource_presets.py │ │ │ ├── c5e4e764f9e3_add_domain_group_user_fields_to_kernels.py │ │ │ ├── ce209920f654_create_task_template_table.py │ │ │ ├── d2aafa234374_create_error_logs_table.py │ │ │ ├── d452bacd085c_add_mount_map_column_to_kernel.py │ │ │ ├── d463fc5d6109_add_clone_allowed_to_vfolders.py │ │ │ ├── d52bf5ec9ef3_convert_cpu_gpu_slots_to_float.py │ │ │ ├── d582942886ad_add_tag_to_kernels.py │ │ │ ├── d59ff89e7514_remove_keypair_concurrency_used.py │ │ │ ├── d5cc54fd36b5_update_for_multicontainer_sessions.py │ │ │ ├── d643752544de_.py │ │ │ ├── d727b5da20e6_add_callback_url_to_kernels.py │ │ │ ├── da24ff520049_add_starts_at_field_into_kernels.py │ │ │ ├── dbc1e053b880_add_keypair_resource_policy.py │ │ │ ├── dc9b66466e43_remove_clusterized.py │ │ │ ├── e18ed5fcfedf_add_superadmin_role_for_user.py │ │ │ ├── e35332f8d23d_add_modified_at_to_users_and_kernels.py │ │ │ ├── e421c02cf9e4_rename_kernel_dependencies_to_session_.py │ │ │ ├── e7371ca5797a_rename_mem_stats.py │ │ │ ├── ed666f476f39_add_bootstrap_script_to_keypairs.py │ │ │ ├── eec98e65902a_merge_with_vfolder_clone.py │ │ │ ├── f0f4ee907155_dynamic_resource_slots.py │ │ │ ├── 
f5530eccf202_add_kernels_uuid_prefix_index.py │ │ │ ├── f8a71c3bffa2_stringify_userid.py │ │ │ ├── f9971fbb34d9_add_state_column_to_vfolder_invitations.py │ │ │ └── ff4bfca66bf8_.py │ ├── base.py │ ├── domain.py │ ├── dotfile.py │ ├── error_logs.py │ ├── gql.py │ ├── group.py │ ├── image.py │ ├── kernel.py │ ├── keypair.py │ ├── minilang │ │ ├── __init__.py │ │ ├── ordering.py │ │ └── queryfilter.py │ ├── resource_policy.py │ ├── resource_preset.py │ ├── scaling_group.py │ ├── session_template.py │ ├── storage.py │ ├── user.py │ ├── utils.py │ └── vfolder.py │ ├── pglock.py │ ├── plugin │ ├── __init__.py │ ├── error_monitor.py │ ├── exceptions.py │ └── webapp.py │ ├── py.typed │ ├── registry.py │ ├── scheduler │ ├── __init__.py │ ├── dispatcher.py │ ├── drf.py │ ├── fifo.py │ ├── mof.py │ ├── predicates.py │ └── types.py │ ├── server.py │ └── types.py └── tests ├── __init__.py ├── api ├── test_auth.py ├── test_bgtask.py ├── test_config.py ├── test_exceptions.py ├── test_middlewares.py ├── test_ratelimit.py └── test_utils.py ├── conftest.py ├── model_factory.py ├── models └── test_dbutils.py ├── sample-ssl-cert ├── sample.crt ├── sample.csr └── sample.key ├── test_advisory_lock.py ├── test_image.py ├── test_predicates.py ├── test_queryfilter.py ├── test_queryorder.py ├── test_registry.py └── test_scheduler.py /.dockerignore: -------------------------------------------------------------------------------- 1 | Dockerfile* 2 | build/ 3 | dist/ 4 | *.py[co] 5 | *.egg-info 6 | __pycache__ 7 | venv*/ 8 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | trim_trailing_whitespace = true 7 | charset = utf-8 8 | max_line_length = 0 9 | 10 | [*.{py,pyi,pyx,md,rst}] 11 | max_line_length = 105 12 | indent_style = space 13 | indent_size = 4 14 | 15 | [*.yml] 16 | indent_style = space 17 | indent_size = 2 18 | 19 | [*.toml] 20 | indent_style = space 21 | indent_size = 4 22 | 23 | [changes/*] 24 | max_line_length = 0 25 | -------------------------------------------------------------------------------- /.github/workflows/flake8-matcher.json: -------------------------------------------------------------------------------- 1 | { 2 | "problemMatcher": [ 3 | { 4 | "owner": "flake8", 5 | "pattern": [ 6 | { 7 | "regexp": "^([^:]*):(\\d+):(\\d+): (.*)$", 8 | "file": 1, 9 | "line": 2, 10 | "column": 3, 11 | "message": 4 12 | } 13 | ] 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /.github/workflows/mypy-matcher.json: -------------------------------------------------------------------------------- 1 | { 2 | "problemMatcher": [ 3 | { 4 | "owner": "mypy", 5 | "pattern": [ 6 | { 7 | "regexp": "^([^:]*):(\\d+): ([^:]+): (.*)$", 8 | "file": 1, 9 | "line": 2, 10 | "severity": 3, 11 | "message": 4 12 | } 13 | ] 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /.github/workflows/timeline-check.yml: -------------------------------------------------------------------------------- 1 | name: timeline-check 2 | 3 | on: [pull_request] 4 | 5 | jobs: 6 | towncrier: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v2 10 | with: 11 | fetch-depth: 0 12 | - name: Set up Python 13 | uses: actions/setup-python@v3 14 | with: 15 | python-version: "3.10" 16 | - name: Cache pip packages 17 | uses: 
actions/cache@v2 18 | with: 19 | path: ~/.cache/pip 20 | key: ${{ runner.os }}-pip-${{ hashFiles('requirements/*.txt') }} 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install -U pip setuptools wheel 24 | python -m pip install -U towncrier 25 | - name: Check existence of news fragment 26 | run: | 27 | git fetch --no-tags origin +refs/heads/${BASE_BRANCH}:refs/remotes/origin/${BASE_BRANCH} 28 | python -m towncrier.check --compare-with=origin/${BASE_BRANCH} 29 | env: 30 | BASE_BRANCH: ${{ github.base_ref }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | 26 | # PyInstaller 27 | # Usually these files are written by a python script from a template 28 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *,cover 45 | .mypy_cache/ 46 | .pytest_cache/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | 55 | # Sphinx documentation 56 | docs/_build/ 57 | 58 | # PyBuilder 59 | target/ 60 | 61 | # IDE stuff 62 | .idea/ 63 | .vscode/ 64 | .env 65 | .envrc 66 | .*.swp 67 | 68 | # virtualenvs 69 | venv*/ 70 | .python-version 71 | 72 | # for local integration tests 73 | src/ai/backend/agent 74 | 75 | # User configurations 76 | alembic.ini 77 | 78 | # Local config 79 | manager.toml 80 | manager.pid 81 | .DS_Store -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.10 2 | 3 | VOLUME /usr/src/app 4 | WORKDIR /usr/src/app 5 | 6 | RUN pip install --no-cache-dir -U pip wheel setuptools 7 | 8 | COPY . /usr/src/app 9 | RUN pip install --no-cache-dir -e . 10 | 11 | CMD ["python", "-m", "ai.backend.manager.server"] 12 | -------------------------------------------------------------------------------- /LOGGING.md: -------------------------------------------------------------------------------- 1 | Backend.AI Logging Convention 2 | ============================= 3 | 4 | We use Python's standard `logging` module to produce log entries, 5 | with multiple backends pluggable via the configuration. 6 | 7 | Most logs are at the `DEBUG` and `INFO` levels. 8 | 9 | Main sources of `INFO` logs: 10 | 11 | * API requests: Capitalized API names with a parenthesized list of key arguments, where each argument is 12 | formatted as a concatenated string of a variable-name shortcut, a colon, and its value. 13 | 14 | - Examples: 15 | 16 | - `VFOLDER.DOWNLOAD (ak:{0}, vf:{1})` with the requester's access key and the name of the vfolder. 
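Filled in with made-up placeholder values (both the access key and the folder name here are hypothetical), such an entry would read: `VFOLDER.DOWNLOAD (ak:AKIAEXAMPLEKEY, vf:mydata)`. 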
17 | - `GET_OR_CREATE (ak:{0}/{1}, img:{2}, s:{3})` with the requester's access key and optional owner 18 | access key (these are the same for normal users, but administrators may invoke the API on behalf of the 19 | resource owner), the image name, and the client-provided session ID. 20 | When the owner access key is the same as the requester access key, it is marked as `*`. 21 | - Superadmin-only APIs do not record the requester access key. 22 | 23 | - The API request logs mostly record the most important parameters, not the result. 24 | Some APIs may leave additional logs in a similar format but with additional details found during 25 | request processing. 26 | In such cases, additional messages are appended after the closing parenthesis, followed by a colon 27 | and a space. 28 | 29 | - Commonly used variable keys: 30 | 31 | - `ak`: Access key. If it includes two keys separated by a slash, the latter represents the owner 32 | access key. Otherwise it represents the API requester's access key. 33 | - `d`: Domain name. 34 | - `g`: Group name or ID. 35 | - `gid`: Group ID. 36 | - `u`: User ID (email). 37 | - `uid`: User UUID. 38 | - `s`: Session ID, which is unique among all pending/live sessions owned by each user. Usually set 39 | by the clients. 40 | - `k`: ComputeSession ID, which is a globally unique identifier. It is a UUID randomly generated by the 41 | manager. 42 | - `sg`: Scaling group. 43 | - `rp`: Resource policy. 44 | - `vf`: Virtual folder name, which is unique for each user. 45 | For renaming operations, this may have additional suffixes with a dot, such as `vf.old` and `vf.new`. 46 | - `vfh`: Host of the virtual folder. 47 | - `vfid`: Virtual folder ID, which is a globally unique identifier. It is a UUID randomly 48 | generated by the manager. 49 | - `path`: Target path in virtual folders or containers. If the API supports multiple files, 50 | this may contain only the first entry. 51 | - `img`: ComputeSession image name. 52 | 53 | - Variable values are usually just a string, but the following variations exist: 54 | 55 | - `[string-value,string-value,...]`: Array of string values, separated by commas and wrapped in 56 | square brackets. 57 | - Quoted string: a string value formatted using Python's `repr()` function, which may contain 58 | backslash-escaped quotation marks and whitespace characters. -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include src/ai/backend/manager/models/alembic/* -------------------------------------------------------------------------------- /MIGRATION.md: -------------------------------------------------------------------------------- 1 | Backend.AI Migration Guide 2 | ========================== 3 | 4 | ## General 5 | 6 | * The migration should be done while the managers and agents are shut down. 7 | * This guide only describes the additional steps to follow beyond the code/package upgrades. 8 | 9 | ## 21.09 to 22.03 10 | 11 | * `alembic upgrade head` is required to migrate the PostgreSQL database schema. 12 | - The `keypairs.concurrency_used` column is dropped; Redis is now used to keep track of it. 13 | - The `kernels.last_stat` column is still there, but it gets updated only when the kernels terminate. 14 | There is a backup option to restore the prior behavior of periodic sync, though: `debug.periodic-sync-stats` in 15 | `manager.toml`, as sketched below. 
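A minimal sketch of enabling it (assuming the dotted key denotes a `periodic-sync-stats` key under the `[debug]` section that also appears in the bundled sample configs; verify against your deployment before relying on it):

```toml
[debug]
periodic-sync-stats = true
```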
16 | 17 | * The Redis container used with the manager should be reconfigured to use a persistent database. 18 | In an HA setup, it is recommended to enable AOF by setting `appendonly yes` in the Redis configuration to make it 19 | recoverable after hardware failures. 20 | 21 | Consult [the official doc](https://redis.io/docs/manual/persistence/) for more details. 22 | 23 | - FYI: The official Docker image uses `/data` as the directory to store RDB/AOF files. It may be 24 | configured to use an explicit bind-mount of a host directory. If not configured, it will by default 25 | create an anonymous volume and mount it. 26 | 27 | * The image metadata database is migrated from etcd to PostgreSQL, while the registry configuration 28 | remains in etcd. 29 | 30 | Run `backend.ai mgr image rescan` in the manager venv or `backend.ai admin image rescan` from clients 31 | with the superadmin privilege to resync the image database. The old etcd image database will no longer 32 | be used. 33 | 34 | * The manager now has a replaceable distributed lock backend, configured by the key `manager.distributed-lock` in 35 | `manager.toml`. **The new default is "etcd".** "filelock" is suitable for single-node manager deployments 36 | as it relies on POSIX file-level advisory locks. Change this value to "pg_advisory" to restore the behavior 37 | of previous versions. "redlock" is not currently supported as aioredis v2 has a limited implementation. 38 | 39 | * (TODO) storage-proxy related changes 40 | 41 | * Configure an explicit cron job to execute `backend.ai mgr clear-history -r {retention}`, which trims old 42 | sessions' execution records from the PostgreSQL and Redis databases to avoid indefinite growth of the 43 | manager's disk and memory usage. 44 | 45 | The retention argument may be given as a human-readable duration expression, such as `30m`, `6h`, `3d`, 46 | `2w`, `3mo`, and `1yr`. If there is no unit suffix, the value is interpreted as seconds. 47 | It is recommended to schedule this command once a day. 48 | 49 | ## 21.03 to 21.09 50 | 51 | * `alembic upgrade head` is required to migrate the PostgreSQL database schema. -------------------------------------------------------------------------------- /alembic.ini.sample: -------------------------------------------------------------------------------- 1 | # A generic, single database configuration. 2 | 3 | [alembic] 4 | # path to migration scripts 5 | script_location = ai.backend.manager.models:alembic 6 | 7 | # template used to generate migration files 8 | # file_template = %%(rev)s_%%(slug)s 9 | 10 | # timezone to use when rendering the date 11 | # within the migration file as well as the filename. 12 | # string value is passed to dateutil.tz.gettz() 13 | # leave blank for localtime 14 | # timezone = 15 | 16 | # max length of characters to apply to the 17 | # "slug" field 18 | #truncate_slug_length = 40 19 | 20 | # set to 'true' to run the environment during 21 | # the 'revision' command, regardless of autogenerate 22 | # revision_environment = false 23 | 24 | # set to 'true' to allow .pyc and .pyo files without 25 | # a source .py file to be detected as revisions in the 26 | # versions/ directory 27 | # sourceless = false 28 | 29 | # version location specification; this defaults 30 | # to alembic/versions. 
When using multiple version 31 | # directories, initial revisions must be specified with --version-path 32 | # version_locations = %(here)s/bar %(here)s/bat alembic/versions 33 | 34 | # the output encoding used when revision files 35 | # are written from script.py.mako 36 | # output_encoding = utf-8 37 | 38 | sqlalchemy.url = postgresql://postgres:develove@localhost:8100/backend 39 | 40 | 41 | # Logging configuration 42 | [loggers] 43 | keys = root,sqlalchemy,alembic 44 | 45 | [handlers] 46 | keys = console 47 | 48 | [formatters] 49 | keys = generic 50 | 51 | [logger_root] 52 | level = WARN 53 | handlers = console 54 | qualname = 55 | 56 | [logger_sqlalchemy] 57 | level = WARN 58 | handlers = 59 | qualname = sqlalchemy.engine 60 | 61 | [logger_alembic] 62 | level = INFO 63 | handlers = 64 | qualname = alembic 65 | 66 | [handler_console] 67 | class = StreamHandler 68 | args = (sys.stderr,) 69 | level = NOTSET 70 | formatter = generic 71 | 72 | [formatter_generic] 73 | format = %(levelname)-5.5s [%(name)s] %(message)s 74 | datefmt = %H:%M:%S 75 | -------------------------------------------------------------------------------- /changes/template.md: -------------------------------------------------------------------------------- 1 | {%- if top_line -%} 2 | {{ top_line }} 3 | {%- elif versiondata.name -%} 4 | {{ versiondata.name }} {{ versiondata.version }} ({{ versiondata.date }}) 5 | {%- else -%} 6 | {{ versiondata.version }} ({{ versiondata.date }}) 7 | {%- endif -%} 8 | {%- for section, _ in sections.items() -%} 9 | {%- if section -%} 10 | ### {{ section }}{%- endif -%} 11 | {%- if sections[section] -%} 12 | {%- for category, val in definitions.items() if category in sections[section] %} 13 | 14 | 15 | ### {{ definitions[category]['name'] }} 16 | 17 | {%- if definitions[category]['showcontent'] %} 18 | {%- for text, values in sections[section][category].items() %} 19 | {%- if values[0].endswith("/0)") %} 20 | 21 | * {{ definitions[category]['name'] }} without explicit PR/issue numbers 22 | {{ text }} 23 | {%- else %} 24 | 25 | * {{ text }} {{ values|join(',\n ') }} 26 | {%- endif %} 27 | 28 | {%- endfor %} 29 | {%- else %} 30 | 31 | * {{ sections[section][category]['']|join(', ') }} 32 | {%- endif %} 33 | {%- if sections[section][category]|length == 0 %} 34 | 35 | No significant changes. 36 | {%- else %} 37 | {%- endif %} 38 | 39 | {%- endfor %} 40 | {%- else %} 41 | 42 | No significant changes. 
43 | {%- endif %} 44 | {%- endfor %} 45 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | - src/ai/backend/manager/models/alembic 3 | coverage: 4 | status: 5 | project: 6 | default: 7 | threshold: 5% 8 | paths: 9 | - src 10 | informational: true 11 | patch: 12 | default: 13 | threshold: 5% 14 | paths: 15 | - src 16 | informational: true 17 | -------------------------------------------------------------------------------- /config/ci.toml: -------------------------------------------------------------------------------- 1 | [etcd] 2 | namespace = "local" 3 | addr = { host = "127.0.0.1", port = 2379 } 4 | user = "" 5 | password = "" 6 | 7 | 8 | [db] 9 | type = "postgresql" 10 | addr = { host = "localhost", port = 5432 } 11 | name = "testing_db_XXX" # auto-generated for every test run 12 | user = "lablup" 13 | password = "develove" 14 | 15 | 16 | [manager] 17 | num-proc = 2 18 | service-addr = { host = "0.0.0.0", port = 8080 } 19 | ssl-enabled = false 20 | #ssl-cert = "/etc/backend.ai/ssl/apiserver-fullchain.pem" 21 | #ssl-privkey = "/etc/backend.ai/ssl/apiserver-privkey.pem" 22 | 23 | heartbeat-timeout = 5.0 24 | id = "i-travis" 25 | pid-file = "./manager.pid" 26 | disabled-plugins = [] 27 | 28 | importer-image = "lablup/importer:manylinux2010" 29 | 30 | event-loop = "asyncio" 31 | 32 | 33 | [docker-registry] 34 | ssl-verify = true 35 | 36 | 37 | [logging] 38 | level = "INFO" 39 | drivers = ["console"] 40 | 41 | [logging.pkg-ns] 42 | "" = "WARNING" 43 | "aiotools" = "INFO" 44 | "aiohttp" = "INFO" 45 | "ai.backend" = "INFO" 46 | "alembic" = "INFO" 47 | "sqlalchemy" = "WARNING" 48 | 49 | [logging.console] 50 | colored = true 51 | format = "verbose" 52 | 53 | [logging.file] 54 | path = "./logs" 55 | filename = "manager.log" 56 | backup-count = 5 57 | rotation-size = "10M" 58 | 59 | [logging.logstash] 60 | endpoint = { host = "localhost", port = 9300 } 61 | protocol = "tcp" 62 | ssl-enabled = true 63 | ssl-verify = true 64 | 65 | 66 | [debug] 67 | enabled = false 68 | -------------------------------------------------------------------------------- /config/halfstack.alembic.ini: -------------------------------------------------------------------------------- 1 | # A generic, single database configuration. 2 | 3 | [alembic] 4 | # path to migration scripts 5 | script_location = ai.backend.manager.models:alembic 6 | 7 | # template used to generate migration files 8 | # file_template = %%(rev)s_%%(slug)s 9 | 10 | # timezone to use when rendering the date 11 | # within the migration file as well as the filename. 12 | # string value is passed to dateutil.tz.gettz() 13 | # leave blank for localtime 14 | # timezone = 15 | 16 | # max length of characters to apply to the 17 | # "slug" field 18 | #truncate_slug_length = 40 19 | 20 | # set to 'true' to run the environment during 21 | # the 'revision' command, regardless of autogenerate 22 | # revision_environment = false 23 | 24 | # set to 'true' to allow .pyc and .pyo files without 25 | # a source .py file to be detected as revisions in the 26 | # versions/ directory 27 | # sourceless = false 28 | 29 | # version location specification; this defaults 30 | # to alembic/versions. 
When using multiple version 31 | # directories, initial revisions must be specified with --version-path 32 | # version_locations = %(here)s/bar %(here)s/bat alembic/versions 33 | 34 | # the output encoding used when revision files 35 | # are written from script.py.mako 36 | # output_encoding = utf-8 37 | 38 | sqlalchemy.url = postgresql://postgres:develove@localhost:8100/backend 39 | 40 | 41 | # Logging configuration 42 | [loggers] 43 | keys = root,sqlalchemy,alembic 44 | 45 | [handlers] 46 | keys = console 47 | 48 | [formatters] 49 | keys = generic 50 | 51 | [logger_root] 52 | level = WARN 53 | handlers = console 54 | qualname = 55 | 56 | [logger_sqlalchemy] 57 | level = WARN 58 | handlers = 59 | qualname = sqlalchemy.engine 60 | 61 | [logger_alembic] 62 | level = INFO 63 | handlers = 64 | qualname = alembic 65 | 66 | [handler_console] 67 | class = StreamHandler 68 | args = (sys.stderr,) 69 | level = NOTSET 70 | formatter = generic 71 | 72 | [formatter_generic] 73 | format = %(levelname)-5.5s [%(name)s] %(message)s 74 | datefmt = %H:%M:%S 75 | -------------------------------------------------------------------------------- /config/halfstack.toml: -------------------------------------------------------------------------------- 1 | [etcd] 2 | namespace = "local" 3 | addr = { host = "127.0.0.1", port = 8120 } 4 | user = "" 5 | password = "" 6 | 7 | 8 | [db] 9 | type = "postgresql" 10 | addr = { host = "localhost", port = 8100 } 11 | name = "backend" 12 | user = "postgres" 13 | password = "develove" 14 | 15 | 16 | [manager] 17 | num-proc = 4 18 | service-addr = { host = "127.0.0.1", port = 8081 } 19 | #user = "nobody" 20 | #group = "nobody" 21 | ssl-enabled = false 22 | #ssl-cert = "/etc/backend.ai/ssl/apiserver-fullchain.pem" # env: BACKEND_SSL_CERT 23 | #ssl-privkey = "/etc/backend.ai/ssl/apiserver-privkey.pem" # env: BACKEND_SSL_KEY 24 | 25 | heartbeat-timeout = 10.0 26 | #id = "" 27 | pid-file = "./manager.pid" # env: BACKEND_PID_FILE 28 | disabled-plugins = [] 29 | 30 | hide-agents = true 31 | 32 | 33 | [docker-registry] 34 | ssl-verify = false 35 | 36 | 37 | [logging] 38 | level = "INFO" 39 | drivers = ["console"] 40 | 41 | [logging.pkg-ns] 42 | "" = "WARNING" 43 | "aiotools" = "INFO" 44 | "aiohttp" = "INFO" 45 | "ai.backend" = "INFO" 46 | "alembic" = "INFO" 47 | "sqlalchemy" = "WARNING" 48 | 49 | [logging.console] 50 | colored = true 51 | format = "verbose" 52 | 53 | [logging.file] 54 | path = "./logs" 55 | filename = "manager.log" 56 | backup-count = 5 57 | rotation-size = "10M" 58 | 59 | [logging.logstash] 60 | endpoint = { host = "localhost", port = 9300 } 61 | protocol = "tcp" 62 | ssl-enabled = true 63 | ssl-verify = true 64 | 65 | 66 | [debug] 67 | enabled = false 68 | -------------------------------------------------------------------------------- /config/sample.etcd.config.json: -------------------------------------------------------------------------------- 1 | { 2 | "system": { 3 | "timezone": "UTC" 4 | }, 5 | "redis": { 6 | "addr": "127.0.0.1:6379", 7 | "password": "REDIS_PASSWORD" 8 | }, 9 | "docker": { 10 | "registry": { 11 | "cr.backend.ai": { 12 | "": "https://cr.backend.ai", 13 | "type": "harbor2", 14 | "project": "stable,ngc" 15 | } 16 | } 17 | }, 18 | "idle": { 19 | "enabled": "timeout", 20 | "app-streaming-packet-timeout": "5m", 21 | "checkers": { 22 | "timeout": { 23 | "threshold": "10m" 24 | } 25 | } 26 | }, 27 | "network": { 28 | "subnet": { 29 | "agent": "0.0.0.0/0", 30 | "container": "0.0.0.0/0" 31 | }, 32 | "overlay": { 33 | "mtu": 1500 34 | } 35 | }, 36 | 
"watcher": { 37 | "token": "some-random-long-string" 38 | }, 39 | "plugins": { 40 | "accelerator": { 41 | "cuda": { 42 | } 43 | }, 44 | "scheduler": { 45 | "fifo": { 46 | "num_retries_to_skip": 3 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /config/sample.etcd.redis-sentinel.json: -------------------------------------------------------------------------------- 1 | { 2 | "sentinel": "127.0.0.1:8217,127.0.0.1:8218,127.0.0.1:8219", 3 | "service_name": "manager" 4 | } 5 | -------------------------------------------------------------------------------- /config/sample.etcd.redis-single.json: -------------------------------------------------------------------------------- 1 | { 2 | "addr": "127.0.0.1:8111" 3 | } 4 | -------------------------------------------------------------------------------- /config/sample.etcd.volumes.json: -------------------------------------------------------------------------------- 1 | { 2 | "_types": { 3 | "group": "", 4 | "user": "" 5 | }, 6 | "default_host": "local:volume1", 7 | "proxies": { 8 | "local": { 9 | "client_api": "http://client-accessible-hostname:6021", 10 | "manager_api": "https://127.0.0.1:6022", 11 | "secret": "some-secret-shared-with-storage-proxy", 12 | "ssl_verify": "false" 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /docs/382.fix: -------------------------------------------------------------------------------- 1 | Revert accidental code removal in #381 due to missing type annotations 2 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. Sorna documentation master file, created by 2 | sphinx-quickstart on Tue Jul 28 20:12:45 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Sorna Documentation 7 | =================== 8 | 9 | The back-end calculation kernel server. 10 | 11 | The name "Sorna" came from the name of an island in the movie 12 | Jurassic Park where InGen's dinosaurs research facility is 13 | located. It means that this project is not visible to the users 14 | but plays a key role in our services. 15 | 16 | 17 | Contents: 18 | 19 | .. toctree:: 20 | :maxdepth: 2 21 | 22 | manager/structs 23 | manager/driver 24 | manager/instance 25 | 26 | 27 | Indices and tables 28 | ================== 29 | 30 | * :ref:`genindex` 31 | * :ref:`modindex` 32 | * :ref:`search` 33 | 34 | -------------------------------------------------------------------------------- /docs/manager/driver.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: sorna.driver 2 | 3 | Driver 4 | ====== 5 | 6 | .. autofunction:: create_driver 7 | 8 | .. autoclass:: BaseDriver 9 | :members: 10 | :undoc-members: 11 | :inherited-members: 12 | 13 | .. autoclass:: AWSDockerDriver 14 | :show-inheritance: 15 | 16 | .. autoclass:: LocalDriver 17 | :show-inheritance: 18 | 19 | -------------------------------------------------------------------------------- /docs/manager/instance.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: sorna.instance 2 | 3 | Instance 4 | ======== 5 | 6 | .. autoclass:: InstanceRegistry 7 | :members: 8 | :undoc-members: 9 | :inherited-members: 10 | 11 | .. autoexception:: InstanceNotAvailableError 12 | 13 | .. 
autoexception:: SessionNotFoundError 14 | -------------------------------------------------------------------------------- /docs/manager/structs.rst: -------------------------------------------------------------------------------- 1 | .. currentmodule:: sorna.structs 2 | 3 | Data Structures 4 | =============== 5 | 6 | .. autoclass:: Instance 7 | 8 | .. autoclass:: Kernel 9 | 10 | -------------------------------------------------------------------------------- /fixtures/example-resource-presets.json: -------------------------------------------------------------------------------- 1 | { 2 | "resource_presets": [ 3 | { 4 | "name": "01-small", 5 | "resource_slots": { 6 | "cpu": "8", 7 | "mem": "34359738368", 8 | "cuda.device": "1", 9 | "cuda.shares": "0.5" 10 | } 11 | }, 12 | { 13 | "name": "02-medium", 14 | "resource_slots": { 15 | "cpu": "24", 16 | "mem": "171798691840", 17 | "cuda.device": "2", 18 | "cuda.shares": "2.0" 19 | }, 20 | "shared_memory": "1073741824" 21 | }, 22 | { 23 | "name": "03-large", 24 | "resource_slots": { 25 | "cpu": "64", 26 | "mem": "343597383680", 27 | "cuda.device": "4", 28 | "cuda.shares": "4.0" 29 | }, 30 | "shared_memory": "2147483648" 31 | } 32 | ] 33 | } 34 | -------------------------------------------------------------------------------- /fixtures/example-session-templates.json: -------------------------------------------------------------------------------- 1 | { 2 | "session_templates": [ 3 | { 4 | "id": "c1b8441a-ba46-4a83-8727-de6645f521b4", 5 | "is_active": true, 6 | "domain_name": "default", 7 | "group_id": "2de2b969-1d04-48a6-af16-0bc8adb3c831", 8 | "user_uuid": "f38dea23-50fa-42a0-b5ae-338f5f4693f4", 9 | "type": "TASK", 10 | "name": "jupyter", 11 | "template": { 12 | "api_version": "6", 13 | "kind": "task_template", 14 | "metadata": { 15 | "name": "cr.backend.ai/testing/ngc-pytorch", 16 | "tag": "20.11-py3" 17 | }, 18 | "spec": { 19 | "session_type": "interactive", 20 | "kernel": { 21 | "image": "cr.backend.ai/testing/ngc-pytorch:20.11-py3", 22 | "environ": {}, 23 | "run": null, 24 | "git": null 25 | }, 26 | "scaling_group": "default", 27 | "mounts": { 28 | }, 29 | "resources": { 30 | "cpu": "2", 31 | "mem": "4g", 32 | "cuda.shares": "0.2" 33 | } 34 | } 35 | } 36 | }, 37 | { 38 | "id": "59062449-4f57-4434-975d-add2a593438c", 39 | "is_active": true, 40 | "domain_name": "default", 41 | "group_id": "2de2b969-1d04-48a6-af16-0bc8adb3c831", 42 | "user_uuid": "f38dea23-50fa-42a0-b5ae-338f5f4693f4", 43 | "type": "TASK", 44 | "name": "rstudio", 45 | "template": { 46 | "api_version": "6", 47 | "kind": "task_template", 48 | "metadata": { 49 | "name": "cr.backend.ai/cloud/r-base", 50 | "tag": "4.0" 51 | }, 52 | "spec": { 53 | "session_type": "interactive", 54 | "kernel": { 55 | "image": "cr.backend.ai/cloud/r-base:4.0", 56 | "environ": {}, 57 | "run": null, 58 | "git": null 59 | }, 60 | "scaling_group": "default", 61 | "mounts": { 62 | }, 63 | "resources": { 64 | "cpu": "1", 65 | "mem": "2g" 66 | } 67 | } 68 | } 69 | } 70 | ] 71 | } 72 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.towncrier] 2 | package = "ai.backend.manager" 3 | filename = "CHANGELOG.md" 4 | directory = "changes/" 5 | title_format = "## {version} ({project_date})" 6 | template = "changes/template.md" 7 | start_string = "\n" 8 | issue_format = "([#{issue}](https://github.com/lablup/backend.ai-manager/issues/{issue}))" 9 | underlines = ["", "", ""] 10 | 11 
| [[tool.towncrier.type]] 12 | directory = "breaking" 13 | name = "Breaking Changes" 14 | showcontent = true 15 | 16 | [[tool.towncrier.type]] 17 | directory = "feature" 18 | name = "Features" 19 | showcontent = true 20 | 21 | [[tool.towncrier.type]] 22 | directory = "deprecation" 23 | name = "Deprecations" 24 | showcontent = true 25 | 26 | [[tool.towncrier.type]] 27 | directory = "fix" 28 | name = "Fixes" 29 | showcontent = true 30 | 31 | [[tool.towncrier.type]] 32 | directory = "doc" 33 | name = "Documentation Changes" 34 | showcontent = true 35 | 36 | [[tool.towncrier.type]] 37 | directory = "misc" 38 | name = "Miscellaneous" 39 | showcontent = true 40 | -------------------------------------------------------------------------------- /requirements/build.txt: -------------------------------------------------------------------------------- 1 | -e .[build] 2 | -------------------------------------------------------------------------------- /requirements/dev.txt: -------------------------------------------------------------------------------- 1 | -e .[build,test,lint,typecheck,dev] 2 | -------------------------------------------------------------------------------- /requirements/lint.txt: -------------------------------------------------------------------------------- 1 | -e git+https://github.com/lablup/backend.ai-common@main#egg=backend.ai-common 2 | -e git+https://github.com/lablup/backend.ai-cli@main#egg=backend.ai-cli 3 | -e .[lint] 4 | -------------------------------------------------------------------------------- /requirements/main.txt: -------------------------------------------------------------------------------- 1 | -e . 2 | -------------------------------------------------------------------------------- /requirements/test.txt: -------------------------------------------------------------------------------- 1 | -e git+https://github.com/lablup/backend.ai-common@main#egg=backend.ai-common 2 | -e git+https://github.com/lablup/backend.ai-cli@main#egg=backend.ai-cli 3 | -e .[test] 4 | -------------------------------------------------------------------------------- /requirements/typecheck.txt: -------------------------------------------------------------------------------- 1 | -e git+https://github.com/lablup/backend.ai-common@main#egg=backend.ai-common 2 | -e git+https://github.com/lablup/backend.ai-cli@main#egg=backend.ai-cli 3 | -e .[typecheck] 4 | -------------------------------------------------------------------------------- /scripts/migrate-vfolder-structure.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from pathlib import Path 3 | 4 | 5 | def main(): 6 | input_path = Path(sys.argv[1]) # full path for vroot/local 7 | output_path = Path(sys.argv[2]) # full path for volume directory. ex. 
vfs/ 8 | print(input_path, output_path) 9 | subfolders = [x for x in input_path.iterdir() if Path(x).is_dir()] 10 | 11 | print("Number of folders ", len(subfolders)) 12 | try: 13 | for folder in subfolders: 14 | # Take the last path component and shard it into a two-level prefix hierarchy. 15 | folder = str(folder).split("/")[-1] 16 | header_dir = folder[0:2] 17 | second_lvl_dir = folder[2:4] 18 | rest_dir = folder[4:] 19 | 20 | (output_path / header_dir / second_lvl_dir / rest_dir).mkdir(parents=True, exist_ok=True) 21 | 22 | (input_path / folder).rename(output_path / header_dir / second_lvl_dir / rest_dir) 23 | 24 | except OSError: 25 | print("Creation of the directories failed") 26 | else: 27 | print("Successfully created the directories") 28 | 29 | 30 | if __name__ == '__main__': 31 | main() 32 | print("Done") 33 | -------------------------------------------------------------------------------- /scripts/query-test.py: -------------------------------------------------------------------------------- 1 | import code 2 | import configparser 3 | 4 | import sqlalchemy as sa 5 | from sqlalchemy.dialects import postgresql as psql 6 | 7 | from ai.backend.manager.models import ( 8 | agents, 9 | keypairs, 10 | kernels, 11 | users, 12 | ) 13 | from ai.backend.manager.models.utils import ( 14 | sql_json_increment, 15 | sql_json_merge, 16 | ) 17 | 18 | 19 | def main(): 20 | config = configparser.ConfigParser() 21 | config.read("alembic.ini") 22 | url = config["alembic"]["sqlalchemy.url"] 23 | engine = sa.create_engine(url) 24 | with engine.connect() as connection: 25 | code.interact(local={ 26 | 'sa': sa, 27 | 'conn': connection, 28 | 'psql': psql, 29 | 'agents': agents, 30 | 'keypairs': keypairs, 31 | 'kernels': kernels, 32 | 'users': users, 33 | 'sql_json_increment': sql_json_increment, 34 | 'sql_json_merge': sql_json_merge, 35 | }) 36 | 37 | 38 | if __name__ == "__main__": 39 | main() 40 | -------------------------------------------------------------------------------- /scripts/set-psql-superuser.sql: -------------------------------------------------------------------------------- 1 | ALTER ROLE lablup SUPERUSER; 2 | -------------------------------------------------------------------------------- /scripts/switch-redis.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | case "$1" in 4 | "single") 5 | echo "Changing Redis config to use a single Redis instance..." 6 | backend.ai mgr etcd delete --prefix config/redis 7 | backend.ai mgr etcd put-json config/redis config/sample.etcd.redis-single.json 8 | ;; 9 | "sentinel") 10 | echo "Changing Redis config to use a sentinel-based cluster..." 11 | backend.ai mgr etcd delete --prefix config/redis 12 | backend.ai mgr etcd put-json config/redis config/sample.etcd.redis-sentinel.json 13 | ;; 14 | "cluster") 15 | echo "Changing Redis config to use a Redis cluster..." 16 | echo "[ERROR] Not implemented yet." 17 | ;; 18 | *) 19 | echo "Unknown option. Choose from \"single\", \"sentinel\", and \"cluster\"." 
20 | ;; 21 | esac 22 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup() 4 | -------------------------------------------------------------------------------- /src/ai/backend/manager/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = '22.03.3' 2 | -------------------------------------------------------------------------------- /src/ai/backend/manager/api/__init__.py: -------------------------------------------------------------------------------- 1 | import enum 2 | 3 | 4 | class ManagerStatus(str, enum.Enum): 5 | TERMINATED = 'terminated' # deprecated 6 | PREPARING = 'preparing' # deprecated 7 | RUNNING = 'running' 8 | FROZEN = 'frozen' 9 | -------------------------------------------------------------------------------- /src/ai/backend/manager/api/context.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import TYPE_CHECKING 4 | 5 | import attr 6 | 7 | if TYPE_CHECKING: 8 | from ai.backend.common.bgtask import BackgroundTaskManager 9 | from ai.backend.common.events import EventDispatcher, EventProducer 10 | from ai.backend.common.plugin.hook import HookPluginContext 11 | from ai.backend.common.plugin.monitor import ErrorPluginContext, StatsPluginContext 12 | from ai.backend.common.types import RedisConnectionInfo 13 | 14 | from ..models.storage import StorageSessionManager 15 | from ..models.utils import ExtendedAsyncSAEngine 16 | from ..idle import IdleCheckerHost 17 | from ..plugin.webapp import WebappPluginContext 18 | from ..registry import AgentRegistry 19 | from ..config import LocalConfig, SharedConfig 20 | from ..types import DistributedLockFactory 21 | from .types import CORSOptions 22 | 23 | 24 | class BaseContext: 25 | pass 26 | 27 | 28 | @attr.s(slots=True, auto_attribs=True, init=False) 29 | class RootContext(BaseContext): 30 | pidx: int 31 | db: ExtendedAsyncSAEngine 32 | distributed_lock_factory: DistributedLockFactory 33 | event_dispatcher: EventDispatcher 34 | event_producer: EventProducer 35 | redis_live: RedisConnectionInfo 36 | redis_stat: RedisConnectionInfo 37 | redis_image: RedisConnectionInfo 38 | redis_stream: RedisConnectionInfo 39 | shared_config: SharedConfig 40 | local_config: LocalConfig 41 | cors_options: CORSOptions 42 | 43 | webapp_plugin_ctx: WebappPluginContext 44 | idle_checker_host: IdleCheckerHost 45 | storage_manager: StorageSessionManager 46 | hook_plugin_ctx: HookPluginContext 47 | 48 | registry: AgentRegistry 49 | 50 | error_monitor: ErrorPluginContext 51 | stats_monitor: StatsPluginContext 52 | background_task_manager: BackgroundTaskManager 53 | -------------------------------------------------------------------------------- /src/ai/backend/manager/api/py.typed: -------------------------------------------------------------------------------- 1 | marker 2 | -------------------------------------------------------------------------------- /src/ai/backend/manager/api/types.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import ( 4 | Awaitable, 5 | Callable, 6 | Iterable, 7 | AsyncContextManager, 8 | Mapping, 9 | Tuple, 10 | TYPE_CHECKING, 11 | ) 12 | from typing_extensions import TypeAlias 13 | 14 | from aiohttp import web 15 | import 
aiohttp_cors 16 | 17 | if TYPE_CHECKING: 18 | from .context import RootContext 19 | 20 | 21 | WebRequestHandler: TypeAlias = Callable[ 22 | [web.Request], 23 | Awaitable[web.StreamResponse], 24 | ] 25 | WebMiddleware: TypeAlias = Callable[ 26 | [web.Request, WebRequestHandler], 27 | Awaitable[web.StreamResponse], 28 | ] 29 | 30 | CORSOptions: TypeAlias = Mapping[str, aiohttp_cors.ResourceOptions] 31 | AppCreator: TypeAlias = Callable[ 32 | [CORSOptions], 33 | Tuple[web.Application, Iterable[WebMiddleware]], 34 | ] 35 | 36 | CleanupContext: TypeAlias = Callable[['RootContext'], AsyncContextManager[None]] 37 | -------------------------------------------------------------------------------- /src/ai/backend/manager/cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lablup/backend.ai-manager/c3e596b2877833a99893ca12a5e165475b7d0071/src/ai/backend/manager/cli/__init__.py -------------------------------------------------------------------------------- /src/ai/backend/manager/cli/context.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import atexit 4 | import contextlib 5 | import attr 6 | import os 7 | from typing import TYPE_CHECKING, AsyncIterator 8 | 9 | from ai.backend.common import redis 10 | from ai.backend.common.config import redis_config_iv 11 | from ai.backend.common.logging import Logger 12 | from ai.backend.common.types import RedisConnectionInfo 13 | 14 | from ai.backend.manager.config import SharedConfig 15 | from ai.backend.manager.defs import REDIS_STAT_DB, REDIS_LIVE_DB, REDIS_IMAGE_DB, REDIS_STREAM_DB 16 | 17 | 18 | if TYPE_CHECKING: 19 | from ..config import LocalConfig 20 | 21 | 22 | @attr.s(auto_attribs=True, frozen=True, slots=True) 23 | class CLIContext: 24 | logger: Logger 25 | local_config: LocalConfig 26 | 27 | 28 | @attr.s(auto_attribs=True, frozen=True, slots=True) 29 | class RedisConnectionSet: 30 | live: RedisConnectionInfo 31 | stat: RedisConnectionInfo 32 | image: RedisConnectionInfo 33 | stream: RedisConnectionInfo 34 | 35 | 36 | def init_logger(local_config: LocalConfig) -> Logger: 37 | if 'file' in local_config['logging']['drivers']: 38 | local_config['logging']['drivers'].remove('file') 39 | # log_endpoint = f'tcp://127.0.0.1:{find_free_port()}' 40 | ipc_base_path = local_config['manager']['ipc-base-path'] 41 | log_sockpath = ipc_base_path / f'manager-cli-{os.getpid()}.sock' 42 | log_endpoint = f'ipc://{log_sockpath}' 43 | 44 | def _clean_logger(): 45 | try: 46 | os.unlink(log_sockpath) 47 | except FileNotFoundError: 48 | pass 49 | 50 | atexit.register(_clean_logger) 51 | return Logger(local_config['logging'], is_master=True, log_endpoint=log_endpoint) 52 | 53 | 54 | @contextlib.asynccontextmanager 55 | async def redis_ctx(cli_ctx: CLIContext) -> AsyncIterator[RedisConnectionSet]: 56 | local_config = cli_ctx.local_config 57 | shared_config = SharedConfig( 58 | local_config['etcd']['addr'], 59 | local_config['etcd']['user'], 60 | local_config['etcd']['password'], 61 | local_config['etcd']['namespace'], 62 | ) 63 | await shared_config.reload() 64 | raw_redis_config = await shared_config.etcd.get_prefix('config/redis') 65 | local_config['redis'] = redis_config_iv.check(raw_redis_config) 66 | redis_live = redis.get_redis_object(shared_config.data['redis'], db=REDIS_LIVE_DB) 67 | redis_stat = redis.get_redis_object(shared_config.data['redis'], db=REDIS_STAT_DB) 68 | redis_image = redis.get_redis_object( 
69 | shared_config.data['redis'], db=REDIS_IMAGE_DB, 70 | ) 71 | redis_stream = redis.get_redis_object( 72 | shared_config.data['redis'], db=REDIS_STREAM_DB, 73 | ) 74 | yield RedisConnectionSet( 75 | live=redis_live, 76 | stat=redis_stat, 77 | image=redis_image, 78 | stream=redis_stream, 79 | ) 80 | await redis_stream.close() 81 | await redis_image.close() 82 | await redis_stat.close() 83 | await redis_live.close() 84 | -------------------------------------------------------------------------------- /src/ai/backend/manager/cli/fixture.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import asyncio 4 | import logging 5 | import json 6 | from pathlib import Path 7 | from typing import TYPE_CHECKING 8 | from urllib.parse import quote_plus as urlquote 9 | 10 | import click 11 | import sqlalchemy as sa 12 | 13 | from ai.backend.common.logging import BraceStyleAdapter 14 | 15 | from ..models.base import populate_fixture 16 | 17 | if TYPE_CHECKING: 18 | from .context import CLIContext 19 | 20 | log = BraceStyleAdapter(logging.getLogger(__name__)) 21 | 22 | 23 | @click.group() 24 | def cli(): 25 | pass 26 | 27 | 28 | @cli.command() 29 | @click.argument('fixture_path', type=Path) 30 | @click.pass_obj 31 | def populate(cli_ctx: CLIContext, fixture_path) -> None: 32 | """Populate fixtures.""" 33 | 34 | async def _impl(): 35 | log.info("Populating fixture '{0}' ...", fixture_path) 36 | try: 37 | fixture = json.loads(fixture_path.read_text(encoding='utf8')) 38 | except FileNotFoundError: 39 | log.error('No such fixture.') 40 | return 41 | db_username = cli_ctx.local_config['db']['user'] 42 | db_password = cli_ctx.local_config['db']['password'] 43 | db_addr = cli_ctx.local_config['db']['addr'] 44 | db_name = cli_ctx.local_config['db']['name'] 45 | engine = sa.ext.asyncio.create_async_engine( 46 | f"postgresql+asyncpg://{urlquote(db_username)}:{urlquote(db_password)}@{db_addr}/{db_name}", 47 | ) 48 | try: 49 | await populate_fixture(engine, fixture) 50 | except Exception: 51 | log.exception("Failed to populate fixtures due to the following error:") 52 | else: 53 | log.info("Done") 54 | log.warning("Some rows may be skipped if they already exist.") 55 | finally: 56 | await engine.dispose() 57 | 58 | with cli_ctx.logger: 59 | asyncio.run(_impl()) 60 | 61 | 62 | @cli.command() 63 | @click.pass_obj 64 | def list(cli_ctx: CLIContext) -> None: 65 | """List all available fixtures.""" 66 | with cli_ctx.logger: 67 | log.warning('This command is deprecated.') 68 | -------------------------------------------------------------------------------- /src/ai/backend/manager/cli/gql.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import logging 4 | from typing import TYPE_CHECKING 5 | 6 | import click 7 | import graphene 8 | 9 | from ai.backend.common.logging import BraceStyleAdapter 10 | 11 | from ..models.gql import Queries, Mutations 12 | 13 | if TYPE_CHECKING: 14 | from .context import CLIContext 15 | 16 | log = BraceStyleAdapter(logging.getLogger(__name__)) 17 | 18 | 19 | @click.group() 20 | def cli() -> None: 21 | pass 22 | 23 | 24 | @cli.command() 25 | @click.pass_obj 26 | def show(cli_ctx: CLIContext) -> None: 27 | with cli_ctx.logger: 28 | schema = graphene.Schema( 29 | query=Queries, 30 | mutation=Mutations, 31 | auto_camelcase=False) 32 | log.info('======== GraphQL API Schema ========') 33 | print(str(schema)) 34 | 
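# A quick usage sketch (hypothetical shell invocation; it assumes this command group
# is mounted as `gql` under the `backend.ai mgr` CLI entrypoint, like the other
# manager subcommands referenced elsewhere in this repository):
#
#   backend.ai mgr gql show > schema.graphql
#
# The schema text goes to stdout via print() while log lines go to the logger, so the
# redirected file contains only the GraphQL schema for clients and codegen tools.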
-------------------------------------------------------------------------------- /src/ai/backend/manager/cli/image.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | 4 | import click 5 | 6 | from ai.backend.common.cli import MinMaxRange 7 | from ai.backend.common.logging import BraceStyleAdapter 8 | 9 | from .image_impl import ( 10 | alias as alias_impl, 11 | dealias as dealias_impl, 12 | forget_image as forget_image_impl, 13 | inspect_image as inspect_image_impl, 14 | list_images as list_images_impl, 15 | rescan_images as rescan_images_impl, 16 | set_image_resource_limit as set_image_resource_limit_impl, 17 | ) 18 | log = BraceStyleAdapter(logging.getLogger(__name__)) 19 | 20 | 21 | @click.group() 22 | def cli() -> None: 23 | pass 24 | 25 | 26 | @cli.command() 27 | @click.option('-s', '--short', is_flag=True, 28 | help='Show only the image references and digests.') 29 | @click.option('-i', '--installed', is_flag=True, 30 | help='Show only the installed images.') 31 | @click.pass_obj 32 | def list(cli_ctx, short, installed) -> None: 33 | '''List all configured images.''' 34 | with cli_ctx.logger: 35 | asyncio.run(list_images_impl(cli_ctx, short, installed)) 36 | 37 | 38 | @cli.command() 39 | @click.argument('canonical_or_alias') 40 | @click.argument('architecture') 41 | @click.pass_obj 42 | def inspect(cli_ctx, canonical_or_alias, architecture) -> None: 43 | '''Show the details of the given image or alias.''' 44 | with cli_ctx.logger: 45 | asyncio.run(inspect_image_impl(cli_ctx, canonical_or_alias, architecture)) 46 | 47 | 48 | @cli.command() 49 | @click.argument('canonical_or_alias') 50 | @click.argument('architecture') 51 | @click.pass_obj 52 | def forget(cli_ctx, canonical_or_alias, architecture) -> None: 53 | '''Forget (delete) a specific image.''' 54 | with cli_ctx.logger: 55 | asyncio.run(forget_image_impl(cli_ctx, canonical_or_alias, architecture)) 56 | 57 | 58 | @cli.command() 59 | @click.argument('canonical_or_alias') 60 | @click.argument('slot_type') 61 | @click.argument('range_value', type=MinMaxRange) 62 | @click.argument('architecture') 63 | @click.pass_obj 64 | def set_resource_limit( 65 | cli_ctx, 66 | canonical_or_alias, 67 | slot_type, 68 | range_value, 69 | architecture, 70 | ) -> None: 71 | '''Set the MIN:MAX values of a SLOT_TYPE limit for the given image REFERENCE.''' 72 | with cli_ctx.logger: 73 | asyncio.run(set_image_resource_limit_impl( 74 | cli_ctx, 75 | canonical_or_alias, 76 | slot_type, 77 | range_value, 78 | architecture, 79 | )) 80 | 81 | 82 | @cli.command() 83 | @click.argument('registry') 84 | @click.pass_obj 85 | def rescan(cli_ctx, registry) -> None: 86 | ''' 87 | Update the kernel image metadata from all configured docker registries. 88 | 89 | Pass the name (usually hostname or "lablup") of the Docker registry configured as REGISTRY. 
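Example (a hypothetical invocation; the registry name is borrowed from
config/sample.etcd.config.json and may differ in your deployment):

    backend.ai mgr image rescan cr.backend.ai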
90 | ''' 91 | with cli_ctx.logger: 92 | asyncio.run(rescan_images_impl(cli_ctx, registry)) 93 | 94 | 95 | @cli.command() 96 | @click.argument('alias') 97 | @click.argument('target') 98 | @click.argument('architecture') 99 | @click.pass_obj 100 | def alias(cli_ctx, alias, target, architecture) -> None: 101 | '''Add an image alias from the given alias to the target image reference.''' 102 | with cli_ctx.logger: 103 | asyncio.run(alias_impl(cli_ctx, alias, target, architecture)) 104 | 105 | 106 | @cli.command() 107 | @click.argument('alias') 108 | @click.pass_obj 109 | def dealias(cli_ctx, alias) -> None: 110 | '''Remove an alias.''' 111 | with cli_ctx.logger: 112 | asyncio.run(dealias_impl(cli_ctx, alias)) 113 | -------------------------------------------------------------------------------- /src/ai/backend/manager/container_registry/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Any, Mapping, Type, TYPE_CHECKING 4 | 5 | import yarl 6 | 7 | if TYPE_CHECKING: 8 | from .base import BaseContainerRegistry 9 | 10 | 11 | def get_container_registry(registry_info: Mapping[str, Any]) -> Type[BaseContainerRegistry]: 12 | registry_url = yarl.URL(registry_info['']) 13 | registry_type = registry_info.get('type', 'docker') 14 | cr_cls: Type[BaseContainerRegistry] 15 | if registry_url.host is not None and registry_url.host.endswith('.docker.io'): 16 | from .docker import DockerHubRegistry 17 | cr_cls = DockerHubRegistry 18 | elif registry_type == 'docker': 19 | from .docker import DockerRegistry_v2 20 | cr_cls = DockerRegistry_v2 21 | elif registry_type == 'harbor': 22 | from .harbor import HarborRegistry_v1 23 | cr_cls = HarborRegistry_v1 24 | elif registry_type == 'harbor2': 25 | from .harbor import HarborRegistry_v2 26 | cr_cls = HarborRegistry_v2 27 | else: 28 | raise RuntimeError(f"Unsupported registry type: {registry_type}") 29 | return cr_cls 30 | -------------------------------------------------------------------------------- /src/ai/backend/manager/defs.py: -------------------------------------------------------------------------------- 1 | """ 2 | Common definitions/constants used throughout the manager. 
3 | """ 4 | 5 | import enum 6 | import platform 7 | import re 8 | from typing import Final 9 | 10 | from ai.backend.common.docker import arch_name_aliases 11 | from ai.backend.common.types import SlotName, SlotTypes 12 | 13 | INTRINSIC_SLOTS: Final = { 14 | SlotName('cpu'): SlotTypes('count'), 15 | SlotName('mem'): SlotTypes('bytes'), 16 | } 17 | 18 | MANAGER_ARCH = platform.machine().lower().strip() 19 | 20 | 21 | DEFAULT_IMAGE_ARCH = arch_name_aliases.get(MANAGER_ARCH, MANAGER_ARCH) 22 | # DEFAULT_IMAGE_ARCH = 'x86_64' 23 | 24 | # The default container role name for multi-container sessions 25 | DEFAULT_ROLE: Final = "main" 26 | 27 | _RESERVED_VFOLDER_PATTERNS = [r'^\.[a-z0-9]+rc$', r'^\.[a-z0-9]+_profile$'] 28 | RESERVED_DOTFILES = ['.terminfo', '.jupyter', '.ssh', '.ssh/authorized_keys', '.local', '.config'] 29 | RESERVED_VFOLDERS = ['.terminfo', '.jupyter', '.tmux.conf', '.ssh', '/bin', '/boot', '/dev', '/etc', 30 | '/lib', '/lib64', '/media', '/mnt', '/opt', '/proc', '/root', '/run', '/sbin', 31 | '/srv', '/sys', '/tmp', '/usr', '/var', '/home'] 32 | RESERVED_VFOLDER_PATTERNS = [re.compile(x) for x in _RESERVED_VFOLDER_PATTERNS] 33 | 34 | # Redis database IDs depending on purposes 35 | REDIS_STAT_DB: Final = 0 36 | REDIS_RLIM_DB: Final = 1 37 | REDIS_LIVE_DB: Final = 2 38 | REDIS_IMAGE_DB: Final = 3 39 | REDIS_STREAM_DB: Final = 4 40 | 41 | 42 | # The unique identifiers for distributed locks. 43 | # To be used with PostgreSQL advisory locks, the values are defined as integers. 44 | class LockID(enum.IntEnum): 45 | LOCKID_TEST = 42 46 | LOCKID_SCHEDULE = 91 47 | LOCKID_PREPARE = 92 48 | LOCKID_SCHEDULE_TIMER = 191 49 | LOCKID_PREPARE_TIMER = 192 50 | LOCKID_LOG_CLEANUP_TIMER = 195 51 | LOCKID_IDLE_CHECK_TIMER = 196 52 | -------------------------------------------------------------------------------- /src/ai/backend/manager/exceptions.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import ( 4 | Any, 5 | List, 6 | Tuple, 7 | TypedDict, 8 | TYPE_CHECKING, 9 | ) 10 | 11 | from aiotools import TaskGroupError 12 | 13 | if TYPE_CHECKING: 14 | from ai.backend.common.types import AgentId 15 | 16 | 17 | class InvalidArgument(Exception): 18 | """ 19 | An internal exception class to represent invalid arguments in internal APIs. 20 | This is wrapped as InvalidAPIParameters in web request handlers. 21 | """ 22 | pass 23 | 24 | 25 | class AgentError(RuntimeError): 26 | """ 27 | A dummy exception class to distinguish agent-side errors passed via 28 | agent rpc calls. 29 | 30 | It carries two args tuple: the exception type and exception arguments from 31 | the agent. 32 | """ 33 | 34 | __slots__ = ( 35 | 'agent_id', 'exc_name', 'exc_repr', 'exc_tb', 36 | ) 37 | 38 | def __init__( 39 | self, 40 | agent_id: AgentId, 41 | exc_name: str, 42 | exc_repr: str, 43 | exc_args: Tuple[Any, ...], 44 | exc_tb: str = None, 45 | ) -> None: 46 | super().__init__(agent_id, exc_name, exc_repr, exc_args, exc_tb) 47 | self.agent_id = agent_id 48 | self.exc_name = exc_name 49 | self.exc_repr = exc_repr 50 | self.exc_args = exc_args 51 | self.exc_tb = exc_tb 52 | 53 | 54 | class MultiAgentError(TaskGroupError): 55 | """ 56 | An exception that is a collection of multiple errors from multiple agents. 
57 | """ 58 | 59 | 60 | class ErrorDetail(TypedDict, total=False): 61 | src: str 62 | name: str 63 | repr: str 64 | agent_id: str # optional 65 | collection: List[Any] # optional; currently mypy cannot handle recursive types 66 | 67 | 68 | class ErrorStatusInfo(TypedDict): 69 | error: ErrorDetail 70 | 71 | 72 | def convert_to_status_data(e: Exception, is_debug: bool = False) -> ErrorStatusInfo: 73 | if isinstance(e, MultiAgentError): 74 | data = ErrorStatusInfo( 75 | error={ 76 | "src": "agent", 77 | "name": "MultiAgentError", 78 | "repr": f"MultiAgentError({len(e.__errors__)})", 79 | "collection": [ 80 | convert_to_status_data(sub_error, is_debug)['error'] 81 | for sub_error in 82 | e.__errors__ 83 | ], 84 | }, 85 | ) 86 | return data 87 | elif isinstance(e, AgentError): 88 | data = ErrorStatusInfo( 89 | error={ 90 | "src": "agent", 91 | "name": e.exc_name, 92 | "repr": e.exc_repr, 93 | }, 94 | ) 95 | if is_debug: 96 | data["error"]["agent_id"] = e.agent_id 97 | return data 98 | return ErrorStatusInfo( 99 | error={ 100 | "src": "other", 101 | "name": e.__class__.__name__, 102 | "repr": repr(e), 103 | }, 104 | ) 105 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .base import metadata 2 | 3 | from . import agent as _agent 4 | from . import domain as _domain 5 | from . import group as _group 6 | from . import image as _image 7 | from . import kernel as _kernel 8 | from . import keypair as _keypair 9 | from . import user as _user 10 | from . import vfolder as _vfolder 11 | from . import dotfile as _dotfile 12 | from . import resource_policy as _rpolicy 13 | from . import resource_preset as _rpreset 14 | from . import scaling_group as _sgroup 15 | from . import session_template as _sessiontemplate 16 | from . import storage as _storage 17 | from . import error_logs as _errorlogs 18 | 19 | __all__ = ( 20 | 'metadata', 21 | *_agent.__all__, 22 | *_domain.__all__, 23 | *_group.__all__, 24 | *_image.__all__, 25 | *_kernel.__all__, 26 | *_keypair.__all__, 27 | *_user.__all__, 28 | *_vfolder.__all__, 29 | *_dotfile.__all__, 30 | *_rpolicy.__all__, 31 | *_rpreset.__all__, 32 | *_sgroup.__all__, 33 | *_sessiontemplate.__all__, 34 | *_storage.__all__, 35 | *_errorlogs.__all__, 36 | ) 37 | 38 | from .agent import * # noqa 39 | from .domain import * # noqa 40 | from .group import * # noqa 41 | from .image import * # noqa 42 | from .kernel import * # noqa 43 | from .keypair import * # noqa 44 | from .user import * # noqa 45 | from .vfolder import * # noqa 46 | from .dotfile import * # noqa 47 | from .resource_policy import * # noqa 48 | from .resource_preset import * # noqa 49 | from .scaling_group import * # noqa 50 | from .session_template import * # noqa 51 | from .storage import * # noqa 52 | from .error_logs import * # noqa 53 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/README: -------------------------------------------------------------------------------- 1 | Generic single-database configuration. 
-------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/env.py: --------------------------------------------------------------------------------
1 | from __future__ import with_statement
2 | from alembic import context
3 | from sqlalchemy import engine_from_config, pool
4 | from logging.config import fileConfig
5 | from ai.backend.common.logging import is_active as logging_active
6 | 
7 | # this is the Alembic Config object, which provides
8 | # access to the values within the .ini file in use.
9 | config = context.config
10 | 
11 | # Interpret the config file for Python logging.
12 | # This sets up the loggers.
13 | 
14 | if not logging_active.get():
15 |     fileConfig(config.config_file_name)
16 | 
17 | # Import the shared metadata and all models.
18 | # (We need to explicitly import the model modules because they must be
19 | # executed to add their table definitions to the metadata.)
20 | from ai.backend.manager.models.base import metadata
21 | import ai.backend.manager.models.agent
22 | import ai.backend.manager.models.keypair
23 | import ai.backend.manager.models.kernel
24 | import ai.backend.manager.models.vfolder
25 | 
26 | target_metadata = metadata
27 | 
28 | # other values from the config, defined by the needs of env.py,
29 | # can be acquired:
30 | # my_important_option = config.get_main_option("my_important_option")
31 | # ... etc.
32 | 
33 | 
34 | def run_migrations_offline():
35 |     """Run migrations in 'offline' mode.
36 | 
37 |     This configures the context with just a URL
38 |     and not an Engine, though an Engine is acceptable
39 |     here as well. By skipping the Engine creation
40 |     we don't even need a DBAPI to be available.
41 | 
42 |     Calls to context.execute() here emit the given string to the
43 |     script output.
44 | 
45 |     """
46 |     url = config.get_main_option("sqlalchemy.url")
47 |     context.configure(
48 |         url=url, target_metadata=target_metadata, literal_binds=True)
49 | 
50 |     with context.begin_transaction():
51 |         context.run_migrations()
52 | 
53 | 
54 | def run_migrations_online():
55 |     """Run migrations in 'online' mode.
56 | 
57 |     In this scenario we need to create an Engine
58 |     and associate a connection with the context.
59 | 
60 |     """
61 |     connectable = engine_from_config(
62 |         config.get_section(config.config_ini_section),
63 |         prefix='sqlalchemy.',
64 |         poolclass=pool.NullPool)
65 | 
66 |     with connectable.connect() as connection:
67 |         context.configure(
68 |             connection=connection,
69 |             target_metadata=target_metadata
70 |         )
71 | 
72 |         with context.begin_transaction():
73 |             context.run_migrations()
74 | 
75 | 
76 | if context.is_offline_mode():
77 |     run_migrations_offline()
78 | else:
79 |     run_migrations_online()
80 | 
-------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/script.py.mako: --------------------------------------------------------------------------------
1 | """${message}
2 | 
3 | Revision ID: ${up_revision}
4 | Revises: ${down_revision | comma,n}
5 | Create Date: ${create_date}
6 | 
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 | ${imports if imports else ""}
11 | 
12 | # revision identifiers, used by Alembic.
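# (When Alembic renders this template, the placeholders below become concrete
#  values, e.g. revision = '015d84d5a5ef' and down_revision = '60a1effa77d2';
#  Alembic orders migrations by following this parent chain, not by filename.)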
13 | revision = ${repr(up_revision)} 14 | down_revision = ${repr(down_revision)} 15 | branch_labels = ${repr(branch_labels)} 16 | depends_on = ${repr(depends_on)} 17 | 18 | 19 | def upgrade(): 20 | ${upgrades if upgrades else "pass"} 21 | 22 | 23 | def downgrade(): 24 | ${downgrades if downgrades else "pass"} 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/01456c812164_add_idle_timeout_to_keypair_resource_.py: -------------------------------------------------------------------------------- 1 | """add-idle-timeout-to-keypair-resource-policy 2 | 3 | Revision ID: 01456c812164 4 | Revises: dbc1e053b880 5 | Create Date: 2019-02-22 22:16:47.685740 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '01456c812164' 14 | down_revision = 'dbc1e053b880' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.add_column('keypair_resource_policies', 21 | sa.Column('idle_timeout', sa.BigInteger(), 22 | nullable=False, server_default='1800')) 23 | op.alter_column('keypair_resource_policies', 'idle_timeout', 24 | server_default=None) 25 | 26 | 27 | def downgrade(): 28 | op.drop_column('keypair_resource_policies', 'idle_timeout') 29 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/015d84d5a5ef_add_image_table.py: -------------------------------------------------------------------------------- 1 | """add image table 2 | 3 | Revision ID: 015d84d5a5ef 4 | Revises: 60a1effa77d2 5 | Create Date: 2022-02-15 23:45:19.814677 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | from ai.backend.manager.models.base import ForeignKeyIDColumn, IDColumn, convention 13 | 14 | from ai.backend.manager.models.image import ImageType 15 | 16 | 17 | # revision identifiers, used by Alembic. 
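# (Note: the composite unique index on (name, architecture) created below
#  lets the same image reference be registered once per CPU architecture.)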
18 | revision = '015d84d5a5ef' 19 | down_revision = '60a1effa77d2' 20 | branch_labels = None 21 | depends_on = None 22 | 23 | 24 | def upgrade(): 25 | metadata = sa.MetaData(naming_convention=convention) 26 | op.create_table( 27 | 'images', metadata, 28 | IDColumn('id'), 29 | sa.Column('name', sa.String, index=True, nullable=False), 30 | sa.Column('image', sa.String, nullable=False, index=True), 31 | sa.Column( 32 | 'created_at', sa.DateTime(timezone=True), 33 | server_default=sa.func.now(), index=True), 34 | sa.Column('tag', sa.String, nullable=False, index=True), 35 | sa.Column('registry', sa.String, nullable=False, index=True), 36 | sa.Column('architecture', sa.String, nullable=False, server_default='x86_64', index=True), 37 | sa.Column('config_digest', sa.CHAR(length=72), nullable=False), 38 | sa.Column('size_bytes', sa.BigInteger, nullable=False), 39 | sa.Column('type', sa.Enum(ImageType, name='image_type'), nullable=False), 40 | sa.Column('accelerators', sa.String), 41 | sa.Column('labels', postgresql.JSONB(), nullable=False), 42 | sa.Column('resources', postgresql.JSONB(), nullable=False), 43 | sa.Index('ix_image_name_architecture', 'name', 'architecture', unique=True), 44 | sa.Index('ix_image_image_tag_registry', 'image', 'tag', 'registry'), 45 | ) 46 | 47 | op.create_table( 48 | 'image_aliases', metadata, 49 | IDColumn('id'), 50 | sa.Column('alias', sa.String, unique=True, index=True), 51 | ForeignKeyIDColumn('image', 'images.id', nullable=False), 52 | sa.Index('ix_image_alias_unique_ref', 'image', 'alias', unique=True), 53 | ) 54 | 55 | 56 | def downgrade(): 57 | op.drop_table('image_aliases') 58 | op.drop_table('images') 59 | op.execute('DROP TYPE image_type') 60 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/0262e50e90e0_add_ssh_keypair_into_keypair.py: -------------------------------------------------------------------------------- 1 | """add_ssh_keypair_into_keypair 2 | 3 | Revision ID: 0262e50e90e0 4 | Revises: 4b7b650bc30e 5 | Create Date: 2019-12-12 07:19:48.052928 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | from cryptography.hazmat.primitives import serialization as crypto_serialization 12 | from cryptography.hazmat.primitives.asymmetric import rsa 13 | from cryptography.hazmat.backends import default_backend as crypto_default_backend 14 | 15 | from ai.backend.manager.models.base import convention 16 | 17 | # revision identifiers, used by Alembic. 
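# (A data migration: a partial `keypairs` table is redefined inline below so
#  the backfill does not depend on the current model definitions, and every
#  existing keypair row receives a freshly generated SSH keypair.)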
18 | revision = '0262e50e90e0'
19 | down_revision = '4b7b650bc30e'
20 | branch_labels = None
21 | depends_on = None
22 | 
23 | 
24 | def generate_ssh_keypair():
25 |     key = rsa.generate_private_key(
26 |         backend=crypto_default_backend(),
27 |         public_exponent=65537,
28 |         key_size=2048
29 |     )
30 |     private_key = key.private_bytes(
31 |         crypto_serialization.Encoding.PEM,
32 |         crypto_serialization.PrivateFormat.TraditionalOpenSSL,
33 |         crypto_serialization.NoEncryption()
34 |     ).decode("utf-8")
35 |     public_key = key.public_key().public_bytes(
36 |         crypto_serialization.Encoding.OpenSSH,
37 |         crypto_serialization.PublicFormat.OpenSSH
38 |     ).decode("utf-8")
39 |     return (public_key, private_key)
40 | 
41 | 
42 | def upgrade():
43 |     op.add_column('keypairs', sa.Column('ssh_public_key', sa.String(length=750), nullable=True))
44 |     op.add_column('keypairs', sa.Column('ssh_private_key', sa.String(length=2000), nullable=True))
45 | 
46 |     # Partial table definition to be referenced by the data migration below.
47 |     metadata = sa.MetaData(naming_convention=convention)
48 |     keypairs = sa.Table(
49 |         'keypairs', metadata,
50 |         sa.Column('access_key', sa.String(length=20), primary_key=True),
51 |         sa.Column('ssh_public_key', sa.String(length=750), nullable=True),
52 |         sa.Column('ssh_private_key', sa.String(length=2000), nullable=True),
53 |     )
54 | 
55 |     # Fill in an SSH keypair for every existing keypair row.
56 |     conn = op.get_bind()
57 |     query = sa.select([keypairs.c.access_key]).select_from(keypairs)
58 |     rows = conn.execute(query).fetchall()
59 |     for row in rows:
60 |         pubkey, privkey = generate_ssh_keypair()
61 |         query = (sa.update(keypairs)
62 |                    .values(ssh_public_key=pubkey, ssh_private_key=privkey)
63 |                    .where(keypairs.c.access_key == row.access_key))
64 |         conn.execute(query)
65 | 
66 | 
67 | def downgrade():
68 |     op.drop_column('keypairs', 'ssh_public_key')
69 |     op.drop_column('keypairs', 'ssh_private_key')
70 | 
-------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/02950808ca3d_add_agent_version.py: --------------------------------------------------------------------------------
1 | """add-agent-version
2 | 
3 | Revision ID: 02950808ca3d
4 | Revises: 4cc87e7fbfdf
5 | Create Date: 2019-06-02 21:14:12.320029
6 | 
7 | """
8 | from alembic import op
9 | import sqlalchemy as sa
10 | from sqlalchemy.dialects import postgresql
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = '02950808ca3d'
14 | down_revision = '4cc87e7fbfdf'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     # ### commands auto generated by Alembic - please adjust! ###
21 |     op.add_column(
22 |         'agents',
23 |         sa.Column('compute_plugins', postgresql.JSONB(astext_type=sa.Text()), nullable=False,
24 |                   server_default=sa.text("'{}'::jsonb")))
25 |     # Set the default to "19.06.0" for now (since it is the first version to have this field)
26 |     # and let the heartbeat handler update it with the exact value.
27 |     op.add_column(
28 |         'agents',
29 |         sa.Column('version', sa.String(length=64), nullable=False,
30 |                   server_default=sa.literal('19.06.0')))
31 |     # ### end Alembic commands ###
32 | 
33 | 
34 | def downgrade():
35 |     # ### commands auto generated by Alembic - please adjust!
### 36 | op.drop_column('agents', 'version') 37 | op.drop_column('agents', 'compute_plugins') 38 | # ### end Alembic commands ### 39 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/06184d82a211_add_session_creation_id.py: -------------------------------------------------------------------------------- 1 | """add-session_creation_id 2 | 3 | Revision ID: 06184d82a211 4 | Revises: 250e8656cf45 5 | Create Date: 2020-12-24 19:58:44.515321 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '06184d82a211' 14 | down_revision = '250e8656cf45' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.add_column('kernels', sa.Column('session_creation_id', sa.String(length=32), nullable=True)) 21 | 22 | 23 | def downgrade(): 24 | op.drop_column('kernels', 'session_creation_id') 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/0c5733f80e4d_index_kernel_timestamps.py: -------------------------------------------------------------------------------- 1 | """index-kernel-timestamps 2 | 3 | Revision ID: 0c5733f80e4d 4 | Revises: 9bd986a75a2a 5 | Create Date: 2019-09-24 15:58:58.932029 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '0c5733f80e4d' 14 | down_revision = '9bd986a75a2a' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.alter_column('kernels', 'status', 22 | existing_type=postgresql.ENUM('PENDING', 'PREPARING', 'BUILDING', 'PULLING', 'RUNNING', 'RESTARTING', 'RESIZING', 'SUSPENDED', 'TERMINATING', 'TERMINATED', 'ERROR', 'CANCELLED', name='kernelstatus'), 23 | nullable=False, 24 | existing_server_default=sa.text("'PENDING'::kernelstatus")) 25 | op.alter_column('kernels', 'type', 26 | existing_type=postgresql.ENUM('INTERACTIVE', 'BATCH', name='sessiontypes'), 27 | nullable=False, 28 | existing_server_default=sa.text("'INTERACTIVE'::sessiontypes")) 29 | op.create_index(op.f('ix_kernels_status_changed'), 'kernels', ['status_changed'], unique=False) 30 | op.create_index('ix_kernels_updated_order', 'kernels', ['created_at', 'terminated_at', 'status_changed'], unique=False) 31 | # ### end Alembic commands ### 32 | 33 | 34 | def downgrade(): 35 | # ### commands auto generated by Alembic - please adjust! 
### 36 | op.drop_index('ix_kernels_updated_order', table_name='kernels') 37 | op.drop_index(op.f('ix_kernels_status_changed'), table_name='kernels') 38 | op.alter_column('kernels', 'type', 39 | existing_type=postgresql.ENUM('INTERACTIVE', 'BATCH', name='sessiontypes'), 40 | nullable=True, 41 | existing_server_default=sa.text("'INTERACTIVE'::sessiontypes")) 42 | op.alter_column('kernels', 'status', 43 | existing_type=postgresql.ENUM('PENDING', 'PREPARING', 'BUILDING', 'PULLING', 'RUNNING', 'RESTARTING', 'RESIZING', 'SUSPENDED', 'TERMINATING', 'TERMINATED', 'ERROR', 'CANCELLED', name='kernelstatus'), 44 | nullable=True, 45 | existing_server_default=sa.text("'PENDING'::kernelstatus")) 46 | # ### end Alembic commands ### 47 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/0d553d59f369_users_replace_is_active_to_status_and_its_info.py: -------------------------------------------------------------------------------- 1 | """replace_users_is_active_to_status_and_its_info 2 | 3 | Revision ID: 0d553d59f369 4 | Revises: 9cd61b1ae70d 5 | Create Date: 2020-07-04 23:44:09.191729 6 | 7 | """ 8 | import textwrap 9 | 10 | from alembic import op 11 | import sqlalchemy as sa 12 | from sqlalchemy.dialects import postgresql 13 | 14 | # revision identifiers, used by Alembic. 15 | revision = '0d553d59f369' 16 | down_revision = '9cd61b1ae70d' 17 | branch_labels = None 18 | depends_on = None 19 | 20 | userstatus_choices = ( 21 | 'active', 22 | 'inactive', 23 | 'deleted', 24 | 'before-verification', 25 | ) 26 | userstatus = postgresql.ENUM( 27 | *userstatus_choices, 28 | name='userstatus' 29 | ) 30 | 31 | def upgrade(): 32 | userstatus.create(op.get_bind()) 33 | op.add_column( 34 | 'users', 35 | sa.Column('status', sa.Enum(*userstatus_choices, name='userstatus'), nullable=True) 36 | ) 37 | op.add_column('users', sa.Column('status_info', sa.Unicode(), nullable=True)) 38 | 39 | # Set user's status field. 40 | conn = op.get_bind() 41 | query = textwrap.dedent( 42 | "UPDATE users SET status = 'active', status_info = 'migrated' WHERE is_active = 't';" 43 | ) 44 | conn.execute(query) 45 | query = textwrap.dedent( 46 | "UPDATE users SET status = 'inactive', status_info = 'migrated' WHERE is_active <> 't';" 47 | ) 48 | conn.execute(query) 49 | 50 | op.alter_column('users', column_name='status', nullable=False) 51 | op.drop_column('users', 'is_active') 52 | 53 | 54 | def downgrade(): 55 | op.add_column('users', sa.Column('is_active', sa.Boolean(), nullable=True)) 56 | 57 | # Set user's is_active field. 58 | conn = op.get_bind() 59 | query = textwrap.dedent("UPDATE users SET is_active = 't' WHERE status = 'active';") 60 | conn.execute(query) 61 | query = textwrap.dedent("UPDATE users SET is_active = 'f' WHERE status <> 'active';") 62 | conn.execute(query) 63 | 64 | op.drop_column('users', 'status_info') 65 | op.drop_column('users', 'status') 66 | userstatus.drop(op.get_bind()) 67 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/0e558d06e0e3_add_service_ports.py: -------------------------------------------------------------------------------- 1 | """add-service-ports 2 | 3 | Revision ID: 0e558d06e0e3 4 | Revises: 10e39a34eed5 5 | Create Date: 2018-12-13 17:39:35.573747 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 
13 | revision = '0e558d06e0e3' 14 | down_revision = '10e39a34eed5' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.add_column('kernels', sa.Column('service_ports', sa.JSON(), nullable=True)) 21 | 22 | 23 | def downgrade(): 24 | op.drop_column('kernels', 'service_ports') 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/0f3bc98edaa0_more_status.py: -------------------------------------------------------------------------------- 1 | """more_status 2 | 3 | Revision ID: 0f3bc98edaa0 4 | Revises: 7ea324d0535b 5 | Create Date: 2017-08-11 13:12:55.236519 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '0f3bc98edaa0' 14 | down_revision = '7ea324d0535b' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | agentstatus = postgresql.ENUM( 19 | 'ALIVE', 'LOST', 'RESTARTING', 'TERMINATED', 20 | name='agentstatus', 21 | ) 22 | 23 | kernelstatus_choices = ( 24 | 'PREPARING', 'BUILDING', 'RUNNING', 25 | 'RESTARTING', 'RESIZING', 'SUSPENDED', 26 | 'TERMINATING', 'TERMINATED', 'ERROR', 27 | ) 28 | kernelstatus = postgresql.ENUM( 29 | *kernelstatus_choices, 30 | name='kernelstatus') 31 | 32 | 33 | def upgrade(): 34 | agentstatus.create(op.get_bind()) 35 | kernelstatus.create(op.get_bind()) 36 | op.add_column('agents', sa.Column('lost_at', sa.DateTime(timezone=True), nullable=True)) 37 | op.add_column('agents', sa.Column('status', sa.Enum('ALIVE', 'LOST', 'RESTARTING', 'TERMINATED', name='agentstatus'), nullable=False)) 38 | op.create_index(op.f('ix_agents_status'), 'agents', ['status'], unique=False) 39 | op.add_column('kernels', sa.Column('agent_addr', sa.String(length=128), nullable=False)) 40 | op.add_column('kernels', sa.Column('cpu_slot', sa.Integer(), nullable=False)) 41 | op.add_column('kernels', sa.Column('gpu_slot', sa.Integer(), nullable=False)) 42 | op.add_column('kernels', sa.Column('mem_slot', sa.Integer(), nullable=False)) 43 | op.add_column('kernels', sa.Column('repl_in_port', sa.Integer(), nullable=False)) 44 | op.add_column('kernels', sa.Column('repl_out_port', sa.Integer(), nullable=False)) 45 | op.add_column('kernels', sa.Column('stdin_port', sa.Integer(), nullable=False)) 46 | op.add_column('kernels', sa.Column('stdout_port', sa.Integer(), nullable=False)) 47 | op.drop_column('kernels', 'allocated_cores') 48 | op.add_column('kernels', sa.Column('cpu_set', sa.ARRAY(sa.Integer), nullable=True)) 49 | op.add_column('kernels', sa.Column('gpu_set', sa.ARRAY(sa.Integer), nullable=True)) 50 | op.alter_column('kernels', column_name='status', type_=sa.Enum(*kernelstatus_choices, name='kernelstatus'), 51 | postgresql_using='status::kernelstatus') 52 | 53 | 54 | def downgrade(): 55 | op.drop_column('kernels', 'stdout_port') 56 | op.drop_column('kernels', 'stdin_port') 57 | op.drop_column('kernels', 'repl_out_port') 58 | op.drop_column('kernels', 'repl_in_port') 59 | op.drop_column('kernels', 'mem_slot') 60 | op.drop_column('kernels', 'gpu_slot') 61 | op.drop_column('kernels', 'cpu_slot') 62 | op.drop_column('kernels', 'agent_addr') 63 | op.drop_index(op.f('ix_agents_status'), table_name='agents') 64 | op.drop_column('agents', 'status') 65 | op.drop_column('agents', 'lost_at') 66 | op.alter_column('kernels', column_name='status', type_=sa.String(length=64)) 67 | op.add_column('kernels', sa.Column('allocated_cores', sa.ARRAY(sa.Integer), 
nullable=True)) 68 | op.drop_column('kernels', 'cpu_set') 69 | op.drop_column('kernels', 'gpu_set') 70 | agentstatus.drop(op.get_bind()) 71 | kernelstatus.drop(op.get_bind()) 72 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/0f7a4b643940_.py: -------------------------------------------------------------------------------- 1 | """empty message 2 | 3 | Revision ID: 0f7a4b643940 4 | Revises: 7dd1d81c3204, 911023380bc9 5 | Create Date: 2022-03-14 06:20:12.850338 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '0f7a4b643940' 14 | down_revision = ('7dd1d81c3204', '911023380bc9') 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | pass 21 | 22 | 23 | def downgrade(): 24 | pass 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/10e39a34eed5_enlarge_kernels_lang_column_length.py: -------------------------------------------------------------------------------- 1 | """Enlarge kernels.lang column length 2 | 3 | Revision ID: 10e39a34eed5 4 | Revises: d582942886ad 5 | Create Date: 2018-10-29 13:52:10.583443 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '10e39a34eed5' 14 | down_revision = 'd582942886ad' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.alter_column('kernels', 'lang', 22 | existing_type=sa.String(length=64), 23 | type_=sa.String(length=512)) 24 | # ### end Alembic commands ### 25 | 26 | 27 | def downgrade(): 28 | # ### commands auto generated by Alembic - please adjust! ### 29 | op.alter_column('kernels', 'lang', 30 | existing_type=sa.String(length=512), 31 | type_=sa.String(length=64)) 32 | # ### end Alembic commands ### 33 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/11146ba02235_change_char_col_to_str.py: -------------------------------------------------------------------------------- 1 | """change char col to str 2 | 3 | Revision ID: 11146ba02235 4 | Revises: 0f7a4b643940 5 | Create Date: 2022-03-25 12:32:05.637628 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.sql.expression import bindparam 11 | 12 | # revision identifiers, used by Alembic. 
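# (Why the TRIM in upgrade() below: CHAR columns are blank-padded in
#  PostgreSQL, so after widening to a variable-length String the stored
#  values still carry trailing spaces that have to be stripped.)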
13 | revision = '11146ba02235' 14 | down_revision = '0f7a4b643940' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | conn = op.get_bind() 21 | op.alter_column('agents', column_name='architecture', type_=sa.String(length=32)) 22 | query = ''' 23 | UPDATE agents 24 | SET architecture = TRIM (architecture); 25 | ''' 26 | conn.execute(query) 27 | 28 | def downgrade(): 29 | op.alter_column('agents', column_name='architecture', type_=sa.CHAR(length=32)) 30 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/185852ff9872_add_vfolder_permissions_table.py: -------------------------------------------------------------------------------- 1 | """add vfolder_permissions table 2 | 3 | Revision ID: 185852ff9872 4 | Revises: 1fa6a31ea8e3 5 | Create Date: 2018-07-05 16:02:05.225094 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from ai.backend.manager.models.base import GUID 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '185852ff9872' 15 | down_revision = '1fa6a31ea8e3' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | op.create_table( 23 | 'vfolder_permissions', 24 | sa.Column('permission', sa.String(length=2), nullable=True), 25 | sa.Column('vfolder', GUID(), nullable=False), 26 | sa.Column('access_key', sa.String(length=20), nullable=False), 27 | sa.ForeignKeyConstraint(['access_key'], ['keypairs.access_key'], 28 | name=op.f('fk_vfolder_permissions_access_key_keypairs')), 29 | sa.ForeignKeyConstraint(['vfolder'], ['vfolders.id'], 30 | name=op.f('fk_vfolder_permissions_vfolder_vfolders'), 31 | onupdate='CASCADE', ondelete='CASCADE') 32 | ) 33 | # ### end Alembic commands ### 34 | 35 | 36 | def downgrade(): 37 | # ### commands auto generated by Alembic - please adjust! ### 38 | op.drop_table('vfolder_permissions') 39 | # ### end Alembic commands ### 40 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/1e673659b283_add_clusterized_column_to_agents_table.py: -------------------------------------------------------------------------------- 1 | """Add clusterized column to agents table 2 | 3 | Revision ID: 1e673659b283 4 | Revises: d5cc54fd36b5 5 | Create Date: 2020-01-07 17:52:51.771357 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '1e673659b283' 14 | down_revision = 'd5cc54fd36b5' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.add_column( 21 | 'agents', 22 | sa.Column('clusterized', sa.Boolean, default=False) 23 | ) 24 | 25 | 26 | def downgrade(): 27 | op.drop_column('agents', 'clusterized') 28 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/1e8531583e20_add_dotfile_column_to_keypairs.py: -------------------------------------------------------------------------------- 1 | """Add dotfile column to keypairs 2 | 3 | Revision ID: 1e8531583e20 4 | Revises: ce209920f654 5 | Create Date: 2020-01-17 15:59:09.367691 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 
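# (The server_default '\\x90' below is the msgpack encoding of an empty
#  array; presumably this matches how the manager packs the dotfiles blob.)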
12 | revision = '1e8531583e20' 13 | down_revision = 'ce209920f654' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | op.add_column( 20 | 'keypairs', 21 | sa.Column('dotfiles', sa.LargeBinary(length=64 * 1024), nullable=False, server_default='\\x90') 22 | ) 23 | 24 | 25 | def downgrade(): 26 | op.drop_column('keypairs', 'dotfiles') 27 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/1fa6a31ea8e3_add_inviter_field_for_vfolder_.py: -------------------------------------------------------------------------------- 1 | """add inviter field for vfolder_invitations 2 | 3 | Revision ID: 1fa6a31ea8e3 4 | Revises: 26d0c387e764 5 | Create Date: 2018-07-05 00:09:35.230704 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '1fa6a31ea8e3' 14 | down_revision = '26d0c387e764' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('vfolder_invitations', sa.Column('inviter', sa.String(length=256))) 22 | # ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | # ### commands auto generated by Alembic - please adjust! ### 27 | op.drop_column('vfolder_invitations', 'inviter') 28 | # ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/202b6dcbc159_add_internal_data_to_kernels.py: -------------------------------------------------------------------------------- 1 | """add-internal-data-to-kernels 2 | 3 | Revision ID: 202b6dcbc159 4 | Revises: 3f1dafab60b2 5 | Create Date: 2019-10-01 16:13:20.935285 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '202b6dcbc159' 14 | down_revision = '3f1dafab60b2' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('kernels', sa.Column('internal_data', postgresql.JSONB(astext_type=sa.Text()), nullable=True)) 22 | # ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | # ### commands auto generated by Alembic - please adjust! ### 27 | op.drop_column('kernels', 'internal_data') 28 | # ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/22964745c12b_add_total_resource_slots_to_group.py: -------------------------------------------------------------------------------- 1 | """add_total_resource_slots_to_group 2 | 3 | Revision ID: 22964745c12b 4 | Revises: 02950808ca3d 5 | Create Date: 2019-06-17 15:57:39.442741 6 | 7 | """ 8 | import textwrap 9 | from alembic import op 10 | import sqlalchemy as sa 11 | from sqlalchemy.dialects import postgresql 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '22964745c12b' 15 | down_revision = '02950808ca3d' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! 
###
22 |     op.add_column('domains', sa.Column('integration_id', sa.String(length=512), nullable=True))
23 |     op.alter_column('domains', 'total_resource_slots',
24 |                     existing_type=postgresql.JSONB(astext_type=sa.Text()),
25 |                     nullable=True)
26 |     op.add_column('groups', sa.Column('integration_id', sa.String(length=512), nullable=True))
27 |     op.add_column('groups', sa.Column('total_resource_slots',
28 |                                       postgresql.JSONB(astext_type=sa.Text()), nullable=True))
29 |     op.add_column('users', sa.Column('integration_id', sa.String(length=512), nullable=True))
30 |     # ### end Alembic commands ###
31 | 
32 |     print('\nSet each group\'s total_resource_slots to an empty dictionary.')
33 |     query = textwrap.dedent('''\
34 |         UPDATE groups SET total_resource_slots = '{}'::jsonb;
35 |     ''')
36 |     connection = op.get_bind()
37 |     connection.execute(query)
38 | 
39 | 
40 | def downgrade():
41 |     # ### commands auto generated by Alembic - please adjust! ###
42 |     op.drop_column('users', 'integration_id')
43 |     op.drop_column('groups', 'total_resource_slots')
44 |     op.drop_column('groups', 'integration_id')
45 |     op.alter_column('domains', 'total_resource_slots',
46 |                     existing_type=postgresql.JSONB(astext_type=sa.Text()),
47 |                     nullable=False)
48 |     op.drop_column('domains', 'integration_id')
49 |     # ### end Alembic commands ###
50 | 
-------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/22e52d03fc61_add_allowed_docker_registries_in_domains.py: --------------------------------------------------------------------------------
1 | """add_allowed_docker_registries_in_domains
2 | 
3 | Revision ID: 22e52d03fc61
4 | Revises: c401d78cc7b9
5 | Create Date: 2019-07-29 11:44:55.593760
6 | 
7 | """
8 | import os
9 | 
10 | from alembic import op
11 | import sqlalchemy as sa
12 | from sqlalchemy.dialects import postgresql
13 | 
14 | # revision identifiers, used by Alembic.
15 | revision = '22e52d03fc61'
16 | down_revision = 'c401d78cc7b9'
17 | branch_labels = None
18 | depends_on = None
19 | 
20 | 
21 | def upgrade():
22 |     # ### commands auto generated by Alembic - please adjust! ###
23 |     op.add_column('domains',
24 |                   sa.Column('allowed_docker_registries',
25 |                             postgresql.ARRAY(sa.String()), nullable=True))
26 |     # ### end Alembic commands ###
27 | 
28 |     print('\nSet default allowed_docker_registries.')
29 |     allowed_registries = os.environ.get('ALLOWED_DOCKER_REGISTRIES', None)
30 |     if allowed_registries:
31 |         allowed_registries = allowed_registries.replace(' ', '')
32 |         allowed_registries = '{index.docker.io,' + allowed_registries + '}'
33 |     else:
34 |         allowed_registries = '{index.docker.io}'
35 |     connection = op.get_bind()
36 |     query = ("UPDATE domains SET allowed_docker_registries = '{}';".format(allowed_registries))
37 |     connection.execute(query)
38 | 
39 |     op.alter_column('domains', column_name='allowed_docker_registries',
40 |                     nullable=False)
41 | 
42 | 
43 | def downgrade():
44 |     # ### commands auto generated by Alembic - please adjust!
### 45 | op.drop_column('domains', 'allowed_docker_registries') 46 | # ### end Alembic commands ### 47 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/250e8656cf45_add_status_data.py: -------------------------------------------------------------------------------- 1 | """add-status_data 2 | 3 | Revision ID: 250e8656cf45 4 | Revises: 57e717103287 5 | Create Date: 2020-12-23 14:19:08.801283 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '250e8656cf45' 14 | down_revision = '57e717103287' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.add_column('kernels', sa.Column('status_data', postgresql.JSONB(astext_type=sa.Text()), nullable=True)) 21 | 22 | 23 | def downgrade(): 24 | op.drop_column('kernels', 'status_data') 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/25e903510fa1_add_dotfiles_to_domains_and_groups.py: -------------------------------------------------------------------------------- 1 | """add_dotfiles_to_domains_and_groups 2 | 3 | Revision ID: 25e903510fa1 4 | Revises: 0d553d59f369 5 | Create Date: 2020-09-11 17:00:00.564219 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '25e903510fa1' 14 | down_revision = '0d553d59f369' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column( 22 | 'domains', 23 | sa.Column('dotfiles', sa.LargeBinary(length=65536), nullable=False, server_default='\\x90') 24 | ) 25 | op.add_column( 26 | 'groups', 27 | sa.Column('dotfiles', sa.LargeBinary(length=65536), nullable=True, server_default='\\x90') 28 | ) 29 | # ### end Alembic commands ### 30 | 31 | 32 | def downgrade(): 33 | # ### commands auto generated by Alembic - please adjust! ### 34 | op.drop_column('groups', 'dotfiles') 35 | op.drop_column('domains', 'dotfiles') 36 | # ### end Alembic commands ### 37 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/26d0c387e764_create_vfolder_invitations_table.py: -------------------------------------------------------------------------------- 1 | """create vfolder_invitations table 2 | 3 | Revision ID: 26d0c387e764 4 | Revises: f8a71c3bffa2 5 | Create Date: 2018-07-04 14:57:46.517587 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from ai.backend.manager.models.base import GUID 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '26d0c387e764' 15 | down_revision = 'f8a71c3bffa2' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! 
### 22 | op.create_table( 23 | 'vfolder_invitations', 24 | sa.Column('id', GUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), 25 | sa.Column('permission', sa.String(length=2), nullable=True), 26 | sa.Column('invitee', sa.String(length=256), nullable=False), 27 | sa.Column('vfolder', GUID(), nullable=False), 28 | sa.ForeignKeyConstraint(['vfolder'], ['vfolders.id'], 29 | name=op.f('fk_vfolder_invitations_vfolder_vfolders'), 30 | onupdate='CASCADE', ondelete='CASCADE'), 31 | sa.PrimaryKeyConstraint('id', name=op.f('pk_vfolder_invitations')) 32 | ) 33 | # ### end Alembic commands ### 34 | 35 | 36 | def downgrade(): 37 | # ### commands auto generated by Alembic - please adjust! ### 38 | op.drop_table('vfolder_invitations') 39 | # ### end Alembic commands ### 40 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/2a82340fa30e_add_mounts_info_in_kernel_db.py: -------------------------------------------------------------------------------- 1 | """add_mounts_info_in_kernel_db 2 | 3 | Revision ID: 2a82340fa30e 4 | Revises: c1409ad0e8da 5 | Create Date: 2019-08-01 15:59:41.807766 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '2a82340fa30e' 14 | down_revision = 'c1409ad0e8da' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('kernels', sa.Column('mounts', sa.ARRAY(sa.String()), nullable=True)) 22 | # ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | # ### commands auto generated by Alembic - please adjust! ### 27 | op.drop_column('kernels', 'mounts') 28 | # ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/2b0931e4a059_convert_lang_to_image_and_registry.py: -------------------------------------------------------------------------------- 1 | """convert-lang-to-image-and-registry 2 | 3 | Revision ID: 2b0931e4a059 4 | Revises: f0f4ee907155 5 | Create Date: 2019-01-28 23:53:44.342786 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '2b0931e4a059' 14 | down_revision = 'f0f4ee907155' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.alter_column('kernels', column_name='lang', new_column_name='image') 22 | op.add_column('kernels', sa.Column('registry', sa.String(length=512), 23 | nullable=True)) 24 | # ### end Alembic commands ### 25 | 26 | 27 | def downgrade(): 28 | # ### commands auto generated by Alembic - please adjust! 
### 29 | op.alter_column('kernels', column_name='image', new_column_name='lang') 30 | op.drop_column('kernels', 'registry') 31 | # ### end Alembic commands ### 32 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/352fa4f88f61_add_tpu_slot_on_kernel_model.py: -------------------------------------------------------------------------------- 1 | """add tpu slot on kernel model 2 | 3 | Revision ID: 352fa4f88f61 4 | Revises: 57b523dec0e8 5 | Create Date: 2018-11-12 11:39:30.613081 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '352fa4f88f61' 14 | down_revision = '57b523dec0e8' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('kernels', sa.Column('tpu_set', sa.ARRAY(sa.Integer()), nullable=True)) 22 | op.add_column('kernels', sa.Column('tpu_slot', sa.Float(), nullable=False, 23 | server_default='0')) 24 | op.alter_column('kernels', 'tpu_slot', server_default=None) 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | op.drop_column('kernels', 'tpu_slot') 31 | op.drop_column('kernels', 'tpu_set') 32 | # ### end Alembic commands ### 33 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/3bb80d1887d6_add_preopen_ports.py: -------------------------------------------------------------------------------- 1 | """add preopen ports 2 | 3 | Revision ID: 3bb80d1887d6 4 | Revises: 1e8531583e20 5 | Create Date: 2020-02-05 17:02:42.344726 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '3bb80d1887d6' 14 | down_revision = '1e8531583e20' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('kernels', sa.Column('preopen_ports', sa.ARRAY(sa.Integer()), nullable=True)) 22 | # ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | # ### commands auto generated by Alembic - please adjust! ### 27 | op.drop_column('kernels', 'preopen_ports') 28 | # ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/3cf19d906e71_.py: -------------------------------------------------------------------------------- 1 | """empty message 2 | 3 | Revision ID: 3cf19d906e71 4 | Revises: 22964745c12b, 5d8e6043455e 5 | Create Date: 2019-06-17 16:45:14.580560 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 
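# (A merge revision: it has two parent revisions and empty upgrade()/
#  downgrade() bodies; its only purpose is to join parallel migration
#  branches back into a single head.)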
13 | revision = '3cf19d906e71' 14 | down_revision = ('22964745c12b', '5d8e6043455e') 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | pass 21 | 22 | 23 | def downgrade(): 24 | pass 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/3f1dafab60b2_merge.py: -------------------------------------------------------------------------------- 1 | """merge 2 | 3 | Revision ID: 3f1dafab60b2 4 | Revises: c092dabf3ee5, 6f5fe19894b7 5 | Create Date: 2019-09-30 04:34:42.092031 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '3f1dafab60b2' 14 | down_revision = ('c092dabf3ee5', '6f5fe19894b7') 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | pass 21 | 22 | 23 | def downgrade(): 24 | pass 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/4545f5c948b3_add_io_scratch_size_stats.py: -------------------------------------------------------------------------------- 1 | """add_io_scratch_size_stats 2 | 3 | Revision ID: 4545f5c948b3 4 | Revises: e7371ca5797a 5 | Create Date: 2017-10-10 15:57:48.463055 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = '4545f5c948b3' 13 | down_revision = 'e7371ca5797a' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | op.add_column('kernels', sa.Column('io_max_scratch_size', sa.BigInteger(), nullable=True)) 20 | op.drop_column('kernels', 'mem_cur_bytes') 21 | 22 | 23 | def downgrade(): 24 | op.add_column('kernels', sa.Column('mem_cur_bytes', sa.BIGINT(), autoincrement=False, nullable=True)) 25 | op.drop_column('kernels', 'io_max_scratch_size') 26 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/48ab2dfefba9_reindex_kernel_updated_order.py: -------------------------------------------------------------------------------- 1 | """reindex-kernel-updated-order 2 | 3 | Revision ID: 48ab2dfefba9 4 | Revises: 0c5733f80e4d 5 | Create Date: 2019-09-24 16:04:29.928068 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '48ab2dfefba9' 14 | down_revision = '0c5733f80e4d' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.drop_index('ix_kernels_updated_order', table_name='kernels') 22 | op.create_index('ix_kernels_updated_order', 'kernels', [sa.text("greatest('created_at', 'terminated_at', 'status_changed')")], unique=False) 23 | # ### end Alembic commands ### 24 | 25 | 26 | def downgrade(): 27 | # ### commands auto generated by Alembic - please adjust! 
### 28 | op.drop_index('ix_kernels_updated_order', table_name='kernels') 29 | op.create_index('ix_kernels_updated_order', 'kernels', ['created_at', 'terminated_at', 'status_changed'], unique=False) 30 | # ### end Alembic commands ### 31 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/4b7b650bc30e_add_creator_in_vfolders.py: -------------------------------------------------------------------------------- 1 | """add_creator_in_vfolders 2 | 3 | Revision ID: 4b7b650bc30e 4 | Revises: 202b6dcbc159 5 | Create Date: 2019-10-30 00:23:57.085692 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '4b7b650bc30e' 14 | down_revision = '202b6dcbc159' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('vfolders', sa.Column('creator', sa.String(length=128), nullable=True)) 22 | # ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | # ### commands auto generated by Alembic - please adjust! ### 27 | op.drop_column('vfolders', 'creator') 28 | # ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/4b8a66fb8d82_revamp_keypairs.py: -------------------------------------------------------------------------------- 1 | """revamp_keypairs 2 | 3 | Revision ID: 4b8a66fb8d82 4 | Revises: 854bd902b1bc 5 | Create Date: 2017-09-13 01:57:42.355633 6 | 7 | """ 8 | from alembic import op 9 | 10 | # revision identifiers, used by Alembic. 11 | revision = '4b8a66fb8d82' 12 | down_revision = '854bd902b1bc' 13 | branch_labels = None 14 | depends_on = None 15 | 16 | 17 | def upgrade(): 18 | # ### commands auto generated by Alembic - please adjust! ### 19 | op.alter_column('keypairs', column_name='billing_plan', new_column_name='resource_policy') 20 | op.create_index(op.f('ix_keypairs_is_active'), 'keypairs', ['is_active'], unique=False) 21 | op.create_index(op.f('ix_keypairs_user_id'), 'keypairs', ['user_id'], unique=False) 22 | # ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | # ### commands auto generated by Alembic - please adjust! ### 27 | op.alter_column('keypairs', column_name='resource_policy', new_column_name='billing_plan') 28 | op.drop_index(op.f('ix_keypairs_user_id'), table_name='keypairs') 29 | op.drop_index(op.f('ix_keypairs_is_active'), table_name='keypairs') 30 | # ### end Alembic commands ### 31 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/513164749de4_add_cancelled_to_kernelstatus.py: -------------------------------------------------------------------------------- 1 | """add-cancelled-to-kernelstatus 2 | 3 | Revision ID: 513164749de4 4 | Revises: 405aa2c39458 5 | Create Date: 2019-09-20 11:13:39.157834 6 | 7 | """ 8 | import textwrap 9 | 10 | from alembic import op 11 | import sqlalchemy as sa 12 | from sqlalchemy.dialects import postgresql 13 | 14 | # revision identifiers, used by Alembic. 
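# (PostgreSQL cannot drop a value from an existing enum type, so both
#  directions below rename the old type, create the new one, re-cast the
#  column through text, and drop the renamed type.)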
15 | revision = '513164749de4' 16 | down_revision = '405aa2c39458' 17 | branch_labels = None 18 | depends_on = None 19 | 20 | kernelstatus_new_values = [ 21 | 'PENDING', 22 | 'PREPARING', 23 | 'BUILDING', 24 | 'PULLING', 25 | 'RUNNING', 26 | 'RESTARTING', 27 | 'RESIZING', 28 | 'SUSPENDED', 29 | 'TERMINATING', 30 | 'TERMINATED', 31 | 'ERROR', 32 | 'CANCELLED' # added 33 | ] 34 | kernelstatus_new = postgresql.ENUM(*kernelstatus_new_values, name='kernelstatus') 35 | 36 | kernelstatus_old_values = [ 37 | 'PENDING', 38 | 'PREPARING', 39 | 'BUILDING', 40 | 'PULLING', 41 | 'RUNNING', 42 | 'RESTARTING', 43 | 'RESIZING', 44 | 'SUSPENDED', 45 | 'TERMINATING', 46 | 'TERMINATED', 47 | # 'ERROR', 48 | ] 49 | kernelstatus_old = postgresql.ENUM(*kernelstatus_old_values, name='kernelstatus') 50 | 51 | 52 | def upgrade(): 53 | conn = op.get_bind() 54 | conn.execute('DROP INDEX IF EXISTS ix_kernels_unique_sess_token;') 55 | conn.execute('ALTER TYPE kernelstatus RENAME TO kernelstatus_old;') 56 | kernelstatus_new.create(conn) 57 | conn.execute(textwrap.dedent('''\ 58 | ALTER TABLE kernels 59 | ALTER COLUMN "status" DROP DEFAULT, 60 | ALTER COLUMN "status" TYPE kernelstatus USING "status"::text::kernelstatus, 61 | ALTER COLUMN "status" SET DEFAULT 'PENDING'::kernelstatus; 62 | DROP TYPE kernelstatus_old; 63 | ''')) 64 | op.create_index( 65 | 'ix_kernels_unique_sess_token', 'kernels', ['access_key', 'sess_id'], 66 | unique=True, postgresql_where=sa.text( 67 | "status NOT IN ('TERMINATED', 'CANCELLED') and role = 'master'" 68 | )) 69 | 70 | 71 | def downgrade(): 72 | op.drop_index('ix_kernels_unique_sess_token', table_name='kernels') 73 | conn = op.get_bind() 74 | conn.execute('ALTER TYPE kernelstatus RENAME TO kernelstatus_new;') 75 | kernelstatus_old.create(conn) 76 | conn.execute(textwrap.dedent('''\ 77 | ALTER TABLE kernels 78 | ALTER COLUMN "status" DROP DEFAULT, 79 | ALTER COLUMN "status" TYPE kernelstatus USING ( 80 | CASE "status"::text 81 | WHEN 'CANCELLED' THEN 'TERMINATED' 82 | ELSE "status"::text 83 | END 84 | )::kernelstatus, 85 | ALTER COLUMN "status" SET DEFAULT 'PREPARING'::kernelstatus; 86 | DROP TYPE kernelstatus_new; 87 | ''')) 88 | op.create_index( 89 | 'ix_kernels_unique_sess_token', 'kernels', ['access_key', 'sess_id'], 90 | unique=True, postgresql_where=sa.text( 91 | "status != 'TERMINATED' and role = 'master'" 92 | )) 93 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/518ecf41f567_add_index_for_cluster_role.py: -------------------------------------------------------------------------------- 1 | """add-index-for-cluster_role 2 | 3 | Revision ID: 518ecf41f567 4 | Revises: dc9b66466e43 5 | Create Date: 2021-01-07 00:04:53.794638 6 | 7 | """ 8 | from alembic import op 9 | 10 | # revision identifiers, used by Alembic. 11 | revision = '518ecf41f567' 12 | down_revision = 'dc9b66466e43' 13 | branch_labels = None 14 | depends_on = None 15 | 16 | 17 | def upgrade(): 18 | # ### commands auto generated by Alembic - please adjust! ### 19 | op.create_index(op.f('ix_kernels_cluster_role'), 'kernels', ['cluster_role'], unique=False) 20 | op.create_index('ix_kernels_status_role', 'kernels', ['status', 'cluster_role'], unique=False) 21 | # ### end Alembic commands ### 22 | 23 | 24 | def downgrade(): 25 | # ### commands auto generated by Alembic - please adjust! 
### 26 | op.drop_index('ix_kernels_status_role', table_name='kernels') 27 | op.drop_index(op.f('ix_kernels_cluster_role'), table_name='kernels') 28 | # ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/51dddd79aa21_add_logs_column_on_kernel_table.py: -------------------------------------------------------------------------------- 1 | """Add logs column on kernel table 2 | 3 | Revision ID: 51dddd79aa21 4 | Revises: 3bb80d1887d6 5 | Create Date: 2020-02-11 14:45:55.496745 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '51dddd79aa21' 14 | down_revision = '3bb80d1887d6' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.add_column('kernels', sa.Column('container_log', sa.LargeBinary(), nullable=True)) 21 | 22 | 23 | def downgrade(): 24 | op.drop_column('kernels', 'container_log') 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/548cc8aa49c8_update_cluster_columns_in_kernels.py: -------------------------------------------------------------------------------- 1 | """update_cluster_columns_in_kernels 2 | 3 | Revision ID: 548cc8aa49c8 4 | Revises: 1e673659b283 5 | Create Date: 2020-09-08 18:50:05.594899 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = '548cc8aa49c8' 13 | down_revision = '1e673659b283' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | op.drop_index('ix_kernels_sess_id_role', table_name='kernels') 20 | op.drop_index('ix_kernels_unique_sess_token', table_name='kernels') 21 | 22 | op.add_column('kernels', sa.Column('cluster_size', sa.Integer, nullable=False, 23 | default=1, server_default=sa.text('1'))) 24 | op.add_column('kernels', sa.Column('cluster_hostname', sa.String(length=64), nullable=True)) 25 | conn = op.get_bind() 26 | query = "UPDATE kernels k " \ 27 | " SET cluster_size = (SELECT COUNT(*) FROM kernels j WHERE j.session_id = k.session_id);" 28 | conn.execute(query) 29 | query = "UPDATE kernels SET cluster_hostname = CONCAT(role, CAST(idx AS TEXT));" 30 | conn.execute(query) 31 | op.alter_column('kernels', 'cluster_hostname', nullable=False) 32 | 33 | op.alter_column('kernels', 'idx', new_column_name='cluster_idx', nullable=False) 34 | op.alter_column('kernels', 'role', new_column_name='cluster_role', nullable=False) 35 | 36 | op.create_index('ix_kernels_sess_id_role', 'kernels', ['session_id', 'cluster_role'], unique=False) 37 | op.create_index('ix_kernels_unique_sess_token', 'kernels', ['access_key', 'session_id'], unique=True, 38 | postgresql_where=sa.text("status NOT IN ('TERMINATED', 'CANCELLED') " 39 | "and cluster_role = 'main'")) 40 | 41 | 42 | def downgrade(): 43 | op.drop_index('ix_kernels_unique_sess_token', table_name='kernels') 44 | op.drop_index('ix_kernels_sess_id_role', table_name='kernels') 45 | 46 | op.alter_column('kernels', 'cluster_idx', new_column_name='idx') 47 | op.alter_column('kernels', 'cluster_role', new_column_name='role') 48 | op.drop_column('kernels', 'cluster_size') 49 | op.drop_column('kernels', 'cluster_hostname') 50 | 51 | op.create_index('ix_kernels_unique_sess_token', 'kernels', 52 | ['access_key', 'session_name'], unique=True, 53 | postgresql_where=sa.text("status NOT IN ('TERMINATED', 'CANCELLED') " 
54 | "and role = 'main'")) 55 | op.create_index('ix_kernels_sess_id_role', 'kernels', ['session_name', 'role'], unique=False) 56 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/57b523dec0e8_add_tpu_slots.py: -------------------------------------------------------------------------------- 1 | """add tpu slots 2 | 3 | Revision ID: 57b523dec0e8 4 | Revises: 10e39a34eed5 5 | Create Date: 2018-11-12 10:54:45.271417 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '57b523dec0e8' 14 | down_revision = '10e39a34eed5' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('agents', sa.Column('tpu_slots', sa.Float(), nullable=False, 22 | server_default='0')) 23 | op.add_column('agents', sa.Column('used_tpu_slots', sa.Float(), nullable=False, 24 | server_default='0')) 25 | op.alter_column('agents', 'tpu_slots', server_default=None) 26 | op.alter_column('agents', 'used_tpu_slots', server_default=None) 27 | # ### end Alembic commands ### 28 | 29 | 30 | def downgrade(): 31 | # ### commands auto generated by Alembic - please adjust! ### 32 | op.drop_column('agents', 'used_tpu_slots') 33 | op.drop_column('agents', 'tpu_slots') 34 | # ### end Alembic commands ### 35 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/57e717103287_rename_clone_allowed_to_cloneable.py: -------------------------------------------------------------------------------- 1 | """rename-clone_allowed-to-cloneable 2 | 3 | Revision ID: 57e717103287 4 | Revises: eec98e65902a 5 | Create Date: 2020-10-04 14:14:55.167654 6 | 7 | """ 8 | from alembic import op 9 | 10 | # revision identifiers, used by Alembic. 11 | revision = '57e717103287' 12 | down_revision = 'eec98e65902a' 13 | branch_labels = None 14 | depends_on = None 15 | 16 | 17 | def upgrade(): 18 | op.alter_column('vfolders', 'clone_allowed', new_column_name='cloneable') 19 | 20 | 21 | def downgrade(): 22 | op.alter_column('vfolders', 'cloneable', new_column_name='clone_allowed') 23 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/5b45f28d2cac_add_resource_opts_in_kernels.py: -------------------------------------------------------------------------------- 1 | """add_resource_opts_in_kernels 2 | 3 | Revision ID: 5b45f28d2cac 4 | Revises: 9c89b9011872 5 | Create Date: 2019-09-08 10:07:20.971662 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '5b45f28d2cac' 14 | down_revision = '9c89b9011872' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('kernels', sa.Column('resource_opts', 22 | postgresql.JSONB(astext_type=sa.Text()), 23 | nullable=True)) 24 | # ### end Alembic commands ### 25 | 26 | 27 | def downgrade(): 28 | # ### commands auto generated by Alembic - please adjust! 
### 29 | op.drop_column('kernels', 'resource_opts') 30 | # ### end Alembic commands ### 31 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/5de06da3c2b5_init.py: -------------------------------------------------------------------------------- 1 | """init 2 | 3 | Revision ID: 5de06da3c2b5 4 | Revises: 5 | Create Date: 2017-06-08 15:08:23.166237 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from ai.backend.manager.models.base import GUID 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '5de06da3c2b5' 15 | down_revision = None 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | op.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp";') 22 | # ### commands auto generated by Alembic - please adjust! ### 23 | op.create_table( 24 | 'keypairs', 25 | sa.Column('user_id', sa.Integer(), nullable=True), 26 | sa.Column('access_key', sa.String(length=20), nullable=False), 27 | sa.Column('secret_key', sa.String(length=40), nullable=True), 28 | sa.Column('is_active', sa.Boolean(), nullable=True), 29 | sa.Column('billing_plan', sa.String(), nullable=True), 30 | sa.Column('created_at', sa.DateTime(timezone=True), 31 | server_default=sa.text('now()'), nullable=True), 32 | sa.Column('last_used', sa.DateTime(timezone=True), nullable=True), 33 | sa.Column('concurrency_limit', sa.Integer(), nullable=True), 34 | sa.Column('concurrency_used', sa.Integer(), nullable=True), 35 | sa.Column('rate_limit', sa.Integer(), nullable=True), 36 | sa.Column('num_queries', sa.Integer(), server_default='0', nullable=True), 37 | sa.PrimaryKeyConstraint('access_key') 38 | ) 39 | op.create_table( 40 | 'kernels', 41 | sa.Column('sess_id', GUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), 42 | sa.Column('lang', sa.String(length=64), nullable=True), 43 | sa.Column('access_key', sa.String(length=20), nullable=True), 44 | sa.Column('created_at', sa.DateTime(timezone=True), 45 | server_default=sa.text('now()'), nullable=True), 46 | sa.Column('terminated_at', sa.DateTime(timezone=True), nullable=True), 47 | sa.Column('status', sa.String(), nullable=True), 48 | sa.Column('agent_id', sa.String(), nullable=True), 49 | sa.Column('container_id', sa.String(), nullable=True), 50 | sa.ForeignKeyConstraint(['access_key'], ['keypairs.access_key'], ), 51 | sa.PrimaryKeyConstraint('sess_id') 52 | ) 53 | op.create_table( 54 | 'usage', 55 | sa.Column('id', GUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), 56 | sa.Column('access_key_id', sa.String(length=20), nullable=True), 57 | sa.Column('kernel_type', sa.String(), nullable=True), 58 | sa.Column('kernel_id', sa.String(), nullable=True), 59 | sa.Column('started_at', sa.DateTime(timezone=True), nullable=True), 60 | sa.Column('terminated_at', sa.DateTime(timezone=True), nullable=True), 61 | sa.Column('cpu_used', sa.Integer(), server_default='0', nullable=True), 62 | sa.Column('mem_used', sa.Integer(), server_default='0', nullable=True), 63 | sa.Column('io_used', sa.Integer(), server_default='0', nullable=True), 64 | sa.Column('net_used', sa.Integer(), server_default='0', nullable=True), 65 | sa.ForeignKeyConstraint(['access_key_id'], ['keypairs.access_key'], ), 66 | sa.PrimaryKeyConstraint('id') 67 | ) 68 | # ### end Alembic commands ### 69 | 70 | 71 | def downgrade(): 72 | # ### commands auto generated by Alembic - please adjust! 
### 73 | op.drop_table('usage') 74 | op.drop_table('kernels') 75 | op.drop_table('keypairs') 76 | # ### end Alembic commands ### 77 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/5e88398bc340_add_unmanaged_path_column_to_vfolders.py: -------------------------------------------------------------------------------- 1 | """Add unmanaged_path column to vfolders 2 | 3 | Revision ID: 5e88398bc340 4 | Revises: d452bacd085c 5 | Create Date: 2019-11-28 13:41:03.545551 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '5e88398bc340' 14 | down_revision = 'd452bacd085c' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.add_column('vfolders', sa.Column('unmanaged_path', sa.String(length=512), nullable=True)) 21 | 22 | 23 | def downgrade(): 24 | op.drop_column('vfolders', 'unmanaged_path') 25 | 26 | 27 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/60a1effa77d2_add_coordinator_address_column_on_.py: -------------------------------------------------------------------------------- 1 | """Add wsproxy_addr column on scaling_group 2 | 3 | Revision ID: 60a1effa77d2 4 | Revises: 8679d0a7e22b 5 | Create Date: 2021-09-17 13:19:57.525513 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '60a1effa77d2' 14 | down_revision = '8679d0a7e22b' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.add_column('scaling_groups', sa.Column('wsproxy_addr', sa.String(length=1024), nullable=True)) 21 | 22 | 23 | def downgrade(): 24 | op.drop_column('scaling_groups', 'wsproxy_addr') 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/65c4a109bbc7_.py: -------------------------------------------------------------------------------- 1 | """Merge migration 2 | 3 | Revision ID: 65c4a109bbc7 4 | Revises: 0262e50e90e0, 5e88398bc340 5 | Create Date: 2019-12-16 01:42:44.316419 6 | 7 | """ 8 | 9 | 10 | # revision identifiers, used by Alembic. 11 | revision = '65c4a109bbc7' 12 | down_revision = ('0262e50e90e0', '5e88398bc340') 13 | branch_labels = None 14 | depends_on = None 15 | 16 | 17 | def upgrade(): 18 | pass 19 | 20 | 21 | def downgrade(): 22 | pass 23 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/6f1c1b83870a_merge_user_s_first__last_name_into_full_.py: -------------------------------------------------------------------------------- 1 | """merge user's first_/last_name into full_name 2 | 3 | Revision ID: 6f1c1b83870a 4 | Revises: 7a82e0c70122 5 | Create Date: 2019-05-22 15:52:57.173180 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '6f1c1b83870a' 14 | down_revision = '7a82e0c70122' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! 
### 21 | op.add_column('users', sa.Column('full_name', sa.String(length=64), nullable=True)) 22 | op.drop_column('users', 'last_name') 23 | op.drop_column('users', 'first_name') 24 | # ### end Alembic commands ### 25 | 26 | 27 | def downgrade(): 28 | # ### commands auto generated by Alembic - please adjust! ### 29 | op.add_column('users', sa.Column('first_name', sa.VARCHAR(length=32), 30 | autoincrement=False, nullable=True)) 31 | op.add_column('users', sa.Column('last_name', sa.VARCHAR(length=32), 32 | autoincrement=False, nullable=True)) 33 | op.drop_column('users', 'full_name') 34 | # ### end Alembic commands ### 35 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/6f5fe19894b7_vfolder_invitation_state_to_enum_type.py: -------------------------------------------------------------------------------- 1 | """vfolder_invitation_state_to_enum_type 2 | 3 | Revision ID: 6f5fe19894b7 4 | Revises: 48ab2dfefba9 5 | Create Date: 2019-09-28 21:05:55.409422 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from ai.backend.manager.models import VFolderInvitationState 11 | from sqlalchemy.dialects import postgresql 12 | 13 | 14 | # revision identifiers, used by Alembic. 15 | revision = '6f5fe19894b7' 16 | down_revision = '48ab2dfefba9' 17 | branch_labels = None 18 | depends_on = None 19 | 20 | vfinvs_choices = list(map(lambda v: v.value, VFolderInvitationState)) 21 | vfolderinvitationstate = postgresql.ENUM( 22 | *vfinvs_choices, 23 | name='vfolderinvitationstate', 24 | ) 25 | 26 | 27 | def upgrade(): 28 | # ### commands auto generated by Alembic - please adjust! ### 29 | pass 30 | # ### end Alembic commands ### 31 | vfolderinvitationstate.create(op.get_bind()) 32 | op.alter_column('vfolder_invitations', column_name='state', 33 | type_=sa.Enum(*vfinvs_choices, name='vfolderinvitationstate'), 34 | postgresql_using='state::vfolderinvitationstate') 35 | 36 | 37 | def downgrade(): 38 | # ### commands auto generated by Alembic - please adjust! ### 39 | pass 40 | # ### end Alembic commands ### 41 | op.alter_column('vfolder_invitations', column_name='state', 42 | type_=sa.String(length=10), 43 | postgresql_using='state::text') 44 | vfolderinvitationstate.drop(op.get_bind()) 45 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/7a82e0c70122_add_group_model.py: -------------------------------------------------------------------------------- 1 | """add group model 2 | 3 | Revision ID: 7a82e0c70122 4 | Revises: bae1a7326e8a 5 | Create Date: 2019-05-09 10:00:55.788734 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from ai.backend.manager.models.base import GUID 11 | 12 | 13 | # revision identifiers, used by Alembic.
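
One thing to note about 6f1c1b83870a above: it is lossy in both directions. The upgrade drops first_name and last_name without copying them anywhere, and the downgrade restores the columns but not their contents. A data-preserving upgrade would backfill full_name before dropping; a sketch (the space-joined concatenation rule is an assumption, not something the project specified):

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        op.add_column('users', sa.Column('full_name', sa.String(length=64), nullable=True))
        # Backfill from the old columns before they disappear.
        op.execute(
            "UPDATE users SET full_name = trim(both ' ' from "
            "coalesce(first_name, '') || ' ' || coalesce(last_name, ''))"
        )
        op.drop_column('users', 'last_name')
        op.drop_column('users', 'first_name')
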
14 | revision = '7a82e0c70122' 15 | down_revision = 'bae1a7326e8a' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | op.create_table( 22 | 'groups', 23 | sa.Column('id', GUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), 24 | sa.Column('name', sa.String(length=64), nullable=False), 25 | sa.Column('description', sa.String(length=512), nullable=True), 26 | sa.Column('is_active', sa.Boolean(), nullable=True), 27 | sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True), 28 | sa.Column('modified_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True), 29 | sa.Column('domain_name', sa.String(length=64), nullable=False), 30 | sa.ForeignKeyConstraint(['domain_name'], ['domains.name'], 31 | name=op.f('fk_groups_domain_name_domains'), 32 | onupdate='CASCADE', ondelete='CASCADE'), 33 | sa.PrimaryKeyConstraint('id', name=op.f('pk_groups')), 34 | sa.UniqueConstraint('name', 'domain_name', name='uq_groups_name_domain_name') 35 | ) 36 | op.create_index(op.f('ix_groups_domain_name'), 'groups', ['domain_name'], unique=False) 37 | op.create_table( 38 | 'association_groups_users', 39 | sa.Column('user_id', GUID(), nullable=False), 40 | sa.Column('group_id', GUID(), nullable=False), 41 | sa.ForeignKeyConstraint(['group_id'], ['groups.id'], 42 | name=op.f('fk_association_groups_users_group_id_groups'), 43 | onupdate='CASCADE', ondelete='CASCADE'), 44 | sa.ForeignKeyConstraint(['user_id'], ['users.uuid'], 45 | name=op.f('fk_association_groups_users_user_id_users'), 46 | onupdate='CASCADE', ondelete='CASCADE'), 47 | sa.UniqueConstraint('user_id', 'group_id', name='uq_association_user_id_group_id') 48 | ) 49 | 50 | 51 | def downgrade(): 52 | op.drop_table('association_groups_users') 53 | op.drop_index(op.f('ix_groups_domain_name'), table_name='groups') 54 | op.drop_table('groups') 55 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/7dd1d81c3204_add_vfolder_mounts_to_kernels.py: -------------------------------------------------------------------------------- 1 | """add-vfolder-mounts-to-kernels 2 | 3 | Revision ID: 7dd1d81c3204 4 | Revises: 60a1effa77d2 5 | Create Date: 2022-03-09 16:41:48.304128 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = '7dd1d81c3204' 13 | down_revision = '60a1effa77d2' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | # ### commands auto generated by Alembic - please adjust! ### 20 | op.add_column('kernels', sa.Column('vfolder_mounts', sa.JSON(), nullable=True)) 21 | op.create_index('ix_keypairs_resource_policy', 'keypairs', ['resource_policy'], unique=False) 22 | # ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | # ### commands auto generated by Alembic - please adjust! 
### 27 | op.drop_index('ix_keypairs_resource_policy', table_name='keypairs') 28 | op.drop_column('kernels', 'vfolder_mounts') 29 | # ### end Alembic commands ### 30 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/80176413d8aa_keypairs_get_is_admin.py: -------------------------------------------------------------------------------- 1 | """keypairs_get_is_admin 2 | 3 | Revision ID: 80176413d8aa 4 | Revises: 4b8a66fb8d82 5 | Create Date: 2017-09-14 16:01:59.994941 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.sql.expression import false 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '80176413d8aa' 14 | down_revision = '4b8a66fb8d82' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('keypairs', sa.Column('is_admin', sa.Boolean(), nullable=False, default=False, server_default=false())) 22 | op.create_index(op.f('ix_keypairs_is_admin'), 'keypairs', ['is_admin'], unique=False) 23 | # ### end Alembic commands ### 24 | 25 | 26 | def downgrade(): 27 | # ### commands auto generated by Alembic - please adjust! ### 28 | op.drop_index(op.f('ix_keypairs_is_admin'), table_name='keypairs') 29 | op.drop_column('keypairs', 'is_admin') 30 | # ### end Alembic commands ### 31 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/81c264528f20_add_max_session_lifetime.py: -------------------------------------------------------------------------------- 1 | """add-max-session-lifetime 2 | 3 | Revision ID: 81c264528f20 4 | Revises: d727b5da20e6 5 | Create Date: 2022-04-21 09:22:01.405710 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '81c264528f20' 14 | down_revision = 'd727b5da20e6' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.add_column('keypair_resource_policies', sa.Column('max_session_lifetime', sa.Integer(), server_default=sa.text('0'), nullable=False)) 21 | 22 | 23 | def downgrade(): 24 | op.drop_column('keypair_resource_policies', 'max_session_lifetime') 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/854bd902b1bc_change_kernel_identification.py: -------------------------------------------------------------------------------- 1 | """change-kernel-identification 2 | 3 | Revision ID: 854bd902b1bc 4 | Revises: 0f3bc98edaa0 5 | Create Date: 2017-08-21 17:08:20.581565 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from ai.backend.manager.models.base import GUID 11 | from sqlalchemy.dialects import postgresql 12 | 13 | # revision identifiers, used by Alembic. 
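
80176413d8aa and 81c264528f20 above both follow the rule for adding a NOT NULL column to a populated table: supply a server_default so existing rows receive a value in the same ALTER statement. When the default should not apply to future inserts, it can be cleared immediately afterwards, which is what 57b523dec0e8 did earlier with the TPU slot columns. A sketch against a hypothetical accounts table:

    import sqlalchemy as sa
    from alembic import op
    from sqlalchemy.sql.expression import false

    def upgrade():
        # Without server_default, the ALTER would fail on any existing row.
        op.add_column('accounts', sa.Column(
            'is_admin', sa.Boolean(), nullable=False, server_default=false(),
        ))
        # Optional: drop the default once existing rows have been filled in.
        op.alter_column('accounts', 'is_admin', server_default=None)
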
14 | revision = '854bd902b1bc' 15 | down_revision = '0f3bc98edaa0' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | op.drop_constraint('fk_vfolder_attachment_vfolder_vfolders', 'vfolder_attachment', type_='foreignkey') 22 | op.drop_constraint('fk_vfolder_attachment_kernel_kernels', 'vfolder_attachment', type_='foreignkey') 23 | op.drop_constraint('pk_kernels', 'kernels', type_='primary') 24 | op.add_column('kernels', 25 | sa.Column('id', GUID(), 26 | server_default=sa.text('uuid_generate_v4()'), 27 | nullable=False)) 28 | op.add_column('kernels', sa.Column('role', sa.String(length=16), nullable=False, default='master')) 29 | op.create_primary_key('pk_kernels', 'kernels', ['id']) 30 | op.alter_column( 31 | 'kernels', 'sess_id', 32 | existing_type=postgresql.UUID(), 33 | type_=sa.String(length=64), 34 | nullable=True, 35 | existing_server_default=sa.text('uuid_generate_v4()')) 36 | op.create_index(op.f('ix_kernels_sess_id'), 'kernels', ['sess_id'], unique=False) 37 | op.create_index(op.f('ix_kernels_sess_id_role'), 'kernels', ['sess_id', 'role'], unique=False) 38 | op.create_foreign_key('fk_vfolder_attachment_vfolder_vfolders', 39 | 'vfolder_attachment', 'vfolders', 40 | ['vfolder'], ['id'], onupdate='CASCADE', ondelete='CASCADE') 41 | op.create_foreign_key('fk_vfolder_attachment_kernel_kernels', 42 | 'vfolder_attachment', 'kernels', 43 | ['kernel'], ['id'], onupdate='CASCADE', ondelete='CASCADE') 44 | 45 | 46 | def downgrade(): 47 | op.drop_constraint('fk_vfolder_attachment_vfolder_vfolders', 'vfolder_attachment', type_='foreignkey') 48 | op.drop_constraint('fk_vfolder_attachment_kernel_kernels', 'vfolder_attachment', type_='foreignkey') 49 | op.drop_constraint('pk_kernels', 'kernels', type_='primary') 50 | op.drop_index(op.f('ix_kernels_sess_id'), table_name='kernels') 51 | op.drop_index(op.f('ix_kernels_sess_id_role'), table_name='kernels') 52 | op.alter_column( 53 | 'kernels', 'sess_id', 54 | existing_type=sa.String(length=64), 55 | type_=postgresql.UUID(), 56 | nullable=False, 57 | existing_server_default=sa.text('uuid_generate_v4()'), 58 | postgresql_using='sess_id::uuid') 59 | op.create_primary_key('pk_kernels', 'kernels', ['sess_id']) 60 | op.drop_column('kernels', 'id') 61 | op.drop_column('kernels', 'role') 62 | op.create_foreign_key('fk_vfolder_attachment_vfolder_vfolders', 63 | 'vfolder_attachment', 'vfolders', 64 | ['vfolder'], ['id']) 65 | op.create_foreign_key('fk_vfolder_attachment_kernel_kernels', 66 | 'vfolder_attachment', 'kernels', 67 | ['kernel'], ['sess_id']) 68 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/8679d0a7e22b_add_scheduled_to_kernelstatus.py: -------------------------------------------------------------------------------- 1 | """add-scheduled-to-kernelstatus 2 | 3 | Revision ID: 8679d0a7e22b 4 | Revises: 518ecf41f567 5 | Create Date: 2021-04-01 14:24:27.885209 6 | 7 | """ 8 | import textwrap 9 | 10 | from alembic import op 11 | import sqlalchemy as sa 12 | from sqlalchemy.dialects import postgresql 13 | 14 | # revision identifiers, used by Alembic. 
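
854bd902b1bc above converts kernels.sess_id between UUID and varchar in both directions, and the downgrade needs postgresql_using='sess_id::uuid' because PostgreSQL will not infer how to turn text back into a UUID (the ALTER aborts if any row fails to parse, so it assumes clean data). The same knob covers any conversion the database cannot derive on its own; a sketch with a hypothetical orders.quantity column stored as text:

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        op.alter_column(
            'orders', 'quantity',
            existing_type=sa.String(length=16),
            type_=sa.Integer(),
            postgresql_using='quantity::integer',  # explicit cast expression
        )
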
15 | revision = '8679d0a7e22b' 16 | down_revision = '518ecf41f567' 17 | branch_labels = None 18 | depends_on = None 19 | 20 | kernelstatus_new_values = [ 21 | 'PENDING', 22 | 'SCHEDULED', # added 23 | 'PREPARING', 24 | 'BUILDING', 25 | 'PULLING', 26 | 'RUNNING', 27 | 'RESTARTING', 28 | 'RESIZING', 29 | 'SUSPENDED', 30 | 'TERMINATING', 31 | 'TERMINATED', 32 | 'ERROR', 33 | 'CANCELLED', 34 | ] 35 | kernelstatus_new = postgresql.ENUM(*kernelstatus_new_values, name='kernelstatus') 36 | 37 | kernelstatus_old_values = [ 38 | 'PENDING', 39 | 'PREPARING', 40 | 'BUILDING', 41 | 'PULLING', 42 | 'RUNNING', 43 | 'RESTARTING', 44 | 'RESIZING', 45 | 'SUSPENDED', 46 | 'TERMINATING', 47 | 'TERMINATED', 48 | 'ERROR', 49 | 'CANCELLED', 50 | ] 51 | kernelstatus_old = postgresql.ENUM(*kernelstatus_old_values, name='kernelstatus') 52 | 53 | 54 | def upgrade(): 55 | conn = op.get_bind() 56 | conn.execute('DROP INDEX IF EXISTS ix_kernels_unique_sess_token;') 57 | conn.execute('ALTER TYPE kernelstatus RENAME TO kernelstatus_old;') 58 | kernelstatus_new.create(conn) 59 | conn.execute(textwrap.dedent('''\ 60 | ALTER TABLE kernels 61 | ALTER COLUMN "status" DROP DEFAULT, 62 | ALTER COLUMN "status" TYPE kernelstatus USING "status"::text::kernelstatus, 63 | ALTER COLUMN "status" SET DEFAULT 'PENDING'::kernelstatus; 64 | DROP TYPE kernelstatus_old; 65 | ''')) 66 | # This also fixes the unique constraint columns: 67 | # (access_key, session_id) -> (access_key, session_name) 68 | op.create_index( 69 | 'ix_kernels_unique_sess_token', 'kernels', ['access_key', 'session_name'], 70 | unique=True, postgresql_where=sa.text( 71 | "status NOT IN ('TERMINATED', 'CANCELLED') and cluster_role = 'main'" 72 | )) 73 | 74 | 75 | def downgrade(): 76 | op.drop_index('ix_kernels_unique_sess_token', table_name='kernels') 77 | conn = op.get_bind() 78 | conn.execute('ALTER TYPE kernelstatus RENAME TO kernelstatus_new;') 79 | kernelstatus_old.create(conn) 80 | conn.execute(textwrap.dedent('''\ 81 | ALTER TABLE kernels 82 | ALTER COLUMN "status" DROP DEFAULT, 83 | ALTER COLUMN "status" TYPE kernelstatus USING ( 84 | CASE "status"::text 85 | WHEN 'SCHEDULED' THEN 'PREPARING' 86 | ELSE "status"::text 87 | END 88 | )::kernelstatus, 89 | ALTER COLUMN "status" SET DEFAULT 'PENDING'::kernelstatus; 90 | DROP TYPE kernelstatus_new; 91 | ''')) 92 | op.create_index( 93 | 'ix_kernels_unique_sess_token', 'kernels', ['access_key', 'session_id'], 94 | unique=True, postgresql_where=sa.text( 95 | "status != 'TERMINATED' and cluster_role = 'main'" 96 | )) 97 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/8e660aa31fe3_add_resource_presets.py: -------------------------------------------------------------------------------- 1 | """add-resource-presets 2 | 3 | Revision ID: 8e660aa31fe3 4 | Revises: 01456c812164 5 | Create Date: 2019-03-30 01:45:07.525096 6 | 7 | """ 8 | from alembic import op 9 | from decimal import Decimal 10 | import sqlalchemy as sa 11 | from ai.backend.manager.models.base import ResourceSlotColumn 12 | from ai.backend.manager.models import keypair_resource_policies 13 | from ai.backend.common.types import BinarySize, ResourceSlot 14 | 15 | 16 | # revision identifiers, used by Alembic. 17 | revision = '8e660aa31fe3' 18 | down_revision = '01456c812164' 19 | branch_labels = None 20 | depends_on = None 21 | 22 | 23 | def upgrade(): 24 | # ### commands auto generated by Alembic - please adjust! 
### 25 | op.create_table( 26 | 'resource_presets', 27 | sa.Column('name', sa.String(length=256), nullable=False), 28 | sa.Column('resource_slots', 29 | ResourceSlotColumn(), 30 | nullable=False), 31 | sa.PrimaryKeyConstraint('name', name=op.f('pk_resource_presets')) 32 | ) 33 | # Add initial fixtures for resource presets 34 | query = ''' 35 | INSERT INTO resource_presets 36 | VALUES ( 37 | 'small', 38 | '{"cpu":"1","mem":"2147483648"}'::jsonb 39 | ); 40 | INSERT INTO resource_presets 41 | VALUES ( 42 | 'small-gpu', 43 | '{"cpu":"1","mem":"2147483648","cuda.device":"1","cuda.shares":"0.5"}'::jsonb 44 | ); 45 | INSERT INTO resource_presets 46 | VALUES ( 47 | 'medium', 48 | '{"cpu":"2","mem":"4294967296"}'::jsonb 49 | ); 50 | INSERT INTO resource_presets 51 | VALUES ( 52 | 'medium-gpu', 53 | '{"cpu":"2","mem":"4294967296","cuda.device":"1","cuda.shares":"1.0"}'::jsonb 54 | ); 55 | INSERT INTO resource_presets 56 | VALUES ( 57 | 'large', 58 | '{"cpu":"4","mem":"8589934592"}'::jsonb 59 | ); 60 | INSERT INTO resource_presets 61 | VALUES ( 62 | 'large-gpu', 63 | '{"cpu":"4","mem":"8589934592","cuda.device":"2","cuda.shares":"2.0"}'::jsonb 64 | ); 65 | ''' 66 | connection = op.get_bind() 67 | connection.execute(query) 68 | 69 | query = ''' 70 | SELECT name, total_resource_slots 71 | FROM keypair_resource_policies 72 | ''' 73 | connection = op.get_bind() 74 | result = connection.execute(query) 75 | updates = [] 76 | for row in result: 77 | converted = ResourceSlot(row['total_resource_slots']) 78 | if 'mem' in converted: 79 | converted['mem'] = Decimal(BinarySize.from_str(converted['mem'])) 80 | updates.append(( 81 | row['name'], 82 | converted, 83 | )) 84 | for name, slots in updates: 85 | query = ( 86 | sa.update(keypair_resource_policies) 87 | .values(total_resource_slots=slots) 88 | .where(keypair_resource_policies.c.name == name) 89 | ) 90 | connection.execute(query) 91 | 92 | # ### end Alembic commands ### 93 | 94 | 95 | def downgrade(): 96 | # ### commands auto generated by Alembic - please adjust! ### 97 | op.drop_table('resource_presets') 98 | # ### end Alembic commands ### 99 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/911023380bc9_add_architecture_column_on_agents.py: -------------------------------------------------------------------------------- 1 | """add architecture column on agents 2 | 3 | Revision ID: 911023380bc9 4 | Revises: 015d84d5a5ef 5 | Create Date: 2022-02-16 00:54:23.261212 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 
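
8e660aa31fe3 above mixes schema work with a data migration and imports live application models (keypair_resource_policies, ResourceSlot, BinarySize) to do it. That works until the application code drifts away from what the schema looked like at this revision, at which point old migrations stop running. The defensive alternative is a migration-local table stub that freezes only the columns being touched; a sketch (the update itself is illustrative):

    import sqlalchemy as sa
    from alembic import op
    from sqlalchemy.dialects import postgresql
    from sqlalchemy.sql import column, table

    # A frozen, partial view of the table: just what this migration needs.
    policies = table(
        'keypair_resource_policies',
        column('name', sa.String),
        column('total_resource_slots', postgresql.JSONB),
    )

    def upgrade():
        op.execute(
            sa.update(policies)
            .where(policies.c.name == 'default')
            .values(total_resource_slots={'cpu': '1', 'mem': '2147483648'})
        )
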
13 | revision = '911023380bc9' 14 | down_revision = '015d84d5a5ef' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.add_column( 21 | 'agents', 22 | sa.Column('architecture', sa.String, default='x86_64')) 23 | op.execute('UPDATE agents SET architecture=\'x86_64\'') 24 | op.alter_column('agents', 'architecture', nullable=False) 25 | op.add_column( 26 | 'kernels', 27 | sa.Column('architecture', sa.String, default='x86_64')) 28 | op.execute('UPDATE kernels SET architecture=\'x86_64\'') 29 | op.alter_column('kernels', 'architecture', nullable=False) 30 | 31 | 32 | def downgrade(): 33 | op.drop_column('kernels', 'architecture') 34 | op.drop_column('agents', 'architecture') 35 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/93e9d31d40bf_agent_add_region.py: -------------------------------------------------------------------------------- 1 | """agent_add_region 2 | 3 | Revision ID: 93e9d31d40bf 4 | Revises: 80176413d8aa 5 | Create Date: 2017-09-28 15:01:38.944738 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = '93e9d31d40bf' 13 | down_revision = '80176413d8aa' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | # ### commands auto generated by Alembic - please adjust! ### 20 | op.add_column('agents', sa.Column('region', sa.String(length=64), 21 | nullable=False, 22 | server_default='amazon/ap-northeast-2')) 23 | op.create_index(op.f('ix_agents_region'), 'agents', ['region'], unique=False) 24 | op.alter_column( 25 | 'keypairs', 'is_admin', 26 | existing_type=sa.BOOLEAN(), 27 | nullable=True, 28 | existing_server_default=sa.text('false')) 29 | # ### end Alembic commands ### 30 | 31 | 32 | def downgrade(): 33 | # ### commands auto generated by Alembic - please adjust! ### 34 | op.alter_column( 35 | 'keypairs', 'is_admin', 36 | existing_type=sa.BOOLEAN(), 37 | nullable=False, 38 | existing_server_default=sa.text('false')) 39 | op.drop_index(op.f('ix_agents_region'), table_name='agents') 40 | op.drop_column('agents', 'region') 41 | # ### end Alembic commands ### 42 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/97f6c80c8aa5_merge.py: -------------------------------------------------------------------------------- 1 | """merge 2 | 3 | Revision ID: 97f6c80c8aa5 4 | Revises: e421c02cf9e4, 25e903510fa1 5 | Create Date: 2020-09-28 18:00:35.664882 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '97f6c80c8aa5' 14 | down_revision = ('e421c02cf9e4', '25e903510fa1') 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | pass 21 | 22 | 23 | def downgrade(): 24 | pass 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/9bd986a75a2a_allow_kernels_scaling_group_nullable.py: -------------------------------------------------------------------------------- 1 | """allow_kernels_scaling_group_nullable 2 | 3 | Revision ID: 9bd986a75a2a 4 | Revises: 513164749de4 5 | Create Date: 2019-09-20 14:39:57.761791 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic. 
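
97f6c80c8aa5 above is a merge revision, the mechanism this changelog uses whenever two branches add migrations on top of the same parent (see also 65c4a109bbc7, a7ca9f175d5f, and d643752544de). Running `alembic merge -m "merge" heads` generates an empty revision whose down_revision is a tuple of both heads, restoring a single head to the graph. Structurally it is nothing more than:

    # Sketch of a merge revision; the ids are placeholders.
    revision = 'deadbeefcafe'
    down_revision = ('head_of_branch_a', 'head_of_branch_b')
    branch_labels = None
    depends_on = None

    def upgrade():
        pass  # nothing to do; this node only joins the two branches

    def downgrade():
        pass
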
13 | revision = '9bd986a75a2a' 14 | down_revision = '513164749de4' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.alter_column('kernels', 'scaling_group', 22 | existing_type=sa.VARCHAR(length=64), 23 | nullable=True, 24 | existing_server_default=sa.text("'default'::character varying")) 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | op.alter_column('kernels', 'scaling_group', 31 | existing_type=sa.VARCHAR(length=64), 32 | nullable=False, 33 | existing_server_default=sa.text("'default'::character varying")) 34 | # ### end Alembic commands ### 35 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/9c89b9011872_add_attached_devices_field_in_kernels.py: -------------------------------------------------------------------------------- 1 | """add_attached_devices_field_in_kernels 2 | 3 | Revision ID: 9c89b9011872 4 | Revises: 2a82340fa30e 5 | Create Date: 2019-08-04 16:38:52.781990 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = '9c89b9011872' 14 | down_revision = '2a82340fa30e' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('kernels', sa.Column('attached_devices', 22 | postgresql.JSONB(astext_type=sa.Text()), 23 | nullable=True)) 24 | # ### end Alembic commands ### 25 | 26 | 27 | def downgrade(): 28 | # ### commands auto generated by Alembic - please adjust! ### 29 | op.drop_column('kernels', 'attached_devices') 30 | # ### end Alembic commands ### 31 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/9cd61b1ae70d_add_scheduable_field_to_agents.py: -------------------------------------------------------------------------------- 1 | """add_scheduable_field_to_agents 2 | 3 | Revision ID: 9cd61b1ae70d 4 | Revises: e35332f8d23d 5 | Create Date: 2020-07-01 15:02:13.979828 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.sql.expression import true 11 | from sqlalchemy.dialects import postgresql 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '9cd61b1ae70d' 15 | down_revision = 'e35332f8d23d' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | # ### commands auto generated by Alembic - please adjust! ### 22 | op.add_column('agents', sa.Column( 23 | 'schedulable', sa.Boolean(), 24 | server_default=true(), 25 | default=True, 26 | nullable=False, 27 | )) 28 | # ### end Alembic commands ### 29 | 30 | 31 | def downgrade(): 32 | # ### commands auto generated by Alembic - please adjust! 
### 33 | op.drop_column('agents', 'schedulable') 34 | # ### end Alembic commands ### 35 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/a1fd4e7b7782_enumerate_vfolder_perms.py: -------------------------------------------------------------------------------- 1 | """enumerate_vfolder_perms 2 | 3 | Revision ID: a1fd4e7b7782 4 | Revises: f9971fbb34d9 5 | Create Date: 2018-09-05 16:51:49.973195 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from ai.backend.manager.models import VFolderPermission 11 | from sqlalchemy.dialects import postgresql 12 | 13 | 14 | # revision identifiers, used by Alembic. 15 | revision = 'a1fd4e7b7782' 16 | down_revision = 'f9971fbb34d9' 17 | branch_labels = None 18 | depends_on = None 19 | 20 | # NOTE: VFolderPermission is EnumValueType 21 | vfperm_choices = list(map(lambda v: v.value, VFolderPermission)) 22 | vfolderpermission = postgresql.ENUM( 23 | *vfperm_choices, 24 | name='vfolderpermission', 25 | ) 26 | 27 | 28 | def upgrade(): 29 | # ### commands auto generated by Alembic - please adjust! ### 30 | vfolderpermission.create(op.get_bind()) 31 | op.alter_column('vfolder_invitations', column_name='permission', 32 | type_=sa.Enum(*vfperm_choices, name='vfolderpermission'), 33 | postgresql_using='permission::vfolderpermission') 34 | op.alter_column('vfolder_permissions', column_name='permission', 35 | type_=sa.Enum(*vfperm_choices, name='vfolderpermission'), 36 | postgresql_using='permission::vfolderpermission') 37 | # ### end Alembic commands ### 38 | 39 | 40 | def downgrade(): 41 | # ### commands auto generated by Alembic - please adjust! ### 42 | op.alter_column('vfolder_invitations', column_name='permission', 43 | type_=sa.String(length=2), 44 | postgresql_using='permission::text::vfolderpermission') 45 | op.alter_column('vfolder_permissions', column_name='permission', 46 | type_=sa.String(length=2), 47 | postgresql_using='permission::text::vfolderpermission') 48 | vfolderpermission.drop(op.get_bind()) 49 | # ### end Alembic commands ### 50 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/a7ca9f175d5f_merge.py: -------------------------------------------------------------------------------- 1 | """merge 2 | 3 | Revision ID: a7ca9f175d5f 4 | Revises: d59ff89e7514, 11146ba02235 5 | Create Date: 2022-03-28 15:25:22.965843 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'a7ca9f175d5f' 14 | down_revision = ('d59ff89e7514', '11146ba02235') 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | pass 21 | 22 | 23 | def downgrade(): 24 | pass 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/babc74594aa6_add_partial_index_to_kernels.py: -------------------------------------------------------------------------------- 1 | """add-partial-index-to-kernels 2 | 3 | Revision ID: babc74594aa6 4 | Revises: c3e74dcf1808 5 | Create Date: 2018-01-04 14:33:39.173062 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 
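
babc74594aa6, whose header opens above, introduces the partial unique index that later revisions (513164749de4, 548cc8aa49c8, 8679d0a7e22b) keep reworking: with postgresql_where, uniqueness is enforced only over rows matching the predicate, so any number of terminated kernels may share a session token while at most one live master can hold it. The emitted SQL is plain CREATE UNIQUE INDEX ... WHERE ...; a minimal sketch on a hypothetical sessions table:

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        op.create_index(
            'ix_sessions_one_live_per_key', 'sessions', ['access_key'],
            unique=True,
            # Rows matching the predicate participate; terminated rows are exempt.
            postgresql_where=sa.text("status != 'TERMINATED'"),
        )
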
13 | revision = 'babc74594aa6' 14 | down_revision = 'c3e74dcf1808' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.create_index( 21 | op.f('ix_kernels_unique_sess_token'), 22 | 'kernels', ['access_key', 'sess_id'], 23 | unique=True, 24 | postgresql_where=sa.text( 25 | "kernels.status != 'TERMINATED' and " 26 | "kernels.role = 'master'"), 27 | ) 28 | 29 | 30 | def downgrade(): 31 | op.drop_index(op.f('ix_kernels_unique_sess_token'), table_name='kernels') 32 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/bae1a7326e8a_add_domain_model.py: -------------------------------------------------------------------------------- 1 | """add domain model 2 | 3 | Revision ID: bae1a7326e8a 4 | Revises: 819c2b3830a9 5 | Create Date: 2019-05-08 08:29:29.588817 6 | 7 | """ 8 | import textwrap 9 | from alembic import op 10 | import sqlalchemy as sa 11 | from sqlalchemy.dialects import postgresql 12 | from ai.backend.manager.models.base import ( 13 | convention, ResourceSlotColumn, 14 | ) 15 | 16 | 17 | # revision identifiers, used by Alembic. 18 | revision = 'bae1a7326e8a' 19 | down_revision = '819c2b3830a9' 20 | branch_labels = None 21 | depends_on = None 22 | 23 | 24 | def upgrade(): 25 | metadata = sa.MetaData(naming_convention=convention) 26 | 27 | # partial table to insert "default" domain 28 | domains = sa.Table( 29 | 'domains', metadata, 30 | sa.Column('name', sa.String(length=64), primary_key=True), 31 | sa.Column('description', sa.String(length=512)), 32 | sa.Column('is_active', sa.Boolean, default=True), 33 | sa.Column('total_resource_slots', ResourceSlotColumn(), nullable=False), 34 | ) 35 | 36 | op.create_table( 37 | 'domains', 38 | sa.Column('name', sa.String(length=64), nullable=False), 39 | sa.Column('description', sa.String(length=512), nullable=True), 40 | sa.Column('is_active', sa.Boolean(), nullable=True), 41 | sa.Column('created_at', sa.DateTime(timezone=True), 42 | server_default=sa.text('now()'), nullable=True), 43 | sa.Column('modified_at', sa.DateTime(timezone=True), 44 | server_default=sa.text('now()'), nullable=True), 45 | sa.Column('total_resource_slots', 46 | postgresql.JSONB(astext_type=sa.Text()), nullable=False), 47 | sa.PrimaryKeyConstraint('name', name=op.f('pk_domains')) 48 | ) 49 | op.add_column('users', sa.Column('domain_name', sa.String(length=64), nullable=True)) 50 | op.create_index(op.f('ix_users_domain_name'), 'users', ['domain_name'], unique=False) 51 | op.create_foreign_key(op.f('fk_users_domain_name_domains'), 52 | 'users', 'domains', ['domain_name'], ['name']) 53 | 54 | # Fill in users' domain_name column with default domain. 55 | # Create default domain if not exist. 56 | connection = op.get_bind() 57 | query = sa.select([domains]).select_from(domains).where(domains.c.name == 'default') 58 | results = connection.execute(query).first() 59 | if results is None: 60 | query = (sa.insert(domains) 61 | .values(name='default', 62 | description='Default domain', 63 | is_active=True, 64 | total_resource_slots='{}')) 65 | query = textwrap.dedent('''\ 66 | INSERT INTO domains (name, description, is_active, total_resource_slots) 67 | VALUES ('default', 'Default domain', True, '{}'::jsonb);''') 68 | connection.execute(query) 69 | 70 | # Fill in users' domain_name field. 
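    # NOTE: the sa.insert(domains) expression built a few lines up is dead
    # code; it is immediately overwritten by the textwrap.dedent() string,
    # so only the raw-SQL INSERT ever executes. The partial `domains` table
    # defined at the top of upgrade() is still required for the SELECT above.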
71 | query = "UPDATE users SET domain_name = 'default' WHERE email != 'admin@lablup.com';" 72 | connection.execute(query) 73 | 74 | 75 | def downgrade(): 76 | op.drop_constraint(op.f('fk_users_domain_name_domains'), 'users', type_='foreignkey') 77 | op.drop_index(op.f('ix_users_domain_name'), table_name='users') 78 | op.drop_column('users', 'domain_name') 79 | op.drop_table('domains') 80 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/bf4bae8f942e_add_kernel_host.py: -------------------------------------------------------------------------------- 1 | """add-kernel-host 2 | 3 | Revision ID: bf4bae8f942e 4 | Revises: babc74594aa6 5 | Create Date: 2018-02-02 11:29:38.752576 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'bf4bae8f942e' 14 | down_revision = 'babc74594aa6' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.add_column('kernels', sa.Column('kernel_host', sa.String(length=128), nullable=True)) 21 | 22 | 23 | def downgrade(): 24 | op.drop_column('kernels', 'kernel_host') 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/c092dabf3ee5_add_batch_session.py: -------------------------------------------------------------------------------- 1 | """add-batch-session 2 | 3 | Revision ID: c092dabf3ee5 4 | Revises: c1409ad0e8da 5 | Create Date: 2019-08-01 15:18:20.306290 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'c092dabf3ee5' 14 | down_revision = '48ab2dfefba9' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | sessiontypes = postgresql.ENUM( 20 | 'INTERACTIVE', 'BATCH', 21 | name='sessiontypes') 22 | 23 | 24 | def upgrade(): 25 | # ### commands auto generated by Alembic - please adjust! ### 26 | op.add_column('kernels', sa.Column('startup_command', sa.Text(), nullable=True)) 27 | op.drop_column('kernels', 'type') 28 | op.add_column('kernels', 29 | sa.Column('sess_type', sa.Enum('INTERACTIVE', 'BATCH', name='sessiontypes'), 30 | nullable=False, server_default='INTERACTIVE')) 31 | op.create_index(op.f('ix_kernels_sess_type'), 'kernels', ['sess_type'], unique=False) 32 | # ### end Alembic commands ### 33 | 34 | 35 | def downgrade(): 36 | # ### commands auto generated by Alembic - please adjust! ### 37 | op.drop_index(op.f('ix_kernels_sess_type'), table_name='kernels') 38 | op.drop_column('kernels', 'sess_type') 39 | op.add_column('kernels', 40 | sa.Column('type', sa.Enum('INTERACTIVE', 'BATCH', name='sessiontypes'), 41 | nullable=False, server_default='INTERACTIVE')) 42 | op.drop_column('kernels', 'startup_command') 43 | # ### end Alembic commands ### 44 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/c1409ad0e8da_.py: -------------------------------------------------------------------------------- 1 | """empty message 2 | 3 | Revision ID: c1409ad0e8da 4 | Revises: 22e52d03fc61, 9a91532c8534 5 | Create Date: 2019-07-29 20:18:52.291350 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 
13 | revision = 'c1409ad0e8da' 14 | down_revision = ('22e52d03fc61', '9a91532c8534') 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | pass 21 | 22 | 23 | def downgrade(): 24 | pass 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/c3e74dcf1808_add_environ_to_kernels.py: -------------------------------------------------------------------------------- 1 | """add_environ_to_kernels 2 | 3 | Revision ID: c3e74dcf1808 4 | Revises: d52bf5ec9ef3 5 | Create Date: 2017-11-15 11:31:54.083566 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = 'c3e74dcf1808' 13 | down_revision = 'd52bf5ec9ef3' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | op.add_column('kernels', sa.Column('environ', sa.ARRAY(sa.String()), nullable=True)) 20 | 21 | 22 | def downgrade(): 23 | op.drop_column('kernels', 'environ') 24 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/c401d78cc7b9_add_allowed_vfolder_hosts_to_domain_and_.py: -------------------------------------------------------------------------------- 1 | """add_allowed_vfolder_hosts_to_domain_and_group 2 | 3 | Revision ID: c401d78cc7b9 4 | Revises: 3cf19d906e71 5 | Create Date: 2019-06-26 11:34:55.426107 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'c401d78cc7b9' 14 | down_revision = '3cf19d906e71' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('domains', sa.Column('allowed_vfolder_hosts', 22 | postgresql.ARRAY(sa.String()), nullable=True)) 23 | op.add_column('groups', sa.Column('allowed_vfolder_hosts', 24 | postgresql.ARRAY(sa.String()), nullable=True)) 25 | # ### end Alembic commands ### 26 | 27 | print('\nSet domain and group\'s allowed_vfolder_hosts with empty array.') 28 | connection = op.get_bind() 29 | query = ("UPDATE domains SET allowed_vfolder_hosts = '{}';") 30 | connection.execute(query) 31 | query = ("UPDATE groups SET allowed_vfolder_hosts = '{}';") 32 | connection.execute(query) 33 | 34 | op.alter_column('domains', column_name='allowed_vfolder_hosts', nullable=False) 35 | op.alter_column('groups', column_name='allowed_vfolder_hosts', nullable=False) 36 | 37 | 38 | def downgrade(): 39 | # ### commands auto generated by Alembic - please adjust! ### 40 | op.drop_column('groups', 'allowed_vfolder_hosts') 41 | op.drop_column('domains', 'allowed_vfolder_hosts') 42 | # ### end Alembic commands ### 43 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/c481d3dc6c7d_add_shared_memory_to_resource_presets.py: -------------------------------------------------------------------------------- 1 | """add_shared_memory_to_resource_presets 2 | 3 | Revision ID: c481d3dc6c7d 4 | Revises: f5530eccf202 5 | Create Date: 2020-04-20 14:10:35.591063 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 
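
c401d78cc7b9 above takes three steps (add nullable, backfill '{}', tighten to NOT NULL) to introduce its array columns. PostgreSQL can collapse that into a single statement by giving the column a server-side empty-array default, at the cost of leaving the default attached afterwards; a sketch on a hypothetical projects table:

    import sqlalchemy as sa
    from alembic import op
    from sqlalchemy.dialects import postgresql

    def upgrade():
        op.add_column('projects', sa.Column(
            'allowed_hosts', postgresql.ARRAY(sa.String()),
            nullable=False,
            server_default=sa.text("'{}'::varchar[]"),  # empty-array literal
        ))
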
13 | revision = 'c481d3dc6c7d' 14 | down_revision = 'f5530eccf202' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('resource_presets', sa.Column('shared_memory', sa.BigInteger(), nullable=True)) 22 | # ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | # ### commands auto generated by Alembic - please adjust! ### 27 | op.drop_column('resource_presets', 'shared_memory') 28 | # ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/ce209920f654_create_task_template_table.py: -------------------------------------------------------------------------------- 1 | """Create task_template table 2 | 3 | Revision ID: ce209920f654 4 | Revises: 65c4a109bbc7 5 | Create Date: 2019-12-16 13:39:13.210996 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql as pgsql 11 | from ai.backend.manager.models.base import GUID, IDColumn, ForeignKeyIDColumn 12 | 13 | 14 | # revision identifiers, used by Alembic. 15 | revision = 'ce209920f654' 16 | down_revision = '65c4a109bbc7' 17 | branch_labels = None 18 | depends_on = None 19 | 20 | 21 | def upgrade(): 22 | op.create_table( 23 | 'session_templates', 24 | IDColumn('id'), 25 | sa.Column('created_at', sa.DateTime(timezone=True), 26 | server_default=sa.func.now(), index=True), 27 | sa.Column('is_active', sa.Boolean, default=True), 28 | sa.Column('type', 29 | sa.Enum('TASK', 'CLUSTER', name='templatetypes'), 30 | nullable=False, 31 | server_default='TASK' 32 | ), 33 | sa.Column('domain_name', sa.String(length=64), sa.ForeignKey('domains.name'), nullable=False), 34 | sa.Column('group_id', GUID, sa.ForeignKey('groups.id'), nullable=True), 35 | sa.Column('user_uuid', GUID, sa.ForeignKey('users.uuid'), nullable=False), 36 | 37 | sa.Column('name', sa.String(length=128), nullable=True), 38 | sa.Column('template', sa.String(length=16 * 1024), nullable=False) 39 | ) 40 | op.add_column( 41 | 'kernels', 42 | sa.Column('bootstrap_script', sa.String(length=4 * 1024), nullable=True) 43 | ) 44 | 45 | 46 | def downgrade(): 47 | op.drop_table('session_templates') 48 | op.execute('DROP TYPE templatetypes') 49 | op.drop_column('kernels', 'bootstrap_script') 50 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/d2aafa234374_create_error_logs_table.py: -------------------------------------------------------------------------------- 1 | """Create error_logs table 2 | 3 | Revision ID: d2aafa234374 4 | Revises: 3bb80d1887d6 5 | Create Date: 2020-02-12 13:55:12.450743 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | from ai.backend.manager.models.base import IDColumn, GUID 13 | 14 | # revision identifiers, used by Alembic.
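
ce209920f654 above (and d2aafa234374, whose header follows) ends its downgrade with a raw DROP TYPE, and that is deliberate: create_table() implicitly issues CREATE TYPE for an inline sa.Enum, but op.drop_table() emits only DROP TABLE and leaves the type behind, so without the explicit statement a later re-upgrade would fail with 'type already exists'. The pairing, distilled with a made-up table and enum:

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        # create_table() also runs: CREATE TYPE moodtypes AS ENUM (...)
        op.create_table(
            'entries',
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('mood', sa.Enum('HAPPY', 'SAD', name='moodtypes'), nullable=False),
        )

    def downgrade():
        op.drop_table('entries')
        op.execute('DROP TYPE moodtypes')  # drop_table() does not remove it
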
15 | revision = 'd2aafa234374' 16 | down_revision = '3bb80d1887d6' 17 | branch_labels = None 18 | depends_on = None 19 | 20 | 21 | def upgrade(): 22 | op.create_table( 23 | 'error_logs', 24 | IDColumn(), 25 | sa.Column('created_at', sa.DateTime(timezone=True), 26 | server_default=sa.func.now(), index=True), 27 | sa.Column('severity', sa.Enum('critical', 'error', 'warning', 'info', 'debug', name='errorlog_severity'), 28 | index=True), 29 | sa.Column('source', sa.String), 30 | sa.Column('user', GUID, sa.ForeignKey('users.uuid'), nullable=True, index=True), 31 | sa.Column('is_read', sa.Boolean, default=False, index=True), 32 | sa.Column('is_cleared', sa.Boolean, default=False, index=True), 33 | sa.Column('message', sa.Text), 34 | sa.Column('context_lang', sa.String), 35 | sa.Column('context_env', postgresql.JSONB()), 36 | sa.Column('request_url', sa.String, nullable=True), 37 | sa.Column('request_status', sa.Integer, nullable=True), 38 | sa.Column('traceback', sa.Text, nullable=True), 39 | ) 40 | 41 | 42 | def downgrade(): 43 | op.drop_table('error_logs') 44 | op.execute('DROP TYPE errorlog_severity') 45 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/d452bacd085c_add_mount_map_column_to_kernel.py: -------------------------------------------------------------------------------- 1 | """Add mount_map column to kernel 2 | 3 | Revision ID: d452bacd085c 4 | Revises: 4b7b650bc30e 5 | Create Date: 2019-11-19 14:43:12.728678 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql as pgsql 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'd452bacd085c' 15 | down_revision = '4b7b650bc30e' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | op.add_column('kernels', sa.Column('mount_map', pgsql.JSONB(), nullable=True, default={})) 22 | 23 | 24 | def downgrade(): 25 | op.drop_column('kernels', 'mount_map') 26 | 27 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/d463fc5d6109_add_clone_allowed_to_vfolders.py: -------------------------------------------------------------------------------- 1 | """add_clone_allowed_to_vfolders 2 | 3 | Revision ID: d463fc5d6109 4 | Revises: 0d553d59f369 5 | Create Date: 2020-09-01 16:57:55.339619 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'd463fc5d6109' 14 | down_revision = '0d553d59f369' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('vfolders', sa.Column('clone_allowed', sa.Boolean(), nullable=True)) 22 | op.execute("UPDATE vfolders SET clone_allowed = false") 23 | op.alter_column('vfolders', 'clone_allowed', nullable=False) 24 | # ### end Alembic commands ### 25 | 26 | 27 | def downgrade(): 28 | # ### commands auto generated by Alembic - please adjust! 
### 29 | op.drop_column('vfolders', 'clone_allowed') 30 | # ### end Alembic commands ### 31 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/d52bf5ec9ef3_convert_cpu_gpu_slots_to_float.py: -------------------------------------------------------------------------------- 1 | """convert_cpu_gpu_slots_to_float 2 | 3 | Revision ID: d52bf5ec9ef3 4 | Revises: 4545f5c948b3 5 | Create Date: 2017-11-09 14:30:20.737908 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = 'd52bf5ec9ef3' 13 | down_revision = '4545f5c948b3' 14 | branch_labels = None 15 | depends_on = None 16 | 17 | 18 | def upgrade(): 19 | op.alter_column('agents', 'mem_slots', 20 | existing_type=sa.Integer(), 21 | type_=sa.BigInteger()) 22 | op.alter_column('agents', 'cpu_slots', 23 | existing_type=sa.Integer(), 24 | type_=sa.Float()) 25 | op.alter_column('agents', 'gpu_slots', 26 | existing_type=sa.Integer(), 27 | type_=sa.Float()) 28 | op.alter_column('agents', 'used_mem_slots', 29 | existing_type=sa.Integer(), 30 | type_=sa.BigInteger()) 31 | op.alter_column('agents', 'used_cpu_slots', 32 | existing_type=sa.Integer(), 33 | type_=sa.Float()) 34 | op.alter_column('agents', 'used_gpu_slots', 35 | existing_type=sa.Integer(), 36 | type_=sa.Float()) 37 | op.alter_column('kernels', 'mem_slot', 38 | existing_type=sa.Integer(), 39 | type_=sa.BigInteger()) 40 | op.alter_column('kernels', 'cpu_slot', 41 | existing_type=sa.Integer(), 42 | type_=sa.Float()) 43 | op.alter_column('kernels', 'gpu_slot', 44 | existing_type=sa.Integer(), 45 | type_=sa.Float()) 46 | 47 | 48 | def downgrade(): 49 | op.alter_column('agents', 'mem_slots', 50 | existing_type=sa.BigInteger(), 51 | type_=sa.Integer()) 52 | op.alter_column('agents', 'cpu_slots', 53 | existing_type=sa.Float(), 54 | type_=sa.Integer()) 55 | op.alter_column('agents', 'gpu_slots', 56 | existing_type=sa.Float(), 57 | type_=sa.Integer()) 58 | op.alter_column('agents', 'used_mem_slots', 59 | existing_type=sa.BigInteger(), 60 | type_=sa.Integer()) 61 | op.alter_column('agents', 'used_cpu_slots', 62 | existing_type=sa.Float(), 63 | type_=sa.Integer()) 64 | op.alter_column('agents', 'used_gpu_slots', 65 | existing_type=sa.Float(), 66 | type_=sa.Integer()) 67 | op.alter_column('kernels', 'mem_slot', 68 | existing_type=sa.BigInteger(), 69 | type_=sa.Integer()) 70 | op.alter_column('kernels', 'cpu_slot', 71 | existing_type=sa.Float(), 72 | type_=sa.Integer()) 73 | op.alter_column('kernels', 'gpu_slot', 74 | existing_type=sa.Float(), 75 | type_=sa.Integer()) 76 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/d582942886ad_add_tag_to_kernels.py: -------------------------------------------------------------------------------- 1 | """add tag to kernels 2 | 3 | Revision ID: d582942886ad 4 | Revises: a1fd4e7b7782 5 | Create Date: 2018-10-25 10:51:39.448309 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'd582942886ad' 14 | down_revision = 'a1fd4e7b7782' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! 
### 21 | op.add_column('kernels', sa.Column('tag', sa.String(length=64), nullable=True)) 22 | # ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | # ### commands auto generated by Alembic - please adjust! ### 27 | op.drop_column('kernels', 'tag') 28 | # ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/d59ff89e7514_remove_keypair_concurrency_used.py: -------------------------------------------------------------------------------- 1 | """remove_keypair_concurrency_used 2 | 3 | Revision ID: d59ff89e7514 4 | Revises: 0f7a4b643940 5 | Create Date: 2022-03-21 16:43:29.899251 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'd59ff89e7514' 14 | down_revision = '0f7a4b643940' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.drop_column('keypairs', 'concurrency_used') 21 | 22 | 23 | def downgrade(): 24 | op.add_column('keypairs', sa.Column( 25 | # server_default must be a str or SQL clause (an int raises ArgumentError). 26 | 'concurrency_used', sa.Integer, nullable=True, default=0, server_default='0', 27 | )) 28 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/d643752544de_.py: -------------------------------------------------------------------------------- 1 | """Merge 51dd and d2aa 2 | 3 | Revision ID: d643752544de 4 | Revises: 51dddd79aa21, d2aafa234374 5 | Create Date: 2020-03-09 12:04:27.013567 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'd643752544de' 14 | down_revision = ('51dddd79aa21', 'd2aafa234374') 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | pass 21 | 22 | 23 | def downgrade(): 24 | pass 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/d727b5da20e6_add_callback_url_to_kernels.py: -------------------------------------------------------------------------------- 1 | """add-callback_url-to-kernels 2 | 3 | Revision ID: d727b5da20e6 4 | Revises: a7ca9f175d5f 5 | Create Date: 2022-03-31 07:22:28.426046 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | from ai.backend.manager.models.base import URLColumn 12 | 13 | 14 | # revision identifiers, used by Alembic. 15 | revision = 'd727b5da20e6' 16 | down_revision = 'a7ca9f175d5f' 17 | branch_labels = None 18 | depends_on = None 19 | 20 | 21 | def upgrade(): 22 | op.add_column('kernels', sa.Column('callback_url', URLColumn(), nullable=True)) 23 | 24 | 25 | def downgrade(): 26 | op.drop_column('kernels', 'callback_url') 27 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/da24ff520049_add_starts_at_field_into_kernels.py: -------------------------------------------------------------------------------- 1 | """add_startsat_field_into_kernels 2 | 3 | Revision ID: da24ff520049 4 | Revises: 529113b08c2c 5 | Create Date: 2020-06-18 20:47:22.152831 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | 12 | # revision identifiers, used by Alembic.
13 | revision = 'da24ff520049' 14 | down_revision = '529113b08c2c' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('kernels', sa.Column('starts_at', sa.DateTime(timezone=True), nullable=True)) 22 | # ### end Alembic commands ### 23 | 24 | 25 | def downgrade(): 26 | # ### commands auto generated by Alembic - please adjust! ### 27 | op.drop_column('kernels', 'starts_at') 28 | # ### end Alembic commands ### 29 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/dc9b66466e43_remove_clusterized.py: -------------------------------------------------------------------------------- 1 | """remove-clusterized 2 | 3 | Revision ID: dc9b66466e43 4 | Revises: 06184d82a211 5 | Create Date: 2020-12-25 04:45:20.245137 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'dc9b66466e43' 14 | down_revision = '06184d82a211' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.drop_column('agents', 'clusterized') 21 | 22 | 23 | def downgrade(): 24 | op.add_column('agents', sa.Column('clusterized', sa.BOOLEAN(), autoincrement=False, nullable=True)) 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/e18ed5fcfedf_add_superadmin_role_for_user.py: -------------------------------------------------------------------------------- 1 | """add superadmin role for user 2 | 3 | Revision ID: e18ed5fcfedf 4 | Revises: c5e4e764f9e3 5 | Create Date: 2019-05-29 23:17:17.762968 6 | 7 | """ 8 | import textwrap 9 | 10 | from alembic import op 11 | from ai.backend.manager.models import UserRole 12 | 13 | 14 | # revision identifiers, used by Alembic. 15 | revision = 'e18ed5fcfedf' 16 | down_revision = 'c5e4e764f9e3' 17 | branch_labels = None 18 | depends_on = None 19 | 20 | 21 | def upgrade(): 22 | # ### commands auto generated by Alembic - please adjust! ### 23 | pass 24 | # ### end Alembic commands ### 25 | 26 | # Add superadmin to user role choices. 27 | userrole_choices = list(map(lambda v: v.value, UserRole)) 28 | assert 'superadmin' in userrole_choices, 'superadmin in UserRole is required!' 29 | 30 | conn = op.get_bind() 31 | conn.execute('ALTER TYPE userrole RENAME TO userrole__;') 32 | conn.execute('CREATE TYPE userrole as enum (%s)' % ("'" + "','".join(userrole_choices) + "'")) 33 | conn.execute(textwrap.dedent('''\ 34 | ALTER TABLE users 35 | ALTER COLUMN role TYPE userrole USING role::text::userrole; 36 | ''')) 37 | conn.execute('DROP TYPE userrole__;') 38 | 39 | # Set admin@lablup.com's role as superadmin. 40 | # Also, set admin@lablup.com's domain to default. 41 | # 42 | # We originally treated superadmin as an admin user not associated with any domain, 43 | # but that broke code paths for superadmin because domain_name must not be null. 44 | # So the policy is changed to a simple superadmin role, and a superadmin can have 45 | # a domain and groups as well.
46 | query = "SELECT uuid FROM users where email = 'admin@lablup.com';" 47 | result = conn.execute(query).first() 48 | uuid = result.uuid if hasattr(result, 'uuid') else None 49 | if uuid is not None:  # update only when the admin@lablup.com user exists 50 | query = textwrap.dedent('''\ 51 | UPDATE users SET domain_name = 'default', role = 'superadmin' 52 | WHERE email = 'admin@lablup.com'; 53 | ''') 54 | conn.execute(query) 55 | 56 | 57 | def downgrade(): 58 | # ### commands auto generated by Alembic - please adjust! ### 59 | pass 60 | # ### end Alembic commands ### 61 | 62 | userrole_choices = list(map(lambda v: v.value, UserRole)) 63 | if 'superadmin' in userrole_choices: 64 | userrole_choices.remove('superadmin') 65 | conn = op.get_bind() 66 | 67 | # First, change all superadmin roles to admin. 68 | query = textwrap.dedent("UPDATE users SET role = 'admin' WHERE role = 'superadmin';") 69 | conn.execute(query) 70 | 71 | # Remove superadmin from user role choices. 72 | conn.execute('ALTER TYPE userrole RENAME TO userrole___;') 73 | conn.execute('CREATE TYPE userrole as enum (%s)' % ("'" + "','".join(userrole_choices) + "'")) 74 | conn.execute(textwrap.dedent('''\ 75 | ALTER TABLE users 76 | ALTER COLUMN role TYPE userrole USING role::text::userrole; 77 | ''')) 78 | conn.execute('DROP TYPE userrole___;') 79 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/e35332f8d23d_add_modified_at_to_users_and_kernels.py: -------------------------------------------------------------------------------- 1 | """add_modified_at_to_users_and_kernels 2 | 3 | Revision ID: e35332f8d23d 4 | Revises: da24ff520049 5 | Create Date: 2020-07-01 14:02:11.022032 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from sqlalchemy.dialects import postgresql 11 | from sqlalchemy.sql.expression import bindparam 12 | 13 | from ai.backend.manager.models.base import convention, IDColumn 14 | 15 | # revision identifiers, used by Alembic. 16 | revision = 'e35332f8d23d' 17 | down_revision = 'da24ff520049' 18 | branch_labels = None 19 | depends_on = None 20 | 21 | 22 | def upgrade(): 23 | metadata = sa.MetaData(naming_convention=convention) 24 | # partial table definitions to be preserved and referred to 25 | users = sa.Table( 26 | 'users', metadata, 27 | IDColumn('uuid'), 28 | sa.Column('created_at', sa.DateTime(timezone=True), 29 | server_default=sa.func.now()), 30 | sa.Column('modified_at', sa.DateTime(timezone=True), 31 | server_default=sa.func.now(), onupdate=sa.func.current_timestamp()), 32 | ) 33 | keypairs = sa.Table( 34 | 'keypairs', metadata, 35 | sa.Column('access_key', sa.String(length=20), primary_key=True), 36 | sa.Column('created_at', sa.DateTime(timezone=True), 37 | server_default=sa.func.now()), 38 | sa.Column('modified_at', sa.DateTime(timezone=True), 39 | server_default=sa.func.now(), onupdate=sa.func.current_timestamp()), 40 | ) 41 | 42 | # ### commands auto generated by Alembic - please adjust! ### 43 | op.add_column('keypairs', sa.Column('modified_at', sa.DateTime(timezone=True), 44 | server_default=sa.text('now()'), nullable=True)) 45 | op.add_column('users', sa.Column('modified_at', sa.DateTime(timezone=True), 46 | server_default=sa.text('now()'), nullable=True)) 47 | # ### end Alembic commands ### 48 | 49 | conn = op.get_bind() 50 | 51 | # Set each user's modified_at to the value of created_at.
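51a | # The rows are batched into a single UPDATE statement: sa.update() is compiled once 51b | # with bindparam() placeholders, and conn.execute(query, updates) runs it in an 51c | # executemany style; the 'b_uuid' bind name is prefixed to avoid clashing with the 51d | # 'uuid' column referenced in the WHERE clause.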
52 | query = sa.select([users.c.uuid, users.c.created_at]).select_from(users) 53 | updates = [] 54 | for row in conn.execute(query).fetchall(): 55 | updates.append({'b_uuid': row['uuid'], 'modified_at': row['created_at']}) 56 | if updates: 57 | query = (sa.update(users) 58 | .values(modified_at=bindparam('modified_at')) 59 | .where(users.c.uuid == bindparam('b_uuid'))) 60 | conn.execute(query, updates) 61 | 62 | # Set each keypair's modified_at to the value of created_at. 63 | query = sa.select([keypairs.c.access_key, keypairs.c.created_at]).select_from(keypairs) 64 | updates = [] 65 | for row in conn.execute(query).fetchall(): 66 | updates.append({'b_access_key': row['access_key'], 'modified_at': row['created_at']}) 67 | if updates: 68 | query = (sa.update(keypairs) 69 | .values(modified_at=bindparam('modified_at')) 70 | .where(keypairs.c.access_key == bindparam('b_access_key'))) 71 | conn.execute(query, updates) 72 | 73 | 74 | def downgrade(): 75 | # ### commands auto generated by Alembic - please adjust! ### 76 | op.drop_column('users', 'modified_at') 77 | op.drop_column('keypairs', 'modified_at') 78 | # ### end Alembic commands ### 79 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/e421c02cf9e4_rename_kernel_dependencies_to_session_.py: -------------------------------------------------------------------------------- 1 | """rename_kernel_dependencies_to_session_dependencies 2 | 3 | Revision ID: e421c02cf9e4 4 | Revises: 548cc8aa49c8 5 | Create Date: 2020-09-14 10:45:40.218548 6 | 7 | """ 8 | from alembic import op 9 | 10 | # revision identifiers, used by Alembic. 11 | revision = 'e421c02cf9e4' 12 | down_revision = '548cc8aa49c8' 13 | branch_labels = None 14 | depends_on = None 15 | 16 | 17 | def upgrade(): 18 | op.drop_constraint('fk_kernel_dependencies_depends_on_kernels', 'kernel_dependencies') 19 | op.drop_constraint('fk_kernel_dependencies_kernel_id_kernels', 'kernel_dependencies') 20 | op.rename_table('kernel_dependencies', 'session_dependencies') 21 | op.alter_column('session_dependencies', 'kernel_id', new_column_name='session_id') 22 | op.execute('ALTER INDEX pk_kernel_dependencies ' 23 | 'RENAME TO pk_session_dependencies') 24 | op.execute('ALTER INDEX ix_kernel_dependencies_depends_on ' 25 | 'RENAME TO ix_session_dependencies_depends_on') 26 | op.execute('ALTER INDEX ix_kernel_dependencies_kernel_id ' 27 | 'RENAME TO ix_session_dependencies_session_id') 28 | # NOTE: we keep the fkey target as "kernels.id" instead of "kernels.session_id" 29 | # because the fkey target must be a unique index, and in Backend.AI `kernels.session_id` 30 | # is the same as the main kernel's `kernels.id`.
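30a | # For illustration, the constraints below compile to DDL roughly like: 30b | #   ALTER TABLE session_dependencies 30c | #     ADD CONSTRAINT fk_session_dependencies_session_id_kernels 30d | #     FOREIGN KEY (session_id) REFERENCES kernels (id) 30e | #     ON UPDATE CASCADE ON DELETE CASCADE; 30f | # (Passing None as the constraint name lets the naming convention derive it; the 30g | # derived names match those dropped in downgrade() below.)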
31 | op.create_foreign_key(None, 'session_dependencies', 'kernels', ['session_id'], ['id'], 32 | onupdate='CASCADE', ondelete='CASCADE') 33 | op.create_foreign_key(None, 'session_dependencies', 'kernels', ['depends_on'], ['id'], 34 | onupdate='CASCADE', ondelete='CASCADE') 35 | 36 | 37 | def downgrade(): 38 | op.drop_constraint('fk_session_dependencies_depends_on_kernels', 'session_dependencies') 39 | op.drop_constraint('fk_session_dependencies_session_id_kernels', 'session_dependencies') 40 | op.rename_table('session_dependencies', 'kernel_dependencies') 41 | op.alter_column('kernel_dependencies', 'session_id', new_column_name='kernel_id') 42 | op.execute('ALTER INDEX pk_session_dependencies ' 43 | 'RENAME TO pk_kernel_dependencies') 44 | op.execute('ALTER INDEX ix_session_dependencies_depends_on ' 45 | 'RENAME TO ix_kernel_dependencies_depends_on') 46 | op.execute('ALTER INDEX ix_session_dependencies_session_id ' 47 | 'RENAME TO ix_kernel_dependencies_kernel_id') 48 | op.create_foreign_key(None, 'kernel_dependencies', 'kernels', ['kernel_id'], ['id'], 49 | onupdate='CASCADE', ondelete='CASCADE') 50 | op.create_foreign_key(None, 'kernel_dependencies', 'kernels', ['depends_on'], ['id'], 51 | onupdate='CASCADE', ondelete='CASCADE') 52 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/e7371ca5797a_rename_mem_stats.py: -------------------------------------------------------------------------------- 1 | """rename_mem_stats 2 | 3 | Revision ID: e7371ca5797a 4 | Revises: 93e9d31d40bf 5 | Create Date: 2017-10-10 13:01:37.169568 6 | 7 | """ 8 | from alembic import op 9 | 10 | # revision identifiers, used by Alembic. 11 | revision = 'e7371ca5797a' 12 | down_revision = '93e9d31d40bf' 13 | branch_labels = None 14 | depends_on = None 15 | 16 | 17 | def upgrade(): 18 | op.alter_column('kernels', column_name='max_mem_bytes', new_column_name='mem_max_bytes') 19 | op.alter_column('kernels', column_name='cur_mem_bytes', new_column_name='mem_cur_bytes') 20 | 21 | 22 | def downgrade(): 23 | op.alter_column('kernels', column_name='mem_max_bytes', new_column_name='max_mem_bytes') 24 | op.alter_column('kernels', column_name='mem_cur_bytes', new_column_name='cur_mem_bytes') 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/ed666f476f39_add_bootstrap_script_to_keypairs.py: -------------------------------------------------------------------------------- 1 | """add_bootstrap_script_to_keypairs 2 | 3 | Revision ID: ed666f476f39 4 | Revises: d643752544de 5 | Create Date: 2020-03-15 17:40:46.754121 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'ed666f476f39' 14 | down_revision = 'd643752544de' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('keypairs', sa.Column('bootstrap_script', 22 | sa.String(length=64 * 1024), 23 | nullable=False, 24 | server_default='')) 25 | # ### end Alembic commands ### 26 | 27 | 28 | def downgrade(): 29 | # ### commands auto generated by Alembic - please adjust! 
### 30 | op.drop_column('keypairs', 'bootstrap_script') 31 | # ### end Alembic commands ### 32 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/eec98e65902a_merge_with_vfolder_clone.py: -------------------------------------------------------------------------------- 1 | """merge-with-vfolder-clone 2 | 3 | Revision ID: eec98e65902a 4 | Revises: d463fc5d6109, 97f6c80c8aa5 5 | Create Date: 2020-10-03 18:11:06.270486 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'eec98e65902a' 14 | down_revision = ('d463fc5d6109', '97f6c80c8aa5') 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | pass 21 | 22 | 23 | def downgrade(): 24 | pass 25 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/f5530eccf202_add_kernels_uuid_prefix_index.py: -------------------------------------------------------------------------------- 1 | """add-kernels-uuid-prefix-index 2 | 3 | Revision ID: f5530eccf202 4 | Revises: ed666f476f39 5 | Create Date: 2020-03-25 17:29:50.696450 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'f5530eccf202' 14 | down_revision = 'ed666f476f39' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | op.create_index( 21 | op.f('ix_kernels_uuid_prefix'), 22 | 'kernels', 23 | [sa.text('CAST("id" AS VARCHAR) COLLATE "C"')], 24 | ) 25 | 26 | 27 | def downgrade(): 28 | op.drop_index( 29 | op.f('ix_kernels_uuid_prefix'), 30 | 'kernels', 31 | ) 32 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/f8a71c3bffa2_stringify_userid.py: -------------------------------------------------------------------------------- 1 | """stringify_userid 2 | 3 | Revision ID: f8a71c3bffa2 4 | Revises: bf4bae8f942e 5 | Create Date: 2018-06-17 13:52:13.346856 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | from ai.backend.manager.models.base import convention 11 | import os 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'f8a71c3bffa2' 15 | down_revision = 'bf4bae8f942e' 16 | branch_labels = None 17 | depends_on = None 18 | 19 | 20 | def upgrade(): 21 | metadata = sa.MetaData(naming_convention=convention) 22 | keypairs = sa.Table( 23 | 'keypairs', metadata, 24 | sa.Column('user_id', sa.String(length=256), index=True), 25 | ) 26 | 27 | print('Choose keypairs.user_id column migrate option:') 28 | print(' [a] Convert all numeric user IDs to strings directly') 29 | print(' [b] Convert numeric user IDs to strings using a mapping table\n' 30 | ' (user_id_map.txt must be present in the current working directory\n' 31 | ' which contains a space-sep.list of numeric and string ID pairs.)') 32 | print('NOTE: If you choose [b], you will not be able to downgrade!') 33 | 34 | choice = os.environ.get('STRINGIFY_USERID_CHOICE') 35 | if choice is None: 36 | while True: 37 | choice = input('Your choice? [a/b] ') 38 | if choice in ('a', 'b'): 39 | break 40 | print('Invalid choice.') 41 | continue 42 | 43 | op.alter_column('keypairs', 'user_id', 44 | existing_type=sa.Integer(), 45 | type_=sa.String(length=256)) 46 | 47 | # NOTE: We do the data migration after converting column type. 
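47a | # For choice [b], user_id_map.txt is expected to contain one "<numeric-id> <string-id>" 47b | # pair per line, e.g. (values are hypothetical): 47c | #   10001 alice@example.com 47d | #   10002 bob@example.com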
48 | 49 | if choice == 'b': 50 | # query all unique user ids 51 | q = sa.select([keypairs.c.user_id]).group_by(keypairs.c.user_id) 52 | rows = op.get_bind().execute(q) 53 | user_ids = set(int(row.user_id) for row in rows) 54 | print(f'There are {len(user_ids)} unique user IDs.') 55 | 56 | user_id_map = {} 57 | with open('user_id_map.txt', 'r') as f: 58 | for line in f: 59 | # strip the trailing newline so it is not stored as part of the string ID 60 | num_id, str_id = line.rstrip('\n').split(maxsplit=1) 61 | assert len(str_id) <= 256, \ 62 | f'Too long target user ID! ({num_id} -> {str_id!r})' 63 | user_id_map[int(num_id)] = str_id 64 | 65 | map_diff = user_ids - set(user_id_map.keys()) 66 | assert len(map_diff) == 0, \ 67 | f'There are unmapped user IDs!\n{map_diff}' 68 | 69 | for num_id, str_id in user_id_map.items(): 70 | op.execute( 71 | keypairs.update() 72 | .values({'user_id': str_id}) 73 | .where(keypairs.c.user_id == str(num_id)) 74 | ) 75 | 76 | 77 | def downgrade(): 78 | op.alter_column('keypairs', 'user_id', 79 | existing_type=sa.String(length=256), 80 | type_=sa.Integer()) 81 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/f9971fbb34d9_add_state_column_to_vfolder_invitations.py: -------------------------------------------------------------------------------- 1 | """add state column to vfolder_invitations 2 | 3 | Revision ID: f9971fbb34d9 4 | Revises: 185852ff9872 5 | Create Date: 2018-07-12 23:30:14.942845 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'f9971fbb34d9' 14 | down_revision = '185852ff9872' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.add_column('vfolder_invitations', sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True)) 22 | op.add_column('vfolder_invitations', sa.Column('state', sa.String(length=10), nullable=True)) 23 | # ### end Alembic commands ### 24 | 25 | 26 | def downgrade(): 27 | # ### commands auto generated by Alembic - please adjust! ### 28 | op.drop_column('vfolder_invitations', 'state') 29 | op.drop_column('vfolder_invitations', 'created_at') 30 | # ### end Alembic commands ### 31 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/alembic/versions/ff4bfca66bf8_.py: -------------------------------------------------------------------------------- 1 | """empty message 2 | 3 | Revision ID: ff4bfca66bf8 4 | Revises: 0e558d06e0e3, 352fa4f88f61 5 | Create Date: 2018-12-24 22:42:54.188099 6 | 7 | """ 8 | 9 | 10 | # revision identifiers, used by Alembic.
11 | revision = 'ff4bfca66bf8' 12 | down_revision = ('0e558d06e0e3', '352fa4f88f61') 13 | branch_labels = None 14 | depends_on = None 15 | 16 | 17 | def upgrade(): 18 | pass 19 | 20 | 21 | def downgrade(): 22 | pass 23 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/dotfile.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from pathlib import PurePosixPath 4 | from typing import Any, Mapping, Sequence, TYPE_CHECKING 5 | 6 | import sqlalchemy as sa 7 | if TYPE_CHECKING: 8 | from sqlalchemy.ext.asyncio import ( 9 | AsyncConnection as SAConnection, 10 | ) 11 | 12 | from ai.backend.common import msgpack 13 | from ai.backend.common.types import VFolderMount 14 | 15 | from ..api.exceptions import BackendError 16 | from ..types import UserScope 17 | from .keypair import keypairs 18 | from .domain import query_domain_dotfiles 19 | from .group import query_group_dotfiles 20 | 21 | __all__ = ( 22 | 'prepare_dotfiles', 23 | ) 24 | 25 | 26 | async def prepare_dotfiles( 27 | conn: SAConnection, 28 | user_scope: UserScope, 29 | access_key: str, 30 | vfolder_mounts: Sequence[VFolderMount], 31 | ) -> Mapping[str, Any]: 32 | # Feed SSH keypair and dotfiles if exists. 33 | internal_data = {} 34 | query = ( 35 | sa.select([ 36 | keypairs.c.ssh_public_key, 37 | keypairs.c.ssh_private_key, 38 | keypairs.c.dotfiles, 39 | ]) 40 | .select_from(keypairs) 41 | .where(keypairs.c.access_key == access_key) 42 | ) 43 | result = await conn.execute(query) 44 | row = result.first() 45 | dotfiles = msgpack.unpackb(row['dotfiles']) 46 | internal_data.update({'dotfiles': dotfiles}) 47 | if row['ssh_public_key'] and row['ssh_private_key']: 48 | internal_data['ssh_keypair'] = { 49 | 'public_key': row['ssh_public_key'], 50 | 'private_key': row['ssh_private_key'], 51 | } 52 | # use dotfiles in the priority of keypair > group > domain 53 | dotfile_paths = set(map(lambda x: x['path'], dotfiles)) 54 | # add keypair dotfiles 55 | internal_data.update({'dotfiles': list(dotfiles)}) 56 | # add group dotfiles 57 | dotfiles, _ = await query_group_dotfiles(conn, user_scope.group_id) 58 | for dotfile in dotfiles: 59 | if dotfile['path'] not in dotfile_paths: 60 | internal_data['dotfiles'].append(dotfile) 61 | dotfile_paths.add(dotfile['path']) 62 | # add domain dotfiles 63 | dotfiles, _ = await query_domain_dotfiles(conn, user_scope.domain_name) 64 | for dotfile in dotfiles: 65 | if dotfile['path'] not in dotfile_paths: 66 | internal_data['dotfiles'].append(dotfile) 67 | dotfile_paths.add(dotfile['path']) 68 | # reverse the dotfiles list so that higher priority can overwrite 69 | # in case the actual path is the same 70 | internal_data['dotfiles'].reverse() 71 | 72 | # check if there is no name conflict of dotfile and vfolder 73 | vfolder_kernel_paths = {m.kernel_path for m in vfolder_mounts} 74 | for dotfile in internal_data.get('dotfiles', []): 75 | dotfile_path = PurePosixPath(dotfile['path']) 76 | if not dotfile_path.is_absolute(): 77 | dotfile_path = PurePosixPath('/home/work', dotfile['path']) 78 | if dotfile_path in vfolder_kernel_paths: 79 | raise BackendError( 80 | f"There is a kernel-side path from vfolders that conflicts with " 81 | f"a dotfile '{dotfile['path']}'.", 82 | ) 83 | 84 | return internal_data 85 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/error_logs.py: 
-------------------------------------------------------------------------------- 1 | import sqlalchemy as sa 2 | from sqlalchemy.dialects import postgresql 3 | 4 | from .base import metadata, IDColumn, GUID 5 | __all__ = [ 6 | 'error_logs', 7 | ] 8 | 9 | error_logs = sa.Table( 10 | 'error_logs', metadata, 11 | IDColumn(), 12 | sa.Column('created_at', sa.DateTime(timezone=True), 13 | server_default=sa.func.now(), index=True), 14 | sa.Column('severity', sa.Enum('critical', 'error', 'warning', 'info', 'debug', name='errorlog_severity'), 15 | index=True), 16 | sa.Column('source', sa.String), 17 | sa.Column('user', GUID, sa.ForeignKey('users.uuid'), nullable=True, index=True), 18 | sa.Column('is_read', sa.Boolean, default=False, index=True), 19 | sa.Column('is_cleared', sa.Boolean, default=False, index=True), 20 | sa.Column('message', sa.Text), 21 | sa.Column('context_lang', sa.String), 22 | sa.Column('context_env', postgresql.JSONB()), 23 | sa.Column('request_url', sa.String, nullable=True), 24 | sa.Column('request_status', sa.Integer, nullable=True), 25 | sa.Column('traceback', sa.Text, nullable=True), 26 | ) 27 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/minilang/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import ( 2 | Any, 3 | Callable, 4 | Optional, 5 | Tuple, 6 | ) 7 | 8 | FieldSpecItem = Tuple[str, Optional[Callable[[str], Any]]] 9 | -------------------------------------------------------------------------------- /src/ai/backend/manager/models/minilang/ordering.py: -------------------------------------------------------------------------------- 1 | from typing import ( 2 | Mapping, 3 | ) 4 | 5 | from lark import Lark, LarkError, Transformer 6 | import sqlalchemy as sa 7 | 8 | __all__ = ( 9 | 'QueryOrderParser', 10 | ) 11 | 12 | _grammar = r""" 13 | ?start: expr 14 | expr : [col ("," col)*] 15 | col : ORDER? CNAME 16 | ORDER : "+" | "-" 17 | %import common.CNAME 18 | %import common.WS 19 | %ignore WS 20 | """ 21 | _parser = Lark( 22 | _grammar, 23 | parser='lalr', 24 | maybe_placeholders=False, 25 | ) 26 | 27 | 28 | class QueryOrderTransformer(Transformer): 29 | 30 | def __init__(self, sa_table: sa.Table, column_map: Mapping[str, str] = None) -> None: 31 | super().__init__() 32 | self._sa_table = sa_table 33 | self._column_map = column_map 34 | 35 | def _get_col(self, col_name: str) -> sa.Column: 36 | try: 37 | if self._column_map: 38 | col = self._sa_table.c[self._column_map[col_name]] 39 | else: 40 | col = self._sa_table.c[col_name] 41 | return col 42 | except KeyError: 43 | raise ValueError("Unknown/unsupported field name", col_name) 44 | 45 | def col(self, *args): 46 | children = args[0] 47 | if len(children) == 2: 48 | op = children[0].value 49 | col = self._get_col(children[1].value) 50 | else: 51 | op = "+"  # assume ascending if not marked 52 | col = self._get_col(children[0].value) 53 | if op == "+": 54 | return col.asc() 55 | elif op == "-": 56 | return col.desc() 57 | 58 | expr = tuple 59 | 60 | 61 | class QueryOrderParser(): 62 | 63 | def __init__(self, column_map: Mapping[str, str] = None) -> None: 64 | self._column_map = column_map 65 | self._parser = _parser 66 | 67 | def append_ordering( 68 | self, 69 | sa_query: sa.sql.Select, 70 | order_expr: str, 71 | ) -> sa.sql.Select: 72 | """ 73 | Parse the given ordering expression and append the ORDER BY clause based on the first target 74 | table from the given SQLAlchemy query object.
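74a | 74b | Illustrative usage (the table and column names are hypothetical): 74c | 74d | parser = QueryOrderParser() 74e | sa_query = parser.append_ordering(sa.select([users]), "+created_at,-email")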
75 | """ 76 | table = sa_query.froms[0] 77 | try: 78 | ast = self._parser.parse(order_expr) 79 | orders = QueryOrderTransformer(table, self._column_map).transform(ast) 80 | except LarkError as e: 81 | raise ValueError(f"Query ordering parsing error: {e}") 82 | return sa_query.order_by(*orders) 83 | -------------------------------------------------------------------------------- /src/ai/backend/manager/pglock.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import Any, AsyncContextManager 4 | 5 | from ai.backend.common.lock import AbstractDistributedLock 6 | 7 | from .models.utils import ExtendedAsyncSAEngine 8 | from .defs import LockID 9 | 10 | 11 | class PgAdvisoryLock(AbstractDistributedLock): 12 | 13 | _lock_ctx: AsyncContextManager | None 14 | 15 | def __init__(self, db: ExtendedAsyncSAEngine, lock_id: LockID) -> None: 16 | self.db = db 17 | self.lock_id = lock_id 18 | self._lock_ctx = None 19 | 20 | async def __aenter__(self) -> Any: 21 | self._lock_ctx = self.db.advisory_lock(self.lock_id) 22 | await self._lock_ctx.__aenter__() 23 | 24 | async def __aexit__(self, *exc_info) -> bool | None: 25 | assert self._lock_ctx is not None 26 | try: 27 | return await self._lock_ctx.__aexit__(*exc_info) 28 | finally: 29 | self._lock_ctx = None 30 | -------------------------------------------------------------------------------- /src/ai/backend/manager/plugin/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lablup/backend.ai-manager/c3e596b2877833a99893ca12a5e165475b7d0071/src/ai/backend/manager/plugin/__init__.py -------------------------------------------------------------------------------- /src/ai/backend/manager/plugin/exceptions.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module defines a series of Backend.AI's plugin-specific errors. 3 | """ 4 | from aiohttp import web 5 | from ai.backend.manager.api.exceptions import BackendError 6 | 7 | 8 | class PluginError(web.HTTPBadRequest, BackendError): 9 | error_type = 'https://api.backend.ai/probs/plugin-error' 10 | error_title = 'Plugin generated error' 11 | 12 | 13 | class PluginConfigurationError(PluginError): 14 | error_type = 'https://api.backend.ai/probs/plugin-config-error' 15 | error_title = 'Plugin configuration error' 16 | -------------------------------------------------------------------------------- /src/ai/backend/manager/plugin/webapp.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | from typing import ( 3 | Tuple, 4 | Sequence, 5 | ) 6 | 7 | from aiohttp import web 8 | 9 | from ai.backend.common.plugin import AbstractPlugin, BasePluginContext 10 | from ai.backend.manager.api.types import CORSOptions, WebMiddleware 11 | 12 | 13 | class WebappPlugin(AbstractPlugin, metaclass=ABCMeta): 14 | """ 15 | Webapp plugins should create a valid aiohttp.web.Application instance. The returned app 16 | instance will be a subapp of the root app defined by the manager, and additional user-properties 17 | will be set as defined in ``ai.backend.gateway.server.PUBLIC_INTERFACES``. 18 | 19 | The init/cleanup methods of the plugin are ignored and the manager uses the standard aiohttp's 20 | application lifecycle handlers attached to the returned app instance. 
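20a | 20b | A minimal conforming plugin might look like the following sketch (the class name 20c | is hypothetical): 20d | 20e | class HelloWebappPlugin(WebappPlugin): 20f | 20g | async def create_app(self, cors_options): 20h | app = web.Application() 20i | return app, []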
21 | """ 22 | 23 | @abstractmethod 24 | async def create_app( 25 | self, cors_options: CORSOptions, 26 | ) -> Tuple[web.Application, Sequence[WebMiddleware]]: 27 | pass 28 | 29 | 30 | class WebappPluginContext(BasePluginContext[WebappPlugin]): 31 | plugin_group = 'backendai_webapp_v20' 32 | -------------------------------------------------------------------------------- /src/ai/backend/manager/py.typed: -------------------------------------------------------------------------------- 1 | marker 2 | -------------------------------------------------------------------------------- /src/ai/backend/manager/scheduler/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lablup/backend.ai-manager/c3e596b2877833a99893ca12a5e165475b7d0071/src/ai/backend/manager/scheduler/__init__.py -------------------------------------------------------------------------------- /src/ai/backend/manager/scheduler/mof.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from typing import ( 4 | Optional, 5 | Sequence, 6 | ) 7 | 8 | import trafaret as t 9 | 10 | from ai.backend.common.types import ( 11 | AccessKey, 12 | AgentId, 13 | SessionId, 14 | ResourceSlot, 15 | ) 16 | 17 | from .types import ( 18 | AbstractScheduler, 19 | PendingSession, 20 | ExistingSession, 21 | AgentContext, 22 | KernelInfo, 23 | ) 24 | 25 | 26 | class MOFScheduler(AbstractScheduler): 27 | """Minimum Occupied slot First Scheduler""" 28 | 29 | config_iv = t.Dict({}).allow_extra('*') 30 | 31 | def pick_session( 32 | self, 33 | total_capacity: ResourceSlot, 34 | pending_sessions: Sequence[PendingSession], 35 | existing_sessions: Sequence[ExistingSession], 36 | ) -> Optional[SessionId]: 37 | # Just pick the first pending session. 
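37a | # (Session selection here is plain FIFO; the "minimum occupied" policy of this 37b | # scheduler is applied during agent selection in _assign_agent() below.)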
38 | return SessionId(pending_sessions[0].session_id) 39 | 40 | def _assign_agent( 41 | self, 42 | agents: Sequence[AgentContext], 43 | access_key: AccessKey, 44 | requested_slots: ResourceSlot, 45 | ) -> Optional[AgentId]: 46 | # return the agent with the minimum occupied slots, or None if no agent fits 47 | return next((one_agent.agent_id for one_agent in (sorted( 48 | (agent for agent in agents if ( 49 | (agent.available_slots - agent.occupied_slots) 50 | >= requested_slots 51 | )), 52 | key=lambda a: a.occupied_slots) 53 | )), None) 54 | 55 | def assign_agent_for_session( 56 | self, 57 | agents: Sequence[AgentContext], 58 | pending_session: PendingSession, 59 | ) -> Optional[AgentId]: 60 | return self._assign_agent( 61 | agents, pending_session.access_key, pending_session.requested_slots, 62 | ) 63 | 64 | def assign_agent_for_kernel( 65 | self, 66 | agents: Sequence[AgentContext], 67 | pending_kernel: KernelInfo, 68 | ) -> Optional[AgentId]: 69 | return self._assign_agent( 70 | agents, pending_kernel.access_key, pending_kernel.requested_slots, 71 | ) 72 | -------------------------------------------------------------------------------- /src/ai/backend/manager/types.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import attr 4 | import enum 5 | import uuid 6 | from typing import ( 7 | Protocol, 8 | TYPE_CHECKING, 9 | ) 10 | 11 | from sqlalchemy.ext.asyncio import AsyncConnection as SAConnection 12 | from sqlalchemy.engine.row import Row 13 | 14 | if TYPE_CHECKING: 15 | from ai.backend.common.lock import AbstractDistributedLock 16 | from .defs import LockID 17 | 18 | 19 | class SessionGetter(Protocol): 20 | 21 | def __call__(self, *, db_connection: SAConnection) -> Row: 22 | ... 23 | 24 | 25 | # Sentinel is a special object that indicates a special status instead of a value 26 | # where the user expects a value. 27 | # Following the discussion in https://github.com/python/typing/issues/236, 28 | # we define our Sentinel type as an enum with a single member. 29 | # This lets type checks pass via "value is sentinel" (or "value is Sentinel.token") 30 | # instead of the more expensive "isinstance(value, Sentinel)", because type checkers 31 | # can be sure there is no other possible instance of the Sentinel type. 32 | 33 | class Sentinel(enum.Enum): 34 | token = 0 35 | 36 | 37 | @attr.define(slots=True) 38 | class UserScope: 39 | domain_name: str 40 | group_id: uuid.UUID 41 | user_uuid: uuid.UUID 42 | user_role: str 43 | 44 | 45 | class DistributedLockFactory(Protocol): 46 | 47 | def __call__(self, lock_id: LockID, lifetime_hint: float) -> AbstractDistributedLock: 48 | ...
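48a | 48b | # Illustrative use of the Sentinel pattern described above (a sketch, not used here): 48c | # 48d | #   def poll(q: list[int]) -> int | Sentinel: 48e | #       return q.pop() if q else Sentinel.token 48f | # 48g | #   if (item := poll(queue)) is not Sentinel.token: 48h | #       ...  # type checkers narrow `item` to int in this branch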
49 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/lablup/backend.ai-manager/c3e596b2877833a99893ca12a5e165475b7d0071/tests/__init__.py -------------------------------------------------------------------------------- /tests/api/test_config.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import AsyncMock 2 | 3 | import pytest 4 | 5 | 6 | @pytest.mark.asyncio 7 | async def test_register_myself(shared_config, mocker): 8 | instance_id = 'i-test-manager' 9 | from ai.backend.manager import config as config_mod 10 | mocked_get_instance_id = AsyncMock(return_value=instance_id) 11 | mocker.patch.object(config_mod, 'get_instance_id', mocked_get_instance_id) 12 | 13 | await shared_config.register_myself() 14 | assert mocked_get_instance_id.await_count == 1 15 | data = await shared_config.etcd.get_prefix(f'nodes/manager/{instance_id}') 16 | assert data[''] == 'up' 17 | 18 | await shared_config.deregister_myself() 19 | assert mocked_get_instance_id.await_count == 2 20 | data = await shared_config.etcd.get_prefix(f'nodes/manager/{instance_id}') 21 | assert len(data) == 0 22 | -------------------------------------------------------------------------------- /tests/api/test_exceptions.py: -------------------------------------------------------------------------------- 1 | import json 2 | import pickle 3 | 4 | from ai.backend.manager.api.exceptions import BackendError, BackendAgentError 5 | from ai.backend.common.utils import odict 6 | 7 | 8 | def test_backend_error_obj(): 9 | eobj = BackendError() 10 | assert eobj.args == (eobj.status_code, eobj.reason, eobj.error_type) 11 | assert eobj.body == json.dumps(odict( 12 | ('type', eobj.error_type), ('title', eobj.error_title), 13 | )).encode() 14 | 15 | extra_msg = '!@#$' 16 | eobj = BackendError(extra_msg) 17 | assert extra_msg in str(eobj) 18 | assert extra_msg in repr(eobj) 19 | 20 | 21 | def test_backend_error_obj_pickle(): 22 | eobj = BackendError() 23 | encoded = pickle.dumps(eobj) 24 | decoded = pickle.loads(encoded) 25 | assert eobj.status_code == decoded.status_code 26 | assert eobj.error_type == decoded.error_type 27 | assert eobj.error_title == decoded.error_title 28 | assert eobj.content_type == decoded.content_type 29 | assert eobj.extra_msg == decoded.extra_msg 30 | 31 | 32 | def test_backend_agent_error_obj(): 33 | eobj = BackendAgentError('timeout') 34 | 35 | assert eobj.args == (eobj.status_code, eobj.reason, 36 | eobj.error_type, eobj.agent_error_type) 37 | assert eobj.body == json.dumps(odict( 38 | ('type', eobj.error_type), 39 | ('title', eobj.error_title), 40 | ('agent-details', odict( 41 | ('type', eobj.agent_error_type), 42 | ('title', eobj.agent_error_title), 43 | )), 44 | )).encode() 45 | 46 | 47 | def test_backend_agent_error_obj_pickle(): 48 | eobj = BackendAgentError('timeout') 49 | encoded = pickle.dumps(eobj) 50 | decoded = pickle.loads(encoded) 51 | assert eobj.body == decoded.body 52 | assert eobj.agent_details == decoded.agent_details 53 | -------------------------------------------------------------------------------- /tests/api/test_ratelimit.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import pytest 4 | 5 | from ai.backend.manager.server import ( 6 | database_ctx, 7 | event_dispatcher_ctx, 8 | hook_plugin_ctx, 9 | monitoring_ctx, 10 | redis_ctx, 11 |
shared_config_ctx, 12 | ) 13 | import ai.backend.manager.api.ratelimit as rlim 14 | 15 | 16 | @pytest.mark.asyncio 17 | async def test_check_rlim_for_anonymous_query( 18 | etcd_fixture, 19 | database_fixture, 20 | create_app_and_client, 21 | ): 22 | app, client = await create_app_and_client( 23 | [ 24 | shared_config_ctx, 25 | redis_ctx, 26 | event_dispatcher_ctx, 27 | database_ctx, 28 | monitoring_ctx, 29 | hook_plugin_ctx, 30 | ], 31 | ['.auth', '.ratelimit'], 32 | ) 33 | ret = await client.get('/') 34 | assert ret.status == 200 35 | assert '1000' == ret.headers['X-RateLimit-Limit'] 36 | assert '1000' == ret.headers['X-RateLimit-Remaining'] 37 | assert str(rlim._rlim_window) == ret.headers['X-RateLimit-Window'] 38 | 39 | 40 | @pytest.mark.asyncio 41 | async def test_check_rlim_for_authorized_query( 42 | etcd_fixture, 43 | database_fixture, 44 | create_app_and_client, 45 | get_headers, 46 | ): 47 | app, client = await create_app_and_client( 48 | [ 49 | shared_config_ctx, 50 | redis_ctx, 51 | event_dispatcher_ctx, 52 | database_ctx, 53 | monitoring_ctx, 54 | hook_plugin_ctx, 55 | ], 56 | ['.auth', '.ratelimit'], 57 | ) 58 | url = '/auth/test' 59 | req_bytes = json.dumps({'echo': 'hello!'}).encode() 60 | headers = get_headers('POST', url, req_bytes) 61 | ret = await client.post(url, data=req_bytes, headers=headers) 62 | 63 | assert ret.status == 200 64 | # The default example keypair's ratelimit is 30000. 65 | assert '30000' == ret.headers['X-RateLimit-Limit'] 66 | assert '29999' == ret.headers['X-RateLimit-Remaining'] 67 | assert str(rlim._rlim_window) == ret.headers['X-RateLimit-Window'] 68 | -------------------------------------------------------------------------------- /tests/api/test_utils.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import pytest 4 | 5 | from ai.backend.manager.models import verify_dotfile_name, verify_vfolder_name 6 | from ai.backend.manager.api.utils import ( 7 | call_non_bursty, 8 | mask_sensitive_keys, 9 | ) 10 | 11 | 12 | @pytest.mark.asyncio 13 | async def test_call_non_bursty(): 14 | key = 'x' 15 | execution_count = 0 16 | 17 | async def execute(): 18 | nonlocal execution_count 19 | await asyncio.sleep(0) 20 | execution_count += 1 21 | 22 | # ensure reset 23 | await asyncio.sleep(0.11) 24 | 25 | # check run as coroutine 26 | execution_count = 0 27 | with pytest.raises(TypeError): 28 | await call_non_bursty(key, execute()) 29 | 30 | # check run as coroutinefunction 31 | execution_count = 0 32 | await call_non_bursty(key, execute) 33 | assert execution_count == 1 34 | await asyncio.sleep(0.11) 35 | 36 | # check burstiness control 37 | execution_count = 0 38 | for _ in range(129): 39 | await call_non_bursty(key, execute) 40 | assert execution_count == 3 41 | await asyncio.sleep(0.01) 42 | await call_non_bursty(key, execute) 43 | assert execution_count == 3 44 | await asyncio.sleep(0.11) 45 | await call_non_bursty(key, execute) 46 | assert execution_count == 4 47 | for _ in range(64): 48 | await call_non_bursty(key, execute) 49 | assert execution_count == 5 50 | 51 | 52 | def test_vfolder_name_validator(): 53 | assert not verify_vfolder_name('.bashrc') 54 | assert not verify_vfolder_name('.terminfo') 55 | assert verify_vfolder_name('bashrc') 56 | assert verify_vfolder_name('.config') 57 | assert verify_vfolder_name('bin') 58 | assert verify_vfolder_name('boot') 59 | assert verify_vfolder_name('root') 60 | assert not verify_vfolder_name('/bin') 61 | assert not verify_vfolder_name('/boot') 62 | 
assert not verify_vfolder_name('/root') 63 | assert verify_vfolder_name('/home/work/bin') 64 | assert verify_vfolder_name('/home/work/boot') 65 | assert verify_vfolder_name('/home/work/root') 66 | assert verify_vfolder_name('home/work') 67 | 68 | 69 | def test_dotfile_name_validator(): 70 | assert not verify_dotfile_name('.terminfo') 71 | assert not verify_dotfile_name('.config') 72 | assert not verify_dotfile_name('.ssh/authorized_keys') 73 | assert verify_dotfile_name('.bashrc') 74 | assert verify_dotfile_name('.ssh/id_rsa') 75 | 76 | 77 | def test_mask_sensitive_keys(): 78 | a = {'a': 123, 'my-Secret': 'hello'} 79 | b = mask_sensitive_keys(a) 80 | # original is untouched 81 | assert a['a'] == 123 82 | assert a['my-Secret'] == 'hello' 83 | # cloned has masked fields 84 | assert b['a'] == 123 85 | assert b['my-Secret'] == '***' 86 | -------------------------------------------------------------------------------- /tests/models/test_dbutils.py: -------------------------------------------------------------------------------- 1 | import aiotools 2 | import pytest 3 | import sqlalchemy as sa 4 | 5 | from ai.backend.manager.models.utils import execute_with_retry 6 | 7 | 8 | @pytest.mark.asyncio 9 | async def test_execute_with_retry(): 10 | 11 | class DummyDBError(Exception): 12 | def __init__(self, pgcode): 13 | self.pgcode = pgcode 14 | 15 | async def txn_func_generic_failure(): 16 | raise sa.exc.IntegrityError('DUMMY_SQL', params=None, orig=DummyDBError('999')) 17 | 18 | async def txn_func_generic_failure_2(): 19 | raise ZeroDivisionError("oops") 20 | 21 | async def txn_func_permanent_serialization_failure(): 22 | raise sa.exc.DBAPIError('DUMMY_SQL', params=None, orig=DummyDBError('40001')) 23 | 24 | _fail_count = 0 25 | 26 | async def txn_func_temporary_serialization_failure(): 27 | nonlocal _fail_count 28 | _fail_count += 1 29 | if _fail_count == 10: 30 | return 1234 31 | raise sa.exc.DBAPIError('DUMMY_SQL', params=None, orig=DummyDBError('40001')) 32 | 33 | vclock = aiotools.VirtualClock() 34 | with vclock.patch_loop(): 35 | 36 | with pytest.raises(sa.exc.IntegrityError): 37 | await execute_with_retry(txn_func_generic_failure) 38 | 39 | with pytest.raises(ZeroDivisionError): 40 | await execute_with_retry(txn_func_generic_failure_2) 41 | 42 | with pytest.raises(RuntimeError) as e: 43 | await execute_with_retry(txn_func_permanent_serialization_failure) 44 | assert "serialization failed" in e.value.args[0].lower() 45 | 46 | ret = await execute_with_retry(txn_func_temporary_serialization_failure) 47 | assert ret == 1234 48 | -------------------------------------------------------------------------------- /tests/sample-ssl-cert/sample.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICRzCCAbACCQCIXVju6dmcdzANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJL 3 | UjETMBEGA1UECBMKU29tZS1TdGF0ZTEOMAwGA1UEBxMFU2VvdWwxDzANBgNVBAoT 4 | BkxhYmx1cDEPMA0GA1UECxMGRGV2T3BzMRIwEAYDVQQDEwlsb2NhbGhvc3QwHhcN 5 | MTYxMDEwMDI0MTA2WhcNMjYxMDA4MDI0MTA2WjBoMQswCQYDVQQGEwJLUjETMBEG 6 | A1UECBMKU29tZS1TdGF0ZTEOMAwGA1UEBxMFU2VvdWwxDzANBgNVBAoTBkxhYmx1 7 | cDEPMA0GA1UECxMGRGV2T3BzMRIwEAYDVQQDEwlsb2NhbGhvc3QwgZ8wDQYJKoZI 8 | hvcNAQEBBQADgY0AMIGJAoGBANWBj4K90ZI7mSco5vLT1YZb/57xgb8e0qOFq0wG 9 | bSFfTl//6bzw0G3+GPl/2L/9DMMivi7HS9iAT9/T7NusiHNDPhC8bqRnQYOYO67s 10 | k7UCXeOkMl59MJqU4rn4IhHj8X1huOW8BosDMCkRx9PuS9FHUTJsCp1vnxi0G4Lo 11 | uP5rAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAigmiXFi4n6h1B8w01l5Q38Ge1Rpp 12 | 
+7fHAI+4FyNnsJKBhuCBX4AMmqLzgzNDpGyv4QEEUzMWERuAP0vpYNRj09i+xAXB 13 | DeFgrIGbEKCbG4Ukp9U4R5kewp+qJnBfwGlBA1r9SF2ejWr7fPobGj1SrviZrLZ5 14 | f/7uWD54ie5aPkk= 15 | -----END CERTIFICATE----- 16 | -------------------------------------------------------------------------------- /tests/sample-ssl-cert/sample.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIBqDCCARECAQAwaDELMAkGA1UEBhMCS1IxEzARBgNVBAgTClNvbWUtU3RhdGUx 3 | DjAMBgNVBAcTBVNlb3VsMQ8wDQYDVQQKEwZMYWJsdXAxDzANBgNVBAsTBkRldk9w 4 | czESMBAGA1UEAxMJbG9jYWxob3N0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB 5 | gQDVgY+CvdGSO5knKOby09WGW/+e8YG/HtKjhatMBm0hX05f/+m88NBt/hj5f9i/ 6 | /QzDIr4ux0vYgE/f0+zbrIhzQz4QvG6kZ0GDmDuu7JO1Al3jpDJefTCalOK5+CIR 7 | 4/F9YbjlvAaLAzApEcfT7kvRR1EybAqdb58YtBuC6Lj+awIDAQABoAAwDQYJKoZI 8 | hvcNAQEFBQADgYEAB+QwJKRAW9Du7MvZKE8xVuKamI3q13vuAOK+uFWU4iIwqfgR 9 | OhjCrizkStOIRcScsKu023hmEhph8XHHN1IBOm3EjQ4iOZqXBgKAoEMiqPJjGRGk 10 | LAQ7KPuDFv5QumKbTbd+mfvu56+o5U086+fo5pKVAcXsjNf9Sc90JEF4dtE= 11 | -----END CERTIFICATE REQUEST----- 12 | -------------------------------------------------------------------------------- /tests/sample-ssl-cert/sample.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIICXQIBAAKBgQDVgY+CvdGSO5knKOby09WGW/+e8YG/HtKjhatMBm0hX05f/+m8 3 | 8NBt/hj5f9i//QzDIr4ux0vYgE/f0+zbrIhzQz4QvG6kZ0GDmDuu7JO1Al3jpDJe 4 | fTCalOK5+CIR4/F9YbjlvAaLAzApEcfT7kvRR1EybAqdb58YtBuC6Lj+awIDAQAB 5 | AoGAbywYWv6V+mv4EnD02Ko++8g5sTyVz7uv+J+ok1yhRIhI2in6PnyyOyPdQ0Uz 6 | yrxsAcu8dcUmlCQz8xt5sOUE4vOyXXgXil4v7/amMmKwhPXKssCwYA58U5S5e/I1 7 | DVHw4OaxT7qiPPZteZaJa2QgH1ihtXhNGbqYeTv9nBeEKAkCQQD6Bb8TLiWz1GFV 8 | YgnEk+wAHX7f0RfQAwqr3W3Xc+Os0iLGt1s0Wu7kvnzzWMBQAMSXjLjvEABTM8zP 9 | eXx7dpQdAkEA2pxRMU4ZjFjTQy/CJtRf7aWFj+0ctGv/2D0VXdmv7ArrjTVkAD9e 10 | culPueqzKcdC53fZn8SnHuiA2FTBcGLGJwJBAMC4rzmEp9E/Uyuyn17kutS357V0 11 | gkt4HMCvtVyPWx86901/xpDLyzuNTdlyPwMsJF3BPkggaG+6DRScS4ULuU0CQC1Y 12 | Y1cQ1ifQfPHgxCr9vnAy90NlcaDTDhyyfu4aq20Qzs9ZlcafXl4Dmy/7SPKPjIcq 13 | yw9i4S9+FsvIuN8w/d0CQQCLEZ3PGNorU+lvfVl3YbAek/qY7bN5fdUI23+6JImD 14 | O/NvyY0RKCgNo8EnCFtVqgE9YI7DvcMHT338Kmizj8FL 15 | -----END RSA PRIVATE KEY----- 16 | -------------------------------------------------------------------------------- /tests/test_advisory_lock.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | import pytest 4 | 5 | from ai.backend.manager.defs import LockID 6 | from ai.backend.manager.models.utils import ExtendedAsyncSAEngine 7 | 8 | 9 | @pytest.mark.asyncio 10 | async def test_lock(database_engine: ExtendedAsyncSAEngine) -> None: 11 | 12 | enter_count = 0 13 | done_count = 0 14 | 15 | async def critical_section(db: ExtendedAsyncSAEngine) -> None: 16 | nonlocal enter_count 17 | async with db.advisory_lock(LockID.LOCKID_TEST): 18 | enter_count += 1 19 | await asyncio.sleep(1.0) 20 | 21 | tasks = [] 22 | for idx in range(5): 23 | tasks.append( 24 | asyncio.create_task( 25 | critical_section(database_engine), 26 | name=f"critical-section-{idx}", 27 | ), 28 | ) 29 | await asyncio.sleep(0.5) 30 | 31 | async with database_engine.connect() as conn: 32 | result = await conn.exec_driver_sql( 33 | "SELECT objid, granted, pid FROM pg_locks " 34 | "WHERE locktype = 'advisory' AND objid = 42;", 35 | ) 36 | rows = result.fetchall() 37 | print(rows) 38 | result = await conn.exec_driver_sql( 39 | "SELECT objid, granted FROM pg_locks " 40 | "WHERE locktype = 'advisory' AND 
objid = 42 AND granted = 't';", 41 | ) 42 | rows = result.fetchall() 43 | assert len(rows) == 1 44 | 45 | await asyncio.sleep(2.5) 46 | for t in tasks: 47 | if t.done(): 48 | done_count += 1 49 | else: 50 | try: 51 | t.cancel() 52 | await t 53 | except asyncio.CancelledError: 54 | pass 55 | await asyncio.sleep(0.1) 56 | 57 | assert 2 <= done_count <= 3 58 | assert enter_count >= done_count 59 | 60 | # Check all tasks have unlocked. 61 | async with database_engine.connect() as conn: 62 | result = await conn.exec_driver_sql( 63 | "SELECT objid, granted, pid FROM pg_locks " 64 | "WHERE locktype = 'advisory' AND objid = 42 AND granted = 't';", 65 | ) 66 | rows = result.fetchall() 67 | print(rows) 68 | assert len(rows) == 0 69 | -------------------------------------------------------------------------------- /tests/test_image.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | import uuid 3 | 4 | import pytest 5 | import sqlalchemy as sa 6 | from sqlalchemy.ext.asyncio import ( 7 | AsyncSession, 8 | create_async_engine, 9 | ) 10 | from sqlalchemy.ext.declarative import declarative_base 11 | from sqlalchemy.orm import ( 12 | selectinload, 13 | sessionmaker, 14 | ) 15 | 16 | from ai.backend.common.docker import ImageRef 17 | 18 | from ai.backend.manager.models import ( 19 | update_aliases_from_file, 20 | ImageAliasRow, 21 | ImageRow, 22 | ) 23 | from ai.backend.manager.models.base import metadata as old_metadata 24 | from ai.backend.manager.models.utils import regenerate_table 25 | 26 | column_keys = ['nullable', 'index', 'unique', 'primary_key'] 27 | 28 | 29 | @pytest.fixture 30 | async def virtual_image_db(): 31 | engine = create_async_engine('sqlite+aiosqlite:///:memory:', echo=True) 32 | base = declarative_base() 33 | metadata = base.metadata 34 | 35 | regenerate_table(old_metadata.tables['images'], metadata) 36 | regenerate_table(old_metadata.tables['image_aliases'], metadata) 37 | ImageAliasRow.metadata = metadata 38 | ImageRow.metadata = metadata 39 | async_session = sessionmaker(engine, class_=AsyncSession, autoflush=False) 40 | async with engine.begin() as conn: 41 | await conn.run_sync(metadata.create_all) 42 | await conn.commit() 43 | async with async_session() as session: 44 | image_1 = ImageRow( 45 | 'index.docker.io/lablup/test-python:latest', 'x86_64', 46 | 'index.docker.io', 'lablup/test-python', 'latest', 47 | 'sha256:2d577a600afe2d1b38d78bc2ee5abe3bd350890d0652e48096249694e074f9c3', 48 | 123123123, 'COMPUTE', '', {}, {}, 49 | ) 50 | image_1.id = uuid.uuid4() 51 | image_2 = ImageRow( 52 | 'index.docker.io/lablup/test-python:3.6-debian', 'aarch64', 53 | 'index.docker.io', 'lablup/test-python', '3.6-debian', 54 | 'sha256:2d577a600afe2d1b38d78bc2ee5abe3bd350890d0652e48096249694e074f9c3', 55 | 123123123, 'COMPUTE', '', {}, {}, 56 | ) 57 | image_2.id = uuid.uuid4() 58 | session.add(image_1) 59 | session.add(image_2) 60 | await session.commit() 61 | yield async_session 62 | await engine.dispose() 63 | 64 | 65 | @pytest.fixture 66 | async def image_aliases(tmpdir): 67 | content = ''' 68 | aliases: 69 | - ['my-python', 'test-python:latest', 'x86_64'] 70 | - ['my-python:3.6', 'test-python:3.6-debian', 'aarch64'] # preferred 71 | ''' 72 | p = Path(tmpdir) / 'test-image-aliases.yml' 73 | p.write_text(content) 74 | 75 | yield p 76 | 77 | 78 | @pytest.mark.asyncio 79 | async def test_update_aliases_from_file(virtual_image_db, image_aliases): 80 | async_session = virtual_image_db 81 | async with async_session() as session: 82 | 
created_aliases = await update_aliases_from_file(session, image_aliases) 83 | for alias in created_aliases: 84 | alias.id = uuid.uuid4() 85 | await session.commit() 86 | result = await session.execute( 87 | sa.select(ImageAliasRow).options(selectinload(ImageAliasRow.image)), 88 | ) 89 | aliases = {} 90 | for row in result.scalars().all(): 91 | aliases[row.alias] = row.image.image_ref 92 | assert aliases == { 93 | 'my-python': ImageRef('lablup/test-python:latest', architecture='x86_64'), 94 | 'my-python:3.6': ImageRef('lablup/test-python:3.6-debian', architecture='aarch64'), 95 | } 96 | --------------------------------------------------------------------------------