├── .editorconfig ├── .flake8 ├── .gitattributes ├── .gitignore ├── .pre-commit-config.yaml ├── .pylintrc ├── DeepSeek_VL2_paper.pdf ├── LICENSE-CODE ├── LICENSE-MODEL ├── Makefile ├── README.md ├── deepseek_vl2 ├── __init__.py ├── models │ ├── __init__.py │ ├── configuration_deepseek.py │ ├── conversation.py │ ├── modeling_deepseek.py │ ├── modeling_deepseek_vl_v2.py │ ├── processing_deepseek_vl_v2.py │ └── siglip_vit.py ├── serve │ ├── __init__.py │ ├── app_modules │ │ ├── __init__.py │ │ ├── gradio_utils.py │ │ ├── overwrites.py │ │ ├── presets.py │ │ └── utils.py │ ├── assets │ │ ├── Kelpy-Codos.js │ │ ├── avatar.png │ │ ├── custom.css │ │ ├── custom.js │ │ ├── favicon.ico │ │ └── simsun.ttc │ └── inference.py └── utils │ ├── __init__.py │ └── io.py ├── images ├── badge.svg ├── grounding_conversation_1.jpeg ├── icl_vg_2.jpeg ├── incontext_visual_grounding_1.jpeg ├── logo.png ├── logo.svg ├── monday.jpg ├── multi_image_1.jpeg ├── multi_image_2.jpeg ├── multi_image_3.jpeg ├── qr.jpeg ├── sample.jpg ├── vg_2.jpeg ├── visual_grounding_1.jpeg ├── visual_grounding_2.jpg ├── visual_grounding_3.png ├── vl2_teaser.jpeg └── vqa_1.jpg ├── inference.py ├── pyproject.toml ├── requirements.txt └── web_demo.py /.editorconfig: -------------------------------------------------------------------------------- 1 | # https://editorconfig.org/ 2 | 3 | root = true 4 | 5 | [*] 6 | charset = utf-8 7 | end_of_line = lf 8 | indent_style = space 9 | indent_size = 4 10 | trim_trailing_whitespace = true 11 | insert_final_newline = true 12 | 13 | [*.py] 14 | indent_size = 4 15 | src_paths=evaluation 16 | 17 | [*.{yaml,yml,json}] 18 | indent_size = 2 19 | 20 | [*.md] 21 | indent_size = 2 22 | x-soft-wrap-text = true 23 | 24 | [*.rst] 25 | indent_size = 4 26 | x-soft-wrap-text = true 27 | 28 | [*.{bib,tex}] 29 | indent_size = 2 30 | 31 | [Makefile] 32 | indent_style = tab 33 | 34 | [*.sh] 35 | indent_style = tab 36 | 37 | [*.bat] 38 | end_of_line = crlf 39 | indent_style = tab 40 | 41 | [*.{cpp,h,cu,cuh}] 42 | indent_size = 2 43 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | max-doc-length = 100 4 | select = B,C,E,F,W,Y,SIM 5 | ignore = 6 | # E203: whitespace before ':' 7 | # W503: line break before binary operator 8 | # W504: line break after binary operator 9 | # format by black 10 | E203,W503,W504, 11 | # E501: line too long 12 | # W505: doc line too long 13 | # too long docstring due to long example blocks 14 | E501,W505, 15 | per-file-ignores = 16 | # F401: module imported but unused 17 | # intentionally unused imports 18 | __init__.py: F401 19 | # F401: module imported but unused 20 | # F403: unable to detect undefined names 21 | # F405: member may be undefined, or defined from star imports 22 | # members populated from optree 23 | # E301: expected 1 blank line 24 | # E302: expected 2 blank lines 25 | # E305: expected 2 blank lines after class or function definition 26 | # E701: multiple statements on one line (colon) 27 | # E704: multiple statements on one line (def) 28 | # format by black 29 | *.pyi: E301,E302,E305,E701,E704 30 | exclude = 31 | .git, 32 | .vscode, 33 | venv, 34 | third-party, 35 | __pycache__, 36 | docs/source/conf.py, 37 | build, 38 | dist, 39 | examples, 40 | tests 41 | -------------------------------------------------------------------------------- /.gitattributes:
-------------------------------------------------------------------------------- 1 | * text eol=lf 2 | *.ipynb linguist-detectable=false 3 | 4 | *.png binary 5 | *.jpg binary 6 | *.jpeg binary 7 | *.gif binary 8 | *.pdf binary 9 | *.ttc binary 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ##### Python.gitignore ##### 2 | # Byte-compiled / optimized / DLL files 3 | __pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | 7 | # C extensions 8 | *.so 9 | 10 | # Distribution / packaging 11 | .Python 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | wheelhouse/ 25 | share/python-wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | *.whl 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .nox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | *.py,cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | cover/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | docs/source/_build/ 77 | _autosummary/ 78 | 79 | # PyBuilder 80 | .pybuilder/ 81 | target/ 82 | 83 | # Jupyter Notebook 84 | .ipynb_checkpoints 85 | 86 | # IPython 87 | profile_default/ 88 | ipython_config.py 89 | 90 | # pyenv 91 | # For a library or package, you might want to ignore these files since the code is 92 | # intended to run in multiple environments; otherwise, check them in: 93 | .python-version 94 | 95 | # pipenv 96 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 97 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 98 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 99 | # install all needed dependencies. 100 | #Pipfile.lock 101 | 102 | # poetry 103 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 104 | # This is especially recommended for binary packages to ensure reproducibility, and is more 105 | # commonly ignored for libraries. 106 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 107 | #poetry.lock 108 | 109 | # pdm 110 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 111 | #pdm.lock 112 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 113 | # in version control. 114 | # https://pdm.fming.dev/#use-with-ide 115 | .pdm.toml 116 | 117 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 118 | __pypackages__/ 119 | 120 | # Celery stuff 121 | celerybeat-schedule 122 | celerybeat.pid 123 | 124 | # SageMath parsed files 125 | *.sage.py 126 | 127 | # Environments 128 | .env 129 | .venv 130 | env/ 131 | venv/ 132 | ENV/ 133 | env.bak/ 134 | venv.bak/ 135 | 136 | # Spyder project settings 137 | .spyderproject 138 | .spyproject 139 | 140 | # Rope project settings 141 | .ropeproject 142 | 143 | # mkdocs documentation 144 | /site 145 | 146 | # ruff 147 | .ruff_cache/ 148 | 149 | # mypy 150 | .mypy_cache/ 151 | .dmypy.json 152 | dmypy.json 153 | 154 | # Pyre type checker 155 | .pyre/ 156 | 157 | # pytype static type analyzer 158 | .pytype/ 159 | 160 | # Cython debug symbols 161 | cython_debug/ 162 | 163 | # PyCharm 164 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 165 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 166 | # and can be added to the global gitignore or merged into this file. For a more nuclear 167 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 168 | .idea/ 169 | 170 | 171 | ##### macOS.gitignore ##### 172 | # General 173 | .DS_Store 174 | .AppleDouble 175 | .LSOverride 176 | 177 | # Icon must end with two \r 178 | Icon 179 | 180 | # Thumbnails 181 | ._* 182 | 183 | # Files that might appear in the root of a volume 184 | .DocumentRevisions-V100 185 | .fseventsd 186 | .Spotlight-V100 187 | .TemporaryItems 188 | .Trashes 189 | .VolumeIcon.icns 190 | .com.apple.timemachine.donotpresent 191 | 192 | # Directories potentially created on remote AFP share 193 | .AppleDB 194 | .AppleDesktop 195 | Network Trash Folder 196 | Temporary Items 197 | .apdisk 198 | 199 | 200 | ##### Linux.gitignore ##### 201 | *~ 202 | 203 | # Temporary files which can be created if a process still has a handle open of a deleted file 204 | .fuse_hidden* 205 | 206 | # KDE directory preferences 207 | .directory 208 | 209 | # Linux trash folder which might appear on any partition or disk 210 | .Trash-* 211 | 212 | # .nfs files are created when an open file is removed but is still being accessed 213 | .nfs* 214 | 215 | 216 | ##### Windows.gitignore ##### 217 | # Windows thumbnail cache files 218 | Thumbs.db 219 | Thumbs.db:encryptable 220 | ehthumbs.db 221 | ehthumbs_vista.db 222 | 223 | # Dump file 224 | *.stackdump 225 | 226 | # Folder config file 227 | [Dd]esktop.ini 228 | 229 | # Recycle Bin used on file shares 230 | $RECYCLE.BIN/ 231 | 232 | # Windows Installer files 233 | *.cab 234 | *.msi 235 | *.msix 236 | *.msm 237 | *.msp 238 | 239 | # Windows shortcuts 240 | *.lnk 241 | 242 | 243 | ##### Archives.gitignore ##### 244 | # It's better to unpack these files and commit the raw source because 245 | # git has its own built in compression methods. 
246 | *.7z 247 | *.jar 248 | *.rar 249 | *.zip 250 | *.gz 251 | *.gzip 252 | *.tgz 253 | *.bzip 254 | *.bzip2 255 | *.bz2 256 | *.xz 257 | *.lzma 258 | *.cab 259 | *.xar 260 | 261 | # Packing-only formats 262 | *.iso 263 | *.tar 264 | 265 | # Package management formats 266 | *.dmg 267 | *.xpi 268 | *.gem 269 | *.egg 270 | *.deb 271 | *.rpm 272 | *.msi 273 | *.msm 274 | *.msp 275 | *.txz 276 | 277 | 278 | ##### Xcode.gitignore ##### 279 | # Xcode 280 | # 281 | # gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore 282 | 283 | ## User settings 284 | xcuserdata/ 285 | 286 | ## Compatibility with Xcode 8 and earlier (ignoring not required starting Xcode 9) 287 | *.xcscmblueprint 288 | *.xccheckout 289 | 290 | ## Compatibility with Xcode 3 and earlier (ignoring not required starting Xcode 4) 291 | build/ 292 | DerivedData/ 293 | *.moved-aside 294 | *.pbxuser 295 | !default.pbxuser 296 | *.mode1v3 297 | !default.mode1v3 298 | *.mode2v3 299 | !default.mode2v3 300 | *.perspectivev3 301 | !default.perspectivev3 302 | 303 | ## Gcc Patch 304 | /*.gcno 305 | 306 | 307 | ##### JetBrains.gitignore ##### 308 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm 309 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 310 | 311 | # User settings 312 | .idea/* 313 | 314 | # User-specific stuff 315 | .idea/**/workspace.xml 316 | .idea/**/tasks.xml 317 | .idea/**/usage.statistics.xml 318 | .idea/**/dictionaries 319 | .idea/**/shelf 320 | 321 | # Generated files 322 | .idea/**/contentModel.xml 323 | 324 | # Sensitive or high-churn files 325 | .idea/**/dataSources/ 326 | .idea/**/dataSources.ids 327 | .idea/**/dataSources.local.xml 328 | .idea/**/sqlDataSources.xml 329 | .idea/**/dynamic.xml 330 | .idea/**/uiDesigner.xml 331 | .idea/**/dbnavigator.xml 332 | 333 | # Gradle 334 | .idea/**/gradle.xml 335 | .idea/**/libraries 336 | 337 | # Gradle and Maven with auto-import 338 | # When using Gradle or Maven with auto-import, you should exclude module files, 339 | # since they will be recreated, and may cause churn. Uncomment if using 340 | # auto-import. 
341 | # .idea/artifacts 342 | # .idea/compiler.xml 343 | # .idea/jarRepositories.xml 344 | # .idea/modules.xml 345 | # .idea/*.iml 346 | # .idea/modules 347 | # *.iml 348 | # *.ipr 349 | 350 | # CMake 351 | cmake-build-*/ 352 | 353 | # Mongo Explorer plugin 354 | .idea/**/mongoSettings.xml 355 | 356 | # File-based project format 357 | *.iws 358 | 359 | # IntelliJ 360 | out/ 361 | 362 | # mpeltonen/sbt-idea plugin 363 | .idea_modules/ 364 | 365 | # JIRA plugin 366 | atlassian-ide-plugin.xml 367 | 368 | # Cursive Clojure plugin 369 | .idea/replstate.xml 370 | 371 | # Crashlytics plugin (for Android Studio and IntelliJ) 372 | com_crashlytics_export_strings.xml 373 | crashlytics.properties 374 | crashlytics-build.properties 375 | fabric.properties 376 | 377 | # Editor-based Rest Client 378 | .idea/httpRequests 379 | 380 | # Android studio 3.1+ serialized cache file 381 | .idea/caches/build_file_checksums.ser 382 | 383 | 384 | ##### VisualStudioCode.gitignore ##### 385 | .vscode/* 386 | # !.vscode/settings.json 387 | # !.vscode/tasks.json 388 | # !.vscode/launch.json 389 | !.vscode/extensions.json 390 | *.code-workspace 391 | 392 | # Local History for Visual Studio Code 393 | .history/ 394 | 395 | 396 | ##### Vim.gitignore ##### 397 | # Swap 398 | .*.s[a-v][a-z] 399 | !*.svg # comment out if you don't need vector files 400 | .*.sw[a-p] 401 | .s[a-rt-v][a-z] 402 | .ss[a-gi-z] 403 | .sw[a-p] 404 | 405 | # Session 406 | Session.vim 407 | Sessionx.vim 408 | 409 | # Temporary 410 | .netrwhist 411 | *~ 412 | # Auto-generated tag files 413 | tags 414 | # Persistent undo 415 | [._]*.un~ 416 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # See https://pre-commit.com for more information 2 | # See https://pre-commit.com/hooks.html for more hooks 3 | ci: 4 | skip: [pylint] 5 | autofix_prs: true 6 | autofix_commit_msg: "fix: [pre-commit.ci] auto fixes [...]" 7 | autoupdate_commit_msg: "chore(pre-commit): [pre-commit.ci] autoupdate" 8 | autoupdate_schedule: monthly 9 | default_stages: [commit, push, manual] 10 | repos: 11 | - repo: https://github.com/pre-commit/pre-commit-hooks 12 | rev: v4.5.0 13 | hooks: 14 | - id: check-symlinks 15 | - id: destroyed-symlinks 16 | - id: trailing-whitespace 17 | - id: end-of-file-fixer 18 | - id: check-yaml 19 | - id: check-toml 20 | - id: check-ast 21 | - id: check-added-large-files 22 | - id: check-merge-conflict 23 | - id: check-executables-have-shebangs 24 | - id: check-shebang-scripts-are-executable 25 | - id: detect-private-key 26 | - id: debug-statements 27 | - id: double-quote-string-fixer 28 | - repo: https://github.com/astral-sh/ruff-pre-commit 29 | rev: v0.1.5 30 | hooks: 31 | - id: ruff 32 | args: [--fix, --exit-non-zero-on-fix] 33 | - repo: https://github.com/PyCQA/isort 34 | rev: 5.12.0 35 | hooks: 36 | - id: isort 37 | - repo: https://github.com/psf/black 38 | rev: 23.11.0 39 | hooks: 40 | - id: black-jupyter 41 | - repo: https://github.com/asottile/pyupgrade 42 | rev: v3.15.0 43 | hooks: 44 | - id: pyupgrade 45 | args: [--py38-plus] # sync with requires-python 46 | exclude: | 47 | (?x)( 48 | ^images/ 49 | ) 50 | - repo: https://github.com/pycqa/flake8 51 | rev: 6.1.0 52 | hooks: 53 | - id: flake8 54 | additional_dependencies: 55 | - flake8-bugbear 56 | - flake8-comprehensions 57 | - flake8-docstrings 58 | - flake8-pyi 59 | - flake8-simplify 60 | exclude: | 61 | (?x)( 62 | ^images/ 63 | ) 64 | - repo: local 65 | 
hooks: 66 | - id: pylint 67 | name: pylint 68 | entry: pylint 69 | language: system 70 | types: [python] 71 | require_serial: true 72 | exclude: | 73 | (?x)( 74 | ^images/ 75 | ) 76 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MAIN] 2 | 3 | # Analyse import fallback blocks. This can be used to support both Python 2 and 4 | # 3 compatible code, which means that the block might have code that exists 5 | # only in one or another interpreter, leading to false positives when analysed. 6 | analyse-fallback-blocks=no 7 | 8 | # Load and enable all available extensions. Use --list-extensions to see a list 9 | # all available extensions. 10 | #enable-all-extensions= 11 | 12 | # In error mode, messages with a category besides ERROR or FATAL are 13 | # suppressed, and no reports are done by default. Error mode is compatible with 14 | # disabling specific errors. 15 | #errors-only= 16 | 17 | # Always return a 0 (non-error) status code, even if lint errors are found. 18 | # This is primarily useful in continuous integration scripts. 19 | #exit-zero= 20 | 21 | # A comma-separated list of package or module names from where C extensions may 22 | # be loaded. Extensions are loading into the active Python interpreter and may 23 | # run arbitrary code. 24 | extension-pkg-allow-list= 25 | 26 | # A comma-separated list of package or module names from where C extensions may 27 | # be loaded. Extensions are loading into the active Python interpreter and may 28 | # run arbitrary code. (This is an alternative name to extension-pkg-allow-list 29 | # for backward compatibility.) 30 | extension-pkg-whitelist= 31 | 32 | # Return non-zero exit code if any of these messages/categories are detected, 33 | # even if score is above --fail-under value. Syntax same as enable. Messages 34 | # specified are enabled, while categories only check already-enabled messages. 35 | fail-on= 36 | 37 | # Specify a score threshold under which the program will exit with error. 38 | fail-under=10 39 | 40 | # Interpret the stdin as a python script, whose filename needs to be passed as 41 | # the module_or_package argument. 42 | #from-stdin= 43 | 44 | # Files or directories to be skipped. They should be base names, not paths. 45 | ignore=CVS,.vscode,.history 46 | 47 | # Add files or directories matching the regular expressions patterns to the 48 | # ignore-list. The regex matches against paths and can be in Posix or Windows 49 | # format. Because '\' represents the directory delimiter on Windows systems, it 50 | # can't be used as an escape character. 51 | ignore-paths=^images/$ 52 | 53 | # Files or directories matching the regular expression patterns are skipped. 54 | # The regex matches against base names, not paths. The default value ignores 55 | # Emacs file locks 56 | ignore-patterns=^\.# 57 | 58 | # List of module names for which member attributes should not be checked 59 | # (useful for modules/projects where namespaces are manipulated during runtime 60 | # and thus existing member attributes cannot be deduced by static analysis). It 61 | # supports qualified module names, as well as Unix pattern matching. 62 | ignored-modules= 63 | 64 | # Python code to execute, usually for sys.path manipulation such as 65 | # pygtk.require(). 66 | #init-hook= 67 | 68 | # Use multiple processes to speed up Pylint. 
Specifying 0 will auto-detect the 69 | # number of processors available to use, and will cap the count on Windows to 70 | # avoid hangs. 71 | jobs=0 72 | 73 | # Control the amount of potential inferred values when inferring a single 74 | # object. This can help the performance when dealing with large functions or 75 | # complex, nested conditions. 76 | limit-inference-results=100 77 | 78 | # List of plugins (as comma separated values of python module names) to load, 79 | # usually to register additional checkers. 80 | load-plugins= 81 | 82 | # Pickle collected data for later comparisons. 83 | persistent=yes 84 | 85 | # Minimum Python version to use for version dependent checks. Will default to 86 | # the version used to run pylint. 87 | py-version=3.8 # the lowest version we support (sync with requires-python in pyproject.toml) 88 | 89 | # Discover python modules and packages in the file system subtree. 90 | recursive=no 91 | 92 | # When enabled, pylint would attempt to guess common misconfiguration and emit 93 | # user-friendly hints instead of false-positive error messages. 94 | suggestion-mode=yes 95 | 96 | # Allow loading of arbitrary C extensions. Extensions are imported into the 97 | # active Python interpreter and may run arbitrary code. 98 | unsafe-load-any-extension=no 99 | 100 | # In verbose mode, extra non-checker-related info will be displayed. 101 | #verbose= 102 | 103 | 104 | [BASIC] 105 | 106 | # Naming style matching correct argument names. 107 | argument-naming-style=snake_case 108 | 109 | # Regular expression matching correct argument names. Overrides argument- 110 | # naming-style. If left empty, argument names will be checked with the set 111 | # naming style. 112 | #argument-rgx= 113 | 114 | # Naming style matching correct attribute names. 115 | attr-naming-style=snake_case 116 | 117 | # Regular expression matching correct attribute names. Overrides attr-naming- 118 | # style. If left empty, attribute names will be checked with the set naming 119 | # style. 120 | #attr-rgx= 121 | 122 | # Bad variable names which should always be refused, separated by a comma. 123 | bad-names=foo, 124 | bar, 125 | baz, 126 | toto, 127 | tutu, 128 | tata 129 | 130 | # Bad variable names regexes, separated by a comma. If names match any regex, 131 | # they will always be refused 132 | bad-names-rgxs= 133 | 134 | # Naming style matching correct class attribute names. 135 | class-attribute-naming-style=any 136 | 137 | # Regular expression matching correct class attribute names. Overrides class- 138 | # attribute-naming-style. If left empty, class attribute names will be checked 139 | # with the set naming style. 140 | #class-attribute-rgx= 141 | 142 | # Naming style matching correct class constant names. 143 | class-const-naming-style=UPPER_CASE 144 | 145 | # Regular expression matching correct class constant names. Overrides class- 146 | # const-naming-style. If left empty, class constant names will be checked with 147 | # the set naming style. 148 | #class-const-rgx= 149 | 150 | # Naming style matching correct class names. 151 | class-naming-style=PascalCase 152 | 153 | # Regular expression matching correct class names. Overrides class-naming- 154 | # style. If left empty, class names will be checked with the set naming style. 155 | #class-rgx= 156 | 157 | # Naming style matching correct constant names. 158 | const-naming-style=UPPER_CASE 159 | 160 | # Regular expression matching correct constant names. Overrides const-naming- 161 | # style. 
If left empty, constant names will be checked with the set naming 162 | # style. 163 | #const-rgx= 164 | 165 | # Minimum line length for functions/classes that require docstrings, shorter 166 | # ones are exempt. 167 | docstring-min-length=-1 168 | 169 | # Naming style matching correct function names. 170 | function-naming-style=snake_case 171 | 172 | # Regular expression matching correct function names. Overrides function- 173 | # naming-style. If left empty, function names will be checked with the set 174 | # naming style. 175 | #function-rgx= 176 | 177 | # Good variable names which should always be accepted, separated by a comma. 178 | good-names=i, 179 | j, 180 | k, 181 | ex, 182 | Run, 183 | _, 184 | op, 185 | fn, 186 | f, 187 | g, 188 | p, 189 | u, 190 | t, 191 | lr, 192 | mu, 193 | nu, 194 | x, 195 | y 196 | 197 | # Good variable names regexes, separated by a comma. If names match any regex, 198 | # they will always be accepted 199 | good-names-rgxs= 200 | 201 | # Include a hint for the correct naming format with invalid-name. 202 | include-naming-hint=no 203 | 204 | # Naming style matching correct inline iteration names. 205 | inlinevar-naming-style=any 206 | 207 | # Regular expression matching correct inline iteration names. Overrides 208 | # inlinevar-naming-style. If left empty, inline iteration names will be checked 209 | # with the set naming style. 210 | #inlinevar-rgx= 211 | 212 | # Naming style matching correct method names. 213 | method-naming-style=snake_case 214 | 215 | # Regular expression matching correct method names. Overrides method-naming- 216 | # style. If left empty, method names will be checked with the set naming style. 217 | #method-rgx= 218 | 219 | # Naming style matching correct module names. 220 | module-naming-style=snake_case 221 | 222 | # Regular expression matching correct module names. Overrides module-naming- 223 | # style. If left empty, module names will be checked with the set naming style. 224 | #module-rgx= 225 | 226 | # Colon-delimited sets of names that determine each other's naming style when 227 | # the name regexes allow several styles. 228 | name-group= 229 | 230 | # Regular expression which should only match function or class names that do 231 | # not require a docstring. 232 | no-docstring-rgx=^_ 233 | 234 | # List of decorators that produce properties, such as abc.abstractproperty. Add 235 | # to this list to register other decorators that produce valid properties. 236 | # These decorators are taken in consideration only for invalid-name. 237 | property-classes=abc.abstractproperty 238 | 239 | # Regular expression matching correct type variable names. If left empty, type 240 | # variable names will be checked with the set naming style. 241 | #typevar-rgx= 242 | 243 | # Naming style matching correct variable names. 244 | variable-naming-style=snake_case 245 | 246 | # Regular expression matching correct variable names. Overrides variable- 247 | # naming-style. If left empty, variable names will be checked with the set 248 | # naming style. 249 | #variable-rgx= 250 | 251 | 252 | [CLASSES] 253 | 254 | # Warn about protected attribute access inside special methods 255 | check-protected-access-in-special-methods=no 256 | 257 | # List of method names used to declare (i.e. assign) instance attributes. 258 | defining-attr-methods=__init__, 259 | __new__, 260 | setUp, 261 | __post_init__ 262 | 263 | # List of member names, which should be excluded from the protected access 264 | # warning. 
265 | exclude-protected=_asdict, 266 | _fields, 267 | _replace, 268 | _source, 269 | _make 270 | 271 | # List of valid names for the first argument in a class method. 272 | valid-classmethod-first-arg=cls 273 | 274 | # List of valid names for the first argument in a metaclass class method. 275 | valid-metaclass-classmethod-first-arg=cls 276 | 277 | 278 | [DESIGN] 279 | 280 | # List of regular expressions of class ancestor names to ignore when counting 281 | # public methods (see R0903) 282 | exclude-too-few-public-methods= 283 | 284 | # List of qualified class names to ignore when counting class parents (see 285 | # R0901) 286 | ignored-parents= 287 | 288 | # Maximum number of arguments for function / method. 289 | max-args=5 290 | 291 | # Maximum number of attributes for a class (see R0902). 292 | max-attributes=7 293 | 294 | # Maximum number of boolean expressions in an if statement (see R0916). 295 | max-bool-expr=5 296 | 297 | # Maximum number of branch for function / method body. 298 | max-branches=12 299 | 300 | # Maximum number of locals for function / method body. 301 | max-locals=15 302 | 303 | # Maximum number of parents for a class (see R0901). 304 | max-parents=7 305 | 306 | # Maximum number of public methods for a class (see R0904). 307 | max-public-methods=20 308 | 309 | # Maximum number of return / yield for function / method body. 310 | max-returns=6 311 | 312 | # Maximum number of statements in function / method body. 313 | max-statements=50 314 | 315 | # Minimum number of public methods for a class (see R0903). 316 | min-public-methods=2 317 | 318 | 319 | [EXCEPTIONS] 320 | 321 | # Exceptions that will emit a warning when caught. 322 | overgeneral-exceptions=builtins.BaseException, 323 | builtins.Exception 324 | 325 | 326 | [FORMAT] 327 | 328 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 329 | expected-line-ending-format= 330 | 331 | # Regexp for a line that is allowed to be longer than the limit. 332 | ignore-long-lines=^\s*(# )?<?https?://\S+>?$ 333 | 334 | # Number of spaces of indent required inside a hanging or continued line. 335 | indent-after-paren=4 336 | 337 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 338 | # tab). 339 | indent-string=' ' 340 | 341 | # Maximum number of characters on a single line. 342 | max-line-length=120 343 | 344 | # Maximum number of lines in a module. 345 | max-module-lines=1000 346 | 347 | # Allow the body of a class to be on the same line as the declaration if body 348 | # contains single statement. 349 | single-line-class-stmt=no 350 | 351 | # Allow the body of an if to be on the same line as the test if there is no 352 | # else. 353 | single-line-if-stmt=no 354 | 355 | 356 | [IMPORTS] 357 | 358 | # List of modules that can be imported at any level, not just the top level 359 | # one. 360 | allow-any-import-level= 361 | 362 | # Allow wildcard imports from modules that define __all__. 363 | allow-wildcard-with-all=no 364 | 365 | # Deprecated modules which should not be used, separated by a comma. 366 | deprecated-modules= 367 | 368 | # Output a graph (.gv or any supported image format) of external dependencies 369 | # to the given file (report RP0402 must not be disabled). 370 | ext-import-graph= 371 | 372 | # Output a graph (.gv or any supported image format) of all (i.e. internal and 373 | # external) dependencies to the given file (report RP0402 must not be 374 | # disabled).
375 | import-graph= 376 | 377 | # Output a graph (.gv or any supported image format) of internal dependencies 378 | # to the given file (report RP0402 must not be disabled). 379 | int-import-graph= 380 | 381 | # Force import order to recognize a module as part of the standard 382 | # compatibility libraries. 383 | known-standard-library= 384 | 385 | # Force import order to recognize a module as part of a third party library. 386 | known-third-party=enchant 387 | 388 | # Couples of modules and preferred modules, separated by a comma. 389 | preferred-modules= 390 | 391 | 392 | [LOGGING] 393 | 394 | # The type of string formatting that logging methods do. `old` means using % 395 | # formatting, `new` is for `{}` formatting. 396 | logging-format-style=old 397 | 398 | # Logging modules to check that the string format arguments are in logging 399 | # function parameter format. 400 | logging-modules=logging 401 | 402 | 403 | [MESSAGES CONTROL] 404 | 405 | # Only show warnings with the listed confidence levels. Leave empty to show 406 | # all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, 407 | # UNDEFINED. 408 | confidence=HIGH, 409 | CONTROL_FLOW, 410 | INFERENCE, 411 | INFERENCE_FAILURE, 412 | UNDEFINED 413 | 414 | # Disable the message, report, category or checker with the given id(s). You 415 | # can either give multiple identifiers separated by comma (,) or put this 416 | # option multiple times (only on the command line, not in the configuration 417 | # file where it should appear only once). You can also use "--disable=all" to 418 | # disable everything first and then re-enable specific checks. For example, if 419 | # you want to run only the similarities checker, you can use "--disable=all 420 | # --enable=similarities". If you want to run only the classes checker, but have 421 | # no Warning level messages displayed, use "--disable=all --enable=classes 422 | # --disable=W". 423 | disable=duplicate-code, 424 | consider-using-from-import 425 | 426 | # Enable the message, report, category or checker with the given id(s). You can 427 | # either give multiple identifier separated by comma (,) or put this option 428 | # multiple time (only on the command line, not in the configuration file where 429 | # it should appear only once). See also the "--disable" option for examples. 430 | enable=c-extension-no-member 431 | 432 | 433 | [METHOD_ARGS] 434 | 435 | # List of qualified names (i.e., library.method) which require a timeout 436 | # parameter e.g. 'requests.api.get,requests.api.post' 437 | timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request 438 | 439 | 440 | [MISCELLANEOUS] 441 | 442 | # List of note tags to take in consideration, separated by a comma. 443 | notes=FIXME, 444 | XXX, 445 | TODO 446 | 447 | # Regular expression of note tags to take in consideration. 448 | notes-rgx= 449 | 450 | 451 | [REFACTORING] 452 | 453 | # Maximum number of nested blocks for function / method body 454 | max-nested-blocks=5 455 | 456 | # Complete name of functions that never returns. When checking for 457 | # inconsistent-return-statements if a never returning function is called then 458 | # it will be considered as an explicit return statement and no message will be 459 | # printed. 460 | never-returning-functions=sys.exit,argparse.parse_error 461 | 462 | 463 | [REPORTS] 464 | 465 | # Python expression which should return a score less than or equal to 10. 
You 466 | # have access to the variables 'fatal', 'error', 'warning', 'refactor', 467 | # 'convention', and 'info' which contain the number of messages in each 468 | # category, as well as 'statement' which is the total number of statements 469 | # analyzed. This score is used by the global evaluation report (RP0004). 470 | evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) 471 | 472 | # Template used to display messages. This is a python new-style format string 473 | # used to format the message information. See doc for all details. 474 | msg-template= 475 | 476 | # Set the output format. Available formats are text, parseable, colorized, json 477 | # and msvs (visual studio). You can also give a reporter class, e.g. 478 | # mypackage.mymodule.MyReporterClass. 479 | #output-format= 480 | 481 | # Tells whether to display a full report or only the messages. 482 | reports=no 483 | 484 | # Activate the evaluation score. 485 | score=yes 486 | 487 | 488 | [SIMILARITIES] 489 | 490 | # Comments are removed from the similarity computation 491 | ignore-comments=yes 492 | 493 | # Docstrings are removed from the similarity computation 494 | ignore-docstrings=yes 495 | 496 | # Imports are removed from the similarity computation 497 | ignore-imports=yes 498 | 499 | # Signatures are removed from the similarity computation 500 | ignore-signatures=yes 501 | 502 | # Minimum lines number of a similarity. 503 | min-similarity-lines=4 504 | 505 | 506 | [SPELLING] 507 | 508 | # Limits count of emitted suggestions for spelling mistakes. 509 | max-spelling-suggestions=4 510 | 511 | # Spelling dictionary name. Available dictionaries: en_AU (hunspell), en_CA 512 | # (hunspell), en_GB (hunspell), en_US (hunspell), en_ZA (hunspell). 513 | spelling-dict= 514 | 515 | # List of comma separated words that should be considered directives if they 516 | # appear at the beginning of a comment and should not be checked. 517 | spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: 518 | 519 | # List of comma separated words that should not be checked. 520 | spelling-ignore-words= 521 | 522 | # A path to a file that contains the private dictionary; one word per line. 523 | spelling-private-dict-file=docs/source/spelling_wordlist.txt 524 | 525 | # Tells whether to store unknown words to the private dictionary (see the 526 | # --spelling-private-dict-file option) instead of raising a message. 527 | spelling-store-unknown-words=no 528 | 529 | 530 | [STRING] 531 | 532 | # This flag controls whether inconsistent-quotes generates a warning when the 533 | # character used as a quote delimiter is used inconsistently within a module. 534 | check-quote-consistency=no 535 | 536 | # This flag controls whether the implicit-str-concat should generate a warning 537 | # on implicit string concatenation in sequences defined over several lines. 538 | check-str-concat-over-line-jumps=no 539 | 540 | 541 | [TYPECHECK] 542 | 543 | # List of decorators that produce context managers, such as 544 | # contextlib.contextmanager. Add to this list to register other decorators that 545 | # produce valid context managers. 546 | contextmanager-decorators=contextlib.contextmanager 547 | 548 | # List of members which are set dynamically and missed by pylint inference 549 | # system, and so shouldn't trigger E1101 when accessed. Python regular 550 | # expressions are accepted. 
551 | generated-members=numpy.*, 552 | torch.* 553 | 554 | # Tells whether missing members accessed in mixin class should be ignored. A 555 | # class is considered mixin if its name matches the mixin-class-rgx option. 556 | ignore-mixin-members=yes 557 | 558 | # Tells whether to warn about missing members when the owner of the attribute 559 | # is inferred to be None. 560 | ignore-none=yes 561 | 562 | # This flag controls whether pylint should warn about no-member and similar 563 | # checks whenever an opaque object is returned when inferring. The inference 564 | # can return multiple potential results while evaluating a Python object, but 565 | # some branches might not be evaluated, which results in partial inference. In 566 | # that case, it might be useful to still emit no-member and other checks for 567 | # the rest of the inferred objects. 568 | ignore-on-opaque-inference=yes 569 | 570 | # List of symbolic message names to ignore for Mixin members. 571 | ignored-checks-for-mixins=no-member, 572 | not-async-context-manager, 573 | not-context-manager, 574 | attribute-defined-outside-init 575 | 576 | # List of class names for which member attributes should not be checked (useful 577 | # for classes with dynamically set attributes). This supports the use of 578 | # qualified names. 579 | ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace 580 | 581 | # Show a hint with possible names when a member name was not found. The aspect 582 | # of finding the hint is based on edit distance. 583 | missing-member-hint=yes 584 | 585 | # The minimum edit distance a name should have in order to be considered a 586 | # similar match for a missing member name. 587 | missing-member-hint-distance=1 588 | 589 | # The total number of similar names that should be taken in consideration when 590 | # showing a hint for a missing member. 591 | missing-member-max-choices=1 592 | 593 | # Regex pattern to define which classes are considered mixins. 594 | mixin-class-rgx=.*[Mm]ixin 595 | 596 | # List of decorators that change the signature of a decorated function. 597 | signature-mutators= 598 | 599 | 600 | [VARIABLES] 601 | 602 | # List of additional names supposed to be defined in builtins. Remember that 603 | # you should avoid defining new builtins when possible. 604 | additional-builtins= 605 | 606 | # Tells whether unused global variables should be treated as a violation. 607 | allow-global-unused-variables=yes 608 | 609 | # List of names allowed to shadow builtins 610 | allowed-redefined-builtins= 611 | 612 | # List of strings which can identify a callback function by name. A callback 613 | # name must start or end with one of those strings. 614 | callbacks=cb_, 615 | _cb 616 | 617 | # A regular expression matching the name of dummy variables (i.e. expected to 618 | # not be used). 619 | dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ 620 | 621 | # Argument names that match this expression will be ignored. 622 | ignored-argument-names=_.*|^ignored_|^unused_ 623 | 624 | # Tells whether we should check for unused import in __init__ files. 625 | init-import=no 626 | 627 | # List of qualified module names which can have objects that can redefine 628 | # builtins. 
629 | redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io 630 | -------------------------------------------------------------------------------- /DeepSeek_VL2_paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/DeepSeek_VL2_paper.pdf -------------------------------------------------------------------------------- /LICENSE-CODE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 DeepSeek 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /LICENSE-MODEL: -------------------------------------------------------------------------------- 1 | DEEPSEEK LICENSE AGREEMENT 2 | 3 | Version 1.0, 23 October 2023 4 | 5 | Copyright (c) 2023 DeepSeek 6 | 7 | Section I: PREAMBLE 8 | 9 | Large generative models are being widely adopted and used, and have the potential to transform the way individuals conceive and benefit from AI or ML technologies. 10 | 11 | Notwithstanding the current and potential benefits that these artifacts can bring to society at large, there are also concerns about potential misuses of them, either due to their technical limitations or ethical considerations. 12 | 13 | In short, this license strives for both the open and responsible downstream use of the accompanying model. When it comes to the open character, we took inspiration from open source permissive licenses regarding the grant of IP rights. Referring to the downstream responsible use, we added use-based restrictions not permitting the use of the model in very specific scenarios, in order for the licensor to be able to enforce the license in case potential misuses of the Model may occur. At the same time, we strive to promote open and responsible research on generative models for content generation. 14 | 15 | Even though downstream derivative versions of the model could be released under different licensing terms, the latter will always have to include - at minimum - the same use-based restrictions as the ones in the original license (this license). We believe in the intersection between open and responsible AI development; thus, this agreement aims to strike a balance between both in order to enable responsible open-science in the field of AI. 
16 | 17 | This License governs the use of the model (and its derivatives) and is informed by the model card associated with the model. 18 | 19 | NOW THEREFORE, You and DeepSeek agree as follows: 20 | 21 | 1. Definitions 22 | "License" means the terms and conditions for use, reproduction, and Distribution as defined in this document. 23 | "Data" means a collection of information and/or content extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License. 24 | "Output" means the results of operating a Model as embodied in informational content resulting therefrom. 25 | "Model" means any accompanying machine-learning based assemblies (including checkpoints), consisting of learnt weights, parameters (including optimizer states), corresponding to the model architecture as embodied in the Complementary Material, that have been trained or tuned, in whole or in part on the Data, using the Complementary Material. 26 | "Derivatives of the Model" means all modifications to the Model, works based on the Model, or any other model which is created or initialized by transfer of patterns of the weights, parameters, activations or output of the Model, to the other model, in order to cause the other model to perform similarly to the Model, including - but not limited to - distillation methods entailing the use of intermediate data representations or methods based on the generation of synthetic data by the Model for training the other model. 27 | "Complementary Material" means the accompanying source code and scripts used to define, run, load, benchmark or evaluate the Model, and used to prepare data for training or evaluation, if any. This includes any accompanying documentation, tutorials, examples, etc, if any. 28 | "Distribution" means any transmission, reproduction, publication or other sharing of the Model or Derivatives of the Model to a third party, including providing the Model as a hosted service made available by electronic or other remote means - e.g. API-based or web access. 29 | "DeepSeek" (or "we") means Beijing DeepSeek Artificial Intelligence Fundamental Technology Research Co., Ltd., Hangzhou DeepSeek Artificial Intelligence Fundamental Technology Research Co., Ltd. and/or any of their affiliates. 30 | "You" (or "Your") means an individual or Legal Entity exercising permissions granted by this License and/or making use of the Model for whichever purpose and in any field of use, including usage of the Model in an end-use application - e.g. chatbot, translator, etc. 31 | "Third Parties" means individuals or legal entities that are not under common control with DeepSeek or You. 32 | 33 | Section II: INTELLECTUAL PROPERTY RIGHTS 34 | 35 | Both copyright and patent grants apply to the Model, Derivatives of the Model and Complementary Material. The Model and Derivatives of the Model are subject to additional terms as described in Section III. 36 | 37 | 2. Grant of Copyright License. Subject to the terms and conditions of this License, DeepSeek hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare, publicly display, publicly perform, sublicense, and distribute the Complementary Material, the Model, and Derivatives of the Model. 38 | 39 | 3. Grant of Patent License. 
Subject to the terms and conditions of this License and where and as applicable, DeepSeek hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this paragraph) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Model and the Complementary Material, where such license applies only to those patent claims licensable by DeepSeek that are necessarily infringed by its contribution(s). If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Model and/or Complementary Material constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for the Model and/or works shall terminate as of the date such litigation is asserted or filed. 40 | 41 | 42 | Section III: CONDITIONS OF USAGE, DISTRIBUTION AND REDISTRIBUTION 43 | 44 | 4. Distribution and Redistribution. You may host for Third Party remote access purposes (e.g. software-as-a-service), reproduce and distribute copies of the Model or Derivatives of the Model thereof in any medium, with or without modifications, provided that You meet the following conditions: 45 | a. Use-based restrictions as referenced in paragraph 5 MUST be included as an enforceable provision by You in any type of legal agreement (e.g. a license) governing the use and/or distribution of the Model or Derivatives of the Model, and You shall give notice to subsequent users You Distribute to, that the Model or Derivatives of the Model are subject to paragraph 5. This provision does not apply to the use of Complementary Material. 46 | b. You must give any Third Party recipients of the Model or Derivatives of the Model a copy of this License; 47 | c. You must cause any modified files to carry prominent notices stating that You changed the files; 48 | d. You must retain all copyright, patent, trademark, and attribution notices excluding those notices that do not pertain to any part of the Model, Derivatives of the Model. 49 | e. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions - respecting paragraph 4.a. – for use, reproduction, or Distribution of Your modifications, or for any such Derivatives of the Model as a whole, provided Your use, reproduction, and Distribution of the Model otherwise complies with the conditions stated in this License. 50 | 51 | 5. Use-based restrictions. The restrictions set forth in Attachment A are considered Use-based restrictions. Therefore You cannot use the Model and the Derivatives of the Model for the specified restricted uses. You may use the Model subject to this License, including only for lawful purposes and in accordance with the License. Use may include creating any content with, finetuning, updating, running, training, evaluating and/or reparametrizing the Model. You shall require all of Your users who use the Model or a Derivative of the Model to comply with the terms of this paragraph (paragraph 5). 52 | 53 | 6. The Output You Generate. Except as set forth herein, DeepSeek claims no rights in the Output You generate using the Model. You are accountable for the Output you generate and its subsequent uses. No use of the output can contravene any provision as stated in the License. 54 | 55 | Section IV: OTHER PROVISIONS 56 | 57 | 7. Updates and Runtime Restrictions. 
To the maximum extent permitted by law, DeepSeek reserves the right to restrict (remotely or otherwise) usage of the Model in violation of this License. 58 | 59 | 8. Trademarks and related. Nothing in this License permits You to make use of DeepSeek’ trademarks, trade names, logos or to otherwise suggest endorsement or misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by DeepSeek. 60 | 61 | 9. Personal information, IP rights and related. This Model may contain personal information and works with IP rights. You commit to complying with applicable laws and regulations in the handling of personal information and the use of such works. Please note that DeepSeek's license granted to you to use the Model does not imply that you have obtained a legitimate basis for processing the related information or works. As an independent personal information processor and IP rights user, you need to ensure full compliance with relevant legal and regulatory requirements when handling personal information and works with IP rights that may be contained in the Model, and are willing to assume solely any risks and consequences that may arise from that. 62 | 63 | 10. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, DeepSeek provides the Model and the Complementary Material on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Model, Derivatives of the Model, and the Complementary Material and assume any risks associated with Your exercise of permissions under this License. 64 | 65 | 11. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall DeepSeek be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Model and the Complementary Material (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if DeepSeek has been advised of the possibility of such damages. 66 | 67 | 12. Accepting Warranty or Additional Liability. While redistributing the Model, Derivatives of the Model and the Complementary Material thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of DeepSeek, and only if You agree to indemnify, defend, and hold DeepSeek harmless for any liability incurred by, or claims asserted against, DeepSeek by reason of your accepting any such warranty or additional liability. 68 | 69 | 13. If any provision of this License is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein. 70 | 71 | 14. Governing Law and Jurisdiction. 
This agreement will be governed and construed under PRC laws without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this agreement. The courts located in the domicile of Hangzhou DeepSeek Artificial Intelligence Fundamental Technology Research Co., Ltd. shall have exclusive jurisdiction of any dispute arising out of this agreement. 72 | 73 | END OF TERMS AND CONDITIONS 74 | 75 | Attachment A 76 | 77 | Use Restrictions 78 | 79 | You agree not to use the Model or Derivatives of the Model: 80 | 81 | - In any way that violates any applicable national or international law or regulation or infringes upon the lawful rights and interests of any third party; 82 | - For military use in any way; 83 | - For the purpose of exploiting, harming or attempting to exploit or harm minors in any way; 84 | - To generate or disseminate verifiably false information and/or content with the purpose of harming others; 85 | - To generate or disseminate inappropriate content subject to applicable regulatory requirements; 86 | - To generate or disseminate personal identifiable information without due authorization or for unreasonable use; 87 | - To defame, disparage or otherwise harass others; 88 | - For fully automated decision making that adversely impacts an individual’s legal rights or otherwise creates or modifies a binding, enforceable obligation; 89 | - For any use intended to or which has the effect of discriminating against or harming individuals or groups based on online or offline social behavior or known or predicted personal or personality characteristics; 90 | - To exploit any of the vulnerabilities of a specific group of persons based on their age, social, physical or mental characteristics, in order to materially distort the behavior of a person pertaining to that group in a manner that causes or is likely to cause that person or another person physical or psychological harm; 91 | - For any use intended to or which has the effect of discriminating against individuals or groups based on legally protected characteristics or categories. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | print-% : ; @echo $* = $($*) 2 | PROJECT_NAME = DeepSeek-VL2 3 | COPYRIGHT = "DeepSeek."
4 | PROJECT_PATH = deepseek_vl2 5 | SHELL = /bin/bash 6 | SOURCE_FOLDERS = deepseek_vl2 7 | PYTHON_FILES = $(shell find $(SOURCE_FOLDERS) -type f -name "*.py" -o -name "*.pyi") inference.py web_demo.py 8 | COMMIT_HASH = $(shell git log -1 --format=%h) 9 | PATH := $(HOME)/go/bin:$(PATH) 10 | PYTHON ?= $(shell command -v python3 || command -v python) 11 | PYTESTOPTS ?= 12 | 13 | .PHONY: default 14 | default: install 15 | 16 | # Tools Installation 17 | 18 | check_pip_install = $(PYTHON) -m pip show $(1) &>/dev/null || (cd && $(PYTHON) -m pip install $(1) --upgrade) 19 | check_pip_install_extra = $(PYTHON) -m pip show $(1) &>/dev/null || (cd && $(PYTHON) -m pip install $(2) --upgrade) 20 | 21 | pylint-install: 22 | $(call check_pip_install_extra,pylint,pylint[spelling]) 23 | $(call check_pip_install,pyenchant) 24 | 25 | flake8-install: 26 | $(call check_pip_install,flake8) 27 | $(call check_pip_install,flake8-bugbear) 28 | $(call check_pip_install,flake8-comprehensions) 29 | $(call check_pip_install,flake8-docstrings) 30 | $(call check_pip_install,flake8-pyi) 31 | $(call check_pip_install,flake8-simplify) 32 | 33 | py-format-install: 34 | $(call check_pip_install,isort) 35 | $(call check_pip_install_extra,black,black[jupyter]) 36 | 37 | ruff-install: 38 | $(call check_pip_install,ruff) 39 | 40 | mypy-install: 41 | $(call check_pip_install,mypy) 42 | 43 | pre-commit-install: 44 | $(call check_pip_install,pre-commit) 45 | $(PYTHON) -m pre_commit install --install-hooks 46 | 47 | go-install: 48 | # requires go >= 1.16 49 | command -v go || (sudo apt-get install -y golang && sudo ln -sf /usr/lib/go/bin/go /usr/bin/go) 50 | 51 | addlicense-install: go-install 52 | command -v addlicense || go install github.com/google/addlicense@latest 53 | 54 | addlicense: addlicense-install 55 | addlicense -c $(COPYRIGHT) -ignore tests/coverage.xml -l mit -y 2023-$(shell date +"%Y") -check $(SOURCE_FOLDERS) 56 | 57 | # Python linters 58 | 59 | pylint: pylint-install 60 | $(PYTHON) -m pylint $(PROJECT_PATH) 61 | 62 | flake8: flake8-install 63 | $(PYTHON) -m flake8 --count --show-source --statistics 64 | 65 | py-format: py-format-install 66 | $(PYTHON) -m isort --project $(PROJECT_PATH) --check $(PYTHON_FILES) && \ 67 | $(PYTHON) -m black --check $(PYTHON_FILES) 68 | 69 | ruff: ruff-install 70 | $(PYTHON) -m ruff check . 71 | 72 | ruff-fix: ruff-install 73 | $(PYTHON) -m ruff check . --fix --exit-non-zero-on-fix 74 | 75 | mypy: mypy-install 76 | $(PYTHON) -m mypy $(PROJECT_PATH) --install-types --non-interactive 77 | 78 | pre-commit: pre-commit-install 79 | $(PYTHON) -m pre_commit run --all-files 80 | 81 | # Utility functions 82 | 83 | lint: ruff flake8 py-format mypy pylint addlicense 84 | 85 | format: py-format-install ruff-install addlicense-install 86 | $(PYTHON) -m isort --project $(PROJECT_PATH) $(PYTHON_FILES) 87 | $(PYTHON) -m black $(PYTHON_FILES) 88 | $(PYTHON) -m ruff check . --fix --exit-zero 89 | addlicense -c $(COPYRIGHT) -ignore tests/coverage.xml -l mit -y 2023-$(shell date +"%Y") $(SOURCE_FOLDERS) inference.py web_demo.py 90 | 91 | clean-py: 92 | find . -type f -name '*.py[co]' -delete 93 | find . -depth -type d -name "__pycache__" -exec rm -r "{}" + 94 | find . -depth -type d -name ".ruff_cache" -exec rm -r "{}" + 95 | find . -depth -type d -name ".mypy_cache" -exec rm -r "{}" + 96 | 97 | clean: clean-py 98 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |
6 | DeepSeek AI 7 |
8 |
9 |
10 | 11 | 12 | Homepage 13 | 14 | 15 | Chat 16 | 17 | 18 | Hugging Face 19 | 20 | 21 |
22 | 23 | 24 |
25 | 26 | 27 | Discord 28 | 29 | 30 | Wechat 31 | 32 | 33 | Twitter Follow 34 | 35 | 36 |
37 | 38 |
39 | 40 | 41 | Code License 42 | 43 | 44 | Model License 45 | 46 |
47 | 48 | 49 |

50 | 📥 Model Download | 51 | ⚡ Quick Start | 52 | 📜 License | 53 | 📖 Citation
54 | 📄 Paper Link | 55 | 📄 Arxiv Paper Link | 56 | 👁️ Demo 57 |

58 |
59 | ## 1. Introduction
60 |
61 | Introducing DeepSeek-VL2, an advanced series of large Mixture-of-Experts (MoE) Vision-Language Models that significantly improves upon its predecessor, DeepSeek-VL. DeepSeek-VL2 demonstrates superior capabilities across various tasks, including but not limited to visual question answering, optical character recognition, document/table/chart understanding, and visual grounding. Our model series is composed of three variants: DeepSeek-VL2-Tiny, DeepSeek-VL2-Small and DeepSeek-VL2, with 1.0B, 2.8B and 4.5B activated parameters respectively.
62 | DeepSeek-VL2 achieves competitive or state-of-the-art performance with similar or fewer activated parameters compared to existing open-source dense and MoE-based models.
63 |
64 |
65 | [DeepSeek-VL2: Mixture-of-Experts Vision-Language Models for Advanced Multimodal Understanding]()
66 |
67 | Zhiyu Wu*, Xiaokang Chen*, Zizheng Pan*, Xingchao Liu*, Wen Liu**, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, Zhenda Xie, Yu Wu, Kai Hu, Jiawei Wang, Yaofeng Sun, Yukun Li, Yishi Piao, Kang Guan, Aixin Liu, Xin Xie, Yuxiang You, Kai Dong, Xingkai Yu, Haowei Zhang, Liang Zhao, Yisong Wang, Chong Ruan*** (* Equal Contribution, ** Project Lead, *** Corresponding author)
68 |
69 | ![](./images/vl2_teaser.jpeg)
70 |
71 | ## 2. Release
72 | ✅ 2025-2-6: A naive Gradio demo implementation on Hugging Face Space [deepseek-vl2-small](https://huggingface.co/spaces/deepseek-ai/deepseek-vl2-small).
73 |
74 | ✅ 2024-12-25: Gradio Demo Example, Incremental Prefilling and VLMEvalKit Support.
75 |
76 | ✅ 2024-12-13: DeepSeek-VL2 family released, including DeepSeek-VL2-tiny, DeepSeek-VL2-small, DeepSeek-VL2.
77 |
78 | ## 3. Model Download
79 |
80 | We release the DeepSeek-VL2 family, including DeepSeek-VL2-tiny, DeepSeek-VL2-small and DeepSeek-VL2,
81 | to support a broader and more diverse range of research within both academic and commercial communities.
82 | Please note that the use of these models is subject to the terms outlined in the [License section](#5-license).
83 |
84 | ### Hugging Face
85 |
86 | | Model | Sequence Length | Download |
87 | |--------------|-----------------|-----------------------------------------------------------------------------|
88 | | DeepSeek-VL2-tiny | 4096 | [🤗 Hugging Face](https://huggingface.co/deepseek-ai/deepseek-vl2-tiny) |
89 | | DeepSeek-VL2-small | 4096 | [🤗 Hugging Face](https://huggingface.co/deepseek-ai/deepseek-vl2-small) |
90 | | DeepSeek-VL2 | 4096 | [🤗 Hugging Face](https://huggingface.co/deepseek-ai/deepseek-vl2) |
91 |
92 |
93 | ## 4. Quick Start
94 |
95 | ### Installation
96 |
97 | In a `Python >= 3.8` environment, install the necessary dependencies by running the following command:
98 |
99 | ```shell
100 | pip install -e .
101 | ```
102 |
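As a quick sanity check that the package is importable (a minimal sketch; it assumes the editable install above succeeded and that a CUDA-capable GPU is visible):

```python
import torch

from deepseek_vl2.models import DeepseekVLV2ForCausalLM, DeepseekVLV2Processor

# expect True on a GPU machine; the inference scripts below require CUDA
print(torch.__version__, torch.cuda.is_available())
print(DeepseekVLV2Processor.__name__, DeepseekVLV2ForCausalLM.__name__)
```
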
103 | ### Simple Inference Example with One Image
104 |
105 | **Note: You may need 80GB of GPU memory to run this script with deepseek-vl2-small, and even more for deepseek-vl2.**
106 |
107 | ```python
108 | import torch
109 | from transformers import AutoModelForCausalLM
110 |
111 | from deepseek_vl2.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
112 | from deepseek_vl2.utils.io import load_pil_images
113 |
114 |
115 | # specify the path to the model
116 | model_path = "deepseek-ai/deepseek-vl2-tiny"
117 | vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
118 | tokenizer = vl_chat_processor.tokenizer
119 |
120 | vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
121 | vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
122 |
123 | ## single image conversation example
124 | ## Please note that <|ref|> and <|/ref|> are designed specifically for the object localization feature. These special tokens are not required for normal conversations.
125 | ## If you would like to experience the grounded captioning functionality (responses that include both object localization and reasoning), you need to add the special token <|grounding|> at the beginning of the prompt. Examples can be found in Figure 9 of our paper.
126 | conversation = [
127 |     {
128 |         "role": "<|User|>",
129 |         "content": "<image>\n<|ref|>The giraffe at the back.<|/ref|>.",
130 |         "images": ["./images/visual_grounding_1.jpeg"],
131 |     },
132 |     {"role": "<|Assistant|>", "content": ""},
133 | ]
134 |
135 | # load images and prepare for inputs
136 | pil_images = load_pil_images(conversation)
137 | prepare_inputs = vl_chat_processor(
138 |     conversations=conversation,
139 |     images=pil_images,
140 |     force_batchify=True,
141 |     system_prompt=""
142 | ).to(vl_gpt.device)
143 |
144 | # run image encoder to get the image embeddings
145 | inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
146 |
147 | # run the model to get the response
148 | outputs = vl_gpt.language.generate(
149 |     inputs_embeds=inputs_embeds,
150 |     attention_mask=prepare_inputs.attention_mask,
151 |     pad_token_id=tokenizer.eos_token_id,
152 |     bos_token_id=tokenizer.bos_token_id,
153 |     eos_token_id=tokenizer.eos_token_id,
154 |     max_new_tokens=512,
155 |     do_sample=False,
156 |     use_cache=True
157 | )
158 |
159 | answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=False)
160 | print(f"{prepare_inputs['sft_format'][0]}", answer)
161 | ```
162 |
163 | And the output is something like:
164 | ```
165 | <|User|>: <image>
166 | <|ref|>The giraffe at the back.<|/ref|>.
167 |
168 | <|Assistant|>: <|ref|>The giraffe at the back.<|/ref|><|det|>[[580, 270, 999, 900]]<|/det|><|end▁of▁sentence|>
169 | ```
170 |
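The box values emitted between `<|det|>` and `<|/det|>` are normalized to a 0-999 grid. The demo helper `parse_ref_bbox` (in `deepseek_vl2/serve/app_modules/utils.py`) maps them back to pixels by scaling against the image size; a minimal sketch of that conversion, reusing the sample box from the output above:

```python
from PIL import Image

image = Image.open("./images/visual_grounding_1.jpeg")
image_w, image_h = image.size

# normalized [x1, y1, x2, y2] taken from the sample <|det|> output above
box = [580, 270, 999, 900]
pixel_box = (
    int(box[0] / 999 * image_w),
    int(box[1] / 999 * image_h),
    int(box[2] / 999 * image_w),
    int(box[3] / 999 * image_h),
)
print(pixel_box)  # pixel-space (x1, y1, x2, y2), ready for ImageDraw.rectangle
```
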
171 | ### Simple Inference Example with Multiple Images
172 |
173 | **Note: You may need 80GB of GPU memory to run this script with deepseek-vl2-small, and even more for deepseek-vl2.**
174 |
175 | ```python
176 | import torch
177 | from transformers import AutoModelForCausalLM
178 |
179 | from deepseek_vl2.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
180 | from deepseek_vl2.utils.io import load_pil_images
181 |
182 |
183 | # specify the path to the model
184 | model_path = "deepseek-ai/deepseek-vl2-tiny"
185 | vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
186 | tokenizer = vl_chat_processor.tokenizer
187 |
188 | vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
189 | vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
190 |
191 | # multiple images/interleaved image-text
192 | conversation = [
193 |     {
194 |         "role": "<|User|>",
195 |         "content": "This is image_1: <image>\n"
196 |                    "This is image_2: <image>\n"
197 |                    "This is image_3: <image>\n Can you tell me what are in the images?",
198 |         "images": [
199 |             "images/multi_image_1.jpeg",
200 |             "images/multi_image_2.jpeg",
201 |             "images/multi_image_3.jpeg",
202 |         ],
203 |     },
204 |     {"role": "<|Assistant|>", "content": ""}
205 | ]
206 |
207 | # load images and prepare for inputs
208 | pil_images = load_pil_images(conversation)
209 | prepare_inputs = vl_chat_processor(
210 |     conversations=conversation,
211 |     images=pil_images,
212 |     force_batchify=True,
213 |     system_prompt=""
214 | ).to(vl_gpt.device)
215 |
216 | # run image encoder to get the image embeddings
217 | inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
218 |
219 | # run the model to get the response
220 | outputs = vl_gpt.language.generate(
221 |     inputs_embeds=inputs_embeds,
222 |     attention_mask=prepare_inputs.attention_mask,
223 |     pad_token_id=tokenizer.eos_token_id,
224 |     bos_token_id=tokenizer.bos_token_id,
225 |     eos_token_id=tokenizer.eos_token_id,
226 |     max_new_tokens=512,
227 |     do_sample=False,
228 |     use_cache=True
229 | )
230 |
231 | answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=False)
232 | print(f"{prepare_inputs['sft_format'][0]}", answer)
233 | ```
234 |
235 | And the output is something like:
236 | ```
237 | <|User|>: This is image_1: <image>
238 | This is image_2: <image>
239 | This is image_3: <image>
240 |  Can you tell me what are in the images?
241 |
242 | <|Assistant|>: The images show three different types of vegetables. Image_1 features carrots, which are orange with green tops. Image_2 displays corn cobs, which are yellow with green husks. Image_3 contains raw pork ribs, which are pinkish-red with some marbling.<|end▁of▁sentence|>
243 | ```
244 |
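`load_pil_images` collects the images referenced by each message, in conversation order. A rough sketch of the idea (the name `load_pil_images_sketch` is hypothetical; the actual helper lives in `deepseek_vl2/utils/io.py` and may handle more cases):

```python
from typing import Dict, List

from PIL import Image


def load_pil_images_sketch(conversation: List[Dict]) -> List[Image.Image]:
    """Open every path listed under a message's "images" key, in order."""
    pil_images = []
    for message in conversation:
        for image_path in message.get("images", []):
            # one PIL image per <image> placeholder in the message content
            pil_images.append(Image.open(image_path).convert("RGB"))
    return pil_images
```
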
245 | ### Simple Inference Example with Incremental Prefilling
246 |
247 | **Note: We use incremental prefilling to run inference within 40GB of GPU memory with deepseek-vl2-small.**
248 |
249 | ```python
250 | import torch
251 | from transformers import AutoModelForCausalLM
252 |
253 | from deepseek_vl2.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM
254 | from deepseek_vl2.utils.io import load_pil_images
255 |
256 |
257 | # specify the path to the model
258 | model_path = "deepseek-ai/deepseek-vl2-small"
259 | vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
260 | tokenizer = vl_chat_processor.tokenizer
261 |
262 | vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
263 | vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
264 |
265 | # multiple images/interleaved image-text
266 | conversation = [
267 |     {
268 |         "role": "<|User|>",
269 |         "content": "This is image_1: <image>\n"
270 |                    "This is image_2: <image>\n"
271 |                    "This is image_3: <image>\n Can you tell me what are in the images?",
272 |         "images": [
273 |             "images/multi_image_1.jpeg",
274 |             "images/multi_image_2.jpeg",
275 |             "images/multi_image_3.jpeg",
276 |         ],
277 |     },
278 |     {"role": "<|Assistant|>", "content": ""}
279 | ]
280 |
281 | # load images and prepare for inputs
282 | pil_images = load_pil_images(conversation)
283 | prepare_inputs = vl_chat_processor(
284 |     conversations=conversation,
285 |     images=pil_images,
286 |     force_batchify=True,
287 |     system_prompt=""
288 | ).to(vl_gpt.device)
289 |
290 | with torch.no_grad():
291 |     # run image encoder to get the image embeddings
292 |     inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
293 |
294 |     # incremental_prefilling when using 40G GPU for vl2-small
295 |     inputs_embeds, past_key_values = vl_gpt.incremental_prefilling(
296 |         input_ids=prepare_inputs.input_ids,
297 |         images=prepare_inputs.images,
298 |         images_seq_mask=prepare_inputs.images_seq_mask,
299 |         images_spatial_crop=prepare_inputs.images_spatial_crop,
300 |         attention_mask=prepare_inputs.attention_mask,
301 |         chunk_size=512  # prefilling chunk size
302 |     )
303 |
304 |     # run the model to get the response
305 |     outputs = vl_gpt.generate(
306 |         inputs_embeds=inputs_embeds,
307 |         input_ids=prepare_inputs.input_ids,
308 |         images=prepare_inputs.images,
309 |         images_seq_mask=prepare_inputs.images_seq_mask,
310 |         images_spatial_crop=prepare_inputs.images_spatial_crop,
311 |         attention_mask=prepare_inputs.attention_mask,
312 |         past_key_values=past_key_values,
313 |
314 |         pad_token_id=tokenizer.eos_token_id,
315 |         bos_token_id=tokenizer.bos_token_id,
316 |         eos_token_id=tokenizer.eos_token_id,
317 |         max_new_tokens=512,
318 |
319 |         do_sample=False,
320 |         use_cache=True,
321 |     )
322 |
323 |     answer = tokenizer.decode(outputs[0][len(prepare_inputs.input_ids[0]):].cpu().tolist(), skip_special_tokens=False)
324 |
325 | print(f"{prepare_inputs['sft_format'][0]}", answer)
326 | ```
327 |
328 | And the output is something like:
329 | ```
330 | <|User|>: This is image_1: <image>
331 | This is image_2: <image>
332 | This is image_3: <image>
333 |  Can you tell me what are in the images?
334 |
335 | <|Assistant|>: The first image contains carrots. The second image contains corn. The third image contains meat.<|end▁of▁sentence|>
336 | ```
337 |
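Incremental prefilling trades latency for memory: instead of attending over the full prompt in a single pass, the prompt embeddings are pushed through the model in fixed-size chunks (512 tokens above) while the KV cache accumulates, so peak activation memory stays bounded. A schematic sketch of the loop with hypothetical names; the real logic is `DeepseekVLV2ForCausalLM.incremental_prefilling` in `modeling_deepseek_vl_v2.py` and also threads through the image tensors and masks:

```python
def prefill_in_chunks(model, inputs_embeds, chunk_size=512):
    # Schematic only: feed the prompt chunk by chunk, growing the KV cache.
    # Attention-mask handling is omitted for brevity.
    past_key_values = None
    prompt_len = inputs_embeds.shape[1]
    for start in range(0, prompt_len - 1, chunk_size):  # leave the tail for generate()
        chunk = inputs_embeds[:, start:start + chunk_size]
        outputs = model(inputs_embeds=chunk, past_key_values=past_key_values, use_cache=True)
        past_key_values = outputs.past_key_values
    return past_key_values
```
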
338 | To parse the bounding box coordinates, please refer to [parse_ref_bbox](https://github.com/deepseek-ai/DeepSeek-VL2/blob/main/deepseek_vl2/serve/app_modules/utils.py#L270-L298).
339 |
340 |
341 | ### Full Inference Example
342 | ```shell
343 | # without incremental prefilling
344 | CUDA_VISIBLE_DEVICES=0 python inference.py --model_path "deepseek-ai/deepseek-vl2"
345 |
346 | # with incremental prefilling, when using a 40GB GPU for vl2-small
347 | CUDA_VISIBLE_DEVICES=0 python inference.py --model_path "deepseek-ai/deepseek-vl2-small" --chunk_size 512
348 |
349 | ```
350 |
351 |
352 | ### Gradio Demo
353 |
354 | * Install the necessary dependencies:
355 | ```shell
356 | pip install -e .[gradio]
357 | ```
358 |
359 | * Then run the following command:
360 |
361 | ```shell
362 | # vl2-tiny, 3.37B-MoE in total, activated 1.0B, can run on a single GPU with less than 40GB of memory
363 | CUDA_VISIBLE_DEVICES=2 python web_demo.py \
364 |     --model_name "deepseek-ai/deepseek-vl2-tiny"  \
365 |     --port 37914
366 |
367 |
368 | # vl2-small, 16.1B-MoE in total, activated 2.4B
369 | # If running on a 40GB A100 GPU, set `--chunk_size 512` to enable incremental prefilling and save memory; responses may be slower.
370 | # If running on a GPU with more than 40GB of memory, you can omit `--chunk_size 512` for faster responses.
371 | CUDA_VISIBLE_DEVICES=2 python web_demo.py \
372 |     --model_name "deepseek-ai/deepseek-vl2-small"  \
373 |     --port 37914 \
374 |     --chunk_size 512
375 |
376 | # vl2, 27.5B-MoE in total, activated 4.2B
377 | CUDA_VISIBLE_DEVICES=2 python web_demo.py \
378 |     --model_name "deepseek-ai/deepseek-vl2"  \
379 |     --port 37914
380 | ```
381 |
382 | * **Important**: This is a basic, native demo implementation without any deployment optimizations, so it may be slow. For production environments, consider optimized deployment solutions such as vllm, sglang, or lmdeploy. These optimizations help achieve faster response times and better cost efficiency.
383 |
384 | ## 5. License
385 |
386 | This code repository is licensed under the [MIT License](./LICENSE-CODE). The use of the DeepSeek-VL2 models is subject to the [DeepSeek Model License](./LICENSE-MODEL). The DeepSeek-VL2 series supports commercial use.
387 |
388 | ## 6. Citation
389 |
390 | ```
391 | @misc{wu2024deepseekvl2mixtureofexpertsvisionlanguagemodels,
392 |       title={DeepSeek-VL2: Mixture-of-Experts Vision-Language Models for Advanced Multimodal Understanding},
393 |       author={Zhiyu Wu and Xiaokang Chen and Zizheng Pan and Xingchao Liu and Wen Liu and Damai Dai and Huazuo Gao and Yiyang Ma and Chengyue Wu and Bingxuan Wang and Zhenda Xie and Yu Wu and Kai Hu and Jiawei Wang and Yaofeng Sun and Yukun Li and Yishi Piao and Kang Guan and Aixin Liu and Xin Xie and Yuxiang You and Kai Dong and Xingkai Yu and Haowei Zhang and Liang Zhao and Yisong Wang and Chong Ruan},
394 |       year={2024},
395 |       eprint={2412.10302},
396 |       archivePrefix={arXiv},
397 |       primaryClass={cs.CV},
398 |       url={https://arxiv.org/abs/2412.10302},
399 | }
400 | ```
401 |
402 | ## 7. Contact
403 |
404 | If you have any questions, please raise an issue or contact us at [service@deepseek.com](mailto:service@deepseek.com).
405 |
--------------------------------------------------------------------------------
/deepseek_vl2/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2023-2024 DeepSeek.
2 | #
3 | # Permission is hereby granted, free of charge, to any person obtaining a copy of
4 | # this software and associated documentation files (the "Software"), to deal in
5 | # the Software without restriction, including without limitation the rights to
6 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
7 | # the Software, and to permit persons to whom the Software is furnished to do so,
8 | # subject to the following conditions:
9 | #
10 | # The above copyright notice and this permission notice shall be included in all
11 | # copies or substantial portions of the Software.
12 | #
13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
15 | # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
16 | # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
17 | # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 |
20 |
21 | # check if the python version is 3.10 or above
22 | import sys
23 |
24 | if sys.version_info >= (3, 10):
25 |     print("Python version is 3.10 or above, patching the collections module.")
26 |     # Monkey patch collections: Python 3.10 removed the ABC aliases (Mapping, Sequence, ...) from the top-level collections module, but some dependencies still import them from there, so re-export them.
27 |     import collections
28 |     import collections.abc
29 |
30 |     for type_name in collections.abc.__all__:
31 |         setattr(collections, type_name, getattr(collections.abc, type_name))
32 |
--------------------------------------------------------------------------------
/deepseek_vl2/models/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2023-2024 DeepSeek.
2 | #
3 | # Permission is hereby granted, free of charge, to any person obtaining a copy of
4 | # this software and associated documentation files (the "Software"), to deal in
5 | # the Software without restriction, including without limitation the rights to
6 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
7 | # the Software, and to permit persons to whom the Software is furnished to do so,
8 | # subject to the following conditions:
9 | #
10 | # The above copyright notice and this permission notice shall be included in all
11 | # copies or substantial portions of the Software.
12 | #
13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
15 | # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
16 | # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
17 | # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 |
20 | from .processing_deepseek_vl_v2 import DeepseekVLV2Processor
21 | from .modeling_deepseek_vl_v2 import DeepseekVLV2ForCausalLM
22 |
23 | __all__ = [
24 |     "DeepseekVLV2Processor",
25 |     "DeepseekVLV2ForCausalLM",
26 | ]
27 |
--------------------------------------------------------------------------------
/deepseek_vl2/models/configuration_deepseek.py:
--------------------------------------------------------------------------------
1 | from transformers.configuration_utils import PretrainedConfig
2 | from transformers.utils import logging
3 |
4 | logger = logging.get_logger(__name__)
5 |
6 | DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
7 | class DeepseekV2Config(PretrainedConfig):
8 |     r"""
9 |     This is the configuration class to store the configuration of a [`DeepseekV2Model`]. It is used to instantiate a DeepSeek
10 |     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
11 |     defaults will yield a similar configuration to that of DeepSeek-V2 with multi-latent attention.
12 |
13 |     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
14 |     documentation from [`PretrainedConfig`] for more information.
15 |
16 |
17 |     Args:
18 |         vocab_size (`int`, *optional*, defaults to 102400):
19 |             Vocabulary size of the DeepSeek model. Defines the number of different tokens that can be represented by the
20 |             `inputs_ids` passed when calling [`DeepseekV2Model`]
21 |         hidden_size (`int`, *optional*, defaults to 4096):
22 |             Dimension of the hidden representations.
23 |         intermediate_size (`int`, *optional*, defaults to 11008):
24 |             Dimension of the MLP representations.
25 |         moe_intermediate_size (`int`, *optional*, defaults to 1407):
26 |             Dimension of the MoE representations.
27 |         num_hidden_layers (`int`, *optional*, defaults to 32):
28 |             Number of hidden layers in the Transformer decoder.
29 |         num_attention_heads (`int`, *optional*, defaults to 32):
30 |             Number of attention heads for each attention layer in the Transformer decoder.
31 |         n_shared_experts (`int`, *optional*, defaults to None):
32 |             Number of shared experts; None means a dense model.
33 |         n_routed_experts (`int`, *optional*, defaults to None):
34 |             Number of routed experts; None means a dense model.
35 |         routed_scaling_factor (`float`, *optional*, defaults to 1.0):
36 |             Scaling factor for routed experts.
37 |         topk_method (`str`, *optional*, defaults to `gready`):
38 |             Top-k method used in the routed gate.
39 |         n_group (`int`, *optional*, defaults to None):
40 |             Number of groups for routed experts.
41 |         topk_group (`int`, *optional*, defaults to None):
42 |             Number of selected groups for each token (ensuring the selected experts are only within `topk_group` groups).
43 |         num_experts_per_tok (`int`, *optional*, defaults to None):
44 |             Number of selected experts; None means a dense model.
45 |         moe_layer_freq (`int`, *optional*, defaults to 1):
46 |             The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
47 |         first_k_dense_replace (`int`, *optional*, defaults to 0):
48 |             Number of dense layers in shallow layers (embed->dense->dense->...->dense->moe->moe...->lm_head).
49 |                                                              \--k dense layers--/
50 |         norm_topk_prob (`bool`, *optional*, defaults to False):
51 |             Whether to normalize the weights of the routed experts.
52 |         scoring_func (`str`, *optional*, defaults to 'softmax'):
53 |             Method of computing expert weights.
54 |         aux_loss_alpha (`float`, *optional*, defaults to 0.001):
55 |             Auxiliary loss weight coefficient.
56 |         seq_aux (`bool`, *optional*, defaults to True):
57 |             Whether to compute the auxiliary loss for each individual sample.
58 |         num_key_value_heads (`int`, *optional*):
59 |             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
60 |             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
61 |             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
62 |             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
63 |             by meanpooling all the original heads within that group. For more details check out [this
64 |             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
65 |             `num_attention_heads`.
66 |         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
67 |             The non-linear activation function (function or string) in the decoder.
68 |         max_position_embeddings (`int`, *optional*, defaults to 2048):
69 |             The maximum sequence length that this model might ever be used with.
70 |         initializer_range (`float`, *optional*, defaults to 0.02):
71 |             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
72 |         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
73 |             The epsilon used by the rms normalization layers.
74 |         use_cache (`bool`, *optional*, defaults to `True`):
75 |             Whether or not the model should return the last key/values attentions (not used by all models). Only
76 |             relevant if `config.is_decoder=True`.
77 |         pad_token_id (`int`, *optional*):
78 |             Padding token id.
79 |         bos_token_id (`int`, *optional*, defaults to 100000):
80 |             Beginning of stream token id.
81 |         eos_token_id (`int`, *optional*, defaults to 100001):
82 |             End of stream token id.
83 |         pretraining_tp (`int`, *optional*, defaults to 1):
84 |             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
85 |             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
86 |             necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
87 |             issue](https://github.com/pytorch/pytorch/issues/76232).
88 |         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
89 |             Whether to tie the word embeddings.
90 |         rope_theta (`float`, *optional*, defaults to 10000.0):
91 |             The base period of the RoPE embeddings.
92 |         rope_scaling (`Dict`, *optional*):
93 |             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
94 |             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
95 |             `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
96 |             `max_position_embeddings` to the expected new maximum.
97 |         attention_bias (`bool`, *optional*, defaults to `False`):
98 |             Whether to use a bias in the query, key, value and output projection layers during self-attention.
99 |         attention_dropout (`float`, *optional*, defaults to 0.0):
100 |             The dropout ratio for the attention probabilities.
101 |         use_mla (`bool`, *optional*, defaults to `True`): Use multi-latent attention or multi-head attention. If True,
102 |             the model will use multi-latent attention; otherwise, it will use multi-head attention.
103 | 104 | ```python 105 | >>> from transformers import DeepseekV2Model, DeepseekV2Config 106 | 107 | >>> # Initializing a Deepseek-V2 style configuration 108 | >>> configuration = DeepseekV2Config() 109 | 110 | >>> # Accessing the model configuration 111 | >>> configuration = model.config 112 | ```""" 113 | 114 | model_type = "deepseek_v2" 115 | keys_to_ignore_at_inference = ["past_key_values"] 116 | 117 | def __init__( 118 | self, 119 | vocab_size=102400, 120 | hidden_size=4096, 121 | intermediate_size=11008, 122 | moe_intermediate_size = 1407, 123 | num_hidden_layers=30, 124 | num_attention_heads=32, 125 | num_key_value_heads=32, 126 | n_shared_experts = None, 127 | n_routed_experts = None, 128 | ep_size = 1, 129 | routed_scaling_factor = 1.0, 130 | kv_lora_rank = 512, 131 | q_lora_rank = 1536, 132 | qk_rope_head_dim = 64, 133 | v_head_dim = 128, 134 | qk_nope_head_dim = 128, 135 | topk_method = 'gready', 136 | n_group = None, 137 | topk_group = None, 138 | num_experts_per_tok = None, 139 | moe_layer_freq = 1, 140 | first_k_dense_replace = 0, 141 | norm_topk_prob = False, 142 | scoring_func = 'softmax', 143 | aux_loss_alpha = 0.001, 144 | seq_aux = True, 145 | hidden_act="silu", 146 | max_position_embeddings=2048, 147 | initializer_range=0.02, 148 | rms_norm_eps=1e-6, 149 | use_cache=True, 150 | pad_token_id=None, 151 | bos_token_id=100000, 152 | eos_token_id=100001, 153 | pretraining_tp=1, 154 | tie_word_embeddings=False, 155 | rope_theta=10000.0, 156 | rope_scaling=None, 157 | attention_bias=False, 158 | attention_dropout=0.0, 159 | use_mla=True, 160 | **kwargs, 161 | ): 162 | self.vocab_size = vocab_size 163 | self.max_position_embeddings = max_position_embeddings 164 | self.hidden_size = hidden_size 165 | self.intermediate_size = intermediate_size 166 | self.moe_intermediate_size = moe_intermediate_size 167 | self.num_hidden_layers = num_hidden_layers 168 | self.num_attention_heads = num_attention_heads 169 | self.n_shared_experts = n_shared_experts 170 | self.n_routed_experts = n_routed_experts 171 | self.ep_size = ep_size 172 | self.routed_scaling_factor = routed_scaling_factor 173 | self.kv_lora_rank = kv_lora_rank 174 | self.q_lora_rank = q_lora_rank 175 | self.qk_rope_head_dim = qk_rope_head_dim 176 | self.v_head_dim = v_head_dim 177 | self.qk_nope_head_dim = qk_nope_head_dim 178 | self.topk_method = topk_method 179 | self.n_group = n_group 180 | self.topk_group = topk_group 181 | self.num_experts_per_tok = num_experts_per_tok 182 | self.moe_layer_freq = moe_layer_freq 183 | self.first_k_dense_replace = first_k_dense_replace 184 | self.norm_topk_prob = norm_topk_prob 185 | self.scoring_func = scoring_func 186 | self.aux_loss_alpha = aux_loss_alpha 187 | self.seq_aux = seq_aux 188 | # for backward compatibility 189 | if num_key_value_heads is None: 190 | num_key_value_heads = num_attention_heads 191 | 192 | self.num_key_value_heads = num_key_value_heads 193 | self.hidden_act = hidden_act 194 | self.initializer_range = initializer_range 195 | self.rms_norm_eps = float(rms_norm_eps) 196 | self.pretraining_tp = pretraining_tp 197 | self.use_cache = use_cache 198 | self.rope_theta = rope_theta 199 | self.rope_scaling = rope_scaling 200 | self.attention_bias = attention_bias 201 | self.attention_dropout = attention_dropout 202 | self.use_mla = use_mla 203 | 204 | super().__init__( 205 | pad_token_id=pad_token_id, 206 | bos_token_id=bos_token_id, 207 | eos_token_id=eos_token_id, 208 | tie_word_embeddings=tie_word_embeddings, 209 | **kwargs, 210 | ) 211 | 
-------------------------------------------------------------------------------- /deepseek_vl2/models/conversation.py: -------------------------------------------------------------------------------- 1 | """ 2 | From https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py 3 | """ 4 | 5 | import dataclasses 6 | from enum import IntEnum, auto 7 | from typing import Any, Dict, List 8 | 9 | 10 | class SeparatorStyle(IntEnum): 11 | """Separator styles.""" 12 | 13 | DeepSeek = auto() 14 | DeepSeekV2 = auto() 15 | PLAIN = auto() 16 | ALIGNMENT = auto() 17 | 18 | 19 | @dataclasses.dataclass 20 | class Conversation: 21 | """A class that manages prompt templates and keeps all conversation history.""" 22 | 23 | # The name of this template 24 | name: str 25 | # The template of the system prompt 26 | system_template: str = "{system_message}" 27 | # The system message 28 | system_message: str = "" 29 | # The names of two roles 30 | roles: List[str] = (("USER", "ASSISTANT"),) 31 | # All messages. Each item is (role, message). 32 | messages: List[List[str]] = () 33 | # The number of few shot examples 34 | offset: int = 0 35 | # The separator style and configurations 36 | sep_style: SeparatorStyle = SeparatorStyle.DeepSeek 37 | sep: str = "\n" 38 | sep2: str = None 39 | # Stop criteria (the default one is EOS token) 40 | stop_str: str = None 41 | # Stops generation if meeting any token in this list 42 | stop_token_ids: List[int] = None 43 | 44 | def get_prompt(self) -> str: 45 | """Get the prompt for generation.""" 46 | system_prompt = self.system_template.format(system_message=self.system_message) 47 | if self.sep_style == SeparatorStyle.DeepSeek: 48 | seps = [self.sep, self.sep2] 49 | if system_prompt == "" or system_prompt is None: 50 | ret = "" 51 | else: 52 | ret = system_prompt + seps[0] 53 | for i, (role, message) in enumerate(self.messages): 54 | if message: 55 | ret += role + ": " + message + seps[i % 2] 56 | else: 57 | ret += role + ":" 58 | return ret 59 | elif self.sep_style == SeparatorStyle.DeepSeekV2: 60 | seps = [self.sep, self.sep2] 61 | if system_prompt == "" or system_prompt is None: 62 | ret = "" 63 | else: 64 | ret = system_prompt + seps[0] 65 | for i, (role, message) in enumerate(self.messages): 66 | if message: 67 | if role == "User": 68 | ret += "<|sft▁begin|>\n" + message + self.sep #<|sft▁begin|>User Input<|sft▁end|>\nResponse<|end▁of▁sentence|> 69 | else: 70 | ret += message + self.sep2 71 | else: 72 | ret = ret 73 | return ret 74 | 75 | elif self.sep_style == SeparatorStyle.PLAIN: 76 | seps = [self.sep, self.sep2] 77 | ret = "" 78 | for i, (role, message) in enumerate(self.messages): 79 | if message: 80 | if type(message) is tuple: 81 | message, _, _ = message 82 | if i % 2 == 0: 83 | ret += message + seps[i % 2] 84 | else: 85 | ret += message + seps[i % 2] 86 | else: 87 | ret += "" 88 | return ret 89 | elif self.sep_style == SeparatorStyle.ALIGNMENT: 90 | seps = [self.sep, self.sep2] 91 | ret = "" 92 | for i, (role, message) in enumerate(self.messages): 93 | if message: 94 | if type(message) is tuple: 95 | message, _, _ = message 96 | if i % 2 == 0: 97 | ret += '\n' + seps[i % 2] 98 | else: 99 | ret += message + seps[i % 2] 100 | else: 101 | ret += "" 102 | return ret 103 | else: 104 | raise ValueError(f"Invalid style: {self.sep_style}") 105 | 106 | def set_system_message(self, system_message: str): 107 | """Set the system message.""" 108 | self.system_message = system_message 109 | 110 | def append_message(self, role: str, message: str): 111 | """Append a new 
message.""" 112 | self.messages.append([role, message]) 113 | 114 | def update_last_message(self, message: str): 115 | """Update the last output. 116 | 117 | The last message is typically set to be None when constructing the prompt, 118 | so we need to update it in-place after getting the response from a model. 119 | """ 120 | self.messages[-1][1] = message 121 | 122 | def reset_message(self): 123 | """Reset a new message.""" 124 | self.messages = [] 125 | 126 | def to_gradio_chatbot(self): 127 | """Convert the conversation to gradio chatbot format.""" 128 | ret = [] 129 | for i, (role, msg) in enumerate(self.messages[self.offset :]): 130 | if i % 2 == 0: 131 | ret.append([msg, None]) 132 | else: 133 | ret[-1][-1] = msg 134 | return ret 135 | 136 | def to_openai_api_messages(self): 137 | """Convert the conversation to OpenAI chat completion format.""" 138 | system_prompt = self.system_template.format(system_message=self.system_message) 139 | ret = [{"role": "system", "content": system_prompt}] 140 | 141 | for i, (_, msg) in enumerate(self.messages[self.offset :]): 142 | if i % 2 == 0: 143 | ret.append({"role": "user", "content": msg}) 144 | else: 145 | if msg is not None: 146 | ret.append({"role": "assistant", "content": msg}) 147 | return ret 148 | 149 | def copy(self): 150 | return Conversation( 151 | name=self.name, 152 | system_template=self.system_template, 153 | system_message=self.system_message, 154 | roles=self.roles, 155 | messages=[[x, y] for x, y in self.messages], 156 | offset=self.offset, 157 | sep_style=self.sep_style, 158 | sep=self.sep, 159 | sep2=self.sep2, 160 | stop_str=self.stop_str, 161 | stop_token_ids=self.stop_token_ids, 162 | ) 163 | 164 | def dict(self): 165 | return { 166 | "template_name": self.name, 167 | "system_message": self.system_message, 168 | "roles": self.roles, 169 | "messages": self.messages, 170 | "offset": self.offset, 171 | } 172 | 173 | 174 | # A global registry for all conversation templates 175 | conv_templates: Dict[str, Conversation] = {} 176 | 177 | 178 | def register_conv_template(template: Conversation, override: bool = False): 179 | """Register a new conversation template.""" 180 | if not override: 181 | assert template.name not in conv_templates, f"{template.name} has been registered." 182 | 183 | conv_templates[template.name] = template 184 | 185 | 186 | def get_conv_template(name: str) -> Conversation: 187 | """Get a conversation template.""" 188 | return conv_templates[name].copy() 189 | 190 | 191 | # register_conv_template( 192 | # Conversation( 193 | # name="deepseek", 194 | # system_template="{system_message}", 195 | # # system_message="You are a helpful assistant. Please answer truthfully and write out your " 196 | # # "thinking step by step to be sure you get the right answer.", 197 | # system_message="", 198 | # roles=("User", "Assistant"), 199 | # messages=(), 200 | # offset=0, 201 | # sep_style=SeparatorStyle.DeepSeek, 202 | # sep="\n\n", 203 | # sep2="<|end▁of▁sentence|>", 204 | # stop_token_ids=[100001], 205 | # stop_str=["User:", "<|end▁of▁sentence|>"] 206 | # ) 207 | # ) 208 | register_conv_template( 209 | Conversation( 210 | name="deepseek", 211 | system_template="{system_message}", 212 | # system_message="You are a helpful assistant. 
Please answer truthfully and write out your "
213 |         #                "thinking step by step to be sure you get the right answer.",
214 |         system_message="",
215 |         roles=("<|User|>", "<|Assistant|>"),
216 |         messages=(),
217 |         offset=0,
218 |         sep_style=SeparatorStyle.DeepSeek,
219 |         sep="\n\n",
220 |         sep2="<|end▁of▁sentence|>",
221 |         stop_token_ids=[100001],
222 |         stop_str=["User:", "<|end▁of▁sentence|>"]
223 |     )
224 | )
225 | # register_conv_template(
226 | #     Conversation(
227 | #         name="deepseekv2",
228 | #         system_template="{system_message}",
229 | #         system_message="",
230 | #         roles=("User", "Assistant"),
231 | #         messages=(),
232 | #         offset=0,
233 | #         sep_style=SeparatorStyle.DeepSeekV2,
234 | #         sep="\n<|sft▁end|>",
235 | #         sep2="<|end▁of▁sentence|>",
236 | #         stop_token_ids=[100001],
237 | #         stop_str=["User:", "<|end▁of▁sentence|>"]
238 | #     )
239 | # )
240 | register_conv_template(
241 |     Conversation(
242 |         name="deepseekv2",
243 |         system_template="{system_message}",
244 |         system_message="",
245 |         roles=("|<User>|", "|<Assistant>|"),
246 |         messages=(),
247 |         offset=0,
248 |         sep_style=SeparatorStyle.DeepSeekV2,
249 |         sep="\n<|sft▁end|>",
250 |         sep2="<|end▁of▁sentence|>",
251 |         stop_token_ids=[100001],
252 |         stop_str=["User:", "<|end▁of▁sentence|>"]
253 |     )
254 | )
255 |
256 |
257 | register_conv_template(
258 |     Conversation(
259 |         name="plain",
260 |         system_template="",
261 |         system_message="",
262 |         roles=("", ""),
263 |         messages=(),
264 |         offset=0,
265 |         sep_style=SeparatorStyle.PLAIN,
266 |         sep="",
267 |         sep2="",
268 |         stop_token_ids=[100001],
269 |         stop_str=[''],
270 |     )
271 | )
272 |
273 |
274 | register_conv_template(
275 |     Conversation(
276 |         name="alignment",
277 |         system_template="",
278 |         system_message="",
279 |         roles=("", ""),
280 |         messages=(),
281 |         offset=0,
282 |         sep_style=SeparatorStyle.ALIGNMENT,
283 |         sep="",
284 |         sep2="",
285 |         stop_token_ids=[100001],
286 |         stop_str=[''],
287 |     )
288 | )
289 |
290 |
291 | if __name__ == "__main__":
292 |     print("deepseek template:")
293 |     conv = get_conv_template("deepseek")
294 |     conv.append_message(conv.roles[0], "Hello!")
295 |     conv.append_message(conv.roles[1], "Hi! This is Tony.")
296 |     conv.append_message(conv.roles[0], "Who are you?")
297 |     conv.append_message(conv.roles[1], "I am a helpful assistant.")
298 |     conv.append_message(conv.roles[0], "How are you?")
299 |     conv.append_message(conv.roles[1], None)
300 |     print(conv.get_prompt())
301 |
302 |     print("deepseekv2 template:")
303 |     conv = get_conv_template("deepseekv2")
304 |     conv.append_message(conv.roles[0], "Hello!")
305 |     conv.append_message(conv.roles[1], "Hi! This is Tony.")
306 |     conv.append_message(conv.roles[0], "Who are you?")
307 |     conv.append_message(conv.roles[1], "I am a helpful assistant.")
308 |     conv.append_message(conv.roles[0], "How are you?")
309 |     conv.append_message(conv.roles[1], None)
310 |     print(conv.get_prompt())
311 |
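For reference, with the active `deepseek` template above (`sep="\n\n"`, `sep2="<|end▁of▁sentence|>"`), the `__main__` demo's first `get_prompt()` call should render the history roughly as follows (a hedged reconstruction derived by hand from the `SeparatorStyle.DeepSeek` branch, not captured program output; the trailing `<|Assistant|>:` comes from the final `None` message):

```
<|User|>: Hello!

<|Assistant|>: Hi! This is Tony.<|end▁of▁sentence|><|User|>: Who are you?

<|Assistant|>: I am a helpful assistant.<|end▁of▁sentence|><|User|>: How are you?

<|Assistant|>:
```
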
This is Tony.") 306 | conv.append_message(conv.roles[0], "Who are you?") 307 | conv.append_message(conv.roles[1], "I am a helpful assistant.") 308 | conv.append_message(conv.roles[0], "How are you?") 309 | conv.append_message(conv.roles[1], None) 310 | print(conv.get_prompt()) 311 | -------------------------------------------------------------------------------- /deepseek_vl2/serve/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/deepseek_vl2/serve/__init__.py -------------------------------------------------------------------------------- /deepseek_vl2/serve/app_modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/deepseek_vl2/serve/app_modules/__init__.py -------------------------------------------------------------------------------- /deepseek_vl2/serve/app_modules/gradio_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023-2024 DeepSeek. 2 | # 3 | # Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | # this software and associated documentation files (the "Software"), to deal in 5 | # the Software without restriction, including without limitation the rights to 6 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | # the Software, and to permit persons to whom the Software is furnished to do so, 8 | # subject to the following conditions: 9 | # 10 | # The above copyright notice and this permission notice shall be included in all 11 | # copies or substantial portions of the Software. 12 | # 13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 15 | # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 16 | # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
19 | 20 | from functools import wraps 21 | 22 | import gradio as gr 23 | 24 | 25 | def wrap_gen_fn(gen_fn): 26 | @wraps(gen_fn) 27 | def wrapped_gen_fn(prompt, *args, **kwargs): 28 | try: 29 | yield from gen_fn(prompt, *args, **kwargs) 30 | except gr.Error as g_err: 31 | raise g_err 32 | except Exception as e: 33 | raise gr.Error(f"Failed to generate text: {e}") from e 34 | 35 | return wrapped_gen_fn 36 | 37 | 38 | def delete_last_conversation(chatbot, history): 39 | if len(history) % 2 != 0: 40 | gr.Error("history length is not even") 41 | return ( 42 | chatbot, 43 | history, 44 | "Delete Done", 45 | ) 46 | 47 | if len(chatbot) > 0: 48 | chatbot.pop() 49 | 50 | if len(history) > 0 and len(history) % 2 == 0: 51 | history.pop() 52 | history.pop() 53 | 54 | return ( 55 | chatbot, 56 | history, 57 | "Delete Done", 58 | ) 59 | 60 | 61 | def reset_state(): 62 | return [], [], None, "Reset Done" 63 | 64 | 65 | def reset_textbox(): 66 | return gr.update(value=""), "" 67 | 68 | 69 | def cancel_outputing(): 70 | return "Stop Done" 71 | 72 | 73 | class State: 74 | interrupted = False 75 | 76 | def interrupt(self): 77 | self.interrupted = True 78 | 79 | def recover(self): 80 | self.interrupted = False 81 | 82 | 83 | shared_state = State() 84 | -------------------------------------------------------------------------------- /deepseek_vl2/serve/app_modules/overwrites.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023-2024 DeepSeek. 2 | # 3 | # Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | # this software and associated documentation files (the "Software"), to deal in 5 | # the Software without restriction, including without limitation the rights to 6 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | # the Software, and to permit persons to whom the Software is furnished to do so, 8 | # subject to the following conditions: 9 | # 10 | # The above copyright notice and this permission notice shall be included in all 11 | # copies or substantial portions of the Software. 12 | # 13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 15 | # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 16 | # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
19 |
20 | from __future__ import annotations
21 |
22 | import logging
23 | from typing import List, Tuple
24 |
25 | from deepseek_vl2.serve.app_modules.presets import gr
26 | from deepseek_vl2.serve.app_modules.utils import convert_asis, convert_mdtext, detect_converted_mark
27 |
28 |
29 | def compact_text_chunks(self, prompt, text_chunks: List[str]) -> List[str]:
30 |     logging.debug("Compacting text chunks...🚀🚀🚀")
31 |     combined_str = [c.strip() for c in text_chunks if c.strip()]
32 |     combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)]
33 |     combined_str = "\n\n".join(combined_str)
34 |     # resplit based on self.max_chunk_overlap
35 |     text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1)
36 |     return text_splitter.split_text(combined_str)
37 |
38 |
39 | def postprocess(
40 |     self, y: List[Tuple[str | None, str | None]]
41 | ) -> List[Tuple[str | None, str | None]]:
42 |     """
43 |     Parameters:
44 |         y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
45 |     Returns:
46 |         List of tuples representing the message and response. Each message and response will be a string of HTML.
47 |     """
48 |     if y is None or y == []:
49 |         return []
50 |     temp = []
51 |     for x in y:
52 |         user, bot = x
53 |         if not detect_converted_mark(user):
54 |             user = convert_asis(user)
55 |         if not detect_converted_mark(bot):
56 |             bot = convert_mdtext(bot)
57 |         temp.append((user, bot))
58 |     return temp
59 |
60 |
61 | with open("deepseek_vl2/serve/assets/custom.js", "r", encoding="utf-8") as f, open(
62 |     "deepseek_vl2/serve/assets/Kelpy-Codos.js", "r", encoding="utf-8"
63 | ) as f2:
64 |     customJS = f.read()
65 |     kelpyCodos = f2.read()
66 |
67 |
68 | def reload_javascript():
69 |     print("Reloading javascript...")
70 |     js = f"<script>{customJS}</script><script>{kelpyCodos}</script>"
71 |
72 |     def template_response(*args, **kwargs):
73 |         res = GradioTemplateResponseOriginal(*args, **kwargs)
74 |         res.body = res.body.replace(b"</html>", f"{js}</html>".encode("utf8"))
75 |         res.init_headers()
76 |         return res
77 |
78 |     gr.routes.templates.TemplateResponse = template_response
79 |
80 |
81 | GradioTemplateResponseOriginal = gr.routes.templates.TemplateResponse
82 |
--------------------------------------------------------------------------------
/deepseek_vl2/serve/app_modules/presets.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2023-2024 DeepSeek.
2 | #
3 | # Permission is hereby granted, free of charge, to any person obtaining a copy of
4 | # this software and associated documentation files (the "Software"), to deal in
5 | # the Software without restriction, including without limitation the rights to
6 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
7 | # the Software, and to permit persons to whom the Software is furnished to do so,
8 | # subject to the following conditions:
9 | #
10 | # The above copyright notice and this permission notice shall be included in all
11 | # copies or substantial portions of the Software.
12 | #
13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
15 | # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR 16 | # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 19 | 20 | # -*- coding:utf-8 -*- 21 | import gradio as gr 22 | 23 | title = """

Chat with DeepSeek-VL2

""" 24 | description_top = """Special Tokens: ``, Visual Grounding: `<|ref|>{query}<|/ref|>`, Grounding Conversation: `<|grounding|>{question}`""" 25 | description = """""" 26 | CONCURRENT_COUNT = 1 27 | MAX_EVENTS = 10 28 | MAX_IMAGE_SIZE = 800 29 | MIN_IMAGE_SIZE = 400 30 | 31 | BOX2COLOR = { 32 | 0: (255, 0, 0), 33 | 1: (0, 255, 0), 34 | 2: (0, 0, 255), 35 | 3: (0, 255, 255), 36 | 4: (255, 255, 0), 37 | 5: (255, 0, 255), 38 | 6: (127, 127, 127), 39 | 7: (255, 255, 127), 40 | 8: (255, 127, 255), 41 | 9: (127, 255, 255), 42 | 10: (127, 127, 255), 43 | 11: (127, 255, 127), 44 | 12: (255, 127, 127), 45 | } 46 | 47 | 48 | ALREADY_CONVERTED_MARK = "" 49 | 50 | small_and_beautiful_theme = gr.themes.Soft( 51 | primary_hue=gr.themes.Color( 52 | c50="#EBFAF2", 53 | c100="#CFF3E1", 54 | c200="#A8EAC8", 55 | c300="#77DEA9", 56 | c400="#3FD086", 57 | c500="#02C160", 58 | c600="#06AE56", 59 | c700="#05974E", 60 | c800="#057F45", 61 | c900="#04673D", 62 | c950="#2E5541", 63 | name="small_and_beautiful", 64 | ), 65 | secondary_hue=gr.themes.Color( 66 | c50="#576b95", 67 | c100="#576b95", 68 | c200="#576b95", 69 | c300="#576b95", 70 | c400="#576b95", 71 | c500="#576b95", 72 | c600="#576b95", 73 | c700="#576b95", 74 | c800="#576b95", 75 | c900="#576b95", 76 | c950="#576b95", 77 | ), 78 | neutral_hue=gr.themes.Color( 79 | name="gray", 80 | c50="#f6f7f8", 81 | # c100="#f3f4f6", 82 | c100="#F2F2F2", 83 | c200="#e5e7eb", 84 | c300="#d1d5db", 85 | c400="#B2B2B2", 86 | c500="#808080", 87 | c600="#636363", 88 | c700="#515151", 89 | c800="#393939", 90 | # c900="#272727", 91 | c900="#2B2B2B", 92 | c950="#171717", 93 | ), 94 | radius_size=gr.themes.sizes.radius_sm, 95 | ).set( 96 | # button_primary_background_fill="*primary_500", 97 | button_primary_background_fill_dark="*primary_600", 98 | # button_primary_background_fill_hover="*primary_400", 99 | # button_primary_border_color="*primary_500", 100 | button_primary_border_color_dark="*primary_600", 101 | button_primary_text_color="white", 102 | button_primary_text_color_dark="white", 103 | button_secondary_background_fill="*neutral_100", 104 | button_secondary_background_fill_hover="*neutral_50", 105 | button_secondary_background_fill_dark="*neutral_900", 106 | button_secondary_text_color="*neutral_800", 107 | button_secondary_text_color_dark="white", 108 | # background_fill_primary="#F7F7F7", 109 | # background_fill_primary_dark="#1F1F1F", 110 | # block_title_text_color="*primary_500", 111 | block_title_background_fill_dark="*primary_900", 112 | block_label_background_fill_dark="*primary_900", 113 | input_background_fill="#F6F6F6", 114 | # chatbot_code_background_color_dark="*neutral_950", 115 | ) 116 | -------------------------------------------------------------------------------- /deepseek_vl2/serve/app_modules/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023-2024 DeepSeek. 2 | # 3 | # Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | # this software and associated documentation files (the "Software"), to deal in 5 | # the Software without restriction, including without limitation the rights to 6 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | # the Software, and to permit persons to whom the Software is furnished to do so, 8 | # subject to the following conditions: 9 | # 10 | # The above copyright notice and this permission notice shall be included in all 11 | # copies or substantial portions of the Software. 
12 | # 13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 15 | # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 16 | # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 19 | 20 | # -*- coding:utf-8 -*- 21 | from __future__ import annotations 22 | 23 | import html 24 | import logging 25 | import io 26 | import os 27 | import re 28 | import base64 29 | import time 30 | from PIL import Image, ImageDraw, ImageFont 31 | 32 | import mdtex2html 33 | from markdown import markdown 34 | from pygments import highlight 35 | from pygments.formatters import HtmlFormatter 36 | from pygments.lexers import ClassNotFound, get_lexer_by_name, guess_lexer 37 | 38 | from deepseek_vl2.serve.app_modules.presets import ( 39 | ALREADY_CONVERTED_MARK, 40 | BOX2COLOR, 41 | MAX_IMAGE_SIZE, 42 | MIN_IMAGE_SIZE 43 | ) 44 | 45 | logger = logging.getLogger("gradio_logger") 46 | 47 | 48 | def configure_logger(): 49 | logger = logging.getLogger("gradio_logger") 50 | logger.setLevel(logging.DEBUG) 51 | 52 | timestr = time.strftime("%Y%m%d-%H%M%S") 53 | os.makedirs("deepseek_vl2/serve/logs", exist_ok=True) 54 | file_handler = logging.FileHandler( 55 | f"deepseek_vl2/serve/logs/{timestr}_gradio_log.log" 56 | ) 57 | console_handler = logging.StreamHandler() 58 | 59 | formatter = logging.Formatter( 60 | "%(asctime)s - %(name)s - %(levelname)s - %(message)s" 61 | ) 62 | console_handler.setFormatter(formatter) 63 | file_handler.setFormatter(formatter) 64 | 65 | console_handler.setLevel(logging.INFO) 66 | file_handler.setLevel(logging.INFO) 67 | 68 | logger.addHandler(console_handler) 69 | logger.addHandler(file_handler) 70 | 71 | return logger 72 | 73 | 74 | def strip_stop_words(x, stop_words): 75 | for w in stop_words: 76 | if w in x: 77 | return x[: x.index(w)].strip() 78 | return x.strip() 79 | 80 | 81 | def format_output(history, text, x): 82 | updated_history = history + [[text, x]] 83 | a = [[y[0], convert_to_markdown(y[1])] for y in updated_history] 84 | return a, updated_history 85 | 86 | 87 | def markdown_to_html_with_syntax_highlight(md_str): # deprecated 88 | def replacer(match): 89 | lang = match.group(1) or "text" 90 | code = match.group(2) 91 | 92 | try: 93 | lexer = get_lexer_by_name(lang, stripall=True) 94 | except ValueError: 95 | lexer = get_lexer_by_name("text", stripall=True) 96 | 97 | formatter = HtmlFormatter() 98 | highlighted_code = highlight(code, lexer, formatter) 99 | 100 | return f'
<pre><code class="{lang}">{highlighted_code}</code></pre>
' 101 | 102 | code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```" 103 | md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE) 104 | 105 | html_str = markdown(md_str) 106 | return html_str 107 | 108 | 109 | def normalize_markdown(md_text: str) -> str: # deprecated 110 | lines = md_text.split("\n") 111 | normalized_lines = [] 112 | inside_list = False 113 | 114 | for i, line in enumerate(lines): 115 | if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()): 116 | if not inside_list and i > 0 and lines[i - 1].strip() != "": 117 | normalized_lines.append("") 118 | inside_list = True 119 | normalized_lines.append(line) 120 | elif inside_list and line.strip() == "": 121 | if i < len(lines) - 1 and not re.match( 122 | r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip() 123 | ): 124 | normalized_lines.append(line) 125 | continue 126 | else: 127 | inside_list = False 128 | normalized_lines.append(line) 129 | 130 | return "\n".join(normalized_lines) 131 | 132 | 133 | def convert_mdtext(md_text): 134 | code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL) 135 | inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL) 136 | code_blocks = code_block_pattern.findall(md_text) 137 | non_code_parts = code_block_pattern.split(md_text)[::2] 138 | 139 | result = [] 140 | for non_code, code in zip(non_code_parts, code_blocks + [""]): 141 | if non_code.strip(): 142 | non_code = normalize_markdown(non_code) 143 | if inline_code_pattern.search(non_code): 144 | result.append(markdown(non_code, extensions=["tables"])) 145 | else: 146 | result.append(mdtex2html.convert(non_code, extensions=["tables"])) 147 | if code.strip(): 148 | code = f"\n```{code}\n\n```" 149 | code = markdown_to_html_with_syntax_highlight(code) 150 | result.append(code) 151 | result = "".join(result) 152 | result += ALREADY_CONVERTED_MARK 153 | return result 154 | 155 | 156 | def convert_asis(userinput): 157 | return f'

<p style="white-space:pre-wrap;">{html.escape(userinput)}</p>
{ALREADY_CONVERTED_MARK}' 158 | 159 | 160 | def is_stop_word_or_prefix(s: str, stop_words: list) -> bool: 161 | return any(s.endswith(stop_word) for stop_word in stop_words) 162 | 163 | 164 | def detect_converted_mark(userinput): 165 | return bool(userinput.endswith(ALREADY_CONVERTED_MARK)) 166 | 167 | 168 | def detect_language(code): 169 | first_line = "" if code.startswith("\n") else code.strip().split("\n", 1)[0] 170 | language = first_line.lower() if first_line else "" 171 | code_without_language = code[len(first_line) :].lstrip() if first_line else code 172 | return language, code_without_language 173 | 174 | 175 | def convert_to_markdown(text): 176 | text = text.replace("$", "$") 177 | text = text.replace("\r\n", "\n") 178 | 179 | def replace_leading_tabs_and_spaces(line): 180 | new_line = [] 181 | 182 | for char in line: 183 | if char == "\t": 184 | new_line.append(" ") 185 | elif char == " ": 186 | new_line.append(" ") 187 | else: 188 | break 189 | return "".join(new_line) + line[len(new_line) :] 190 | 191 | markdown_text = "" 192 | lines = text.split("\n") 193 | in_code_block = False 194 | 195 | for line in lines: 196 | if in_code_block is False and line.startswith("```"): 197 | in_code_block = True 198 | markdown_text += f"{line}\n" 199 | elif in_code_block is True and line.startswith("```"): 200 | in_code_block = False 201 | markdown_text += f"{line}\n" 202 | elif in_code_block: 203 | markdown_text += f"{line}\n" 204 | else: 205 | line = replace_leading_tabs_and_spaces(line) 206 | line = re.sub(r"^(#)", r"\\\1", line) 207 | markdown_text += f"{line} \n" 208 | 209 | return markdown_text 210 | 211 | 212 | def add_language_tag(text): 213 | def detect_language(code_block): 214 | try: 215 | lexer = guess_lexer(code_block) 216 | return lexer.name.lower() 217 | except ClassNotFound: 218 | return "" 219 | 220 | code_block_pattern = re.compile(r"(```)(\w*\n[^`]+```)", re.MULTILINE) 221 | 222 | def replacement(match): 223 | code_block = match.group(2) 224 | if match.group(2).startswith("\n"): 225 | language = detect_language(code_block) 226 | return ( 227 | f"```{language}{code_block}```" if language else f"```\n{code_block}```" 228 | ) 229 | else: 230 | return match.group(1) + code_block + "```" 231 | 232 | text2 = code_block_pattern.sub(replacement, text) 233 | return text2 234 | 235 | 236 | def is_variable_assigned(var_name: str) -> bool: 237 | return var_name in locals() 238 | 239 | 240 | def pil_to_base64( 241 | image: Image.Image, 242 | alt: str = "user upload image", 243 | resize: bool = True, 244 | max_size: int = MAX_IMAGE_SIZE, 245 | min_size: int = MIN_IMAGE_SIZE, 246 | format: str = "JPEG", 247 | quality: int = 95 248 | ) -> str: 249 | 250 | if resize: 251 | max_hw, min_hw = max(image.size), min(image.size) 252 | aspect_ratio = max_hw / min_hw 253 | shortest_edge = int(min(max_size / aspect_ratio, min_size, min_hw)) 254 | longest_edge = int(shortest_edge * aspect_ratio) 255 | W, H = image.size 256 | if H > W: 257 | H, W = longest_edge, shortest_edge 258 | else: 259 | H, W = shortest_edge, longest_edge 260 | image = image.resize((W, H)) 261 | 262 | buffered = io.BytesIO() 263 | image.save(buffered, format=format, quality=quality) 264 | img_b64_str = base64.b64encode(buffered.getvalue()).decode() 265 | img_str = f'{alt}' 266 | 267 | return img_str 268 | 269 | 270 | def parse_ref_bbox(response, image: Image.Image): 271 | try: 272 | image = image.copy() 273 | image_w, image_h = image.size 274 | draw = ImageDraw.Draw(image) 275 | 276 | ref = re.findall(r'<\|ref\|>.*?<\|/ref\|>', 
238 | 
239 | 
240 | def pil_to_base64(
241 |     image: Image.Image,
242 |     alt: str = "user upload image",
243 |     resize: bool = True,
244 |     max_size: int = MAX_IMAGE_SIZE,
245 |     min_size: int = MIN_IMAGE_SIZE,
246 |     format: str = "JPEG",
247 |     quality: int = 95
248 | ) -> str:
249 | 
250 |     if resize:
251 |         max_hw, min_hw = max(image.size), min(image.size)
252 |         aspect_ratio = max_hw / min_hw
253 |         shortest_edge = int(min(max_size / aspect_ratio, min_size, min_hw))
254 |         longest_edge = int(shortest_edge * aspect_ratio)
255 |         W, H = image.size
256 |         if H > W:
257 |             H, W = longest_edge, shortest_edge
258 |         else:
259 |             H, W = shortest_edge, longest_edge
260 |         image = image.resize((W, H))
261 | 
262 |     buffered = io.BytesIO()
263 |     image.save(buffered, format=format, quality=quality)
264 |     img_b64_str = base64.b64encode(buffered.getvalue()).decode()
265 |     img_str = f'<img src="data:image/{format.lower()};base64,{img_b64_str}" alt="{alt}" />'
266 | 
267 |     return img_str
268 | 
269 | 
270 | def parse_ref_bbox(response, image: Image.Image):
271 |     try:
272 |         image = image.copy()
273 |         image_w, image_h = image.size
274 |         draw = ImageDraw.Draw(image)
275 | 
276 |         ref = re.findall(r'<\|ref\|>.*?<\|/ref\|>', response)
277 |         bbox = re.findall(r'<\|det\|>.*?<\|/det\|>', response)
278 |         assert len(ref) == len(bbox)
279 | 
280 |         if len(ref) == 0:
281 |             return None
282 | 
283 |         boxes, labels = [], []
284 |         for box, label in zip(bbox, ref):
285 |             box = box.replace('<|det|>', '').replace('<|/det|>', '')
286 |             label = label.replace('<|ref|>', '').replace('<|/ref|>', '')
287 |             box = box[1:-1]
288 |             for onebox in re.findall(r'\[.*?\]', box):
289 |                 boxes.append([int(v) for v in re.findall(r'-?\d+', onebox)])  # parse "[x1, y1, x2, y2]" without eval-ing model output
290 |                 labels.append(label)
291 | 
292 |         for idx, (box, label) in enumerate(zip(boxes, labels)):
293 |             box = (
294 |                 int(box[0] / 999 * image_w),
295 |                 int(box[1] / 999 * image_h),
296 |                 int(box[2] / 999 * image_w),
297 |                 int(box[3] / 999 * image_h),
298 |             )
299 | 
300 |             box_color = BOX2COLOR[idx % len(BOX2COLOR)]
301 |             box_width = 3
302 |             draw.rectangle(box, outline=box_color, width=box_width)
303 | 
304 |             text_x = box[0]
305 |             text_y = box[1] - 20
306 |             text_color = box_color
307 |             font = ImageFont.truetype("deepseek_vl2/serve/assets/simsun.ttc", size=20)
308 |             draw.text((text_x, text_y), label, font=font, fill=text_color)
309 | 
310 |         # print(f"boxes = {boxes}, labels = {labels}, re-render = {image}")
311 |         return image
312 |     except Exception:
313 |         return None
314 | 
315 | 
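# parse_ref_bbox above relies on the repo's grounded-output convention: each
# <|ref|>label<|/ref|> span is paired with a <|det|>[[x1, y1, x2, y2], ...]<|/det|> span
# whose coordinates live on a 0-999 grid, which is why they are rescaled by /999 before
# drawing. A commented sketch with a made-up model response:
#
#     response = "<|ref|>the giraffe at the back<|/ref|><|det|>[[120, 80, 500, 640]]<|/det|>"
#     annotated = parse_ref_bbox(response, image=pil_image)  # a copy of pil_image with one labeled box, or None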
316 | def display_example(image_list):
317 |     images_html = ""
318 |     for i, img_path in enumerate(image_list):
319 |         image = Image.open(img_path)
320 |         buffered = io.BytesIO()
321 |         image.save(buffered, format="PNG", quality=100)
322 |         img_b64_str = base64.b64encode(buffered.getvalue()).decode()
323 |         img_str = f'<img src="data:image/png;base64,{img_b64_str}" alt="{img_path}" style="height: 80px; margin-right: 10px;" />'
324 |         images_html += img_str
325 | 
326 |     result_html = f"""
327 |     <div style="display: flex; align-items: center;">
328 |         {images_html}
329 |     </div>
330 |     """
331 | 
332 |     return result_html
333 | 
334 | 
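The two image helpers above are what the Gradio app uses to inline pictures: pil_to_base64 turns an upload into a resized, base64-embedded img tag, and display_example builds the thumbnail strip for the examples panel. A minimal standalone sketch, assuming the package is installed and the script runs from the repo root (the gray test image is a stand-in for a real upload):

from PIL import Image

from deepseek_vl2.serve.app_modules.utils import display_example, pil_to_base64

# a synthetic 1280x720 "upload"; any PIL image works
img = Image.new("RGB", (1280, 720), color="gray")
tag = pil_to_base64(img, alt="demo upload", max_size=800, min_size=400)
print(tag[:60], "...")

# thumbnail strip for the examples panel, from a file shipped in the repo
html = display_example(["images/sample.jpg"])
print(html[:60], "...")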
330 | """ 331 | 332 | return result_html 333 | 334 | -------------------------------------------------------------------------------- /deepseek_vl2/serve/assets/Kelpy-Codos.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2023-2024 DeepSeek. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a copy of 5 | * this software and associated documentation files (the "Software"), to deal in 6 | * the Software without restriction, including without limitation the rights to 7 | * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 8 | * the Software, and to permit persons to whom the Software is furnished to do so, 9 | * subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in all 12 | * copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 16 | * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 17 | * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 18 | * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 | */ 21 | 22 | // ==UserScript== 23 | // @name Kelpy Codos 24 | // @namespace https://github.com/Keldos-Li/Kelpy-Codos 25 | // @version 1.0.5 26 | // @author Keldos; https://keldos.me/ 27 | // @description Add copy button to PRE tags before CODE tag, for Chuanhu ChatGPT especially. 28 | // Based on Chuanhu ChatGPT version: ac04408 (2023-3-22) 29 | // @license GPL-3.0 30 | // @grant none 31 | // ==/UserScript== 32 | 33 | (function () { 34 | "use strict"; 35 | 36 | function addCopyButton(pre) { 37 | var code = pre.querySelector("code"); 38 | if (!code) { 39 | return; // 如果没有找到 元素,则不添加按钮 40 | } 41 | var firstChild = code.firstChild; 42 | if (!firstChild) { 43 | return; // 如果 元素没有子节点,则不添加按钮 44 | } 45 | var button = document.createElement("button"); 46 | button.textContent = "\uD83D\uDCCE"; // 使用 📎 符号作为“复制”按钮的文本 47 | button.style.position = "relative"; 48 | button.style.float = "right"; 49 | button.style.fontSize = "1em"; // 可选:调整按钮大小 50 | button.style.background = "none"; // 可选:去掉背景颜色 51 | button.style.border = "none"; // 可选:去掉边框 52 | button.style.cursor = "pointer"; // 可选:显示指针样式 53 | button.addEventListener("click", function () { 54 | var range = document.createRange(); 55 | range.selectNodeContents(code); 56 | range.setStartBefore(firstChild); // 将范围设置为第一个子节点之前 57 | var selection = window.getSelection(); 58 | selection.removeAllRanges(); 59 | selection.addRange(range); 60 | 61 | try { 62 | var success = document.execCommand("copy"); 63 | if (success) { 64 | button.textContent = "\u2714"; 65 | setTimeout(function () { 66 | button.textContent = "\uD83D\uDCCE"; // 恢复按钮为“复制” 67 | }, 2000); 68 | } else { 69 | button.textContent = "\u2716"; 70 | } 71 | } catch (e) { 72 | console.error(e); 73 | button.textContent = "\u2716"; 74 | } 75 | 76 | selection.removeAllRanges(); 77 | }); 78 | code.insertBefore(button, firstChild); // 将按钮插入到第一个子元素之前 79 | } 80 | 81 | function handleNewElements(mutationsList, observer) { 82 | for (var mutation of mutationsList) { 83 | if (mutation.type === "childList") { 84 | for (var node of mutation.addedNodes) { 85 | if (node.nodeName === "PRE") { 86 | 
addCopyButton(node); 87 | } 88 | } 89 | } 90 | } 91 | } 92 | 93 | var observer = new MutationObserver(handleNewElements); 94 | observer.observe(document.documentElement, { 95 | childList: true, 96 | subtree: true, 97 | }); 98 | 99 | document.querySelectorAll("pre").forEach(addCopyButton); 100 | })(); 101 | -------------------------------------------------------------------------------- /deepseek_vl2/serve/assets/avatar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/deepseek_vl2/serve/assets/avatar.png -------------------------------------------------------------------------------- /deepseek_vl2/serve/assets/custom.css: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2023-2024 DeepSeek. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a copy of 5 | * this software and associated documentation files (the "Software"), to deal in 6 | * the Software without restriction, including without limitation the rights to 7 | * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 8 | * the Software, and to permit persons to whom the Software is furnished to do so, 9 | * subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in all 12 | * copies or substantial portions of the Software. 13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 16 | * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 17 | * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 18 | * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
20 | */ 21 | 22 | :root { 23 | --chatbot-color-light: #f3f3f3; 24 | --chatbot-color-dark: #121111; 25 | } 26 | 27 | /* status_display */ 28 | #status_display { 29 | display: flex; 30 | min-height: 2.5em; 31 | align-items: flex-end; 32 | justify-content: flex-end; 33 | } 34 | #status_display p { 35 | font-size: 0.85em; 36 | font-family: monospace; 37 | color: var(--body-text-color-subdued); 38 | } 39 | 40 | /* usage_display */ 41 | #usage_display { 42 | height: 1em; 43 | } 44 | #usage_display p { 45 | padding: 0 1em; 46 | font-size: 0.85em; 47 | font-family: monospace; 48 | color: var(--body-text-color-subdued); 49 | } 50 | /* list */ 51 | ol:not(.options), 52 | ul:not(.options) { 53 | padding-inline-start: 2em !important; 54 | } 55 | 56 | /* Thank @Keldos-Li for fixing it */ 57 | /* Light mode (default) */ 58 | #deepseek_chatbot { 59 | background-color: var(--chatbot-color-light) !important; 60 | color: #000000 !important; 61 | } 62 | [data-testid="bot"] { 63 | background-color: #ffffff !important; 64 | } 65 | [data-testid="user"] { 66 | background-color: #95ec69 !important; 67 | } 68 | 69 | /* Dark mode */ 70 | .dark #deepseek_chatbot { 71 | background-color: var(--chatbot-color-dark) !important; 72 | color: #ffffff !important; 73 | } 74 | .dark [data-testid="bot"] { 75 | background-color: #2c2c2c !important; 76 | } 77 | .dark [data-testid="user"] { 78 | background-color: #26b561 !important; 79 | } 80 | 81 | #deepseek_chatbot { 82 | height: 100%; 83 | min-height: 800px; 84 | flex-grow: 1; 85 | overflow: auto; 86 | } 87 | 88 | [class*="message"] { 89 | border-radius: var(--radius-xl) !important; 90 | border: none; 91 | padding: var(--spacing-xl) !important; 92 | font-size: var(--text-md) !important; 93 | line-height: var(--line-md) !important; 94 | min-height: calc(var(--text-md) * var(--line-md) + 2 * var(--spacing-xl)); 95 | min-width: calc(var(--text-md) * var(--line-md) + 2 * var(--spacing-xl)); 96 | } 97 | [data-testid="bot"] { 98 | max-width: 85%; 99 | border-bottom-left-radius: 0 !important; 100 | } 101 | [data-testid="user"] { 102 | max-width: 85%; 103 | width: auto !important; 104 | border-bottom-right-radius: 0 !important; 105 | } 106 | /* Table */ 107 | table { 108 | margin: 1em 0; 109 | border-collapse: collapse; 110 | empty-cells: show; 111 | } 112 | td, 113 | th { 114 | border: 1.2px solid var(--border-color-primary) !important; 115 | padding: 0.2em; 116 | } 117 | thead { 118 | background-color: rgba(175, 184, 193, 0.2); 119 | } 120 | thead th { 121 | padding: 0.5em 0.2em; 122 | } 123 | /* Inline code */ 124 | #deepseek_chatbot code { 125 | display: inline; 126 | white-space: break-spaces; 127 | border-radius: 6px; 128 | margin: 0 2px 0 2px; 129 | padding: 0.2em 0.4em 0.1em 0.4em; 130 | background-color: rgba(175, 184, 193, 0.2); 131 | } 132 | /* Code block */ 133 | #deepseek_chatbot pre code { 134 | display: block; 135 | overflow: auto; 136 | white-space: pre; 137 | background-color: #1c1d1e !important; 138 | border-radius: 10px; 139 | padding: 1.4em 1.2em 0em 1.4em; 140 | margin: 1.2em 2em 1.2em 0.5em; 141 | color: #fdf8f8; 142 | box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2); 143 | } 144 | /* Hightlight */ 145 | #deepseek_chatbot .highlight { 146 | background-color: transparent; 147 | } 148 | #deepseek_chatbot .highlight .hll { 149 | background-color: #49483e; 150 | } 151 | #deepseek_chatbot .highlight .c { 152 | color: #75715e; 153 | } /* Comment */ 154 | #deepseek_chatbot .highlight .err { 155 | color: #960050; 156 | background-color: #1e0010; 157 | } /* Error */ 158 | 
#deepseek_chatbot .highlight .k { 159 | color: #66d9ef; 160 | } /* Keyword */ 161 | #deepseek_chatbot .highlight .l { 162 | color: #ae81ff; 163 | } /* Literal */ 164 | #deepseek_chatbot .highlight .n { 165 | color: #f8f8f2; 166 | } /* Name */ 167 | #deepseek_chatbot .highlight .o { 168 | color: #f92672; 169 | } /* Operator */ 170 | #deepseek_chatbot .highlight .p { 171 | color: #f8f8f2; 172 | } /* Punctuation */ 173 | #deepseek_chatbot .highlight .ch { 174 | color: #75715e; 175 | } /* Comment.Hashbang */ 176 | #deepseek_chatbot .highlight .cm { 177 | color: #75715e; 178 | } /* Comment.Multiline */ 179 | #deepseek_chatbot .highlight .cp { 180 | color: #75715e; 181 | } /* Comment.Preproc */ 182 | #deepseek_chatbot .highlight .cpf { 183 | color: #75715e; 184 | } /* Comment.PreprocFile */ 185 | #deepseek_chatbot .highlight .c1 { 186 | color: #75715e; 187 | } /* Comment.Single */ 188 | #deepseek_chatbot .highlight .cs { 189 | color: #75715e; 190 | } /* Comment.Special */ 191 | #deepseek_chatbot .highlight .gd { 192 | color: #f92672; 193 | } /* Generic.Deleted */ 194 | #deepseek_chatbot .highlight .ge { 195 | font-style: italic; 196 | } /* Generic.Emph */ 197 | #deepseek_chatbot .highlight .gi { 198 | color: #a6e22e; 199 | } /* Generic.Inserted */ 200 | #deepseek_chatbot .highlight .gs { 201 | font-weight: bold; 202 | } /* Generic.Strong */ 203 | #deepseek_chatbot .highlight .gu { 204 | color: #75715e; 205 | } /* Generic.Subheading */ 206 | #deepseek_chatbot .highlight .kc { 207 | color: #66d9ef; 208 | } /* Keyword.Constant */ 209 | #deepseek_chatbot .highlight .kd { 210 | color: #66d9ef; 211 | } /* Keyword.Declaration */ 212 | #deepseek_chatbot .highlight .kn { 213 | color: #f92672; 214 | } /* Keyword.Namespace */ 215 | #deepseek_chatbot .highlight .kp { 216 | color: #66d9ef; 217 | } /* Keyword.Pseudo */ 218 | #deepseek_chatbot .highlight .kr { 219 | color: #66d9ef; 220 | } /* Keyword.Reserved */ 221 | #deepseek_chatbot .highlight .kt { 222 | color: #66d9ef; 223 | } /* Keyword.Type */ 224 | #deepseek_chatbot .highlight .ld { 225 | color: #e6db74; 226 | } /* Literal.Date */ 227 | #deepseek_chatbot .highlight .m { 228 | color: #ae81ff; 229 | } /* Literal.Number */ 230 | #deepseek_chatbot .highlight .s { 231 | color: #e6db74; 232 | } /* Literal.String */ 233 | #deepseek_chatbot .highlight .na { 234 | color: #a6e22e; 235 | } /* Name.Attribute */ 236 | #deepseek_chatbot .highlight .nb { 237 | color: #f8f8f2; 238 | } /* Name.Builtin */ 239 | #deepseek_chatbot .highlight .nc { 240 | color: #a6e22e; 241 | } /* Name.Class */ 242 | #deepseek_chatbot .highlight .no { 243 | color: #66d9ef; 244 | } /* Name.Constant */ 245 | #deepseek_chatbot .highlight .nd { 246 | color: #a6e22e; 247 | } /* Name.Decorator */ 248 | #deepseek_chatbot .highlight .ni { 249 | color: #f8f8f2; 250 | } /* Name.Entity */ 251 | #deepseek_chatbot .highlight .ne { 252 | color: #a6e22e; 253 | } /* Name.Exception */ 254 | #deepseek_chatbot .highlight .nf { 255 | color: #a6e22e; 256 | } /* Name.Function */ 257 | #deepseek_chatbot .highlight .nl { 258 | color: #f8f8f2; 259 | } /* Name.Label */ 260 | #deepseek_chatbot .highlight .nn { 261 | color: #f8f8f2; 262 | } /* Name.Namespace */ 263 | #deepseek_chatbot .highlight .nx { 264 | color: #a6e22e; 265 | } /* Name.Other */ 266 | #deepseek_chatbot .highlight .py { 267 | color: #f8f8f2; 268 | } /* Name.Property */ 269 | #deepseek_chatbot .highlight .nt { 270 | color: #f92672; 271 | } /* Name.Tag */ 272 | #deepseek_chatbot .highlight .nv { 273 | color: #f8f8f2; 274 | } /* Name.Variable */ 275 | 
#deepseek_chatbot .highlight .ow { 276 | color: #f92672; 277 | } /* Operator.Word */ 278 | #deepseek_chatbot .highlight .w { 279 | color: #f8f8f2; 280 | } /* Text.Whitespace */ 281 | #deepseek_chatbot .highlight .mb { 282 | color: #ae81ff; 283 | } /* Literal.Number.Bin */ 284 | #deepseek_chatbot .highlight .mf { 285 | color: #ae81ff; 286 | } /* Literal.Number.Float */ 287 | #deepseek_chatbot .highlight .mh { 288 | color: #ae81ff; 289 | } /* Literal.Number.Hex */ 290 | #deepseek_chatbot .highlight .mi { 291 | color: #ae81ff; 292 | } /* Literal.Number.Integer */ 293 | #deepseek_chatbot .highlight .mo { 294 | color: #ae81ff; 295 | } /* Literal.Number.Oct */ 296 | #deepseek_chatbot .highlight .sa { 297 | color: #e6db74; 298 | } /* Literal.String.Affix */ 299 | #deepseek_chatbot .highlight .sb { 300 | color: #e6db74; 301 | } /* Literal.String.Backtick */ 302 | #deepseek_chatbot .highlight .sc { 303 | color: #e6db74; 304 | } /* Literal.String.Char */ 305 | #deepseek_chatbot .highlight .dl { 306 | color: #e6db74; 307 | } /* Literal.String.Delimiter */ 308 | #deepseek_chatbot .highlight .sd { 309 | color: #e6db74; 310 | } /* Literal.String.Doc */ 311 | #deepseek_chatbot .highlight .s2 { 312 | color: #e6db74; 313 | } /* Literal.String.Double */ 314 | #deepseek_chatbot .highlight .se { 315 | color: #ae81ff; 316 | } /* Literal.String.Escape */ 317 | #deepseek_chatbot .highlight .sh { 318 | color: #e6db74; 319 | } /* Literal.String.Heredoc */ 320 | #deepseek_chatbot .highlight .si { 321 | color: #e6db74; 322 | } /* Literal.String.Interpol */ 323 | #deepseek_chatbot .highlight .sx { 324 | color: #e6db74; 325 | } /* Literal.String.Other */ 326 | #deepseek_chatbot .highlight .sr { 327 | color: #e6db74; 328 | } /* Literal.String.Regex */ 329 | #deepseek_chatbot .highlight .s1 { 330 | color: #e6db74; 331 | } /* Literal.String.Single */ 332 | #deepseek_chatbot .highlight .ss { 333 | color: #e6db74; 334 | } /* Literal.String.Symbol */ 335 | #deepseek_chatbot .highlight .bp { 336 | color: #f8f8f2; 337 | } /* Name.Builtin.Pseudo */ 338 | #deepseek_chatbot .highlight .fm { 339 | color: #a6e22e; 340 | } /* Name.Function.Magic */ 341 | #deepseek_chatbot .highlight .vc { 342 | color: #f8f8f2; 343 | } /* Name.Variable.Class */ 344 | #deepseek_chatbot .highlight .vg { 345 | color: #f8f8f2; 346 | } /* Name.Variable.Global */ 347 | #deepseek_chatbot .highlight .vi { 348 | color: #f8f8f2; 349 | } /* Name.Variable.Instance */ 350 | #deepseek_chatbot .highlight .vm { 351 | color: #f8f8f2; 352 | } /* Name.Variable.Magic */ 353 | #deepseek_chatbot .highlight .il { 354 | color: #ae81ff; 355 | } /* Literal.Number.Integer.Long */ 356 | -------------------------------------------------------------------------------- /deepseek_vl2/serve/assets/custom.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2023-2024 DeepSeek. 3 | * 4 | * Permission is hereby granted, free of charge, to any person obtaining a copy of 5 | * this software and associated documentation files (the "Software"), to deal in 6 | * the Software without restriction, including without limitation the rights to 7 | * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 8 | * the Software, and to permit persons to whom the Software is furnished to do so, 9 | * subject to the following conditions: 10 | * 11 | * The above copyright notice and this permission notice shall be included in all 12 | * copies or substantial portions of the Software. 
13 | * 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 16 | * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 17 | * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 18 | * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 | */ 21 | 22 | // custom javascript here 23 | -------------------------------------------------------------------------------- /deepseek_vl2/serve/assets/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/deepseek_vl2/serve/assets/favicon.ico -------------------------------------------------------------------------------- /deepseek_vl2/serve/assets/simsun.ttc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/deepseek_vl2/serve/assets/simsun.ttc -------------------------------------------------------------------------------- /deepseek_vl2/serve/inference.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023-2024 DeepSeek. 2 | # 3 | # Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | # this software and associated documentation files (the "Software"), to deal in 5 | # the Software without restriction, including without limitation the rights to 6 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | # the Software, and to permit persons to whom the Software is furnished to do so, 8 | # subject to the following conditions: 9 | # 10 | # The above copyright notice and this permission notice shall be included in all 11 | # copies or substantial portions of the Software. 12 | # 13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 15 | # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 16 | # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
19 | 20 | from threading import Thread 21 | from typing import List 22 | 23 | import torch 24 | import transformers 25 | from transformers import ( 26 | AutoModelForCausalLM, 27 | StoppingCriteria, 28 | StoppingCriteriaList, 29 | TextIteratorStreamer, 30 | ) 31 | 32 | from deepseek_vl2.models import DeepseekVLV2Processor, DeepseekVLV2ForCausalLM 33 | from deepseek_vl2.models.conversation import Conversation 34 | 35 | 36 | def load_model(model_path, dtype=torch.bfloat16): 37 | vl_chat_processor = DeepseekVLV2Processor.from_pretrained(model_path) 38 | tokenizer = vl_chat_processor.tokenizer 39 | 40 | vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained( 41 | model_path, trust_remote_code=True, torch_dtype=dtype 42 | ) 43 | vl_gpt = vl_gpt.cuda().eval() 44 | return tokenizer, vl_gpt, vl_chat_processor 45 | 46 | 47 | def convert_conversation_to_prompts(conversation: Conversation): 48 | conv_prompts = [] 49 | 50 | last_image = None 51 | 52 | messages = conversation.messages 53 | for i in range(0, len(messages), 2): 54 | 55 | if isinstance(messages[i][1], tuple): 56 | text, images = messages[i][1] 57 | last_image = images[-1] 58 | else: 59 | text, images = messages[i][1], [] 60 | 61 | prompt = { 62 | "role": messages[i][0], 63 | "content": text, 64 | "images": images 65 | } 66 | response = {"role": messages[i + 1][0], "content": messages[i + 1][1]} 67 | conv_prompts.extend([prompt, response]) 68 | 69 | return conv_prompts, last_image 70 | 71 | 72 | class StoppingCriteriaSub(StoppingCriteria): 73 | def __init__(self, stops=[], encounters=1): 74 | super().__init__() 75 | self.stops = [stop.to("cuda") for stop in stops] 76 | 77 | def __call__( 78 | self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs 79 | ): 80 | for stop in self.stops: 81 | if input_ids.shape[-1] < len(stop): 82 | continue 83 | if torch.all((stop == input_ids[0][-len(stop) :])).item(): 84 | return True 85 | 86 | return False 87 | 88 | 89 | @torch.inference_mode() 90 | def deepseek_generate( 91 | conversations: list, 92 | vl_gpt: torch.nn.Module, 93 | vl_chat_processor: DeepseekVLV2Processor, 94 | tokenizer: transformers.PreTrainedTokenizer, 95 | stop_words: list, 96 | max_length: int = 256, 97 | temperature: float = 1.0, 98 | top_p: float = 1.0, 99 | repetition_penalty: float = 1.1, 100 | chunk_size: int = -1 101 | ): 102 | pil_images = [] 103 | for message in conversations: 104 | if "images" not in message: 105 | continue 106 | pil_images.extend(message["images"]) 107 | 108 | prepare_inputs = vl_chat_processor.__call__( 109 | conversations=conversations, 110 | images=pil_images, 111 | inference_mode=True, 112 | force_batchify=True, 113 | system_prompt="" 114 | ).to(vl_gpt.device) 115 | 116 | return generate( 117 | vl_gpt, 118 | tokenizer, 119 | prepare_inputs, 120 | max_gen_len=max_length, 121 | temperature=temperature, 122 | repetition_penalty=repetition_penalty, 123 | top_p=top_p, 124 | stop_words=stop_words, 125 | chunk_size=chunk_size 126 | ) 127 | 128 | 129 | @torch.inference_mode() 130 | def generate( 131 | vl_gpt, 132 | tokenizer, 133 | prepare_inputs, 134 | max_gen_len: int = 256, 135 | temperature: float = 0, 136 | repetition_penalty=1.1, 137 | top_p: float = 0.95, 138 | stop_words: List[str] = [], 139 | chunk_size: int = -1 140 | ): 141 | """Stream the text output from the multimodality model with prompt and image inputs.""" 142 | streamer = TextIteratorStreamer(tokenizer, skip_prompt=True) 143 | 144 | stop_words_ids = [ 145 | torch.tensor(tokenizer.encode(stop_word)) for stop_word in 
stop_words 146 | ] 147 | stopping_criteria = StoppingCriteriaList( 148 | [StoppingCriteriaSub(stops=stop_words_ids)] 149 | ) 150 | 151 | if chunk_size != -1: 152 | inputs_embeds, past_key_values = vl_gpt.incremental_prefilling( 153 | input_ids=prepare_inputs.input_ids, 154 | images=prepare_inputs.images, 155 | images_seq_mask=prepare_inputs.images_seq_mask, 156 | images_spatial_crop=prepare_inputs.images_spatial_crop, 157 | attention_mask=prepare_inputs.attention_mask, 158 | chunk_size=chunk_size 159 | ) 160 | else: 161 | inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs) 162 | past_key_values = None 163 | 164 | generation_config = dict( 165 | inputs_embeds=inputs_embeds, 166 | input_ids=prepare_inputs.input_ids, 167 | images=prepare_inputs.images, 168 | images_seq_mask=prepare_inputs.images_seq_mask, 169 | images_spatial_crop=prepare_inputs.images_spatial_crop, 170 | attention_mask=prepare_inputs.attention_mask, 171 | past_key_values=past_key_values, 172 | pad_token_id=tokenizer.eos_token_id, 173 | bos_token_id=tokenizer.bos_token_id, 174 | eos_token_id=tokenizer.eos_token_id, 175 | max_new_tokens=max_gen_len, 176 | do_sample=True, 177 | use_cache=True, 178 | streamer=streamer, 179 | stopping_criteria=stopping_criteria, 180 | ) 181 | 182 | if temperature > 0: 183 | generation_config.update( 184 | { 185 | "do_sample": True, 186 | "top_p": top_p, 187 | "temperature": temperature, 188 | "repetition_penalty": repetition_penalty, 189 | } 190 | ) 191 | else: 192 | generation_config["do_sample"] = False 193 | 194 | thread = Thread(target=vl_gpt.generate, kwargs=generation_config) 195 | thread.start() 196 | 197 | yield from streamer 198 | -------------------------------------------------------------------------------- /deepseek_vl2/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023-2024 DeepSeek. 2 | # 3 | # Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | # this software and associated documentation files (the "Software"), to deal in 5 | # the Software without restriction, including without limitation the rights to 6 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | # the Software, and to permit persons to whom the Software is furnished to do so, 8 | # subject to the following conditions: 9 | # 10 | # The above copyright notice and this permission notice shall be included in all 11 | # copies or substantial portions of the Software. 12 | # 13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 15 | # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 16 | # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 19 | -------------------------------------------------------------------------------- /deepseek_vl2/utils/io.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023-2024 DeepSeek. 
2 | #
3 | # Permission is hereby granted, free of charge, to any person obtaining a copy of
4 | # this software and associated documentation files (the "Software"), to deal in
5 | # the Software without restriction, including without limitation the rights to
6 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
7 | # the Software, and to permit persons to whom the Software is furnished to do so,
8 | # subject to the following conditions:
9 | #
10 | # The above copyright notice and this permission notice shall be included in all
11 | # copies or substantial portions of the Software.
12 | #
13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
15 | # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
16 | # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
17 | # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 | 
20 | import json
21 | from typing import Dict, List
22 | 
23 | import PIL.Image
24 | import torch
25 | from transformers import AutoModelForCausalLM
26 | 
27 | 
28 | def load_pretrained_model(model_path: str):
29 | 
30 |     from deepseek_vl2.models.processing_deepseek_vl_v2 import DeepseekVLV2Processor
31 |     from deepseek_vl2.models.modeling_deepseek_vl_v2 import DeepseekVLV2ForCausalLM
32 | 
33 |     vl_chat_processor = DeepseekVLV2Processor.from_pretrained(model_path)
34 |     tokenizer = vl_chat_processor.tokenizer
35 | 
36 |     vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(
37 |         model_path, trust_remote_code=True
38 |     )
39 |     vl_gpt = vl_gpt.to(torch.bfloat16).cuda().eval()
40 | 
41 |     return tokenizer, vl_chat_processor, vl_gpt
42 | 
43 | 
44 | def load_pil_images(conversations: List[Dict[str, str]]) -> List[PIL.Image.Image]:
45 |     """
46 | 
47 |     Args:
48 |         conversations (List[Dict[str, str]]): the conversations with a list of messages. An example is:
49 |             [
50 |                 {
51 |                     "role": "User",
52 |                     "content": "<image>\nExtract all information from this image and convert them into markdown format.",
53 |                     "images": ["./examples/table_datasets.png"]
54 |                 },
55 |                 {"role": "Assistant", "content": ""},
56 |             ]
57 | 
58 |     Returns:
59 |         pil_images (List[PIL.Image.Image]): the list of PIL images.
60 | 
61 |     """
62 | 
63 |     pil_images = []
64 | 
65 |     for message in conversations:
66 |         if "images" not in message:
67 |             continue
68 | 
69 |         for image_path in message["images"]:
70 |             pil_img = PIL.Image.open(image_path)
71 |             pil_img = pil_img.convert("RGB")
72 |             pil_images.append(pil_img)
73 | 
74 |     return pil_images
75 | 
76 | 
77 | def load_json(filepath):
78 |     with open(filepath, "r") as f:
79 |         data = json.load(f)
80 |     return data
81 | 
-------------------------------------------------------------------------------- /images/badge.svg: --------------------------------------------------------------------------------
1 | (SVG markup stripped during extraction; the badge renders the texts "DeepSeek" and "Homepage" with the title "DeepSeek: Homepage")
2 | 
-------------------------------------------------------------------------------- /images/grounding_conversation_1.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/grounding_conversation_1.jpeg
-------------------------------------------------------------------------------- /images/icl_vg_2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/icl_vg_2.jpeg
-------------------------------------------------------------------------------- /images/incontext_visual_grounding_1.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/incontext_visual_grounding_1.jpeg
-------------------------------------------------------------------------------- /images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/logo.png
-------------------------------------------------------------------------------- /images/logo.svg: --------------------------------------------------------------------------------
1 | (SVG markup stripped during extraction; only the editor metadata "Created with Pixso." survived)
-------------------------------------------------------------------------------- /images/monday.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/monday.jpg
-------------------------------------------------------------------------------- /images/multi_image_1.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/multi_image_1.jpeg
-------------------------------------------------------------------------------- /images/multi_image_2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/multi_image_2.jpeg
-------------------------------------------------------------------------------- /images/multi_image_3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/multi_image_3.jpeg
-------------------------------------------------------------------------------- /images/qr.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/qr.jpeg
-------------------------------------------------------------------------------- /images/sample.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/sample.jpg
-------------------------------------------------------------------------------- /images/vg_2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/vg_2.jpeg
-------------------------------------------------------------------------------- /images/visual_grounding_1.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/visual_grounding_1.jpeg
-------------------------------------------------------------------------------- /images/visual_grounding_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/visual_grounding_2.jpg
-------------------------------------------------------------------------------- /images/visual_grounding_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/visual_grounding_3.png
-------------------------------------------------------------------------------- /images/vl2_teaser.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/vl2_teaser.jpeg
-------------------------------------------------------------------------------- /images/vqa_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/deepseek-ai/DeepSeek-VL2/ef9f91e2b6426536b83294c11742c27be66361b1/images/vqa_1.jpg
-------------------------------------------------------------------------------- /inference.py: --------------------------------------------------------------------------------
1 | # Copyright (c) 2023-2024 DeepSeek.
2 | #
3 | # Permission is hereby granted, free of charge, to any person obtaining a copy of
4 | # this software and associated documentation files (the "Software"), to deal in
5 | # the Software without restriction, including without limitation the rights to
6 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
7 | # the Software, and to permit persons to whom the Software is furnished to do so,
8 | # subject to the following conditions:
9 | #
10 | # The above copyright notice and this permission notice shall be included in all
11 | # copies or substantial portions of the Software.
12 | #
13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
15 | # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
16 | # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
17 | # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 | 
20 | from argparse import ArgumentParser
21 | from typing import List, Dict
22 | import torch
23 | from transformers import AutoModelForCausalLM
24 | import PIL.Image
25 | 
26 | from deepseek_vl2.models import DeepseekVLV2ForCausalLM, DeepseekVLV2Processor
27 | from deepseek_vl2.serve.app_modules.utils import parse_ref_bbox
28 | 
29 | 
30 | def load_pil_images(conversations: List[Dict[str, str]]) -> List[PIL.Image.Image]:
31 |     """
32 | 
33 |     Args:
34 |         conversations (List[Dict[str, str]]): the conversations with a list of messages. An example is:
35 |             [
36 |                 {
37 |                     "role": "User",
38 |                     "content": "<image>\nExtract all information from this image and convert them into markdown format.",
39 |                     "images": ["./examples/table_datasets.png"]
40 |                 },
41 |                 {"role": "Assistant", "content": ""},
42 |             ]
43 | 
44 |     Returns:
45 |         pil_images (List[PIL.Image.Image]): the list of PIL images.
46 | 
47 |     """
48 | 
49 |     pil_images = []
50 | 
51 |     for message in conversations:
52 |         if "images" not in message:
53 |             continue
54 | 
55 |         for image_path in message["images"]:
56 |             pil_img = PIL.Image.open(image_path)
57 |             pil_img = pil_img.convert("RGB")
58 |             pil_images.append(pil_img)
59 | 
60 |     return pil_images
61 | 
62 | 
63 | def main(args):
64 | 
65 |     dtype = torch.bfloat16
66 | 
67 |     # specify the path to the model
68 |     model_path = args.model_path
69 |     vl_chat_processor: DeepseekVLV2Processor = DeepseekVLV2Processor.from_pretrained(model_path)
70 |     tokenizer = vl_chat_processor.tokenizer
71 | 
72 |     vl_gpt: DeepseekVLV2ForCausalLM = AutoModelForCausalLM.from_pretrained(
73 |         model_path,
74 |         trust_remote_code=True,
75 |         torch_dtype=dtype
76 |     )
77 |     vl_gpt = vl_gpt.cuda().eval()
78 | 
79 |     # multi-image conversation example
80 |     # Please note that the <|grounding|> token is specifically designed for the grounded caption feature. It is not needed for normal conversations.
81 |     conversation = [
82 |         {
83 |             "role": "<|User|>",
84 |             "content": "<image>\n<image>\n<|grounding|>In the first image, an object within the red rectangle is marked. Locate the object of the same category in the second image.",
85 |             "images": [
86 |                 "images/incontext_visual_grounding_1.jpeg",
87 |                 "images/icl_vg_2.jpeg"
88 |             ],
89 |         },
90 |         {"role": "<|Assistant|>", "content": ""},
91 |     ]
92 | 
93 | 
94 |     # load images and prepare for inputs
95 |     pil_images = load_pil_images(conversation)
96 |     print(f"len(pil_images) = {len(pil_images)}")
97 | 
98 |     prepare_inputs = vl_chat_processor.__call__(
99 |         conversations=conversation,
100 |         images=pil_images,
101 |         force_batchify=True,
102 |         system_prompt=""
103 |     ).to(vl_gpt.device, dtype=dtype)
104 | 
105 |     with torch.no_grad():
106 | 
107 |         if args.chunk_size == -1:
108 |             inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
109 |             past_key_values = None
110 |         else:
111 |             # incremental_prefilling when using 40G GPU for vl2-small
112 |             inputs_embeds, past_key_values = vl_gpt.incremental_prefilling(
113 |                 input_ids=prepare_inputs.input_ids,
114 |                 images=prepare_inputs.images,
115 |                 images_seq_mask=prepare_inputs.images_seq_mask,
116 |                 images_spatial_crop=prepare_inputs.images_spatial_crop,
117 |                 attention_mask=prepare_inputs.attention_mask,
118 |                 chunk_size=args.chunk_size
119 |             )
120 | 
121 |         # run the model to get the response
122 |         outputs = vl_gpt.generate(
123 |             # inputs_embeds=inputs_embeds[:, -1:],
124 |             # input_ids=prepare_inputs.input_ids[:, -1:],
125 |             inputs_embeds=inputs_embeds,
126 |             input_ids=prepare_inputs.input_ids,
127 |             images=prepare_inputs.images,
128 |             images_seq_mask=prepare_inputs.images_seq_mask,
129 |             images_spatial_crop=prepare_inputs.images_spatial_crop,
130 |             attention_mask=prepare_inputs.attention_mask,
131 |             past_key_values=past_key_values,
132 | 
133 |             pad_token_id=tokenizer.eos_token_id,
134 |             bos_token_id=tokenizer.bos_token_id,
135 |             eos_token_id=tokenizer.eos_token_id,
136 |             max_new_tokens=512,
137 | 
138 |             # do_sample=False,
139 |             # repetition_penalty=1.1,
140 | 
141 |             do_sample=True,
142 |             temperature=0.4,
143 |             top_p=0.9,
144 |             repetition_penalty=1.1,
145 | 
146 |             use_cache=True,
147 |         )
148 | 
149 |     answer = tokenizer.decode(outputs[0][len(prepare_inputs.input_ids[0]):].cpu().tolist(), skip_special_tokens=False)
150 |     print(f"{prepare_inputs['sft_format'][0]}", answer)
151 | 
152 |     vg_image = parse_ref_bbox(answer, image=pil_images[-1])
153 |     if vg_image is not None:
154 |         vg_image.save("./vg.jpg", format="JPEG", quality=85)
155 | 
156 | 
157 | if __name__ == "__main__":
158 |     parser = ArgumentParser()
159 |     parser.add_argument("--model_path", type=str, required=True,
160 |                         default="deepseek-ai/deepseek-vl2",
161 |                         help="model name or local path to the model")
162 |     parser.add_argument("--chunk_size", type=int, default=-1,
163 |                         help="chunk size for the model for prefilling. "
164 |                              "When using a 40G GPU for vl2-small, set a chunk_size for incremental_prefilling."
165 | "Otherwise, default value is -1, which means we do not use incremental_prefilling.") 166 | args = parser.parse_args() 167 | main(args) 168 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=40.6.0", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "deepseek_vl2" 7 | version = "1.0.0" 8 | description = "DeepSeek-VL2" 9 | authors = [{name = "DeepSeek-AI"}] 10 | license = {file = "LICENSE-CODE"} 11 | urls = {homepage = "https://github.com/deepseek-ai/DeepSeek-VL2"} 12 | readme = "README.md" 13 | requires-python = ">=3.8" 14 | dependencies = [ 15 | "torch==2.0.1", 16 | "transformers==4.38.2", 17 | "timm>=0.9.16", 18 | "xformers>=0.0.21", 19 | "accelerate", 20 | "sentencepiece", 21 | "attrdict", 22 | "einops", 23 | ] 24 | 25 | [project.optional-dependencies] 26 | gradio = [ 27 | "gradio==3.48.0", 28 | "gradio-client==0.6.1", 29 | "mdtex2html==1.3.0", 30 | "pypinyin==0.50.0", 31 | "tiktoken==0.5.2", 32 | "tqdm==4.64.0", 33 | "colorama==0.4.5", 34 | "Pygments==2.12.0", 35 | "markdown==3.4.1", 36 | "SentencePiece==0.1.96" 37 | ] 38 | lint = [ 39 | "isort", 40 | "black[jupyter] >= 22.6.0", 41 | "pylint[spelling] >= 2.15.0", 42 | "flake8", 43 | "flake8-bugbear", 44 | "flake8-comprehensions", 45 | "flake8-docstrings", 46 | "flake8-pyi", 47 | "flake8-simplify", 48 | "ruff", 49 | "pyenchant", 50 | "pre-commit", 51 | ] 52 | 53 | [tool.setuptools] 54 | packages = {find = {exclude = ["images"]}} 55 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch==2.0.1 2 | transformers==4.38.2 3 | xformers>=0.0.21 4 | timm>=0.9.16 5 | accelerate 6 | sentencepiece 7 | attrdict 8 | einops 9 | 10 | # for gradio demo 11 | gradio==3.48.0 12 | gradio-client==0.6.1 13 | mdtex2html==1.3.0 14 | pypinyin==0.50.0 15 | tiktoken==0.5.2 16 | tqdm==4.64.0 17 | colorama==0.4.5 18 | Pygments==2.12.0 19 | markdown==3.4.1 20 | SentencePiece==0.1.96 21 | -------------------------------------------------------------------------------- /web_demo.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023-2024 DeepSeek. 2 | # 3 | # Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | # this software and associated documentation files (the "Software"), to deal in 5 | # the Software without restriction, including without limitation the rights to 6 | # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | # the Software, and to permit persons to whom the Software is furnished to do so, 8 | # subject to the following conditions: 9 | # 10 | # The above copyright notice and this permission notice shall be included in all 11 | # copies or substantial portions of the Software. 12 | # 13 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 15 | # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 16 | # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
19 | 
20 | # -*- coding:utf-8 -*-
21 | from argparse import ArgumentParser
22 | 
23 | import io
24 | import sys
25 | import base64
26 | from PIL import Image
27 | 
28 | import gradio as gr
29 | import torch
30 | 
31 | from deepseek_vl2.serve.app_modules.gradio_utils import (
32 |     cancel_outputing,
33 |     delete_last_conversation,
34 |     reset_state,
35 |     reset_textbox,
36 |     wrap_gen_fn,
37 | )
38 | from deepseek_vl2.serve.app_modules.overwrites import reload_javascript
39 | from deepseek_vl2.serve.app_modules.presets import (
40 |     CONCURRENT_COUNT,
41 |     MAX_EVENTS,
42 |     description,
43 |     description_top,
44 |     title
45 | )
46 | from deepseek_vl2.serve.app_modules.utils import (
47 |     configure_logger,
48 |     is_variable_assigned,
49 |     strip_stop_words,
50 |     parse_ref_bbox,
51 |     pil_to_base64,
52 |     display_example
53 | )
54 | 
55 | from deepseek_vl2.serve.inference import (
56 |     convert_conversation_to_prompts,
57 |     deepseek_generate,
58 |     load_model,
59 | )
60 | from deepseek_vl2.models.conversation import SeparatorStyle
61 | 
62 | logger = configure_logger()
63 | 
64 | MODELS = [
65 |     "DeepSeek-VL2-tiny",
66 |     "DeepSeek-VL2-small",
67 |     "DeepSeek-VL2",
68 | 
69 |     "deepseek-ai/deepseek-vl2-tiny",
70 |     "deepseek-ai/deepseek-vl2-small",
71 |     "deepseek-ai/deepseek-vl2",
72 | ]
73 | 
74 | DEPLOY_MODELS = dict()
75 | IMAGE_TOKEN = "<image>"
76 | 
77 | examples_list = [
78 |     # visual grounding - 1
79 |     [
80 |         ["images/visual_grounding_1.jpeg"],
81 |         "<|ref|>The giraffe at the back.<|/ref|>",
82 |     ],
83 | 
84 |     # visual grounding - 2
85 |     [
86 |         ["images/visual_grounding_2.jpg"],
87 |         "找到<|ref|>淡定姐<|/ref|>",
88 |     ],
89 | 
90 |     # visual grounding - 3
91 |     [
92 |         ["images/visual_grounding_3.png"],
93 |         "Find all the <|ref|>Watermelon slices<|/ref|>",
94 |     ],
95 | 
96 |     # grounding conversation
97 |     [
98 |         ["images/grounding_conversation_1.jpeg"],
99 |         "<|grounding|>I want to throw out the trash now, what should I do?",
100 |     ],
101 | 
102 |     # in-context visual grounding
103 |     [
104 |         [
105 |             "images/incontext_visual_grounding_1.jpeg",
106 |             "images/icl_vg_2.jpeg"
107 |         ],
108 |         "<|grounding|>In the first image, an object within the red rectangle is marked. Locate the object of the same category in the second image."
109 |     ],
110 | 
111 |     # vqa
112 |     [
113 |         ["images/vqa_1.jpg"],
114 |         "Describe each stage of this image in detail",
115 |     ],
116 | 
117 |     # multi-images
118 |     [
119 |         [
120 |             "images/multi_image_1.jpeg",
121 |             "images/multi_image_2.jpeg",
122 |             "images/multi_image_3.jpeg"
123 |         ],
124 |         "能帮我用这几个食材做一道菜吗?",
125 |     ]
126 | 
127 | ]
128 | 
129 | 
130 | def fetch_model(model_name: str, dtype=torch.bfloat16):
131 |     global args, DEPLOY_MODELS
132 | 
133 |     if args.local_path:
134 |         model_path = args.local_path
135 |     else:
136 |         model_path = model_name
137 | 
138 |     if model_name in DEPLOY_MODELS:
139 |         model_info = DEPLOY_MODELS[model_name]
140 |         print(f"{model_name} has been loaded.")
141 |     else:
142 |         print(f"{model_name} is loading...")
143 |         DEPLOY_MODELS[model_name] = load_model(model_path, dtype=dtype)
144 |         print(f"Loaded {model_name} successfully.")
145 |         model_info = DEPLOY_MODELS[model_name]
146 | 
147 |     return model_info
148 | 
149 | 
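# fetch_model caches every loaded checkpoint in the module-level DEPLOY_MODELS dict, so
# switching back to an already-resident model is free; note that it also reads the
# module-level `args` for --local_path, so it only works after argument parsing. A
# commented sketch (the model name must be one of the MODELS entries above):
#
#     tokenizer, vl_gpt, vl_chat_processor = fetch_model("deepseek-ai/deepseek-vl2-tiny")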
150 | def generate_prompt_with_history(
151 |     text, images, history, vl_chat_processor, tokenizer, max_length=2048
152 | ):
153 |     """
154 |     Generate a prompt with history for the deepseek application.
155 | 
156 |     Args:
157 |         text (str): The text prompt.
158 |         images (list[PIL.Image.Image]): The image prompt.
159 |         history (list): List of previous conversation messages.
160 |         tokenizer: The tokenizer used for encoding the prompt.
161 |         max_length (int): The maximum length of the prompt.
162 | 
163 |     Returns:
164 |         tuple: A tuple containing the generated prompt, image list, conversation, and conversation copy. If the prompt could not be generated within the max_length limit, returns None.
165 |     """
166 |     global IMAGE_TOKEN
167 | 
168 |     sft_format = "deepseek"
169 |     user_role_ind = 0
170 |     bot_role_ind = 1
171 | 
172 |     # Initialize conversation
173 |     conversation = vl_chat_processor.new_chat_template()
174 | 
175 |     if history:
176 |         conversation.messages = history
177 | 
178 |     if images is not None and len(images) > 0:
179 | 
180 |         num_image_tags = text.count(IMAGE_TOKEN)
181 |         num_images = len(images)
182 | 
183 |         if num_images > num_image_tags:
184 |             pad_image_tags = num_images - num_image_tags
185 |             image_tokens = "\n".join([IMAGE_TOKEN] * pad_image_tags)
186 | 
187 |             # prepend the missing <image> tags on a new line before the text prompt
188 |             text = image_tokens + "\n" + text
189 |         elif num_images < num_image_tags:
190 |             remove_image_tags = num_image_tags - num_images
191 |             text = text.replace(IMAGE_TOKEN, "", remove_image_tags)
192 | 
193 |         # print(f"prompt = {text}, len(images) = {len(images)}")
194 |         text = (text, images)
195 | 
196 |     conversation.append_message(conversation.roles[user_role_ind], text)
197 |     conversation.append_message(conversation.roles[bot_role_ind], "")
198 | 
199 |     # Create a copy of the conversation to avoid history truncation in the UI
200 |     conversation_copy = conversation.copy()
201 |     logger.info("=" * 80)
202 |     logger.info(get_prompt(conversation))
203 | 
204 |     rounds = len(conversation.messages) // 2
205 | 
206 |     for _ in range(rounds):
207 |         current_prompt = get_prompt(conversation)
208 |         current_prompt = (
209 |             current_prompt.replace("</s>", "")
210 |             if sft_format == "deepseek"
211 |             else current_prompt
212 |         )
213 | 
214 |         if torch.tensor(tokenizer.encode(current_prompt)).size(-1) <= max_length:
215 |             return conversation_copy
216 | 
217 |         if len(conversation.messages) % 2 != 0:
218 |             gr.Error("The messages between user and assistant are not paired.")
219 |             return
220 | 
221 |         try:
222 |             for _ in range(2):  # pop out two messages in a row
223 |                 conversation.messages.pop(0)
224 |         except IndexError:
225 |             gr.Error("Input text processing failed, unable to respond in this round.")
226 |             return None
227 | 
228 |     gr.Error("Prompt could not be generated within max_length limit.")
229 |     return None
230 | 
231 | 
232 | def to_gradio_chatbot(conv):
233 |     """Convert the conversation to gradio chatbot format."""
234 |     ret = []
235 |     for i, (role, msg) in enumerate(conv.messages[conv.offset:]):
236 |         if i % 2 == 0:
237 |             if type(msg) is tuple:
238 |                 msg, images = msg
239 | 
240 |                 if isinstance(images, list):
241 |                     for j, image in enumerate(images):
242 |                         if isinstance(image, str):
243 |                             with open(image, "rb") as f:
244 |                                 data = f.read()
245 |                             img_b64_str = base64.b64encode(data).decode()
246 |                             image_str = (f'<img src="data:image/png;base64,{img_b64_str}" '
247 |                                          f'alt="user upload image_{j}" style="max-width: 300px; height: auto;" />')
248 |                         else:
249 |                             image_str = pil_to_base64(image, f"user upload image_{j}", max_size=800, min_size=400)
250 | 
251 |                         # replace the <image> tag in the message
252 |                         msg = msg.replace(IMAGE_TOKEN, image_str, 1)
253 | 
254 |                 else:
255 |                     pass
256 | 
257 |             ret.append([msg, None])
258 |         else:
259 |             ret[-1][-1] = msg
260 |     return ret
261 | 
262 | 
263 | def to_gradio_history(conv):
264 |     """Convert the conversation to gradio history state."""
265 |     return conv.messages[conv.offset:]
266 | 
267 | 
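# to_gradio_chatbot pairs the raw conversation into [[user, bot], ...] rows for
# gr.Chatbot, inlining each <image> placeholder as a base64 <img> tag, while
# to_gradio_history keeps the untouched messages for the gr.State history. A commented
# sketch (conv is assumed to come from the processor's chat template, as above):
#
#     conv = vl_chat_processor.new_chat_template()
#     conv.append_message(conv.roles[0], "Describe this picture")
#     conv.append_message(conv.roles[1], "It shows ...")
#     rows = to_gradio_chatbot(conv)   # -> [["Describe this picture", "It shows ..."]]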
268 | def get_prompt(conv) -> str:
269 |     """Get the prompt for generation."""
270 |     system_prompt = conv.system_template.format(system_message=conv.system_message)
271 |     if conv.sep_style == SeparatorStyle.DeepSeek:
272 |         seps = [conv.sep, conv.sep2]
273 |         if not system_prompt:
274 |             ret = ""
275 |         else:
276 |             ret = system_prompt + seps[0]
277 |         for i, (role, message) in enumerate(conv.messages):
278 |             if message:
279 |                 if type(message) is tuple:  # multimodal message
280 |                     message, _ = message
281 |                 ret += role + ": " + message + seps[i % 2]
282 |             else:
283 |                 ret += role + ":"
284 |         return ret
285 |     else:
286 |         return conv.get_prompt()
287 | 
288 | 
289 | def transfer_input(input_text, input_images):
290 |     print("transferring input text and input image")
291 | 
292 |     return (
293 |         input_text,
294 |         input_images,
295 |         gr.update(value=""),
296 |         gr.update(value=None),
297 |         gr.Button(visible=True)
298 |     )
299 | 
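# For the DeepSeek separator style, get_prompt above concatenates "role: message" turns
# with alternating separators and leaves a trailing bare "role:" for the assistant slot
# that generation will fill. With illustrative separator values (the real ones come
# from the Conversation template), the assembly looks like:
#
#     # messages = [("<|User|>", "hi"), ("<|Assistant|>", "")], sep = " ", empty system prompt
#     # get_prompt(conv) -> "<|User|>: hi <|Assistant|>:"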
300 | 
301 | @wrap_gen_fn
302 | def predict(
303 |     text,
304 |     images,
305 |     chatbot,
306 |     history,
307 |     top_p,
308 |     temperature,
309 |     repetition_penalty,
310 |     max_length_tokens,
311 |     max_context_length_tokens,
312 |     model_select_dropdown,
313 | ):
314 |     """
315 |     Function to predict the response based on the user's input and selected model.
316 | 
317 |     Parameters:
318 |         text (str): The input text from the user.
319 |         images (list): The uploaded image files from the user.
320 |         chatbot (list): The chatbot rows currently shown in the UI.
321 |         history (list): The conversation history state.
322 |         top_p (float) / temperature (float): The sampling parameters for the model.
323 |         repetition_penalty (float): The repetition penalty for the model.
324 |         max_length_tokens (int): The maximum length of tokens for the model.
325 |         max_context_length_tokens (int): The maximum length of context tokens for the model.
326 |         model_select_dropdown (str): The selected model from the dropdown.
327 | 
328 |     Returns:
329 |         generator: A generator that yields the chatbot outputs, history, and status.
330 |     """
331 |     print("running the prediction function")
332 |     try:
333 |         tokenizer, vl_gpt, vl_chat_processor = fetch_model(model_select_dropdown)
334 | 
335 |         if text == "":
336 |             yield chatbot, history, "Empty context."
337 |             return
338 |     except KeyError:
339 |         yield [[text, "No Model Found"]], [], "No Model Found"
340 |         return
341 | 
342 |     if images is None:
343 |         images = []
344 | 
345 |     # load images
346 |     pil_images = []
347 |     for img_or_file in images:
348 |         try:
349 |             # load as pil image
350 |             if isinstance(img_or_file, Image.Image):
351 |                 pil_images.append(img_or_file)
352 |             else:
353 |                 image = Image.open(img_or_file.name).convert("RGB")
354 |                 pil_images.append(image)
355 |         except Exception as e:
356 |             print(f"Error loading image: {e}")
357 | 
358 |     conversation = generate_prompt_with_history(
359 |         text,
360 |         pil_images,
361 |         history,
362 |         vl_chat_processor,
363 |         tokenizer,
364 |         max_length=max_context_length_tokens,
365 |     )
366 |     all_conv, last_image = convert_conversation_to_prompts(conversation)
367 | 
368 |     stop_words = conversation.stop_str
369 |     gradio_chatbot_output = to_gradio_chatbot(conversation)
370 | 
371 |     full_response = ""
372 |     with torch.no_grad():
373 |         for x in deepseek_generate(
374 |             conversations=all_conv,
375 |             vl_gpt=vl_gpt,
376 |             vl_chat_processor=vl_chat_processor,
377 |             tokenizer=tokenizer,
378 |             stop_words=stop_words,
379 |             max_length=max_length_tokens,
380 |             temperature=temperature,
381 |             repetition_penalty=repetition_penalty,
382 |             top_p=top_p,
383 |             chunk_size=args.chunk_size
384 |         ):
385 |             full_response += x
386 |             response = strip_stop_words(full_response, stop_words)
387 |             conversation.update_last_message(response)
388 |             gradio_chatbot_output[-1][1] = response
389 | 
390 |             # sys.stdout.write(x)
391 |             # sys.stdout.flush()
392 | 
393 |             yield gradio_chatbot_output, to_gradio_history(conversation), "Generating..."
394 | 
395 |     if last_image is not None:
396 |         # TODO: always render the visual grounding boxes on the last image
397 |         vg_image = parse_ref_bbox(response, last_image)
398 |         if vg_image is not None:
399 |             vg_base64 = pil_to_base64(vg_image, "vg", max_size=800, min_size=400)
400 |             gradio_chatbot_output[-1][1] += vg_base64
401 |             yield gradio_chatbot_output, to_gradio_history(conversation), "Generating..."
402 | 
403 |     print("flushed result to gradio")
404 |     torch.cuda.empty_cache()
405 | 
406 |     if is_variable_assigned("x"):
407 |         print(f"{model_select_dropdown}:\n{text}\n{'-' * 80}\n{x}\n{'=' * 80}")
408 |         print(
409 |             f"temperature: {temperature}, "
410 |             f"top_p: {top_p}, "
411 |             f"repetition_penalty: {repetition_penalty}, "
412 |             f"max_length_tokens: {max_length_tokens}"
413 |         )
414 | 
415 |     yield gradio_chatbot_output, to_gradio_history(conversation), "Generate: Success"
416 | 
417 | 
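# predict is a streaming generator: every yield is a (chatbot_rows, history, status)
# triple, which is what lets the Gradio UI repaint while tokens arrive. A commented
# sketch of manual consumption outside the UI, using the slider defaults from
# build_demo below:
#
#     for rows, history, status in predict(text, files, [], [], 0.9, 0.1, 1.1, 2048, 4096, model_name):
#         print(status, rows[-1][1])   # running status plus the partial assistant reply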
418 | # @wrap_gen_fn
419 | def retry(
420 |     text,
421 |     images,
422 |     chatbot,
423 |     history,
424 |     top_p,
425 |     temperature,
426 |     repetition_penalty,
427 |     max_length_tokens,
428 |     max_context_length_tokens,
429 |     model_select_dropdown,
430 | ):
431 |     if len(history) == 0:
432 |         yield (chatbot, history, "Empty context")
433 |         return
434 | 
435 |     chatbot.pop()
436 |     history.pop()
437 |     text = history.pop()[-1]
438 |     if type(text) is tuple:
439 |         text, image = text
440 | 
441 |     yield from predict(
442 |         text,
443 |         images,
444 |         chatbot,
445 |         history,
446 |         top_p,
447 |         temperature,
448 |         repetition_penalty,
449 |         max_length_tokens,
450 |         max_context_length_tokens,
451 |         model_select_dropdown,
452 |         # predict() takes no chunk_size parameter; it reads the global args.chunk_size itself
453 |     )
454 | 
455 | 
456 | def preview_images(files):
457 |     if files is None:
458 |         return []
459 | 
460 |     image_paths = []
461 |     for file in files:
462 |         # use file.name to get the file path
463 |         # image = Image.open(file.name)
464 |         image_paths.append(file.name)
465 |     return image_paths  # return all image paths for the gallery preview
466 | 
467 | 
468 | def build_demo(args):
469 |     # fetch model
470 |     if not args.lazy_load:
471 |         fetch_model(args.model_name)
472 | 
473 |     with open("deepseek_vl2/serve/assets/custom.css", "r", encoding="utf-8") as f:
474 |         customCSS = f.read()
475 | 
476 |     with gr.Blocks(theme=gr.themes.Soft()) as demo:
477 |         history = gr.State([])
478 |         input_text = gr.State()
479 |         input_images = gr.State()
480 | 
481 |         with gr.Row():
482 |             gr.HTML(title)
483 |             status_display = gr.Markdown("Success", elem_id="status_display")
484 |         gr.Markdown(description_top)
485 | 
486 |         with gr.Row(equal_height=True):
487 |             with gr.Column(scale=4):
488 |                 with gr.Row():
489 |                     chatbot = gr.Chatbot(
490 |                         elem_id="deepseek_chatbot",
491 |                         show_share_button=True,
492 |                         bubble_full_width=False,
493 |                         height=600,
494 |                     )
495 |                 with gr.Row():
496 |                     with gr.Column(scale=4):
497 |                         text_box = gr.Textbox(
498 |                             show_label=False, placeholder="Enter text", container=False
499 |                         )
500 |                     with gr.Column(
501 |                         min_width=70,
502 |                     ):
503 |                         submitBtn = gr.Button("Send")
504 |                     with gr.Column(
505 |                         min_width=70,
506 |                     ):
507 |                         cancelBtn = gr.Button("Stop")
508 |                 with gr.Row():
509 |                     emptyBtn = gr.Button(
510 |                         "🧹 New Conversation",
511 |                     )
512 |                     retryBtn = gr.Button("🔄 Regenerate")
513 |                     delLastBtn = gr.Button("🗑️ Remove Last Turn")
514 | 
515 |             with gr.Column():
516 |                 upload_images = gr.Files(file_types=["image"], show_label=True)
517 |                 gallery = gr.Gallery(columns=[3], height="200px", show_label=True)
518 | 
519 |                 upload_images.change(preview_images, inputs=upload_images, outputs=gallery)
520 | 
521 |                 with gr.Tab(label="Parameter Setting") as parameter_row:
522 |                     top_p = gr.Slider(
523 |                         minimum=0,
524 |                         maximum=1.0,
525 |                         value=0.9,
526 |                         step=0.05,
527 |                         interactive=True,
528 |                         label="Top-p",
529 |                     )
530 |                     temperature = gr.Slider(
531 |                         minimum=0,
532 |                         maximum=1.0,
533 |                         value=0.1,
534 |                         step=0.1,
535 |                         interactive=True,
536 |                         label="Temperature",
537 |                     )
538 |                     repetition_penalty = gr.Slider(
539 |                         minimum=0.0,
540 |                         maximum=2.0,
541 |                         value=1.1,
542 |                         step=0.1,
543
def build_demo(args):
    # fetch model
    if not args.lazy_load:
        fetch_model(args.model_name)

    with open("deepseek_vl2/serve/assets/custom.css", "r", encoding="utf-8") as f:
        customCSS = f.read()

    with gr.Blocks(theme=gr.themes.Soft()) as demo:
        history = gr.State([])
        input_text = gr.State()
        input_images = gr.State()

        with gr.Row():
            gr.HTML(title)
            status_display = gr.Markdown("Success", elem_id="status_display")
        gr.Markdown(description_top)

        with gr.Row(equal_height=True):
            with gr.Column(scale=4):
                with gr.Row():
                    chatbot = gr.Chatbot(
                        elem_id="deepseek_chatbot",
                        show_share_button=True,
                        bubble_full_width=False,
                        height=600,
                    )
                with gr.Row():
                    with gr.Column(scale=4):
                        text_box = gr.Textbox(
                            show_label=False, placeholder="Enter text", container=False
                        )
                    with gr.Column(
                        min_width=70,
                    ):
                        submitBtn = gr.Button("Send")
                    with gr.Column(
                        min_width=70,
                    ):
                        cancelBtn = gr.Button("Stop")
                with gr.Row():
                    emptyBtn = gr.Button(
                        "🧹 New Conversation",
                    )
                    retryBtn = gr.Button("🔄 Regenerate")
                    delLastBtn = gr.Button("🗑️ Remove Last Turn")

            with gr.Column():
                upload_images = gr.Files(file_types=["image"], show_label=True)
                gallery = gr.Gallery(columns=[3], height="200px", show_label=True)

                upload_images.change(preview_images, inputs=upload_images, outputs=gallery)

                with gr.Tab(label="Parameter Setting") as parameter_row:
                    top_p = gr.Slider(
                        minimum=0.0,
                        maximum=1.0,
                        value=0.9,
                        step=0.05,
                        interactive=True,
                        label="Top-p",
                    )
                    temperature = gr.Slider(
                        minimum=0,
                        maximum=1.0,
                        value=0.1,
                        step=0.1,
                        interactive=True,
                        label="Temperature",
                    )
                    repetition_penalty = gr.Slider(
                        minimum=0.0,
                        maximum=2.0,
                        value=1.1,
                        step=0.1,
                        interactive=True,
                        label="Repetition penalty",
                    )
                    max_length_tokens = gr.Slider(
                        minimum=0,
                        maximum=4096,
                        value=2048,
                        step=8,
                        interactive=True,
                        label="Max Generation Tokens",
                    )
                    max_context_length_tokens = gr.Slider(
                        minimum=0,
                        maximum=8192,
                        value=4096,
                        step=128,
                        interactive=True,
                        label="Max History Tokens",
                    )
                    model_select_dropdown = gr.Dropdown(
                        label="Select Models",
                        choices=[args.model_name],
                        multiselect=False,
                        value=args.model_name,
                        interactive=True,
                    )

                    # show images, but not visible
                    show_images = gr.HTML(visible=False)
                    # show_images = gr.Image(type="pil", interactive=False, visible=False)

        def format_examples(examples_list):
            examples = []
            for images, texts in examples_list:
                examples.append([images, display_example(images), texts])

            return examples

        gr.Examples(
            examples=format_examples(examples_list),
            inputs=[upload_images, show_images, text_box],
        )

        gr.Markdown(description)

        input_widgets = [
            input_text,
            input_images,
            chatbot,
            history,
            top_p,
            temperature,
            repetition_penalty,
            max_length_tokens,
            max_context_length_tokens,
            model_select_dropdown,
        ]
        output_widgets = [chatbot, history, status_display]

        transfer_input_args = dict(
            fn=transfer_input,
            inputs=[text_box, upload_images],
            outputs=[input_text, input_images, text_box, upload_images, submitBtn],
            show_progress=True,
        )

        predict_args = dict(
            fn=predict,
            inputs=input_widgets,
            outputs=output_widgets,
            show_progress=True,
        )

        retry_args = dict(
            fn=retry,
            inputs=input_widgets,
            outputs=output_widgets,
            show_progress=True,
        )

        reset_args = dict(
            fn=reset_textbox, inputs=[], outputs=[text_box, status_display]
        )

        predict_events = [
            text_box.submit(**transfer_input_args).then(**predict_args),
            submitBtn.click(**transfer_input_args).then(**predict_args),
        ]

        emptyBtn.click(reset_state, outputs=output_widgets, show_progress=True)
        emptyBtn.click(**reset_args)
        retryBtn.click(**retry_args)

        delLastBtn.click(
            delete_last_conversation,
            [chatbot, history],
            output_widgets,
            show_progress=True,
        )

        cancelBtn.click(cancel_outputing, [], [status_display], cancels=predict_events)

    return demo
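# --- Illustrative sketch (not part of the demo) -------------------------------
# The `--chunk_size` flag parsed below enables incremental prefilling: the
# prompt is fed through the model in fixed-size chunks while the KV cache is
# carried forward, so peak activation memory scales with the chunk size rather
# than the full prompt length. A minimal toy version of the idea, assuming a
# Hugging Face-style model interface; this function is never called and all
# names in it are hypothetical:
def _example_incremental_prefill(model, input_ids, chunk_size):
    past_key_values = None
    for start in range(0, input_ids.shape[1], chunk_size):
        chunk = input_ids[:, start:start + chunk_size]
        # reuse the cache from earlier chunks instead of recomputing them
        outputs = model(input_ids=chunk, past_key_values=past_key_values, use_cache=True)
        past_key_values = outputs.past_key_values
    return past_key_values  # ready for token-by-token decoding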
659 | "Otherwise, default value is -1, which means we do not use incremental_prefilling.") 660 | args = parser.parse_args() 661 | 662 | demo = build_demo(args) 663 | demo.title = "DeepSeek-VL2 Chatbot" 664 | 665 | reload_javascript() 666 | demo.queue(concurrency_count=CONCURRENT_COUNT, max_size=MAX_EVENTS).launch( 667 | # share=False, 668 | share=True, 669 | favicon_path="deepseek_vl2/serve/assets/favicon.ico", 670 | inbrowser=False, 671 | server_name=args.ip, 672 | server_port=args.port, 673 | root_path=args.root_path 674 | ) 675 | --------------------------------------------------------------------------------