├── .Rbuildignore
├── .github
│   ├── .gitignore
│   └── workflows
│       ├── R-CMD-check.yaml
│       ├── pkgdown.yaml
│       └── rhub.yaml
├── .gitignore
├── DESCRIPTION
├── LICENSE.md
├── NAMESPACE
├── NEWS.md
├── R
│   ├── chat.r
│   ├── embedding.r
│   ├── lib.R
│   ├── models.r
│   ├── rollama-package.R
│   └── utils.r
├── README.Rmd
├── README.md
├── _pkgdown.yml
├── codecov.yml
├── cran-comments.md
├── inst
│   ├── WORDLIST
│   └── extdata
│       ├── logo.png
│       └── modelfile.txt
├── man
│   ├── chat_history.Rd
│   ├── check_model_installed.Rd
│   ├── create_model.Rd
│   ├── embed_text.Rd
│   ├── figures
│   │   ├── logo.png
│   │   └── logo.svg
│   ├── list_models.Rd
│   ├── make_query.Rd
│   ├── ping_ollama.Rd
│   ├── pull_model.Rd
│   ├── query.Rd
│   ├── rollama-options.Rd
│   └── rollama-package.Rd
├── rollama.Rproj
├── tests
│   ├── spelling.R
│   ├── testthat.R
│   └── testthat
│       ├── setup-models.R
│       ├── test-aaa.R
│       ├── test-chat.R
│       ├── test-embedding.R
│       ├── test-make_query.R
│       ├── test-models.R
│       └── test-utils.R
├── update.r
└── vignettes
    ├── .gitignore
    ├── README.md
    ├── annotation.Rmd
    ├── annotation.Rmd.orig
    ├── figures
    │   └── smldemo-1.png
    ├── hf-gguf.Rmd
    ├── hf-gguf.Rmd.orig
    ├── image-annotation.Rmd
    ├── image-annotation.Rmd.orig
    ├── text-embedding.Rmd
    └── text-embedding.Rmd.orig

/.Rbuildignore:
--------------------------------------------------------------------------------
 1 | ^rollama\.Rproj$
 2 | ^\.Rproj\.user$
 3 | ^LICENSE\.md$
 4 | ^README\.Rmd$
 5 | ^doc$
 6 | ^Meta$
 7 | ^\.github$
 8 | ^_pkgdown\.yml$
 9 | ^docs$
10 | ^pkgdown$
11 | ^update.r
12 | ^cran-comments\.md$
13 | ^CRAN-SUBMISSION$
14 | ^docker-compose.yml$
15 | ^paper$
16 | ^codecov\.yml$
17 | ^use_github_release2\.r$
18 |
--------------------------------------------------------------------------------
/.github/.gitignore:
--------------------------------------------------------------------------------
1 | *.html
2 |
--------------------------------------------------------------------------------
/.github/workflows/R-CMD-check.yaml:
--------------------------------------------------------------------------------
1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
2 | # Need help debugging build failures?
Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | branches: [main, master] 8 | 9 | name: R-CMD-check 10 | 11 | jobs: 12 | R-CMD-check: 13 | runs-on: ${{ matrix.config.os }} 14 | 15 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 16 | 17 | strategy: 18 | fail-fast: false 19 | matrix: 20 | config: 21 | - {os: macos-latest, r: 'release'} 22 | - {os: windows-latest, r: 'release'} 23 | - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'} 24 | - {os: ubuntu-latest, r: 'release'} 25 | - {os: ubuntu-latest, r: 'oldrel-1'} 26 | 27 | env: 28 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 29 | R_KEEP_PKG_SOURCE: yes 30 | 31 | steps: 32 | - uses: actions/checkout@v3 33 | 34 | - uses: r-lib/actions/setup-pandoc@v2 35 | 36 | - uses: r-lib/actions/setup-r@v2 37 | with: 38 | r-version: ${{ matrix.config.r }} 39 | http-user-agent: ${{ matrix.config.http-user-agent }} 40 | use-public-rspm: true 41 | 42 | - uses: r-lib/actions/setup-r-dependencies@v2 43 | with: 44 | extra-packages: any::rcmdcheck, any::covr 45 | needs: check 46 | 47 | # run Ollama Docker image 48 | - name: Start Docker Container 49 | if: matrix.config.os == 'ubuntu-latest' && matrix.config.r == 'release' 50 | run: | 51 | docker pull ollama/ollama:latest 52 | docker run -d --name ollama -p 11434:11434 ollama/ollama:latest 53 | sleep 30s # wait a bit for ollama startup 54 | # pull default model 55 | Rscript -e 'rollama::pull_model()' 56 | 57 | - uses: r-lib/actions/check-r-package@v2 58 | with: 59 | upload-snapshots: true 60 | 61 | - name: Test coverage 62 | if: matrix.config.os == 'ubuntu-latest' && matrix.config.r == 'release' 63 | run: | 64 | cov <- covr::package_coverage( 65 | quiet = FALSE, 66 | clean = FALSE, 67 | install_path = file.path(normalizePath(Sys.getenv("RUNNER_TEMP"), winslash = "/"), "package") 68 | ) 69 | covr::to_cobertura(cov) 70 | shell: Rscript {0} 71 | 72 | - uses: codecov/codecov-action@v4 73 | if: matrix.config.os == 'ubuntu-latest' && matrix.config.r == 'release' 74 | with: 75 | # Fail if error if not on PR, or if on PR and token is given 76 | fail_ci_if_error: ${{ github.event_name != 'pull_request' || secrets.CODECOV_TOKEN }} 77 | file: ./cobertura.xml 78 | plugin: noop 79 | disable_search: true 80 | token: ${{ secrets.CODECOV_TOKEN }} 81 | 82 | - name: Show testthat output 83 | if: matrix.config.os == 'ubuntu-latest' && matrix.config.r == 'release' 84 | run: | 85 | ## -------------------------------------------------------------------- 86 | find '${{ runner.temp }}/package' -name 'testthat.Rout*' -exec cat '{}' \; || true 87 | shell: bash 88 | 89 | - name: Upload test results 90 | if: failure() 91 | uses: actions/upload-artifact@v4 92 | with: 93 | name: coverage-test-failures 94 | path: ${{ runner.temp }}/package 95 | 96 | -------------------------------------------------------------------------------- /.github/workflows/pkgdown.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | branches: [main, master] 8 | release: 9 | types: [published] 10 | workflow_dispatch: 11 | 12 | name: pkgdown 13 | 14 | jobs: 15 | pkgdown: 16 | runs-on: ubuntu-latest 17 | # Only restrict concurrency for non-PR jobs 18 | concurrency: 19 | group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }} 20 | env: 21 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 22 | permissions: 23 | contents: write 24 | steps: 25 | - uses: actions/checkout@v3 26 | 27 | - uses: r-lib/actions/setup-pandoc@v2 28 | 29 | - uses: r-lib/actions/setup-r@v2 30 | with: 31 | use-public-rspm: true 32 | 33 | - uses: r-lib/actions/setup-r-dependencies@v2 34 | with: 35 | extra-packages: any::pkgdown, local::. 36 | needs: website 37 | 38 | - name: Build site 39 | run: pkgdown::build_site_github_pages(new_process = FALSE, install = FALSE) 40 | shell: Rscript {0} 41 | 42 | - name: Deploy to GitHub pages 🚀 43 | if: github.event_name != 'pull_request' 44 | uses: JamesIves/github-pages-deploy-action@v4.4.1 45 | with: 46 | clean: false 47 | branch: gh-pages 48 | folder: docs 49 | -------------------------------------------------------------------------------- /.github/workflows/rhub.yaml: -------------------------------------------------------------------------------- 1 | # R-hub's generic GitHub Actions workflow file. It's canonical location is at 2 | # https://github.com/r-hub/actions/blob/v1/workflows/rhub.yaml 3 | # You can update this file to a newer version using the rhub2 package: 4 | # 5 | # rhub::rhub_setup() 6 | # 7 | # It is unlikely that you need to modify this file manually. 8 | 9 | name: R-hub 10 | run-name: "${{ github.event.inputs.id }}: ${{ github.event.inputs.name || format('Manually run by {0}', github.triggering_actor) }}" 11 | 12 | on: 13 | workflow_dispatch: 14 | inputs: 15 | config: 16 | description: 'A comma separated list of R-hub platforms to use.' 17 | type: string 18 | default: 'linux,windows,macos' 19 | name: 20 | description: 'Run name. You can leave this empty now.' 21 | type: string 22 | id: 23 | description: 'Unique ID. You can leave this empty now.' 
24 | type: string 25 | 26 | jobs: 27 | 28 | setup: 29 | runs-on: ubuntu-latest 30 | outputs: 31 | containers: ${{ steps.rhub-setup.outputs.containers }} 32 | platforms: ${{ steps.rhub-setup.outputs.platforms }} 33 | 34 | steps: 35 | # NO NEED TO CHECKOUT HERE 36 | - uses: r-hub/actions/setup@v1 37 | with: 38 | config: ${{ github.event.inputs.config }} 39 | id: rhub-setup 40 | 41 | linux-containers: 42 | needs: setup 43 | if: ${{ needs.setup.outputs.containers != '[]' }} 44 | runs-on: ubuntu-latest 45 | name: ${{ matrix.config.label }} 46 | strategy: 47 | fail-fast: false 48 | matrix: 49 | config: ${{ fromJson(needs.setup.outputs.containers) }} 50 | container: 51 | image: ${{ matrix.config.container }} 52 | 53 | steps: 54 | - uses: r-hub/actions/checkout@v1 55 | - uses: r-hub/actions/platform-info@v1 56 | with: 57 | token: ${{ secrets.RHUB_TOKEN }} 58 | job-config: ${{ matrix.config.job-config }} 59 | - uses: r-hub/actions/setup-deps@v1 60 | with: 61 | token: ${{ secrets.RHUB_TOKEN }} 62 | job-config: ${{ matrix.config.job-config }} 63 | - uses: r-hub/actions/run-check@v1 64 | with: 65 | token: ${{ secrets.RHUB_TOKEN }} 66 | job-config: ${{ matrix.config.job-config }} 67 | 68 | other-platforms: 69 | needs: setup 70 | if: ${{ needs.setup.outputs.platforms != '[]' }} 71 | runs-on: ${{ matrix.config.os }} 72 | name: ${{ matrix.config.label }} 73 | strategy: 74 | fail-fast: false 75 | matrix: 76 | config: ${{ fromJson(needs.setup.outputs.platforms) }} 77 | 78 | steps: 79 | - uses: r-hub/actions/checkout@v1 80 | - uses: r-hub/actions/setup-r@v1 81 | with: 82 | job-config: ${{ matrix.config.job-config }} 83 | token: ${{ secrets.RHUB_TOKEN }} 84 | - uses: r-hub/actions/platform-info@v1 85 | with: 86 | token: ${{ secrets.RHUB_TOKEN }} 87 | job-config: ${{ matrix.config.job-config }} 88 | - uses: r-hub/actions/setup-deps@v1 89 | with: 90 | job-config: ${{ matrix.config.job-config }} 91 | token: ${{ secrets.RHUB_TOKEN }} 92 | - uses: r-hub/actions/run-check@v1 93 | with: 94 | job-config: ${{ matrix.config.job-config }} 95 | token: ${{ secrets.RHUB_TOKEN }} 96 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .Rdata 4 | .httr-oauth 5 | .DS_Store 6 | .quarto 7 | inst/doc 8 | /doc/ 9 | /Meta/ 10 | progress.r 11 | docs 12 | pkgdown 13 | docker-compose.yml 14 | paper/paper.jats 15 | paper/paper.pdf 16 | paper/paper_files 17 | load-experiments.R 18 | use_github_release2.r 19 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: rollama 2 | Title: Communicate with 'Ollama' to Run Large Language Models Locally 3 | Version: 0.2.2.9000 4 | Authors@R: 5 | c(person(given = "Johannes B.", 6 | family = "Gruber", 7 | email = "JohannesB.Gruber@gmail.com", 8 | role = c("aut", "cre"), 9 | comment = c(ORCID = "0000-0001-9177-1772")), 10 | person(given = "Maximilian", 11 | family = "Weber", 12 | role = c("aut", "ctb"), 13 | comment = c(ORCID = "0000-0002-1174-449X"))) 14 | Description: Wraps the 'Ollama' API, which can be used to 15 | communicate with generative large language models locally. 
16 | License: GPL (>= 3) 17 | Encoding: UTF-8 18 | Roxygen: list(markdown = TRUE) 19 | RoxygenNote: 7.3.2 20 | Imports: 21 | callr, 22 | cli, 23 | dplyr, 24 | httr2, 25 | jsonlite, 26 | methods, 27 | prettyunits, 28 | purrr, 29 | rlang, 30 | tibble, 31 | withr 32 | Suggests: 33 | base64enc, 34 | covr, 35 | glue, 36 | knitr, 37 | rmarkdown, 38 | spelling, 39 | testthat (>= 3.0.0) 40 | Depends: 41 | R (>= 4.1.0) 42 | VignetteBuilder: knitr 43 | Config/testthat/edition: 3 44 | Language: en-US 45 | URL: https://jbgruber.github.io/rollama/, https://github.com/JBGruber/rollama 46 | BugReports: https://github.com/JBGruber/rollama/issues 47 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | GNU General Public License 2 | ========================== 3 | 4 | _Version 3, 29 June 2007_ 5 | _Copyright © 2007 Free Software Foundation, Inc. <>_ 6 | 7 | Everyone is permitted to copy and distribute verbatim copies of this license 8 | document, but changing it is not allowed. 9 | 10 | ## Preamble 11 | 12 | The GNU General Public License is a free, copyleft license for software and other 13 | kinds of works. 14 | 15 | The licenses for most software and other practical works are designed to take away 16 | your freedom to share and change the works. By contrast, the GNU General Public 17 | License is intended to guarantee your freedom to share and change all versions of a 18 | program--to make sure it remains free software for all its users. We, the Free 19 | Software Foundation, use the GNU General Public License for most of our software; it 20 | applies also to any other work released this way by its authors. You can apply it to 21 | your programs, too. 22 | 23 | When we speak of free software, we are referring to freedom, not price. Our General 24 | Public Licenses are designed to make sure that you have the freedom to distribute 25 | copies of free software (and charge for them if you wish), that you receive source 26 | code or can get it if you want it, that you can change the software or use pieces of 27 | it in new free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you these rights or 30 | asking you to surrender the rights. Therefore, you have certain responsibilities if 31 | you distribute copies of the software, or if you modify it: responsibilities to 32 | respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether gratis or for a fee, 35 | you must pass on to the recipients the same freedoms that you received. You must make 36 | sure that they, too, receive or can get the source code. And you must show them these 37 | terms so they know their rights. 38 | 39 | Developers that use the GNU GPL protect your rights with two steps: **(1)** assert 40 | copyright on the software, and **(2)** offer you this License giving you legal permission 41 | to copy, distribute and/or modify it. 42 | 43 | For the developers' and authors' protection, the GPL clearly explains that there is 44 | no warranty for this free software. For both users' and authors' sake, the GPL 45 | requires that modified versions be marked as changed, so that their problems will not 46 | be attributed erroneously to authors of previous versions. 
47 | 48 | Some devices are designed to deny users access to install or run modified versions of 49 | the software inside them, although the manufacturer can do so. This is fundamentally 50 | incompatible with the aim of protecting users' freedom to change the software. The 51 | systematic pattern of such abuse occurs in the area of products for individuals to 52 | use, which is precisely where it is most unacceptable. Therefore, we have designed 53 | this version of the GPL to prohibit the practice for those products. If such problems 54 | arise substantially in other domains, we stand ready to extend this provision to 55 | those domains in future versions of the GPL, as needed to protect the freedom of 56 | users. 57 | 58 | Finally, every program is threatened constantly by software patents. States should 59 | not allow patents to restrict development and use of software on general-purpose 60 | computers, but in those that do, we wish to avoid the special danger that patents 61 | applied to a free program could make it effectively proprietary. To prevent this, the 62 | GPL assures that patents cannot be used to render the program non-free. 63 | 64 | The precise terms and conditions for copying, distribution and modification follow. 65 | 66 | ## TERMS AND CONDITIONS 67 | 68 | ### 0. Definitions 69 | 70 | “This License” refers to version 3 of the GNU General Public License. 71 | 72 | “Copyright” also means copyright-like laws that apply to other kinds of 73 | works, such as semiconductor masks. 74 | 75 | “The Program” refers to any copyrightable work licensed under this 76 | License. Each licensee is addressed as “you”. “Licensees” and 77 | “recipients” may be individuals or organizations. 78 | 79 | To “modify” a work means to copy from or adapt all or part of the work in 80 | a fashion requiring copyright permission, other than the making of an exact copy. The 81 | resulting work is called a “modified version” of the earlier work or a 82 | work “based on” the earlier work. 83 | 84 | A “covered work” means either the unmodified Program or a work based on 85 | the Program. 86 | 87 | To “propagate” a work means to do anything with it that, without 88 | permission, would make you directly or secondarily liable for infringement under 89 | applicable copyright law, except executing it on a computer or modifying a private 90 | copy. Propagation includes copying, distribution (with or without modification), 91 | making available to the public, and in some countries other activities as well. 92 | 93 | To “convey” a work means any kind of propagation that enables other 94 | parties to make or receive copies. Mere interaction with a user through a computer 95 | network, with no transfer of a copy, is not conveying. 96 | 97 | An interactive user interface displays “Appropriate Legal Notices” to the 98 | extent that it includes a convenient and prominently visible feature that **(1)** 99 | displays an appropriate copyright notice, and **(2)** tells the user that there is no 100 | warranty for the work (except to the extent that warranties are provided), that 101 | licensees may convey the work under this License, and how to view a copy of this 102 | License. If the interface presents a list of user commands or options, such as a 103 | menu, a prominent item in the list meets this criterion. 104 | 105 | ### 1. Source Code 106 | 107 | The “source code” for a work means the preferred form of the work for 108 | making modifications to it. “Object code” means any non-source form of a 109 | work. 
110 | 111 | A “Standard Interface” means an interface that either is an official 112 | standard defined by a recognized standards body, or, in the case of interfaces 113 | specified for a particular programming language, one that is widely used among 114 | developers working in that language. 115 | 116 | The “System Libraries” of an executable work include anything, other than 117 | the work as a whole, that **(a)** is included in the normal form of packaging a Major 118 | Component, but which is not part of that Major Component, and **(b)** serves only to 119 | enable use of the work with that Major Component, or to implement a Standard 120 | Interface for which an implementation is available to the public in source code form. 121 | A “Major Component”, in this context, means a major essential component 122 | (kernel, window system, and so on) of the specific operating system (if any) on which 123 | the executable work runs, or a compiler used to produce the work, or an object code 124 | interpreter used to run it. 125 | 126 | The “Corresponding Source” for a work in object code form means all the 127 | source code needed to generate, install, and (for an executable work) run the object 128 | code and to modify the work, including scripts to control those activities. However, 129 | it does not include the work's System Libraries, or general-purpose tools or 130 | generally available free programs which are used unmodified in performing those 131 | activities but which are not part of the work. For example, Corresponding Source 132 | includes interface definition files associated with source files for the work, and 133 | the source code for shared libraries and dynamically linked subprograms that the work 134 | is specifically designed to require, such as by intimate data communication or 135 | control flow between those subprograms and other parts of the work. 136 | 137 | The Corresponding Source need not include anything that users can regenerate 138 | automatically from other parts of the Corresponding Source. 139 | 140 | The Corresponding Source for a work in source code form is that same work. 141 | 142 | ### 2. Basic Permissions 143 | 144 | All rights granted under this License are granted for the term of copyright on the 145 | Program, and are irrevocable provided the stated conditions are met. This License 146 | explicitly affirms your unlimited permission to run the unmodified Program. The 147 | output from running a covered work is covered by this License only if the output, 148 | given its content, constitutes a covered work. This License acknowledges your rights 149 | of fair use or other equivalent, as provided by copyright law. 150 | 151 | You may make, run and propagate covered works that you do not convey, without 152 | conditions so long as your license otherwise remains in force. You may convey covered 153 | works to others for the sole purpose of having them make modifications exclusively 154 | for you, or provide you with facilities for running those works, provided that you 155 | comply with the terms of this License in conveying all material for which you do not 156 | control copyright. Those thus making or running the covered works for you must do so 157 | exclusively on your behalf, under your direction and control, on terms that prohibit 158 | them from making any copies of your copyrighted material outside their relationship 159 | with you. 160 | 161 | Conveying under any other circumstances is permitted solely under the conditions 162 | stated below. 
Sublicensing is not allowed; section 10 makes it unnecessary. 163 | 164 | ### 3. Protecting Users' Legal Rights From Anti-Circumvention Law 165 | 166 | No covered work shall be deemed part of an effective technological measure under any 167 | applicable law fulfilling obligations under article 11 of the WIPO copyright treaty 168 | adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention 169 | of such measures. 170 | 171 | When you convey a covered work, you waive any legal power to forbid circumvention of 172 | technological measures to the extent such circumvention is effected by exercising 173 | rights under this License with respect to the covered work, and you disclaim any 174 | intention to limit operation or modification of the work as a means of enforcing, 175 | against the work's users, your or third parties' legal rights to forbid circumvention 176 | of technological measures. 177 | 178 | ### 4. Conveying Verbatim Copies 179 | 180 | You may convey verbatim copies of the Program's source code as you receive it, in any 181 | medium, provided that you conspicuously and appropriately publish on each copy an 182 | appropriate copyright notice; keep intact all notices stating that this License and 183 | any non-permissive terms added in accord with section 7 apply to the code; keep 184 | intact all notices of the absence of any warranty; and give all recipients a copy of 185 | this License along with the Program. 186 | 187 | You may charge any price or no price for each copy that you convey, and you may offer 188 | support or warranty protection for a fee. 189 | 190 | ### 5. Conveying Modified Source Versions 191 | 192 | You may convey a work based on the Program, or the modifications to produce it from 193 | the Program, in the form of source code under the terms of section 4, provided that 194 | you also meet all of these conditions: 195 | 196 | * **a)** The work must carry prominent notices stating that you modified it, and giving a 197 | relevant date. 198 | * **b)** The work must carry prominent notices stating that it is released under this 199 | License and any conditions added under section 7. This requirement modifies the 200 | requirement in section 4 to “keep intact all notices”. 201 | * **c)** You must license the entire work, as a whole, under this License to anyone who 202 | comes into possession of a copy. This License will therefore apply, along with any 203 | applicable section 7 additional terms, to the whole of the work, and all its parts, 204 | regardless of how they are packaged. This License gives no permission to license the 205 | work in any other way, but it does not invalidate such permission if you have 206 | separately received it. 207 | * **d)** If the work has interactive user interfaces, each must display Appropriate Legal 208 | Notices; however, if the Program has interactive interfaces that do not display 209 | Appropriate Legal Notices, your work need not make them do so. 210 | 211 | A compilation of a covered work with other separate and independent works, which are 212 | not by their nature extensions of the covered work, and which are not combined with 213 | it such as to form a larger program, in or on a volume of a storage or distribution 214 | medium, is called an “aggregate” if the compilation and its resulting 215 | copyright are not used to limit the access or legal rights of the compilation's users 216 | beyond what the individual works permit. 
Inclusion of a covered work in an aggregate 217 | does not cause this License to apply to the other parts of the aggregate. 218 | 219 | ### 6. Conveying Non-Source Forms 220 | 221 | You may convey a covered work in object code form under the terms of sections 4 and 222 | 5, provided that you also convey the machine-readable Corresponding Source under the 223 | terms of this License, in one of these ways: 224 | 225 | * **a)** Convey the object code in, or embodied in, a physical product (including a 226 | physical distribution medium), accompanied by the Corresponding Source fixed on a 227 | durable physical medium customarily used for software interchange. 228 | * **b)** Convey the object code in, or embodied in, a physical product (including a 229 | physical distribution medium), accompanied by a written offer, valid for at least 230 | three years and valid for as long as you offer spare parts or customer support for 231 | that product model, to give anyone who possesses the object code either **(1)** a copy of 232 | the Corresponding Source for all the software in the product that is covered by this 233 | License, on a durable physical medium customarily used for software interchange, for 234 | a price no more than your reasonable cost of physically performing this conveying of 235 | source, or **(2)** access to copy the Corresponding Source from a network server at no 236 | charge. 237 | * **c)** Convey individual copies of the object code with a copy of the written offer to 238 | provide the Corresponding Source. This alternative is allowed only occasionally and 239 | noncommercially, and only if you received the object code with such an offer, in 240 | accord with subsection 6b. 241 | * **d)** Convey the object code by offering access from a designated place (gratis or for 242 | a charge), and offer equivalent access to the Corresponding Source in the same way 243 | through the same place at no further charge. You need not require recipients to copy 244 | the Corresponding Source along with the object code. If the place to copy the object 245 | code is a network server, the Corresponding Source may be on a different server 246 | (operated by you or a third party) that supports equivalent copying facilities, 247 | provided you maintain clear directions next to the object code saying where to find 248 | the Corresponding Source. Regardless of what server hosts the Corresponding Source, 249 | you remain obligated to ensure that it is available for as long as needed to satisfy 250 | these requirements. 251 | * **e)** Convey the object code using peer-to-peer transmission, provided you inform 252 | other peers where the object code and Corresponding Source of the work are being 253 | offered to the general public at no charge under subsection 6d. 254 | 255 | A separable portion of the object code, whose source code is excluded from the 256 | Corresponding Source as a System Library, need not be included in conveying the 257 | object code work. 258 | 259 | A “User Product” is either **(1)** a “consumer product”, which 260 | means any tangible personal property which is normally used for personal, family, or 261 | household purposes, or **(2)** anything designed or sold for incorporation into a 262 | dwelling. In determining whether a product is a consumer product, doubtful cases 263 | shall be resolved in favor of coverage. 
For a particular product received by a 264 | particular user, “normally used” refers to a typical or common use of 265 | that class of product, regardless of the status of the particular user or of the way 266 | in which the particular user actually uses, or expects or is expected to use, the 267 | product. A product is a consumer product regardless of whether the product has 268 | substantial commercial, industrial or non-consumer uses, unless such uses represent 269 | the only significant mode of use of the product. 270 | 271 | “Installation Information” for a User Product means any methods, 272 | procedures, authorization keys, or other information required to install and execute 273 | modified versions of a covered work in that User Product from a modified version of 274 | its Corresponding Source. The information must suffice to ensure that the continued 275 | functioning of the modified object code is in no case prevented or interfered with 276 | solely because modification has been made. 277 | 278 | If you convey an object code work under this section in, or with, or specifically for 279 | use in, a User Product, and the conveying occurs as part of a transaction in which 280 | the right of possession and use of the User Product is transferred to the recipient 281 | in perpetuity or for a fixed term (regardless of how the transaction is 282 | characterized), the Corresponding Source conveyed under this section must be 283 | accompanied by the Installation Information. But this requirement does not apply if 284 | neither you nor any third party retains the ability to install modified object code 285 | on the User Product (for example, the work has been installed in ROM). 286 | 287 | The requirement to provide Installation Information does not include a requirement to 288 | continue to provide support service, warranty, or updates for a work that has been 289 | modified or installed by the recipient, or for the User Product in which it has been 290 | modified or installed. Access to a network may be denied when the modification itself 291 | materially and adversely affects the operation of the network or violates the rules 292 | and protocols for communication across the network. 293 | 294 | Corresponding Source conveyed, and Installation Information provided, in accord with 295 | this section must be in a format that is publicly documented (and with an 296 | implementation available to the public in source code form), and must require no 297 | special password or key for unpacking, reading or copying. 298 | 299 | ### 7. Additional Terms 300 | 301 | “Additional permissions” are terms that supplement the terms of this 302 | License by making exceptions from one or more of its conditions. Additional 303 | permissions that are applicable to the entire Program shall be treated as though they 304 | were included in this License, to the extent that they are valid under applicable 305 | law. If additional permissions apply only to part of the Program, that part may be 306 | used separately under those permissions, but the entire Program remains governed by 307 | this License without regard to the additional permissions. 308 | 309 | When you convey a copy of a covered work, you may at your option remove any 310 | additional permissions from that copy, or from any part of it. (Additional 311 | permissions may be written to require their own removal in certain cases when you 312 | modify the work.) 
You may place additional permissions on material, added by you to a 313 | covered work, for which you have or can give appropriate copyright permission. 314 | 315 | Notwithstanding any other provision of this License, for material you add to a 316 | covered work, you may (if authorized by the copyright holders of that material) 317 | supplement the terms of this License with terms: 318 | 319 | * **a)** Disclaiming warranty or limiting liability differently from the terms of 320 | sections 15 and 16 of this License; or 321 | * **b)** Requiring preservation of specified reasonable legal notices or author 322 | attributions in that material or in the Appropriate Legal Notices displayed by works 323 | containing it; or 324 | * **c)** Prohibiting misrepresentation of the origin of that material, or requiring that 325 | modified versions of such material be marked in reasonable ways as different from the 326 | original version; or 327 | * **d)** Limiting the use for publicity purposes of names of licensors or authors of the 328 | material; or 329 | * **e)** Declining to grant rights under trademark law for use of some trade names, 330 | trademarks, or service marks; or 331 | * **f)** Requiring indemnification of licensors and authors of that material by anyone 332 | who conveys the material (or modified versions of it) with contractual assumptions of 333 | liability to the recipient, for any liability that these contractual assumptions 334 | directly impose on those licensors and authors. 335 | 336 | All other non-permissive additional terms are considered “further 337 | restrictions” within the meaning of section 10. If the Program as you received 338 | it, or any part of it, contains a notice stating that it is governed by this License 339 | along with a term that is a further restriction, you may remove that term. If a 340 | license document contains a further restriction but permits relicensing or conveying 341 | under this License, you may add to a covered work material governed by the terms of 342 | that license document, provided that the further restriction does not survive such 343 | relicensing or conveying. 344 | 345 | If you add terms to a covered work in accord with this section, you must place, in 346 | the relevant source files, a statement of the additional terms that apply to those 347 | files, or a notice indicating where to find the applicable terms. 348 | 349 | Additional terms, permissive or non-permissive, may be stated in the form of a 350 | separately written license, or stated as exceptions; the above requirements apply 351 | either way. 352 | 353 | ### 8. Termination 354 | 355 | You may not propagate or modify a covered work except as expressly provided under 356 | this License. Any attempt otherwise to propagate or modify it is void, and will 357 | automatically terminate your rights under this License (including any patent licenses 358 | granted under the third paragraph of section 11). 359 | 360 | However, if you cease all violation of this License, then your license from a 361 | particular copyright holder is reinstated **(a)** provisionally, unless and until the 362 | copyright holder explicitly and finally terminates your license, and **(b)** permanently, 363 | if the copyright holder fails to notify you of the violation by some reasonable means 364 | prior to 60 days after the cessation. 
365 | 366 | Moreover, your license from a particular copyright holder is reinstated permanently 367 | if the copyright holder notifies you of the violation by some reasonable means, this 368 | is the first time you have received notice of violation of this License (for any 369 | work) from that copyright holder, and you cure the violation prior to 30 days after 370 | your receipt of the notice. 371 | 372 | Termination of your rights under this section does not terminate the licenses of 373 | parties who have received copies or rights from you under this License. If your 374 | rights have been terminated and not permanently reinstated, you do not qualify to 375 | receive new licenses for the same material under section 10. 376 | 377 | ### 9. Acceptance Not Required for Having Copies 378 | 379 | You are not required to accept this License in order to receive or run a copy of the 380 | Program. Ancillary propagation of a covered work occurring solely as a consequence of 381 | using peer-to-peer transmission to receive a copy likewise does not require 382 | acceptance. However, nothing other than this License grants you permission to 383 | propagate or modify any covered work. These actions infringe copyright if you do not 384 | accept this License. Therefore, by modifying or propagating a covered work, you 385 | indicate your acceptance of this License to do so. 386 | 387 | ### 10. Automatic Licensing of Downstream Recipients 388 | 389 | Each time you convey a covered work, the recipient automatically receives a license 390 | from the original licensors, to run, modify and propagate that work, subject to this 391 | License. You are not responsible for enforcing compliance by third parties with this 392 | License. 393 | 394 | An “entity transaction” is a transaction transferring control of an 395 | organization, or substantially all assets of one, or subdividing an organization, or 396 | merging organizations. If propagation of a covered work results from an entity 397 | transaction, each party to that transaction who receives a copy of the work also 398 | receives whatever licenses to the work the party's predecessor in interest had or 399 | could give under the previous paragraph, plus a right to possession of the 400 | Corresponding Source of the work from the predecessor in interest, if the predecessor 401 | has it or can get it with reasonable efforts. 402 | 403 | You may not impose any further restrictions on the exercise of the rights granted or 404 | affirmed under this License. For example, you may not impose a license fee, royalty, 405 | or other charge for exercise of rights granted under this License, and you may not 406 | initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging 407 | that any patent claim is infringed by making, using, selling, offering for sale, or 408 | importing the Program or any portion of it. 409 | 410 | ### 11. Patents 411 | 412 | A “contributor” is a copyright holder who authorizes use under this 413 | License of the Program or a work on which the Program is based. The work thus 414 | licensed is called the contributor's “contributor version”. 
415 | 416 | A contributor's “essential patent claims” are all patent claims owned or 417 | controlled by the contributor, whether already acquired or hereafter acquired, that 418 | would be infringed by some manner, permitted by this License, of making, using, or 419 | selling its contributor version, but do not include claims that would be infringed 420 | only as a consequence of further modification of the contributor version. For 421 | purposes of this definition, “control” includes the right to grant patent 422 | sublicenses in a manner consistent with the requirements of this License. 423 | 424 | Each contributor grants you a non-exclusive, worldwide, royalty-free patent license 425 | under the contributor's essential patent claims, to make, use, sell, offer for sale, 426 | import and otherwise run, modify and propagate the contents of its contributor 427 | version. 428 | 429 | In the following three paragraphs, a “patent license” is any express 430 | agreement or commitment, however denominated, not to enforce a patent (such as an 431 | express permission to practice a patent or covenant not to sue for patent 432 | infringement). To “grant” such a patent license to a party means to make 433 | such an agreement or commitment not to enforce a patent against the party. 434 | 435 | If you convey a covered work, knowingly relying on a patent license, and the 436 | Corresponding Source of the work is not available for anyone to copy, free of charge 437 | and under the terms of this License, through a publicly available network server or 438 | other readily accessible means, then you must either **(1)** cause the Corresponding 439 | Source to be so available, or **(2)** arrange to deprive yourself of the benefit of the 440 | patent license for this particular work, or **(3)** arrange, in a manner consistent with 441 | the requirements of this License, to extend the patent license to downstream 442 | recipients. “Knowingly relying” means you have actual knowledge that, but 443 | for the patent license, your conveying the covered work in a country, or your 444 | recipient's use of the covered work in a country, would infringe one or more 445 | identifiable patents in that country that you have reason to believe are valid. 446 | 447 | If, pursuant to or in connection with a single transaction or arrangement, you 448 | convey, or propagate by procuring conveyance of, a covered work, and grant a patent 449 | license to some of the parties receiving the covered work authorizing them to use, 450 | propagate, modify or convey a specific copy of the covered work, then the patent 451 | license you grant is automatically extended to all recipients of the covered work and 452 | works based on it. 453 | 454 | A patent license is “discriminatory” if it does not include within the 455 | scope of its coverage, prohibits the exercise of, or is conditioned on the 456 | non-exercise of one or more of the rights that are specifically granted under this 457 | License. 
You may not convey a covered work if you are a party to an arrangement with 458 | a third party that is in the business of distributing software, under which you make 459 | payment to the third party based on the extent of your activity of conveying the 460 | work, and under which the third party grants, to any of the parties who would receive 461 | the covered work from you, a discriminatory patent license **(a)** in connection with 462 | copies of the covered work conveyed by you (or copies made from those copies), or **(b)** 463 | primarily for and in connection with specific products or compilations that contain 464 | the covered work, unless you entered into that arrangement, or that patent license 465 | was granted, prior to 28 March 2007. 466 | 467 | Nothing in this License shall be construed as excluding or limiting any implied 468 | license or other defenses to infringement that may otherwise be available to you 469 | under applicable patent law. 470 | 471 | ### 12. No Surrender of Others' Freedom 472 | 473 | If conditions are imposed on you (whether by court order, agreement or otherwise) 474 | that contradict the conditions of this License, they do not excuse you from the 475 | conditions of this License. If you cannot convey a covered work so as to satisfy 476 | simultaneously your obligations under this License and any other pertinent 477 | obligations, then as a consequence you may not convey it at all. For example, if you 478 | agree to terms that obligate you to collect a royalty for further conveying from 479 | those to whom you convey the Program, the only way you could satisfy both those terms 480 | and this License would be to refrain entirely from conveying the Program. 481 | 482 | ### 13. Use with the GNU Affero General Public License 483 | 484 | Notwithstanding any other provision of this License, you have permission to link or 485 | combine any covered work with a work licensed under version 3 of the GNU Affero 486 | General Public License into a single combined work, and to convey the resulting work. 487 | The terms of this License will continue to apply to the part which is the covered 488 | work, but the special requirements of the GNU Affero General Public License, section 489 | 13, concerning interaction through a network will apply to the combination as such. 490 | 491 | ### 14. Revised Versions of this License 492 | 493 | The Free Software Foundation may publish revised and/or new versions of the GNU 494 | General Public License from time to time. Such new versions will be similar in spirit 495 | to the present version, but may differ in detail to address new problems or concerns. 496 | 497 | Each version is given a distinguishing version number. If the Program specifies that 498 | a certain numbered version of the GNU General Public License “or any later 499 | version” applies to it, you have the option of following the terms and 500 | conditions either of that numbered version or of any later version published by the 501 | Free Software Foundation. If the Program does not specify a version number of the GNU 502 | General Public License, you may choose any version ever published by the Free 503 | Software Foundation. 504 | 505 | If the Program specifies that a proxy can decide which future versions of the GNU 506 | General Public License can be used, that proxy's public statement of acceptance of a 507 | version permanently authorizes you to choose that version for the Program. 
508 | 509 | Later license versions may give you additional or different permissions. However, no 510 | additional obligations are imposed on any author or copyright holder as a result of 511 | your choosing to follow a later version. 512 | 513 | ### 15. Disclaimer of Warranty 514 | 515 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 516 | EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 517 | PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER 518 | EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 519 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE 520 | QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE 521 | DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 522 | 523 | ### 16. Limitation of Liability 524 | 525 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY 526 | COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS 527 | PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, 528 | INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE 529 | PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE 530 | OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE 531 | WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 532 | POSSIBILITY OF SUCH DAMAGES. 533 | 534 | ### 17. Interpretation of Sections 15 and 16 535 | 536 | If the disclaimer of warranty and limitation of liability provided above cannot be 537 | given local legal effect according to their terms, reviewing courts shall apply local 538 | law that most closely approximates an absolute waiver of all civil liability in 539 | connection with the Program, unless a warranty or assumption of liability accompanies 540 | a copy of the Program in return for a fee. 541 | 542 | _END OF TERMS AND CONDITIONS_ 543 | 544 | ## How to Apply These Terms to Your New Programs 545 | 546 | If you develop a new program, and you want it to be of the greatest possible use to 547 | the public, the best way to achieve this is to make it free software which everyone 548 | can redistribute and change under these terms. 549 | 550 | To do so, attach the following notices to the program. It is safest to attach them 551 | to the start of each source file to most effectively state the exclusion of warranty; 552 | and each file should have at least the “copyright” line and a pointer to 553 | where the full notice is found. 554 | 555 | 556 | Copyright (C) 557 | 558 | This program is free software: you can redistribute it and/or modify 559 | it under the terms of the GNU General Public License as published by 560 | the Free Software Foundation, either version 3 of the License, or 561 | (at your option) any later version. 562 | 563 | This program is distributed in the hope that it will be useful, 564 | but WITHOUT ANY WARRANTY; without even the implied warranty of 565 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 566 | GNU General Public License for more details. 567 | 568 | You should have received a copy of the GNU General Public License 569 | along with this program. If not, see . 570 | 571 | Also add information on how to contact you by electronic and paper mail. 
572 | 573 | If the program does terminal interaction, make it output a short notice like this 574 | when it starts in an interactive mode: 575 | 576 | Copyright (C) 577 | This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'. 578 | This is free software, and you are welcome to redistribute it 579 | under certain conditions; type 'show c' for details. 580 | 581 | The hypothetical commands `show w` and `show c` should show the appropriate parts of 582 | the General Public License. Of course, your program's commands might be different; 583 | for a GUI interface, you would use an “about box”. 584 | 585 | You should also get your employer (if you work as a programmer) or school, if any, to 586 | sign a “copyright disclaimer” for the program, if necessary. For more 587 | information on this, and how to apply and follow the GNU GPL, see 588 | <>. 589 | 590 | The GNU General Public License does not permit incorporating your program into 591 | proprietary programs. If your program is a subroutine library, you may consider it 592 | more useful to permit linking proprietary applications with the library. If this is 593 | what you want to do, use the GNU Lesser General Public License instead of this 594 | License. But first, please read 595 | <>. 596 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | export(chat) 4 | export(chat_history) 5 | export(check_model_installed) 6 | export(copy_model) 7 | export(create_model) 8 | export(delete_model) 9 | export(embed_text) 10 | export(list_models) 11 | export(make_query) 12 | export(new_chat) 13 | export(ping_ollama) 14 | export(pull_model) 15 | export(query) 16 | export(show_model) 17 | -------------------------------------------------------------------------------- /NEWS.md: -------------------------------------------------------------------------------- 1 | # rollama (development version) 2 | 3 | # rollama 0.2.2 4 | 5 | # rollama 0.2.1 6 | 7 | * added support for structured output 8 | * added support for custom headers (e.g., for authentication) 9 | * added option for custom outputs 10 | * some bug fixes and improved documentation 11 | 12 | # rollama 0.2.0 13 | 14 | * added make_query() function to facilitate easier annotation 15 | * added more output formats to query()/chat() 16 | * improved performance of embed_text() 17 | * improved performance of query() for multiple queries 18 | * changed default model to llama3.1 19 | * added option to employ multiple servers 20 | * pull_model() gained verbose option 21 | * improved annotation vignette 22 | * added vignette on how to use Hugging Face Hub models 23 | * some bug fixes 24 | 25 | # rollama 0.1.0 26 | 27 | * adds function `check_model_installed` 28 | * changes default model to llama3 29 | 30 | # rollama 0.0.3 31 | 32 | * add option to query several models at once 33 | * dedicated embedding models are available now (see `vignette("text-embedding", "rollama")`) 34 | * error handling and bug fixes 35 | 36 | # rollama 0.0.2 37 | 38 | * Initial CRAN submission. 
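
To make the interface listed in NAMESPACE above more concrete, here is a minimal usage sketch. It is an editorial illustration, not part of the package sources; it assumes an Ollama server is running at the default local address and uses only functions exported by the package.

```r
library(rollama)

ping_ollama()  # check that the local Ollama server responds
pull_model()   # download the default model (llama3.1)

# single question, returning only the answer text
answer <- query("Why is the sky blue?", output = "text")

# multi-turn conversation that shares a history until new_chat() is called
chat("Why is the sky blue?")
chat("And how do you know that?")
new_chat()

# embeddings, e.g. as features for classification or clustering
emb <- embed_text(c("the pizza tastes terrible", "the service was great"))
```
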
39 | -------------------------------------------------------------------------------- /R/chat.r: -------------------------------------------------------------------------------- 1 | #' Chat with a LLM through Ollama 2 | #' 3 | #' @details `query` sends a single question to the API, without knowledge about 4 | #' previous questions (only the config message is relevant). `chat` treats new 5 | #' messages as part of the same conversation until [new_chat] is called. 6 | #' 7 | #' To make the output reproducible, you can set a seed with 8 | #' `options(rollama_seed = 42)`. As long as the seed stays the same, the 9 | #' models will give the same answer, changing the seed leads to a different 10 | #' answer. 11 | #' 12 | #' For the output of `query`, there are a couple of options: 13 | #' 14 | #' - `response`: the response of the Ollama server 15 | #' - `text`: only the answer as a character vector 16 | #' - `data.frame`: a data.frame containing model and response 17 | #' - `list`: a list containing the prompt to Ollama and the response 18 | #' - `httr2_response`: the response of the Ollama server including HTML 19 | #' headers in the `httr2` response format 20 | #' - `httr2_request`: httr2_request objects in a list, in case you want to run 21 | #' them with [httr2::req_perform()], [httr2::req_perform_sequential()], or 22 | #' [httr2::req_perform_parallel()] yourself. 23 | #' - a custom function that takes the `httr2_response`(s) from the Ollama 24 | #' server as an input. 25 | #' 26 | #' 27 | #' @param q the question as a character string or a conversation object. 28 | #' @param model which model(s) to use. See for 29 | #' options. Default is "llama3.1". Set `option(rollama_model = "modelname")` to 30 | #' change default for the current session. See [pull_model] for more 31 | #' details. 32 | #' @param screen Logical. Should the answer be printed to the screen. 33 | #' @param server URL to one or several Ollama servers (not the API). Defaults to 34 | #' "http://localhost:11434". 35 | #' @param images path(s) to images (for multimodal models such as llava). 36 | #' @param model_params a named list of additional model parameters listed in the 37 | #' [documentation for the 38 | #' Modelfile](https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values) 39 | #' such as temperature. Use a seed and set the temperature to zero to get 40 | #' reproducible results (see examples). 41 | #' @param output what the function should return. Possible values are 42 | #' "response", "text", "list", "data.frame", "httr2_response" or 43 | #' "httr2_request" or a function see details. 44 | #' @param format the format to return a response in. Currently the only accepted 45 | #' value is `"json"`. 46 | #' @param template the prompt template to use (overrides what is defined in the 47 | #' Modelfile). 48 | #' @param verbose Whether to print status messages to the Console. Either 49 | #' `TRUE`/`FALSE` or see [httr2::progress_bars]. The default is to have status 50 | #' messages in interactive sessions. Can be changed with 51 | #' `options(rollama_verbose = FALSE)`. 52 | #' 53 | #' @return list of objects set in output parameter. 
54 | #' @export 55 | #' 56 | #' @examplesIf interactive() 57 | #' # ask a single question 58 | #' query("why is the sky blue?") 59 | #' 60 | #' # hold a conversation 61 | #' chat("why is the sky blue?") 62 | #' chat("and how do you know that?") 63 | #' 64 | #' # save the response to an object and extract the answer 65 | #' resp <- query(q = "why is the sky blue?") 66 | #' answer <- resp[[1]]$message$content 67 | #' 68 | #' # or just get the answer directly 69 | #' answer <- query(q = "why is the sky blue?", output = "text") 70 | #' 71 | #' # besides the other output options, you can also supply a custom function 72 | #' query_duration <- function(resp) { 73 | #' nanosec <- purrr::map(resp, httr2::resp_body_json) |> 74 | #' purrr::map_dbl("total_duration") 75 | #' round(nanosec * 1e-9, digits = 2) 76 | #' } 77 | #' # this function only returns the number of seconds a request took 78 | #' res <- query("why is the sky blue?", output = query_duration) 79 | #' res 80 | #' 81 | #' # ask question about images (to a multimodal model) 82 | #' images <- c("https://avatars.githubusercontent.com/u/23524101?v=4", # remote 83 | #' "/path/to/your/image.jpg") # or local images supported 84 | #' query(q = "describe these images", 85 | #' model = "llava", 86 | #' images = images[1]) # just using the first path as the second is not real 87 | #' 88 | #' # set custom options for the model at runtime (rather than in create_model()) 89 | #' query("why is the sky blue?", 90 | #' model_params = list( 91 | #' num_keep = 5, 92 | #' seed = 42, 93 | #' num_predict = 100, 94 | #' top_k = 20, 95 | #' top_p = 0.9, 96 | #' min_p = 0.0, 97 | #' tfs_z = 0.5, 98 | #' typical_p = 0.7, 99 | #' repeat_last_n = 33, 100 | #' temperature = 0.8, 101 | #' repeat_penalty = 1.2, 102 | #' presence_penalty = 1.5, 103 | #' frequency_penalty = 1.0, 104 | #' mirostat = 1, 105 | #' mirostat_tau = 0.8, 106 | #' mirostat_eta = 0.6, 107 | #' penalize_newline = TRUE, 108 | #' numa = FALSE, 109 | #' num_ctx = 1024, 110 | #' num_batch = 2, 111 | #' num_gpu = 0, 112 | #' main_gpu = 0, 113 | #' low_vram = FALSE, 114 | #' vocab_only = FALSE, 115 | #' use_mmap = TRUE, 116 | #' use_mlock = FALSE, 117 | #' num_thread = 8 118 | #' )) 119 | #' 120 | #' # use a seed to get reproducible results 121 | #' query("why is the sky blue?", model_params = list(seed = 42)) 122 | #' 123 | #' # to set a seed for the whole session you can use 124 | #' options(rollama_seed = 42) 125 | #' 126 | #' # this might be interesting if you want to turn off the GPU and load the 127 | #' # model into the system memory (slower, but most people have more RAM than 128 | #' # VRAM, which might be interesting for larger models) 129 | #' query("why is the sky blue?", 130 | #' model_params = list(num_gpu = 0)) 131 | #' 132 | #' # Asking the same question to multiple models is also supported 133 | #' query("why is the sky blue?", model = c("llama3.1", "orca-mini")) 134 | #' 135 | #' # And if you have multiple Ollama servers in your network, you can send 136 | #' # requests to them in parallel 137 | #' if (ping_ollama(c("http://localhost:11434/", 138 | #' "http://192.168.2.45:11434/"))) { # check if servers are running 139 | #' query("why is the sky blue?", model = c("llama3.1", "orca-mini"), 140 | #' server = c("http://localhost:11434/", 141 | #' "http://192.168.2.45:11434/")) 142 | #' } 143 | query <- function(q, 144 | model = NULL, 145 | screen = TRUE, 146 | server = NULL, 147 | images = NULL, 148 | model_params = NULL, 149 | output = c("response", "text", "list", "data.frame", 
"httr2_response", "httr2_request"), 150 | format = NULL, 151 | template = NULL, 152 | verbose = getOption("rollama_verbose", 153 | default = interactive())) { 154 | if (!is.function(output)) { 155 | output <- match.arg(output) 156 | } 157 | 158 | # q can be a string, a data.frame, or list of data.frames 159 | if (is.character(q)) { 160 | config <- getOption("rollama_config", default = NULL) 161 | 162 | msg <- do.call(rbind, list( 163 | if (!is.null(config)) data.frame(role = "system", 164 | content = config), 165 | data.frame(role = "user", content = q) 166 | )) 167 | 168 | if (length(images) > 0) { 169 | rlang::check_installed("base64enc") 170 | images <- purrr::map_chr(images, \(i) base64enc::base64encode(i)) 171 | msg <- tibble::add_column(msg, images = list(images)) 172 | } 173 | msg <- list(msg) 174 | } else if (is.data.frame(q)) { 175 | msg <- list(check_conversation(q)) 176 | } else { 177 | msg <- purrr::map(q, check_conversation) 178 | } 179 | 180 | reqs <- build_req(model = model, 181 | msg = msg, 182 | server = server, 183 | images = images, 184 | model_params = model_params, 185 | format = format, 186 | template = template) 187 | 188 | if (identical(output, "httr2_request")) return(invisible(reqs)) 189 | 190 | if (length(reqs) > 1L) { 191 | resps <- perform_reqs(reqs, verbose) 192 | } else { 193 | resps <- perform_req(reqs, verbose) 194 | } 195 | 196 | res <- NULL 197 | if (screen) { 198 | res <- purrr::map(resps, httr2::resp_body_json) 199 | purrr::walk(res, function(r) { 200 | screen_answer(purrr::pluck(r, "message", "content"), 201 | purrr::pluck(r, "model")) 202 | }) 203 | } 204 | 205 | if (is.function(output)) { 206 | return(invisible(output(resps))) 207 | } 208 | 209 | if (identical(output, "httr2_response")) return(invisible(resps)) 210 | 211 | if (is.null(res)) { 212 | res <- purrr::map(resps, httr2::resp_body_json) 213 | } 214 | 215 | out <- switch(output, 216 | "response" = res, 217 | "text" = purrr::map_chr(res, c("message", "content")), 218 | "list" = process2list(res, reqs), 219 | "data.frame" = process2df(res) 220 | ) 221 | invisible(out) 222 | } 223 | 224 | 225 | #' @rdname query 226 | #' @export 227 | chat <- function(q, 228 | model = NULL, 229 | screen = TRUE, 230 | server = NULL, 231 | images = NULL, 232 | model_params = NULL, 233 | template = NULL, 234 | verbose = getOption("rollama_verbose", 235 | default = interactive())) { 236 | 237 | config <- getOption("rollama_config", default = NULL) 238 | hist <- chat_history() 239 | 240 | # save prompt 241 | names(q) <- Sys.time() 242 | the$prompts <- c(the$prompts, q) 243 | 244 | q <- data.frame(role = "user", content = q) 245 | if (length(images) > 0) { 246 | rlang::check_installed("base64enc") 247 | images <- list(purrr::map_chr(images, \(i) base64enc::base64encode(i))) 248 | q <- tibble::add_column(q, images = images) 249 | } 250 | 251 | msg <- do.call(rbind, (list( 252 | if (!is.null(config)) data.frame(role = "system", 253 | content = config), 254 | if (nrow(hist) > 0) hist[, c("role", "content")], 255 | q 256 | ))) 257 | 258 | resp <- query(q = msg, 259 | model = model, 260 | screen = screen, 261 | server = server, 262 | model_params = model_params, 263 | template = template, 264 | verbose = verbose) 265 | 266 | # save response 267 | r <- purrr::pluck(resp, 1, "message", "content") 268 | names(r) <- Sys.time() 269 | the$responses <- c(the$responses, r) 270 | 271 | invisible(resp) 272 | } 273 | 274 | 275 | #' Handle conversations 276 | #' 277 | #' Shows and deletes (`new_chat`) the local prompt and response 
history to start 278 | #' a new conversation. 279 | #' 280 | #' @return chat_history: tibble with chat history 281 | #' @export 282 | chat_history <- function() { 283 | out <- tibble::tibble( 284 | role = c(rep("user", length(the$prompts)), 285 | rep("assistant", length(the$responses))), 286 | content = unname(c(the$prompts, the$responses)), 287 | time = as.POSIXct(names(c(the$prompts, the$responses))) 288 | ) 289 | out[order(out$time), ] 290 | } 291 | 292 | 293 | #' @rdname chat_history 294 | #' @return new_chat: Does not return a value 295 | #' @export 296 | new_chat <- function() { 297 | the$responses <- NULL 298 | the$prompts <- NULL 299 | } 300 | 301 | 302 | 303 | #' Generate and format queries for a language model 304 | #' 305 | #' `make_query` generates structured input for a language model, including 306 | #' system prompt, user messages, and optional examples (assistant answers). 307 | #' 308 | #' @details The function supports the inclusion of examples, which are 309 | #' dynamically added to the structured input. Each example follows the same 310 | #' format as the primary user query. 311 | #' 312 | #' @param text A character vector of texts to be annotated. 313 | #' @param prompt A string defining the main task or question to be passed to the 314 | #' language model. 315 | #' @param template A string template for formatting user queries, containing 316 | #' placeholders like `{text}`, `{prefix}`, and `{suffix}`. 317 | #' @param system An optional string to specify a system prompt. 318 | #' @param prefix A prefix string to prepend to each user query. 319 | #' @param suffix A suffix string to append to each user query. 320 | #' @param examples A `tibble` with columns `text` and `answer`, representing 321 | #' example user messages and corresponding assistant responses. 322 | #' 323 | #' @return A list of tibbles, one for each input `text`, containing structured 324 | #' rows for system messages, user messages, and assistant responses. 
325 | #' @export 326 | #' 327 | #' @examples 328 | #' template <- "{prefix}{text}\n\n{prompt}{suffix}" 329 | #' examples <- tibble::tribble( 330 | #' ~text, ~answer, 331 | #' "This movie was amazing, with great acting and story.", "positive", 332 | #' "The film was okay, but not particularly memorable.", "neutral", 333 | #' "I found this movie boring and poorly made.", "negative" 334 | #' ) 335 | #' queries <- make_query( 336 | #' text = c("A stunning visual spectacle.", "Predictable but well-acted."), 337 | #' prompt = "Classify sentiment as positive, neutral, or negative.", 338 | #' template = template, 339 | #' system = "Provide a sentiment classification.", 340 | #' prefix = "Review: ", 341 | #' suffix = " Please classify.", 342 | #' examples = examples 343 | #' ) 344 | #' print(queries) 345 | #' if (ping_ollama()) { # only run this example when Ollama is running 346 | #' query(queries, screen = TRUE, output = "text") 347 | #' } 348 | make_query <- function(text, 349 | prompt, 350 | template = "{prefix}{text}\n{prompt}\n{suffix}", 351 | system = NULL, 352 | prefix = NULL, 353 | suffix = NULL, 354 | examples = NULL) { 355 | 356 | rlang::check_installed("glue") 357 | 358 | # Process each input text 359 | queries <- lapply(text, function(txt) { 360 | # Initialize structured query 361 | full_query <- tibble::tibble(role = character(), content = character()) 362 | 363 | # Add system message if provided 364 | if (!is.null(system)) { 365 | full_query <- full_query |> 366 | dplyr::add_row(role = "system", content = system) 367 | } 368 | 369 | # Add examples if provided 370 | if (!is.null(examples)) { 371 | examples <- tibble::as_tibble(examples) |> 372 | dplyr::rowwise() |> 373 | dplyr::mutate( 374 | user_content = glue::glue( 375 | template, 376 | text = text, 377 | prompt = prompt, 378 | prefix = prefix, 379 | suffix = suffix, 380 | .null = "" 381 | ) 382 | ) |> 383 | dplyr::ungroup() 384 | 385 | for (i in seq_len(nrow(examples))) { 386 | full_query <- full_query |> 387 | dplyr::add_row(role = "user", content = examples$user_content[i]) |> 388 | dplyr::add_row(role = "assistant", content = examples$answer[i]) 389 | } 390 | } 391 | 392 | # Add main user query 393 | main_query <- glue::glue( 394 | template, 395 | text = txt, 396 | prompt = prompt, 397 | prefix = prefix, 398 | suffix = suffix, 399 | .null = "" 400 | ) 401 | full_query <- full_query |> dplyr::add_row(role = "user", content = main_query) 402 | 403 | return(full_query) 404 | }) 405 | 406 | return(queries) 407 | } 408 | 409 | -------------------------------------------------------------------------------- /R/embedding.r: -------------------------------------------------------------------------------- 1 | #' Generate Embeddings 2 | #' 3 | #' @param text text vector to generate embeddings for. 4 | #' @param model which model to use. See for 5 | #' options. Default is "llama3.1". Set option(rollama_model = "modelname") to 6 | #' change default for the current session. See \link{pull_model} for more 7 | #' details. 8 | #' @param model_params a named list of additional model parameters listed in the 9 | #' [documentation for the 10 | #' Modelfile](https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values). 11 | #' @param verbose Whether to print status messages to the Console 12 | #' (\code{TRUE}/\code{FALSE}). The default is to have status messages in 13 | #' interactive sessions. Can be changed with \code{options(rollama_verbose = 14 | #' FALSE)}. 
15 | #' @inheritParams query 16 | #' 17 | #' @return a tibble with embeddings. 18 | #' @export 19 | #' 20 | #' @examples 21 | #' \dontrun{ 22 | #' embed_text(c( 23 | #' "Here is an article about llamas...", 24 | #' "R is a language and environment for statistical computing and graphics.")) 25 | #' } 26 | embed_text <- function(text, 27 | model = NULL, 28 | server = NULL, 29 | model_params = NULL, 30 | verbose = getOption("rollama_verbose", 31 | default = interactive())) { 32 | 33 | if (is.null(model)) model <- getOption("rollama_model", default = "llama3.1") 34 | if (is.null(server)) server <- getOption("rollama_server", 35 | default = "http://localhost:11434") 36 | check_model_installed(model, server = server) 37 | 38 | pb <- FALSE 39 | if (verbose) pb <- list( 40 | format = "{cli::pb_spin} embedding text {cli::pb_current} / {cli::pb_total} ({cli::pb_rate}) {cli::pb_eta}", 41 | format_done = "{cli::col_green(cli::symbol$tick)} embedded {cli::pb_total} texts {cli::col_silver('[', cli::pb_elapsed, ']')}", 42 | clear = FALSE 43 | ) 44 | 45 | reqs <- purrr::map(text, function(t) { 46 | list(model = model, 47 | prompt = t, 48 | stream = FALSE, 49 | model_params = model_params) |> 50 | purrr::compact() |> 51 | make_req(server = server, 52 | endpoint = "/api/embeddings") 53 | }) 54 | 55 | resps <- httr2::req_perform_parallel(reqs, progress = pb) 56 | 57 | out <- purrr::map(resps, function(resp) { 58 | if (httr2::resp_content_type(resp) == "application/json") { 59 | emd <- httr2::resp_body_json(resp) |> 60 | purrr::pluck("embedding") 61 | names(emd) <- paste0("dim_", seq_along(emd)) 62 | tibble::as_tibble(emd) 63 | } else { 64 | cli::cli_alert_danger("Request did not return embeddings") 65 | } 66 | }) |> 67 | dplyr::bind_rows() 68 | return(out) 69 | } 70 | -------------------------------------------------------------------------------- /R/lib.R: -------------------------------------------------------------------------------- 1 | #' Ping server to see if Ollama is reachable 2 | #' 3 | #' @param silent suppress warnings and status (only return `TRUE`/`FALSE`). 4 | #' @param version return version instead of `TRUE`. 5 | #' @inheritParams query 6 | #' 7 | #' @return TRUE if server is running 8 | #' @export 9 | ping_ollama <- function(server = NULL, silent = FALSE, version = FALSE) { 10 | 11 | if (is.null(server)) server <- getOption("rollama_server", 12 | default = "http://localhost:11434") 13 | 14 | out <- purrr::map(server, function(sv) { 15 | res <- try({ 16 | httr2::request(sv) |> 17 | httr2::req_url_path("api/version") |> 18 | httr2::req_perform() |> 19 | httr2::resp_body_json() 20 | }, silent = TRUE) 21 | 22 | if (!methods::is(res, "try-error") && purrr::pluck_exists(res, "version")) { 23 | if (!silent) cli::cli_inform( 24 | "{cli::col_green(cli::symbol$play)} Ollama (v{res$version}) is running at {.url {sv}}!" 
25 | ) 26 | if (version) return(res$version) 27 | return(TRUE) 28 | } else { 29 | if (!silent) { 30 | cli::cli_alert_danger("Could not connect to Ollama at {.url {sv}}") 31 | } 32 | return(FALSE) 33 | } 34 | }) 35 | invisible(unlist(out)) 36 | } 37 | 38 | 39 | build_req <- function(model, 40 | msg, 41 | server, 42 | images, 43 | model_params, 44 | format, 45 | template) { 46 | 47 | if (is.null(model)) model <- getOption("rollama_model", default = "llama3.1") 48 | if (is.null(server)) server <- getOption("rollama_server", 49 | default = "http://localhost:11434") 50 | seed <- getOption("rollama_seed") 51 | if (!is.null(seed) && !purrr::pluck_exists(model_params, "seed")) { 52 | model_params <- append(model_params, list(seed = seed)) 53 | } 54 | check_model_installed(model, server = server) 55 | if (length(msg) != length(model)) { 56 | if (length(model) > 1L) 57 | cli::cli_alert_info(c( 58 | "The number of queries is unequal to the number of models you supplied.", 59 | "We assume you want to run each query with each model" 60 | )) 61 | req_data <- purrr::map(msg, function(ms) { 62 | purrr::map(model, function(m) { 63 | list( 64 | model = m, 65 | messages = ms, 66 | stream = FALSE, 67 | options = model_params, 68 | format = format, 69 | template = template 70 | ) |> 71 | purrr::compact() |> # remove NULL values 72 | make_req( 73 | server = sample(server, 1, prob = as_prob(names(server))), 74 | endpoint = "/api/chat" 75 | ) 76 | }) 77 | }) |> 78 | unlist(recursive = FALSE) 79 | } else { 80 | req_data <- purrr::map2(msg, model, function(ms, m) { 81 | list( 82 | model = m, 83 | messages = ms, 84 | stream = FALSE, 85 | options = model_params, 86 | format = format, 87 | template = template 88 | ) |> 89 | purrr::compact() |> # remove NULL values 90 | make_req( 91 | server = sample(server, 1, prob = as_prob(names(server))), 92 | endpoint = "/api/chat" 93 | ) 94 | }) 95 | } 96 | 97 | return(req_data) 98 | } 99 | 100 | 101 | make_req <- function(req_data, server, endpoint) { 102 | r <- httr2::request(server) |> 103 | httr2::req_url_path_append(endpoint) |> 104 | httr2::req_body_json(prep_req_data(req_data), auto_unbox = FALSE) |> 105 | # see https://github.com/JBGruber/rollama/issues/23 106 | httr2::req_options(timeout_ms = 1000 * 60 * 60 * 24, 107 | connecttimeout_ms = 1000 * 60 * 60 * 24) |> 108 | httr2::req_headers(!!!get_headers()) 109 | return(r) 110 | } 111 | 112 | 113 | perform_reqs <- function(reqs, verbose) { 114 | 115 | model <- purrr::map_chr(reqs, c("body", "data", "model")) |> 116 | unique() 117 | pb <- FALSE 118 | if (!is.logical(verbose)) { 119 | pb <- verbose 120 | } else if (verbose) { 121 | pb <- list( 122 | clear = TRUE, 123 | format = c("{cli::pb_spin} {getOption('model')} {?is/are} thinking about ", 124 | "{cli::pb_total - cli::pb_current}/{cli::pb_total} question{?s}", 125 | "[ETA: {cli::pb_eta}]") 126 | ) 127 | } 128 | 129 | withr::with_options(list(cli.progress_show_after = 0, model = model), { 130 | resps <- httr2::req_perform_parallel( 131 | reqs = reqs, 132 | on_error = "continue", 133 | progress = pb 134 | ) 135 | }) 136 | 137 | fails <- httr2::resps_failures(resps) |> 138 | purrr::map_chr("message") 139 | 140 | # all fails 141 | if (length(fails) == length(reqs)) { 142 | cli::cli_abort(fails) 143 | } else if (length(fails) < length(reqs) && length(fails) > 0) { 144 | throw_error(fails) 145 | } 146 | 147 | httr2::resps_successes(resps) 148 | } 149 | 150 | 151 | perform_req <- function(reqs, verbose) { 152 | 153 | if (verbose) { 154 | model <- purrr::map_chr(reqs, c("body", 
"data", "model")) |> 155 | unique() 156 | 157 | id <- cli::cli_progress_bar(format = "{cli::pb_spin} {model} {?is/are} thinking", 158 | clear = TRUE) 159 | 160 | # turn off errors since error messages can't be seen in sub-process 161 | req <- httr2::req_error(reqs[[1]], is_error = function(resp) FALSE) 162 | 163 | rp <- callr::r_bg(httr2::req_perform, 164 | args = list(req = req), 165 | package = TRUE) 166 | 167 | while (rp$is_alive()) { 168 | cli::cli_progress_update(id = id) 169 | Sys.sleep(2 / 100) 170 | } 171 | resp <- rp$get_result() 172 | res <- httr2::resp_body_json(resp) 173 | if (purrr::pluck_exists(res, "error")) { 174 | cli::cli_abort(purrr::pluck(res, "error")) 175 | } 176 | return(list(resp)) 177 | } 178 | 179 | reqs[[1]] |> 180 | httr2::req_error(body = function(resp) httr2::resp_body_json(resp) |> 181 | purrr::pluck("error", .default = "unknown error")) |> 182 | httr2::req_perform() |> 183 | list() 184 | } 185 | 186 | 187 | get_headers <- function() { 188 | agent <- the$agent 189 | if (is.null(agent)) { 190 | sess <- utils::sessionInfo() 191 | the$agent <- agent <- paste0( 192 | "rollama/", utils::packageVersion("rollama"), 193 | "(", sess$platform, ") ", 194 | sess$R.version$version.string 195 | ) 196 | } 197 | list( 198 | "Content-Type" = "application/json", 199 | "Accept" = "application/json", 200 | "User-Agent" = agent, 201 | # get additional headers from option (if set) 202 | getOption("rollama_headers") 203 | ) |> 204 | unlist() 205 | } 206 | 207 | 208 | # the requirements for the data are a little weird as boxes can only show up in 209 | # very particular places in the json string. 210 | prep_req_data <- function(tbl) { 211 | if (purrr::pluck_exists(tbl, "options")) { 212 | tbl$options <- purrr::map(tbl$option, jsonlite::unbox) 213 | } 214 | purrr::modify_tree(tbl, leaf = function(x) { 215 | if (length(x) == 1L) { 216 | jsonlite::unbox(x) 217 | } else { 218 | x 219 | } 220 | }) 221 | } 222 | 223 | 224 | # function to display progress in streaming operations 225 | pgrs <- function(resp) { 226 | if (!getOption("rollama_verbose", default = interactive())) return(TRUE) 227 | the$str_prgs$stream_resp <- c(the$str_prgs$stream_resp, resp) 228 | resp <- the$str_prgs$stream_resp 229 | 230 | status <- strsplit(rawToChar(resp), "\n")[[1]] 231 | status <- grep("}$", x = status, value = TRUE) |> 232 | textConnection() |> 233 | jsonlite::stream_in(verbose = FALSE, simplifyVector = FALSE) 234 | 235 | status <- setdiff(status, the$str_prgs$pb_done) 236 | for (s in status) { 237 | status_message <- purrr::pluck(s, "status") 238 | if (!purrr::pluck_exists(s, "total")) { 239 | if (isTRUE(status_message == "success")) { 240 | cli::cli_progress_message("{cli::col_green(cli::symbol$tick)} success!") 241 | } else if (purrr::pluck_exists(s, "error")) { 242 | cli::cli_abort("{purrr::pluck(s, \"error\")}") 243 | } else { 244 | cli::cli_progress_step(purrr::pluck(s, "status"), .envir = the) 245 | } 246 | } else { 247 | the$str_prgs$f <- sub("pulling ", "", purrr::pluck(s, "status")) 248 | the$str_prgs$done <- purrr::pluck(s, "completed", .default = 0L) 249 | the$str_prgs$total <- purrr::pluck(s, "total", .default = 0L) 250 | the$str_prgs$done_pct <- 251 | paste(round(the$str_prgs$done / the$str_prgs$total * 100, 0), "%") 252 | if (the$str_prgs$done != the$str_prgs$total) { 253 | the$str_prgs$speed <- try(as.integer( 254 | the$str_prgs$done / 255 | (as.integer(Sys.time()) - as.integer(the$str_prgs$pb_start)) 256 | ), silent = TRUE) 257 | if (methods::is(the$str_prgs$speed, "try-error")) 258 | 
the$str_prgs$speed <- 0L 259 | } else { 260 | the$str_prgs$speed <- 1L 261 | } 262 | 263 | if (!isTRUE(the$str_prgs$pb == the$str_prgs$f)) { 264 | cli::cli_progress_bar( 265 | name = the$str_prgs$f, 266 | type = "download", 267 | format = paste0( 268 | "{cli::pb_spin} downloading {str_prgs$f} ", 269 | "({str_prgs$done_pct} of {prettyunits::pretty_bytes(str_prgs$total)}) ", 270 | "at {prettyunits::pretty_bytes(str_prgs$speed)}/s" 271 | ), 272 | format_done = paste0( 273 | "{cli::col_green(cli::symbol$tick)} downloaded {str_prgs$f}" 274 | ), 275 | .envir = the 276 | ) 277 | the$str_prgs$pb <- the$str_prgs$f 278 | the$str_prgs$pb_start <- Sys.time() 279 | } else { 280 | if (the$str_prgs$total > the$str_prgs$done) { 281 | cli::cli_progress_update(force = TRUE, .envir = the) 282 | } else { 283 | cli::cli_process_done(.envir = the) 284 | the$str_prgs$pb <- NULL 285 | } 286 | } 287 | } 288 | the$str_prgs$pb_done <- append(the$str_prgs$pb_done, list(s)) 289 | } 290 | TRUE 291 | } 292 | -------------------------------------------------------------------------------- /R/models.r: -------------------------------------------------------------------------------- 1 | #' Pull, show and delete models 2 | #' 3 | #' @details 4 | #' - `pull_model()`: downloads model 5 | #' - `show_model()`: displays information about a local model 6 | #' - `copy_model()`: creates a model with another name from an existing model 7 | #' - `delete_model()`: deletes local model 8 | #' 9 | #' **Model names**: Model names follow a model:tag format, where model can have 10 | #' an optional namespace such as example/model. Some examples are 11 | #' orca-mini:3b-q4_1 and llama3.1:70b. The tag is optional and, if not provided, 12 | #' will default to latest. The tag is used to identify a specific version. 13 | #' 14 | #' @param model name of the model(s). Defaults to "llama3.1" when `NULL` (except 15 | #' in `delete_model`). 16 | #' @param insecure allow insecure connections to the library. Only use this if 17 | #' you are pulling from your own library during development. 18 | #' @param destination name of the copied model. 
19 | #' @inheritParams query 20 | #' 21 | #' @return (invisible) a tibble with information about the model (except in 22 | #' `delete_model`) 23 | #' @export 24 | #' 25 | #' @examples 26 | #' \dontrun{ 27 | #' # download a model and save information in an object 28 | #' model_info <- pull_model("mixtral") 29 | #' # after you pull, you can get the same information with: 30 | #' model_info <- show_model("mixtral") 31 | #' # pulling models from Hugging Face Hub is also possible 32 | #' pull_model("https://huggingface.co/oxyapi/oxy-1-small-GGUF:Q2_K") 33 | #' } 34 | pull_model <- function(model = NULL, 35 | server = NULL, 36 | insecure = FALSE, 37 | verbose = getOption("rollama_verbose", 38 | default = interactive())) { 39 | 40 | if (is.null(model)) model <- getOption("rollama_model", default = "llama3.1") 41 | if (is.null(server)) server <- getOption("rollama_server", 42 | default = "http://localhost:11434") 43 | 44 | if (length(model) > 1L) { 45 | for (m in model) pull_model(m, server, insecure, verbose) 46 | } 47 | 48 | # flush progress 49 | the$str_prgs <- NULL 50 | req <- httr2::request(server) |> 51 | httr2::req_url_path_append("/api/pull") |> 52 | httr2::req_body_json(list(name = model, insecure = insecure)) |> 53 | httr2::req_headers(!!!get_headers()) 54 | if (verbose) { 55 | httr2::req_perform_stream(req, callback = pgrs, buffer_kb = 0.1) 56 | cli::cli_process_done(.envir = the) 57 | } else { 58 | httr2::req_perform(req) 59 | } 60 | total <- try(as.integer(the$str_prgs$total), silent = TRUE) 61 | if (methods::is(total, "try-error")) { 62 | cli::cli_alert_success("model {model} pulled succesfully") 63 | } else { 64 | total <- prettyunits::pretty_bytes(total) 65 | cli::cli_alert_success("model {model} ({total}) pulled succesfully") 66 | } 67 | the$str_prgs <- NULL 68 | 69 | invisible(show_model(model)) 70 | } 71 | 72 | 73 | #' @rdname pull_model 74 | #' @export 75 | show_model <- function(model = NULL, server = NULL) { 76 | 77 | if (is.null(model)) model <- getOption("rollama_model", default = "llama3.1") 78 | if (is.null(server)) server <- getOption("rollama_server", 79 | default = "http://localhost:11434") 80 | if (length(model) != 1L) cli::cli_abort("model needs to be one model name.") 81 | 82 | httr2::request(server) |> 83 | httr2::req_url_path_append("/api/show") |> 84 | httr2::req_body_json(list(name = model)) |> 85 | httr2::req_error(body = function(resp) httr2::resp_body_json(resp)$error) |> 86 | httr2::req_headers(!!!get_headers()) |> 87 | httr2::req_perform() |> 88 | httr2::resp_body_json() |> 89 | purrr::list_flatten(name_spec = "{inner}") |> 90 | as_tibble_onerow() 91 | } 92 | 93 | 94 | #' Create a model from a Modelfile 95 | #' 96 | #' @param model name of the model to create 97 | #' @param modelfile either a path to a model file to be read or the contents of 98 | #' the model file as a character vector. 99 | #' @inheritParams query 100 | #' 101 | #' @details Custom models are the way to save your system message and model 102 | #' parameters in a dedicated shareable way. If you use `show_model()`, you can 103 | #' look at the configuration of a model in the column modelfile. To get more 104 | #' information and a list of valid parameters, check out 105 | #' . Most 106 | #' options are also available through the `query` and `chat` functions, yet 107 | #' are not persistent over sessions. 108 | #' 109 | #' 110 | #' @return Nothing. Called to create a model on the Ollama server. 
111 | #' @export 112 | #' 113 | #' @examples 114 | #' modelfile <- system.file("extdata", "modelfile.txt", package = "rollama") 115 | #' \dontrun{create_model("mario", modelfile)} 116 | #' modelfile <- "FROM llama3.1\nSYSTEM You are mario from Super Mario Bros." 117 | #' \dontrun{create_model("mario", modelfile)} 118 | create_model <- function(model, modelfile, server = NULL) { 119 | 120 | if (is.null(server)) server <- getOption("rollama_server", 121 | default = "http://localhost:11434") 122 | if (isTRUE(file.exists(modelfile))) { 123 | modelfile <- readChar(modelfile, file.size(modelfile)) 124 | } else if (length(modelfile) > 1) { 125 | modelfile <- paste0(modelfile, collapse = "\n") 126 | } 127 | 128 | # flush progress 129 | the$str_prgs <- NULL 130 | httr2::request(server) |> 131 | httr2::req_url_path_append("/api/create") |> 132 | httr2::req_method("POST") |> 133 | httr2::req_body_json(list(name = model, modelfile = modelfile)) |> 134 | httr2::req_headers(!!!get_headers()) |> 135 | httr2::req_perform_stream(callback = pgrs, buffer_kb = 0.1) 136 | 137 | cli::cli_process_done(.envir = the) 138 | the$str_prgs <- NULL 139 | 140 | model_info <- show_model(model) # move here to test if model was created 141 | cli::cli_alert_success("model {model} created") 142 | invisible(model_info) 143 | } 144 | 145 | 146 | #' @rdname pull_model 147 | #' @export 148 | delete_model <- function(model, server = NULL) { 149 | 150 | if (is.null(server)) server <- getOption("rollama_server", 151 | default = "http://localhost:11434") 152 | 153 | httr2::request(server) |> 154 | httr2::req_url_path_append("/api/delete") |> 155 | httr2::req_method("DELETE") |> 156 | httr2::req_body_json(list(name = model)) |> 157 | httr2::req_error(body = function(resp) httr2::resp_body_json(resp)$error) |> 158 | httr2::req_headers(!!!get_headers()) |> 159 | httr2::req_perform() 160 | 161 | cli::cli_alert_success("model {model} removed") 162 | } 163 | 164 | 165 | #' @rdname pull_model 166 | #' @export 167 | copy_model <- function(model, 168 | destination = paste0(model, "-copy"), 169 | server = NULL) { 170 | 171 | if (is.null(server)) server <- getOption("rollama_server", 172 | default = "http://localhost:11434") 173 | 174 | httr2::request(server) |> 175 | httr2::req_url_path_append("/api/copy") |> 176 | httr2::req_body_json(list(source = model, 177 | destination = destination)) |> 178 | httr2::req_error(body = function(resp) httr2::resp_body_json(resp)$error) |> 179 | httr2::req_headers(!!!get_headers()) |> 180 | httr2::req_perform() 181 | 182 | cli::cli_alert_success("model {model} copied to {destination}") 183 | } 184 | 185 | 186 | #' List models that are available locally. 
187 | #' 188 | #' @inheritParams query 189 | #' 190 | #' @return a tibble of installed models 191 | #' @export 192 | list_models <- function(server = NULL) { 193 | 194 | if (is.null(server)) server <- getOption("rollama_server", 195 | default = "http://localhost:11434") 196 | 197 | httr2::request(server) |> 198 | httr2::req_url_path_append("/api/tags") |> 199 | httr2::req_headers(!!!get_headers()) |> 200 | httr2::req_perform() |> 201 | httr2::resp_body_json() |> 202 | purrr::pluck("models") |> 203 | purrr::map(\(x) purrr::list_flatten(x, name_spec = "{inner}")) |> 204 | dplyr::bind_rows() 205 | } 206 | -------------------------------------------------------------------------------- /R/rollama-package.R: -------------------------------------------------------------------------------- 1 | # package environment 2 | the <- new.env() 3 | 4 | #' @keywords internal 5 | "_PACKAGE" 6 | 7 | #' @title rollama Options 8 | #' @name rollama-options 9 | #' 10 | #' @description The behaviour of `rollama` can be controlled through 11 | #' `options()`. Specifically, the options below can be set. 12 | #' 13 | #' @details 14 | #' \describe{ 15 | #' \item{rollama_server}{\describe{ 16 | #' This controls the default server where Ollama is expected to run. It assumes 17 | #' that you are running Ollama locally in a Docker container. 18 | #' \item{default:}{\code{"http://localhost:11434"}} 19 | #' }} 20 | #' \item{rollama_model}{\describe{ 21 | #' The default model is llama3.1, which is a good overall option with reasonable 22 | #' performance and size for most tasks. You can change the model in each 23 | #' function call or globally with this option. 24 | #' \item{default:}{\code{"llama3.1"}} 25 | #' }} 26 | #' \item{rollama_verbose}{\describe{ 27 | #' Whether the package tells users what is going on, e.g., showing a spinner 28 | #' while the models are thinking or showing the download speed while pulling 29 | #' models. Since this adds some complexity to the code, you might want to 30 | #' disable it when you get errors (it won't fix the error, but you get a 31 | #' better error trace). 32 | #' \item{default:}{\code{TRUE}} 33 | #' }} 34 | #' \item{rollama_config}{\describe{ 35 | #' The default configuration or system message. If NULL, the system message 36 | #' defined in the used model is employed. 37 | #' \item{default:}{None} 38 | #' }} 39 | #' \item{rollama_seed}{\describe{ 40 | #' As long as the seed stays the same, the 41 | #' models will give the same answer, changing the seed leads to a different 42 | #' answer. Per default, no seed is set and each call to \code{query()} or 43 | #' \code{chat()} will give you a different answer. 44 | #' \item{default:}{None} 45 | #' }} 46 | #' } 47 | #' @examples 48 | #' options(rollama_config = "You make answers understandable to a 5 year old") 49 | NULL 50 | 51 | ## usethis namespace: start 52 | ## usethis namespace: end 53 | NULL 54 | -------------------------------------------------------------------------------- /R/utils.r: -------------------------------------------------------------------------------- 1 | screen_answer <- function(x, model = NULL) { 2 | pars <- unlist(strsplit(x, "\n", fixed = TRUE)) 3 | cli::cli_h1("Answer from {cli::style_bold({model})}") 4 | # "{i}" instead of i stops glue from evaluating code inside the answer 5 | for (i in pars) cli::cli_text("{i}") 6 | } 7 | 8 | 9 | #' Check if one or several models are installed on the server 10 | #' 11 | #' @param model names of one or several models as character vector. 
12 | #' @param check_only only return TRUE/FALSE and don't download models. 13 | #' @param auto_pull if FALSE, the default, asks before downloading models. 14 | #' @inheritParams query 15 | #' 16 | #' @return invisible TRUE/FALSE 17 | #' @export 18 | check_model_installed <- function(model, 19 | check_only = FALSE, 20 | auto_pull = FALSE, 21 | server = getOption("rollama_server", 22 | default = "http://localhost:11434")) { 23 | 24 | model <- sub("^([^:]+)$", "\\1:latest", model) 25 | for (sv in server) { 26 | models_df <- list_models(server = sv) 27 | mdl <- setdiff(model, models_df[["name"]]) 28 | 29 | if (length(mdl) > 0L) { 30 | if (check_only) { 31 | return(invisible(FALSE)) 32 | } 33 | if (interactive() && !auto_pull) { 34 | msg <- c( 35 | "{cli::col_cyan(cli::symbol$info)}", 36 | " Model{?s} {.emph {mdl}} not installed on {sv}.", 37 | " Would you like to download {?it/them}?" 38 | ) 39 | auto_pull <- utils::askYesNo(cli::cli_text(msg)) 40 | } 41 | if (!auto_pull) { 42 | cli::cli_abort("Model {mdl} not installed on {sv}.") 43 | return(invisible(FALSE)) 44 | } 45 | } 46 | if (auto_pull) { 47 | for (m in mdl) { 48 | pull_model(m, server = sv) 49 | } 50 | } 51 | } 52 | return(invisible(TRUE)) 53 | } 54 | 55 | 56 | # process responses to list 57 | process2list <- function(resps, reqs) { 58 | purrr::map2(resps, reqs, function(resp, req) { 59 | list( 60 | request = list( 61 | model = purrr::pluck(req, "body", "data", "model"), 62 | role = purrr::pluck(req, "body", "data", "messages", "role"), 63 | message = purrr::pluck(req, "body", "data", "messages", "content") 64 | ), 65 | response = list( 66 | model = purrr::pluck(resp, "model"), 67 | role = purrr::pluck(resp, "message", "role"), 68 | message = purrr::pluck(resp, "message", "content") 69 | ) 70 | ) 71 | }) 72 | } 73 | 74 | 75 | # process responses to data.frame 76 | process2df <- function(resps) { 77 | tibble::tibble( 78 | model = purrr::map_chr(resps, "model"), 79 | role = purrr::map_chr(resps, c("message", "role")), 80 | response = purrr::map_chr(resps, c("message", "content")) 81 | ) 82 | } 83 | 84 | 85 | # makes sure list can be turned into tibble 86 | as_tibble_onerow <- function(l) { 87 | l <- purrr::map(l, function(c) { 88 | if (length(c) != 1) { 89 | return(list(c)) 90 | } 91 | return(c) 92 | }) 93 | # .name_repair required for older versions of Ollama 94 | tibble::as_tibble(l, .name_repair = "minimal") 95 | } 96 | 97 | 98 | as_prob <- function(x) { 99 | if (!is.null(x)) { 100 | out <- try(as.numeric(x), silent = TRUE) 101 | if (methods::is(out, "try-error")) { 102 | cli::cli_abort("Names must be parsable to a numeric vector of probability weights") 103 | } 104 | return(out) 105 | } 106 | return(x) 107 | } 108 | 109 | 110 | check_conversation <- function(msg) { 111 | if (!"user" %in% msg$role && nchar(msg$content) > 0) 112 | cli::cli_abort(paste("If you supply a conversation object, it needs at", 113 | "least one user message. 
See {.help query}.")) 114 | return(msg) 115 | } 116 | 117 | throw_error <- function(fails) { 118 | error_counts <- table(fails) 119 | for (f in names(error_counts)) { 120 | if (error_counts[f] > 2) { 121 | cli::cli_alert_danger("error ({error_counts[f]} times): {f}") 122 | } else { 123 | cli::cli_alert_danger("error: {f}") 124 | } 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /README.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | output: github_document 3 | --- 4 | 5 | 6 | 7 | ```{r, include = FALSE} 8 | knitr::opts_chunk$set( 9 | collapse = TRUE, 10 | comment = "#>", 11 | fig.path = "man/figures/README-", 12 | out.width = "100%" 13 | ) 14 | options(rollama_seed = 42) 15 | ``` 16 | 17 | # `rollama` rollama-logo 18 | 19 | 20 | [![R-CMD-check](https://github.com/JBGruber/rollama/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/JBGruber/rollama/actions/workflows/R-CMD-check.yaml) 21 | [![Codecov test coverage](https://codecov.io/gh/JBGruber/rollama/branch/main/graph/badge.svg)](https://app.codecov.io/gh/JBGruber/rollama?branch=main) 22 | [![CRAN status](https://www.r-pkg.org/badges/version/rollama)](https://CRAN.R-project.org/package=rollama) 23 | [![CRAN_Download_Badge](https://cranlogs.r-pkg.org/badges/grand-total/rollama)](https://cran.r-project.org/package=rollama) 24 | [![arXiv:10.48550/arXiv.2404.07654](https://img.shields.io/badge/DOI-arXiv.2404.07654-B31B1B?logo=arxiv)](https://doi.org/10.48550/arXiv.2404.07654) 25 | [![say-thanks](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/JBGruber) 26 | 27 | 28 | The goal of `rollama` is to wrap the Ollama API, which allows you to run different LLMs locally and create an experience similar to ChatGPT/OpenAI's API. 29 | Ollama is very easy to deploy and handles a huge number of models. 30 | Checkout the project here: . 31 | 32 | 33 | ## Installation 34 | 35 | You can install this package from CRAN: 36 | 37 | ``` r 38 | install.packages("rollama") 39 | ``` 40 | 41 | Or you can install the development version of `rollama` from [GitHub](https://github.com/JBGruber/rollama). This version is updated more frequently and may contain bug fixes (or new bugs): 42 | 43 | ``` r 44 | # install.packages("remotes") 45 | remotes::install_github("JBGruber/rollama") 46 | ``` 47 | 48 | However, `rollama` is just the client package. 49 | The models are run in `Ollama`, which you need to install on your system, on a remote system or through [Docker](https://docs.docker.com/desktop/). 50 | The easiest way is to simply download and install the Ollama application from [their website](https://ollama.com/). 51 | Once `Ollama` is running, you can see if you can access it with: 52 | 53 | ```{r} 54 | rollama::ping_ollama() 55 | ``` 56 | 57 | 58 | ### Installation of Ollama through Docker 59 | 60 | For beginners we recommend to download Ollama from [their website](https://ollama.com/). However, if you are familiar with Docker, you can also run Ollama through Docker. The advantage of running things through Docker is that the application is isolated from the rest of your system, behaves the same on different systems, and is easy to download and update. 61 | You can also get a nice web interface. 62 | After making sure [Docker](https://docs.docker.com/desktop/) is installed, you can simply use the Docker Compose file from [this gist](https://gist.github.com/JBGruber/73f9f49f833c6171b8607b976abc0ddc). 
63 | 64 | If you don’t know how to use Docker Compose, you can follow this [video](https://www.youtube.com/watch?v=iMyCdd5nP5U) to use the compose file and start Ollama and Open WebUI. 65 | 66 | ## Example 67 | 68 | The first thing you should do after installation is to pull one of the models from . 69 | By calling `pull_model()` without arguments, you are pulling the (current) default model --- "llama3.1 8b": 70 | 71 | ```{r lib} 72 | library(rollama) 73 | ``` 74 | ```{r eval=FALSE} 75 | pull_model() 76 | ``` 77 | 78 | There are two ways to communicate with the Ollama API. 79 | You can make single requests, which does not store any history and treats each query as the beginning of a new chat: 80 | 81 | ```{r query} 82 | # ask a single question 83 | query("Why is the sky blue? Answer with one sentence.") 84 | ``` 85 | 86 | With the output argument, we can specify the format of the response. Available options include "text", "list", "data.frame", "response", "httr2_response", and "httr2_request": 87 | 88 | ```{r output} 89 | # ask a single question and specify the output format 90 | query("Why is the sky blue? Answer with one sentence." , output = "text") 91 | ``` 92 | 93 | Or you can use the `chat` function, treats all messages sent during an R session as part of the same conversation: 94 | 95 | ```{r chat} 96 | # hold a conversation 97 | chat("Why is the sky blue? Give a short answer.") 98 | chat("And how do you know that? Give a short answer.") 99 | ``` 100 | 101 | If you are done with a conversation and want to start a new one, you can do that like so: 102 | 103 | ```{r new} 104 | new_chat() 105 | ``` 106 | 107 | ## Model parameters 108 | 109 | You can set a number of model parameters, either by creating a new model, with a [modelfile](https://jbgruber.github.io/rollama/reference/create_model.html), or by including the parameters in the prompt: 110 | 111 | ```{r} 112 | query("Why is the sky blue? Answer with one sentence.", output = "text", 113 | model_params = list( 114 | seed = 42, 115 | num_gpu = 0) 116 | ) 117 | ``` 118 | 119 | ```{r include=FALSE, results='asis'} 120 | l <- readLines("https://raw.githubusercontent.com/ollama/ollama/main/docs/modelfile.md") 121 | s <- grep("#### Valid Parameters and Values", l, fixed = TRUE) 122 | e <- grep("### TEMPLATE", l, fixed = TRUE) 123 | cat(l[s:e - 1], sep = "\n") 124 | ``` 125 | 126 | 127 | ## Configuration 128 | 129 | You can configure the server address, the system prompt and the model used for a query or chat. 130 | If not configured otherwise, `rollama` assumes you are using the default port (11434) of a local instance ("localhost"). 131 | Let's make this explicit by setting the option: 132 | 133 | ```{r server} 134 | options(rollama_server = "http://localhost:11434") 135 | ``` 136 | 137 | You can change how a model answers by setting a configuration or system message in plain English (or another language supported by the model): 138 | 139 | ```{r config} 140 | options(rollama_config = "You make short answers understandable to a 5 year old") 141 | query("Why is the sky blue?") 142 | ``` 143 | 144 | By default, the package uses the "llama3.1 8B" model. Supported models can be found at . 145 | To download a specific model make use of the additional information available in "Tags" . 146 | Change this via `rollama_model`: 147 | 148 | ```{r model} 149 | options(rollama_model = "llama3.2:3b-instruct-q4_1") 150 | # if you don't have the model yet: pull_model("llama3.2:3b-instruct-q4_1") 151 | query("Why is the sky blue? 
Answer with one sentence.") 152 | ``` 153 | 154 | ## Easy query generation 155 | 156 | The `make_query` function simplifies the creation of structured queries, which can, for example, be used in [annotation tasks](https://jbgruber.github.io/rollama/articles/annotation.html#the-make_query-helper-function). 157 | 158 | Main components (check the [documentation](https://jbgruber.github.io/rollama/articles/annotation.html#the-make_query-helper-function) for more options): 159 | 160 | - **`text`**: The text(s) to classify. 161 | - **`prompt`**: Could be a (classification) question 162 | - **`system`**: Optional system prompt providing context or instructions for the task. 163 | - **`examples`**: Optional prior examples for one-shot or few-shot learning (user messages and assistant responses). 164 | 165 | 166 | **Zero-shot Example** 167 | In this example, the function is used without examples: 168 | 169 | ```{r make_query} 170 | # Create a query using make_query 171 | q_zs <- make_query( 172 | text = "the pizza tastes terrible", 173 | prompt = "Is this text: 'positive', 'neutral', or 'negative'?", 174 | system = "You assign texts into categories. Answer with just the correct category." 175 | ) 176 | # Print the query 177 | print(q_zs) 178 | # Run the query 179 | query(q_zs, output = "text") 180 | 181 | ``` 182 | 183 | ## Learn more 184 | 185 | - [Use rollama for annotation tasks](https://jbgruber.github.io/rollama/articles/annotation.html) 186 | - [Annotate images](https://jbgruber.github.io/rollama/articles/image-annotation.html) 187 | - [Get text embedding](https://jbgruber.github.io/rollama/articles/text-embedding.html) 188 | - [Use more models (GGUF format) from Hugging Face](https://jbgruber.github.io/rollama/articles/hf-gguf.html) 189 | 190 | 191 | ## Citation 192 | 193 | Please cite the package using the [pre print](https://arxiv.org/abs/2404.07654) DOI: 194 | 195 | 196 | 197 | 198 | 199 | 200 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | # `rollama` rollama-logo 5 | 6 | 7 | 8 | [![R-CMD-check](https://github.com/JBGruber/rollama/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/JBGruber/rollama/actions/workflows/R-CMD-check.yaml) 9 | [![Codecov test 10 | coverage](https://codecov.io/gh/JBGruber/rollama/branch/main/graph/badge.svg)](https://app.codecov.io/gh/JBGruber/rollama?branch=main) 11 | [![CRAN 12 | status](https://www.r-pkg.org/badges/version/rollama)](https://CRAN.R-project.org/package=rollama) 13 | [![CRAN_Download_Badge](https://cranlogs.r-pkg.org/badges/grand-total/rollama)](https://cran.r-project.org/package=rollama) 14 | [![arXiv:10.48550/arXiv.2404.07654](https://img.shields.io/badge/DOI-arXiv.2404.07654-B31B1B?logo=arxiv)](https://doi.org/10.48550/arXiv.2404.07654) 15 | [![say-thanks](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/JBGruber) 16 | 17 | 18 | The goal of `rollama` is to wrap the Ollama API, which allows you to run 19 | different LLMs locally and create an experience similar to 20 | ChatGPT/OpenAI’s API. Ollama is very easy to deploy and handles a huge 21 | number of models. Checkout the project here: 22 | . 23 | 24 | ## Installation 25 | 26 | You can install this package from CRAN: 27 | 28 | ``` r 29 | install.packages("rollama") 30 | ``` 31 | 32 | Or you can install the development version of `rollama` from 33 | [GitHub](https://github.com/JBGruber/rollama). 
This version is updated 34 | more frequently and may contain bug fixes (or new bugs): 35 | 36 | ``` r 37 | # install.packages("remotes") 38 | remotes::install_github("JBGruber/rollama") 39 | ``` 40 | 41 | However, `rollama` is just the client package. The models are run in 42 | `Ollama`, which you need to install on your system, on a remote system 43 | or through [Docker](https://docs.docker.com/desktop/). The easiest way 44 | is to simply download and install the Ollama application from [their 45 | website](https://ollama.com/). Once `Ollama` is running, you can see if 46 | you can access it with: 47 | 48 | ``` r 49 | rollama::ping_ollama() 50 | #> ▶ Ollama (v0.6.1) is running at ! 51 | ``` 52 | 53 | ### Installation of Ollama through Docker 54 | 55 | For beginners we recommend to download Ollama from [their 56 | website](https://ollama.com/). However, if you are familiar with Docker, 57 | you can also run Ollama through Docker. The advantage of running things 58 | through Docker is that the application is isolated from the rest of your 59 | system, behaves the same on different systems, and is easy to download 60 | and update. You can also get a nice web interface. After making sure 61 | [Docker](https://docs.docker.com/desktop/) is installed, you can simply 62 | use the Docker Compose file from [this 63 | gist](https://gist.github.com/JBGruber/73f9f49f833c6171b8607b976abc0ddc). 64 | 65 | If you don’t know how to use Docker Compose, you can follow this 66 | [video](https://www.youtube.com/watch?v=iMyCdd5nP5U) to use the compose 67 | file and start Ollama and Open WebUI. 68 | 69 | ## Example 70 | 71 | The first thing you should do after installation is to pull one of the 72 | models from . By calling `pull_model()` 73 | without arguments, you are pulling the (current) default model — 74 | “llama3.1 8b”: 75 | 76 | ``` r 77 | library(rollama) 78 | ``` 79 | 80 | ``` r 81 | pull_model() 82 | ``` 83 | 84 | There are two ways to communicate with the Ollama API. You can make 85 | single requests, which does not store any history and treats each query 86 | as the beginning of a new chat: 87 | 88 | ``` r 89 | # ask a single question 90 | query("Why is the sky blue? Answer with one sentence.") 91 | #> 92 | #> ── Answer from llama3.1 ──────────────────────────────────────────────────────── 93 | #> The sky appears blue because of a phenomenon called Rayleigh scattering, in 94 | #> which shorter (blue) wavelengths of light are scattered more than longer (red) 95 | #> wavelengths by the tiny molecules of gases in the Earth's atmosphere. 96 | ``` 97 | 98 | With the output argument, we can specify the format of the response. 99 | Available options include “text”, “list”, “data.frame”, “response”, 100 | “httr2_response”, and “httr2_request”: 101 | 102 | ``` r 103 | # ask a single question and specify the output format 104 | query("Why is the sky blue? Answer with one sentence." , output = "text") 105 | #> 106 | #> ── Answer from llama3.1 ──────────────────────────────────────────────────────── 107 | #> The sky appears blue because of a phenomenon called Rayleigh scattering, in 108 | #> which shorter (blue) wavelengths of light are scattered more than longer (red) 109 | #> wavelengths by the tiny molecules of gases in the Earth's atmosphere. 110 | ``` 111 | 112 | Or you can use the `chat` function, treats all messages sent during an R 113 | session as part of the same conversation: 114 | 115 | ``` r 116 | # hold a conversation 117 | chat("Why is the sky blue? 
Give a short answer.") 118 | #> 119 | #> ── Answer from llama3.1 ──────────────────────────────────────────────────────── 120 | #> The sky appears blue because of a phenomenon called Rayleigh scattering, where 121 | #> shorter (blue) wavelengths of light are scattered more than longer (red) 122 | #> wavelengths by the tiny molecules of gases in the atmosphere. This scattering 123 | #> effect gives our sky its distinctive blue color during the daytime. 124 | chat("And how do you know that? Give a short answer.") 125 | #> 126 | #> ── Answer from llama3.1 ──────────────────────────────────────────────────────── 127 | #> I was trained on a vast amount of scientific knowledge and data, including 128 | #> information from various fields like physics, atmospheric science, and 129 | #> astronomy. Additionally, I've been fine-tuned to recognize and recall reliable 130 | #> sources, such as NASA, the Royal Society, and other reputable institutions that 131 | #> explain the phenomenon of Rayleigh scattering and its effect on the sky's 132 | #> color. 133 | ``` 134 | 135 | If you are done with a conversation and want to start a new one, you can 136 | do that like so: 137 | 138 | ``` r 139 | new_chat() 140 | ``` 141 | 142 | ## Model parameters 143 | 144 | You can set a number of model parameters, either by creating a new 145 | model, with a 146 | [modelfile](https://jbgruber.github.io/rollama/reference/create_model.html), 147 | or by including the parameters in the prompt: 148 | 149 | ``` r 150 | query("Why is the sky blue? Answer with one sentence.", output = "text", 151 | model_params = list( 152 | seed = 42, 153 | num_gpu = 0) 154 | ) 155 | #> 156 | #> ── Answer from llama3.1 ──────────────────────────────────────────────────────── 157 | #> The sky appears blue because of a phenomenon called Rayleigh scattering, in 158 | #> which shorter (blue) wavelengths of light are scattered more than longer (red) 159 | #> wavelengths by the tiny molecules of gases in the Earth's atmosphere. 160 | ``` 161 | 162 | ## Configuration 163 | 164 | You can configure the server address, the system prompt and the model 165 | used for a query or chat. If not configured otherwise, `rollama` assumes 166 | you are using the default port (11434) of a local instance 167 | (“localhost”). Let’s make this explicit by setting the option: 168 | 169 | ``` r 170 | options(rollama_server = "http://localhost:11434") 171 | ``` 172 | 173 | You can change how a model answers by setting a configuration or system 174 | message in plain English (or another language supported by the model): 175 | 176 | ``` r 177 | options(rollama_config = "You make short answers understandable to a 5 year old") 178 | query("Why is the sky blue?") 179 | #> 180 | #> ── Answer from llama3.1 ──────────────────────────────────────────────────────── 181 | #> The sky looks blue because of tiny particles in the air that bounce sunlight 182 | #> around. Imagine throwing a ball off a cliff and watching it bounce on the 183 | #> ground - the light from the sun does the same thing with these tiny particles, 184 | #> making it look blue! 185 | ``` 186 | 187 | By default, the package uses the “llama3.1 8B” model. Supported models 188 | can be found at . To download a specific 189 | model make use of the additional information available in “Tags” 190 | . 
Change this via 191 | `rollama_model`: 192 | 193 | ``` r 194 | options(rollama_model = "llama3.2:3b-instruct-q4_1") 195 | # if you don't have the model yet: pull_model("llama3.2:3b-instruct-q4_1") 196 | query("Why is the sky blue? Answer with one sentence.") 197 | #> 198 | #> ── Answer from llama3.2:3b-instruct-q4_1 ─────────────────────────────────────── 199 | #> The Earth's sky looks blue because of something called light, which bounces off 200 | #> tiny things in the air and comes back to us as blue! 201 | ``` 202 | 203 | ## Easy query generation 204 | 205 | The `make_query` function simplifies the creation of structured queries, 206 | which can, for example, be used in [annotation 207 | tasks](https://jbgruber.github.io/rollama/articles/annotation.html#the-make_query-helper-function). 208 | 209 | Main components (check the 210 | [documentation](https://jbgruber.github.io/rollama/articles/annotation.html#the-make_query-helper-function) 211 | for more options): 212 | 213 | - **`text`**: The text(s) to classify. 214 | - **`prompt`**: Could be a (classification) question 215 | - **`system`**: Optional system prompt providing context or instructions 216 | for the task. 217 | - **`examples`**: Optional prior examples for one-shot or few-shot 218 | learning (user messages and assistant responses). 219 | 220 | **Zero-shot Example** 221 | In this example, the function is used without examples: 222 | 223 | ``` r 224 | # Create a query using make_query 225 | q_zs <- make_query( 226 | text = "the pizza tastes terrible", 227 | prompt = "Is this text: 'positive', 'neutral', or 'negative'?", 228 | system = "You assign texts into categories. Answer with just the correct category." 229 | ) 230 | # Print the query 231 | print(q_zs) 232 | #> [[1]] 233 | #> # A tibble: 2 × 2 234 | #> role content 235 | #> 236 | #> 1 system You assign texts into categories. 
Answer with just the correct categor… 237 | #> 2 user the pizza tastes terrible 238 | #> Is this text: 'positive', 'neutral', or 'neg… 239 | # Run the query 240 | query(q_zs, output = "text") 241 | #> 242 | #> ── Answer from llama3.2:3b-instruct-q4_1 ─────────────────────────────────────── 243 | #> Negative 244 | ``` 245 | 246 | ## Learn more 247 | 248 | - [Use rollama for annotation 249 | tasks](https://jbgruber.github.io/rollama/articles/annotation.html) 250 | - [Annotate 251 | images](https://jbgruber.github.io/rollama/articles/image-annotation.html) 252 | - [Get text 253 | embedding](https://jbgruber.github.io/rollama/articles/text-embedding.html) 254 | - [Use more models (GGUF format) from Hugging 255 | Face](https://jbgruber.github.io/rollama/articles/hf-gguf.html) 256 | 257 | ## Citation 258 | 259 | Please cite the package using the [pre 260 | print](https://arxiv.org/abs/2404.07654) DOI: 261 | 262 | -------------------------------------------------------------------------------- /_pkgdown.yml: -------------------------------------------------------------------------------- 1 | url: https://jbgruber.github.io/rollama/ 2 | template: 3 | bootstrap: 5 4 | 5 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | comment: false 2 | 3 | coverage: 4 | status: 5 | project: 6 | default: 7 | target: auto 8 | threshold: 1% 9 | informational: true 10 | patch: 11 | default: 12 | target: auto 13 | threshold: 1% 14 | informational: true 15 | -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | The last submission unfortunately contained a bug in the progress bar of a 2 | function, rendering it unusable. This submission only fixes that issue. 3 | 4 | ## R CMD check results 5 | 0 errors | 0 warnings | 0 note 6 | -------------------------------------------------------------------------------- /inst/WORDLIST: -------------------------------------------------------------------------------- 1 | CMD 2 | ChatGPT 3 | Codecov 4 | DOI 5 | Embeddings 6 | GGUF 7 | Hvitfeldt 8 | JSON 9 | LLM 10 | LLMs 11 | Modelfile 12 | ORCID 13 | Ollama 14 | OpenAI's 15 | OpenAI’s 16 | RMDs 17 | Reichardt 18 | WebUI 19 | arXiv 20 | behaviour 21 | config 22 | embeddings 23 | http 24 | httr 25 | leaderboards 26 | llava 27 | modelfile 28 | modelname 29 | ollama 30 | orca 31 | pre 32 | smldemo 33 | tibble 34 | tibbles 35 | unnesting 36 | -------------------------------------------------------------------------------- /inst/extdata/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JBGruber/rollama/dc44d7879cfa6ec8f5b03ad59e7f5b51c42567f6/inst/extdata/logo.png -------------------------------------------------------------------------------- /inst/extdata/modelfile.txt: -------------------------------------------------------------------------------- 1 | FROM llama2 2 | # sets the temperature to 1 [higher is more creative, lower is more coherent] 3 | PARAMETER temperature 1 4 | # sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token 5 | PARAMETER num_ctx 4096 6 | 7 | # sets a custom system message to specify the behavior of the chat assistant 8 | SYSTEM You are Mario from super mario bros, acting as an assistant. 
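# the Modelfile documentation linked above lists more options that can be set the
# same way; for example, uncommenting the line below would fix a seed so the
# assistant gives reproducible answers:
# PARAMETER seed 42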
9 | -------------------------------------------------------------------------------- /man/chat_history.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/chat.r 3 | \name{chat_history} 4 | \alias{chat_history} 5 | \alias{new_chat} 6 | \title{Handle conversations} 7 | \usage{ 8 | chat_history() 9 | 10 | new_chat() 11 | } 12 | \value{ 13 | chat_history: tibble with chat history 14 | 15 | new_chat: Does not return a value 16 | } 17 | \description{ 18 | Shows and deletes (\code{new_chat}) the local prompt and response history to start 19 | a new conversation. 20 | } 21 | -------------------------------------------------------------------------------- /man/check_model_installed.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.r 3 | \name{check_model_installed} 4 | \alias{check_model_installed} 5 | \title{Check if one or several models are installed on the server} 6 | \usage{ 7 | check_model_installed( 8 | model, 9 | check_only = FALSE, 10 | auto_pull = FALSE, 11 | server = getOption("rollama_server", default = "http://localhost:11434") 12 | ) 13 | } 14 | \arguments{ 15 | \item{model}{names of one or several models as character vector.} 16 | 17 | \item{check_only}{only return TRUE/FALSE and don't download models.} 18 | 19 | \item{auto_pull}{if FALSE, the default, asks before downloading models.} 20 | 21 | \item{server}{URL to one or several Ollama servers (not the API). Defaults to 22 | "http://localhost:11434".} 23 | } 24 | \value{ 25 | invisible TRUE/FALSE 26 | } 27 | \description{ 28 | Check if one or several models are installed on the server 29 | } 30 | -------------------------------------------------------------------------------- /man/create_model.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/models.r 3 | \name{create_model} 4 | \alias{create_model} 5 | \title{Create a model from a Modelfile} 6 | \usage{ 7 | create_model(model, modelfile, server = NULL) 8 | } 9 | \arguments{ 10 | \item{model}{name of the model to create} 11 | 12 | \item{modelfile}{either a path to a model file to be read or the contents of 13 | the model file as a character vector.} 14 | 15 | \item{server}{URL to one or several Ollama servers (not the API). Defaults to 16 | "http://localhost:11434".} 17 | } 18 | \value{ 19 | Nothing. Called to create a model on the Ollama server. 20 | } 21 | \description{ 22 | Create a model from a Modelfile 23 | } 24 | \details{ 25 | Custom models are the way to save your system message and model 26 | parameters in a dedicated shareable way. If you use \code{show_model()}, you can 27 | look at the configuration of a model in the column modelfile. To get more 28 | information and a list of valid parameters, check out 29 | \url{https://github.com/ollama/ollama/blob/main/docs/modelfile.md}. Most 30 | options are also available through the \code{query} and \code{chat} functions, yet 31 | are not persistent over sessions. 32 | } 33 | \examples{ 34 | modelfile <- system.file("extdata", "modelfile.txt", package = "rollama") 35 | \dontrun{create_model("mario", modelfile)} 36 | modelfile <- "FROM llama3.1\nSYSTEM You are mario from Super Mario Bros." 
37 | \dontrun{create_model("mario", modelfile)} 38 | } 39 | -------------------------------------------------------------------------------- /man/embed_text.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/embedding.r 3 | \name{embed_text} 4 | \alias{embed_text} 5 | \title{Generate Embeddings} 6 | \usage{ 7 | embed_text( 8 | text, 9 | model = NULL, 10 | server = NULL, 11 | model_params = NULL, 12 | verbose = getOption("rollama_verbose", default = interactive()) 13 | ) 14 | } 15 | \arguments{ 16 | \item{text}{text vector to generate embeddings for.} 17 | 18 | \item{model}{which model to use. See \url{https://ollama.com/library} for 19 | options. Default is "llama3.1". Set option(rollama_model = "modelname") to 20 | change default for the current session. See \link{pull_model} for more 21 | details.} 22 | 23 | \item{server}{URL to one or several Ollama servers (not the API). Defaults to 24 | "http://localhost:11434".} 25 | 26 | \item{model_params}{a named list of additional model parameters listed in the 27 | \href{https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values}{documentation for the Modelfile}.} 28 | 29 | \item{verbose}{Whether to print status messages to the Console 30 | (\code{TRUE}/\code{FALSE}). The default is to have status messages in 31 | interactive sessions. Can be changed with \code{options(rollama_verbose = 32 | FALSE)}.} 33 | } 34 | \value{ 35 | a tibble with embeddings. 36 | } 37 | \description{ 38 | Generate Embeddings 39 | } 40 | \examples{ 41 | \dontrun{ 42 | embed_text(c( 43 | "Here is an article about llamas...", 44 | "R is a language and environment for statistical computing and graphics.")) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /man/figures/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JBGruber/rollama/dc44d7879cfa6ec8f5b03ad59e7f5b51c42567f6/man/figures/logo.png -------------------------------------------------------------------------------- /man/list_models.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/models.r 3 | \name{list_models} 4 | \alias{list_models} 5 | \title{List models that are available locally.} 6 | \usage{ 7 | list_models(server = NULL) 8 | } 9 | \arguments{ 10 | \item{server}{URL to one or several Ollama servers (not the API). Defaults to 11 | "http://localhost:11434".} 12 | } 13 | \value{ 14 | a tibble of installed models 15 | } 16 | \description{ 17 | List models that are available locally. 
18 | } 19 | -------------------------------------------------------------------------------- /man/make_query.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/chat.r 3 | \name{make_query} 4 | \alias{make_query} 5 | \title{Generate and format queries for a language model} 6 | \usage{ 7 | make_query( 8 | text, 9 | prompt, 10 | template = "{prefix}{text}\\n{prompt}\\n{suffix}", 11 | system = NULL, 12 | prefix = NULL, 13 | suffix = NULL, 14 | examples = NULL 15 | ) 16 | } 17 | \arguments{ 18 | \item{text}{A character vector of texts to be annotated.} 19 | 20 | \item{prompt}{A string defining the main task or question to be passed to the 21 | language model.} 22 | 23 | \item{template}{A string template for formatting user queries, containing 24 | placeholders like \code{{text}}, \code{{prefix}}, and \code{{suffix}}.} 25 | 26 | \item{system}{An optional string to specify a system prompt.} 27 | 28 | \item{prefix}{A prefix string to prepend to each user query.} 29 | 30 | \item{suffix}{A suffix string to append to each user query.} 31 | 32 | \item{examples}{A \code{tibble} with columns \code{text} and \code{answer}, representing 33 | example user messages and corresponding assistant responses.} 34 | } 35 | \value{ 36 | A list of tibbles, one for each input \code{text}, containing structured 37 | rows for system messages, user messages, and assistant responses. 38 | } 39 | \description{ 40 | \code{make_query} generates structured input for a language model, including 41 | system prompt, user messages, and optional examples (assistant answers). 42 | } 43 | \details{ 44 | The function supports the inclusion of examples, which are 45 | dynamically added to the structured input. Each example follows the same 46 | format as the primary user query. 47 | } 48 | \examples{ 49 | template <- "{prefix}{text}\n\n{prompt}{suffix}" 50 | examples <- tibble::tribble( 51 | ~text, ~answer, 52 | "This movie was amazing, with great acting and story.", "positive", 53 | "The film was okay, but not particularly memorable.", "neutral", 54 | "I found this movie boring and poorly made.", "negative" 55 | ) 56 | queries <- make_query( 57 | text = c("A stunning visual spectacle.", "Predictable but well-acted."), 58 | prompt = "Classify sentiment as positive, neutral, or negative.", 59 | template = template, 60 | system = "Provide a sentiment classification.", 61 | prefix = "Review: ", 62 | suffix = " Please classify.", 63 | examples = examples 64 | ) 65 | print(queries) 66 | if (ping_ollama()) { # only run this example when Ollama is running 67 | query(queries, screen = TRUE, output = "text") 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /man/ping_ollama.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/lib.R 3 | \name{ping_ollama} 4 | \alias{ping_ollama} 5 | \title{Ping server to see if Ollama is reachable} 6 | \usage{ 7 | ping_ollama(server = NULL, silent = FALSE, version = FALSE) 8 | } 9 | \arguments{ 10 | \item{server}{URL to one or several Ollama servers (not the API). 
Defaults to 11 | "http://localhost:11434".} 12 | 13 | \item{silent}{suppress warnings and status (only return \code{TRUE}/\code{FALSE}).} 14 | 15 | \item{version}{return version instead of \code{TRUE}.} 16 | } 17 | \value{ 18 | TRUE if server is running 19 | } 20 | \description{ 21 | Ping server to see if Ollama is reachable 22 | } 23 | -------------------------------------------------------------------------------- /man/pull_model.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/models.r 3 | \name{pull_model} 4 | \alias{pull_model} 5 | \alias{show_model} 6 | \alias{delete_model} 7 | \alias{copy_model} 8 | \title{Pull, show and delete models} 9 | \usage{ 10 | pull_model( 11 | model = NULL, 12 | server = NULL, 13 | insecure = FALSE, 14 | verbose = getOption("rollama_verbose", default = interactive()) 15 | ) 16 | 17 | show_model(model = NULL, server = NULL) 18 | 19 | delete_model(model, server = NULL) 20 | 21 | copy_model(model, destination = paste0(model, "-copy"), server = NULL) 22 | } 23 | \arguments{ 24 | \item{model}{name of the model(s). Defaults to "llama3.1" when \code{NULL} (except 25 | in \code{delete_model}).} 26 | 27 | \item{server}{URL to one or several Ollama servers (not the API). Defaults to 28 | "http://localhost:11434".} 29 | 30 | \item{insecure}{allow insecure connections to the library. Only use this if 31 | you are pulling from your own library during development.} 32 | 33 | \item{verbose}{Whether to print status messages to the Console. Either 34 | \code{TRUE}/\code{FALSE} or see \link[httr2:progress_bars]{httr2::progress_bars}. The default is to have status 35 | messages in interactive sessions. Can be changed with 36 | \code{options(rollama_verbose = FALSE)}.} 37 | 38 | \item{destination}{name of the copied model.} 39 | } 40 | \value{ 41 | (invisible) a tibble with information about the model (except in 42 | \code{delete_model}) 43 | } 44 | \description{ 45 | Pull, show and delete models 46 | } 47 | \details{ 48 | \itemize{ 49 | \item \code{pull_model()}: downloads model 50 | \item \code{show_model()}: displays information about a local model 51 | \item \code{copy_model()}: creates a model with another name from an existing model 52 | \item \code{delete_model()}: deletes local model 53 | } 54 | 55 | \strong{Model names}: Model names follow a model:tag format, where model can have 56 | an optional namespace such as example/model. Some examples are 57 | orca-mini:3b-q4_1 and llama3.1:70b. The tag is optional and, if not provided, 58 | will default to latest. The tag is used to identify a specific version. 
59 | } 60 | \examples{ 61 | \dontrun{ 62 | # download a model and save information in an object 63 | model_info <- pull_model("mixtral") 64 | # after you pull, you can get the same information with: 65 | model_info <- show_model("mixtral") 66 | # pulling models from Hugging Face Hub is also possible 67 | pull_model("https://huggingface.co/oxyapi/oxy-1-small-GGUF:Q2_K") 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /man/query.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/chat.r 3 | \name{query} 4 | \alias{query} 5 | \alias{chat} 6 | \title{Chat with a LLM through Ollama} 7 | \usage{ 8 | query( 9 | q, 10 | model = NULL, 11 | screen = TRUE, 12 | server = NULL, 13 | images = NULL, 14 | model_params = NULL, 15 | output = c("response", "text", "list", "data.frame", "httr2_response", "httr2_request"), 16 | format = NULL, 17 | template = NULL, 18 | verbose = getOption("rollama_verbose", default = interactive()) 19 | ) 20 | 21 | chat( 22 | q, 23 | model = NULL, 24 | screen = TRUE, 25 | server = NULL, 26 | images = NULL, 27 | model_params = NULL, 28 | template = NULL, 29 | verbose = getOption("rollama_verbose", default = interactive()) 30 | ) 31 | } 32 | \arguments{ 33 | \item{q}{the question as a character string or a conversation object.} 34 | 35 | \item{model}{which model(s) to use. See \url{https://ollama.com/library} for 36 | options. Default is "llama3.1". Set \code{option(rollama_model = "modelname")} to 37 | change default for the current session. See \link{pull_model} for more 38 | details.} 39 | 40 | \item{screen}{Logical. Should the answer be printed to the screen.} 41 | 42 | \item{server}{URL to one or several Ollama servers (not the API). Defaults to 43 | "http://localhost:11434".} 44 | 45 | \item{images}{path(s) to images (for multimodal models such as llava).} 46 | 47 | \item{model_params}{a named list of additional model parameters listed in the 48 | \href{https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values}{documentation for the Modelfile} 49 | such as temperature. Use a seed and set the temperature to zero to get 50 | reproducible results (see examples).} 51 | 52 | \item{output}{what the function should return. Possible values are 53 | "response", "text", "list", "data.frame", "httr2_response" or 54 | "httr2_request" or a function see details.} 55 | 56 | \item{format}{the format to return a response in. Currently the only accepted 57 | value is \code{"json"}.} 58 | 59 | \item{template}{the prompt template to use (overrides what is defined in the 60 | Modelfile).} 61 | 62 | \item{verbose}{Whether to print status messages to the Console. Either 63 | \code{TRUE}/\code{FALSE} or see \link[httr2:progress_bars]{httr2::progress_bars}. The default is to have status 64 | messages in interactive sessions. Can be changed with 65 | \code{options(rollama_verbose = FALSE)}.} 66 | } 67 | \value{ 68 | list of objects set in output parameter. 69 | } 70 | \description{ 71 | Chat with a LLM through Ollama 72 | } 73 | \details{ 74 | \code{query} sends a single question to the API, without knowledge about 75 | previous questions (only the config message is relevant). \code{chat} treats new 76 | messages as part of the same conversation until \link{new_chat} is called. 77 | 78 | To make the output reproducible, you can set a seed with 79 | \code{options(rollama_seed = 42)}. 
As long as the seed stays the same, the 80 | models will give the same answer, changing the seed leads to a different 81 | answer. 82 | 83 | For the output of \code{query}, there are a couple of options: 84 | \itemize{ 85 | \item \code{response}: the response of the Ollama server 86 | \item \code{text}: only the answer as a character vector 87 | \item \code{data.frame}: a data.frame containing model and response 88 | \item \code{list}: a list containing the prompt to Ollama and the response 89 | \item \code{httr2_response}: the response of the Ollama server including HTML 90 | headers in the \code{httr2} response format 91 | \item \code{httr2_request}: httr2_request objects in a list, in case you want to run 92 | them with \code{\link[httr2:req_perform]{httr2::req_perform()}}, \code{\link[httr2:req_perform_sequential]{httr2::req_perform_sequential()}}, or 93 | \code{\link[httr2:req_perform_parallel]{httr2::req_perform_parallel()}} yourself. 94 | \item a custom function that takes the \code{httr2_response}(s) from the Ollama 95 | server as an input. 96 | } 97 | } 98 | \examples{ 99 | \dontshow{if (interactive()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} 100 | # ask a single question 101 | query("why is the sky blue?") 102 | 103 | # hold a conversation 104 | chat("why is the sky blue?") 105 | chat("and how do you know that?") 106 | 107 | # save the response to an object and extract the answer 108 | resp <- query(q = "why is the sky blue?") 109 | answer <- resp[[1]]$message$content 110 | 111 | # or just get the answer directly 112 | answer <- query(q = "why is the sky blue?", output = "text") 113 | 114 | # besides the other output options, you can also supply a custom function 115 | query_duration <- function(resp) { 116 | nanosec <- purrr::map(resp, httr2::resp_body_json) |> 117 | purrr::map_dbl("total_duration") 118 | round(nanosec * 1e-9, digits = 2) 119 | } 120 | # this function only returns the number of seconds a request took 121 | res <- query("why is the sky blue?", output = query_duration) 122 | res 123 | 124 | # ask question about images (to a multimodal model) 125 | images <- c("https://avatars.githubusercontent.com/u/23524101?v=4", # remote 126 | "/path/to/your/image.jpg") # or local images supported 127 | query(q = "describe these images", 128 | model = "llava", 129 | images = images[1]) # just using the first path as the second is not real 130 | 131 | # set custom options for the model at runtime (rather than in create_model()) 132 | query("why is the sky blue?", 133 | model_params = list( 134 | num_keep = 5, 135 | seed = 42, 136 | num_predict = 100, 137 | top_k = 20, 138 | top_p = 0.9, 139 | min_p = 0.0, 140 | tfs_z = 0.5, 141 | typical_p = 0.7, 142 | repeat_last_n = 33, 143 | temperature = 0.8, 144 | repeat_penalty = 1.2, 145 | presence_penalty = 1.5, 146 | frequency_penalty = 1.0, 147 | mirostat = 1, 148 | mirostat_tau = 0.8, 149 | mirostat_eta = 0.6, 150 | penalize_newline = TRUE, 151 | numa = FALSE, 152 | num_ctx = 1024, 153 | num_batch = 2, 154 | num_gpu = 0, 155 | main_gpu = 0, 156 | low_vram = FALSE, 157 | vocab_only = FALSE, 158 | use_mmap = TRUE, 159 | use_mlock = FALSE, 160 | num_thread = 8 161 | )) 162 | 163 | # use a seed to get reproducible results 164 | query("why is the sky blue?", model_params = list(seed = 42)) 165 | 166 | # to set a seed for the whole session you can use 167 | options(rollama_seed = 42) 168 | 169 | # this might be interesting if you want to turn off the GPU and load the 170 | # model into the system memory 
(slower, but most people have more RAM than 171 | # VRAM, which might be interesting for larger models) 172 | query("why is the sky blue?", 173 | model_params = list(num_gpu = 0)) 174 | 175 | # Asking the same question to multiple models is also supported 176 | query("why is the sky blue?", model = c("llama3.1", "orca-mini")) 177 | 178 | # And if you have multiple Ollama servers in your network, you can send 179 | # requests to them in parallel 180 | if (ping_ollama(c("http://localhost:11434/", 181 | "http://192.168.2.45:11434/"))) { # check if servers are running 182 | query("why is the sky blue?", model = c("llama3.1", "orca-mini"), 183 | server = c("http://localhost:11434/", 184 | "http://192.168.2.45:11434/")) 185 | } 186 | \dontshow{\}) # examplesIf} 187 | } 188 | -------------------------------------------------------------------------------- /man/rollama-options.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rollama-package.R 3 | \name{rollama-options} 4 | \alias{rollama-options} 5 | \title{rollama Options} 6 | \description{ 7 | The behaviour of \code{rollama} can be controlled through 8 | \code{options()}. Specifically, the options below can be set. 9 | } 10 | \details{ 11 | \describe{ 12 | \item{rollama_server}{\describe{ 13 | This controls the default server where Ollama is expected to run. It assumes 14 | that you are running Ollama locally in a Docker container. 15 | \item{default:}{\code{"http://localhost:11434"}} 16 | }} 17 | \item{rollama_model}{\describe{ 18 | The default model is llama3.1, which is a good overall option with reasonable 19 | performance and size for most tasks. You can change the model in each 20 | function call or globally with this option. 21 | \item{default:}{\code{"llama3.1"}} 22 | }} 23 | \item{rollama_verbose}{\describe{ 24 | Whether the package tells users what is going on, e.g., showing a spinner 25 | while the models are thinking or showing the download speed while pulling 26 | models. Since this adds some complexity to the code, you might want to 27 | disable it when you get errors (it won't fix the error, but you get a 28 | better error trace). 29 | \item{default:}{\code{TRUE}} 30 | }} 31 | \item{rollama_config}{\describe{ 32 | The default configuration or system message. If NULL, the system message 33 | defined in the used model is employed. 34 | \item{default:}{None} 35 | }} 36 | \item{rollama_seed}{\describe{ 37 | As long as the seed stays the same, the 38 | models will give the same answer, changing the seed leads to a different 39 | answer. Per default, no seed is set and each call to \code{query()} or 40 | \code{chat()} will give you a different answer. 
41 | \item{default:}{None} 42 | }} 43 | } 44 | } 45 | \examples{ 46 | options(rollama_config = "You make answers understandable to a 5 year old") 47 | } 48 | -------------------------------------------------------------------------------- /man/rollama-package.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rollama-package.R 3 | \docType{package} 4 | \name{rollama-package} 5 | \alias{rollama} 6 | \alias{rollama-package} 7 | \title{rollama: Communicate with 'Ollama' to Run Large Language Models Locally} 8 | \description{ 9 | \if{html}{\figure{logo.png}{options: style='float: right' alt='logo' width='120'}} 10 | 11 | Wraps the 'Ollama' \url{https://ollama.com} API, which can be used to communicate with generative large language models locally. 12 | } 13 | \seealso{ 14 | Useful links: 15 | \itemize{ 16 | \item \url{https://jbgruber.github.io/rollama/} 17 | \item \url{https://github.com/JBGruber/rollama} 18 | \item Report bugs at \url{https://github.com/JBGruber/rollama/issues} 19 | } 20 | 21 | } 22 | \author{ 23 | \strong{Maintainer}: Johannes B. Gruber \email{JohannesB.Gruber@gmail.com} (\href{https://orcid.org/0000-0001-9177-1772}{ORCID}) 24 | 25 | Authors: 26 | \itemize{ 27 | \item Maximilian Weber (\href{https://orcid.org/0000-0002-1174-449X}{ORCID}) [contributor] 28 | } 29 | 30 | } 31 | \keyword{internal} 32 | -------------------------------------------------------------------------------- /rollama.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | ProjectId: 320a4619-e1ad-4e6b-9cbd-fe7ce736995e 3 | 4 | RestoreWorkspace: No 5 | SaveWorkspace: No 6 | AlwaysSaveHistory: Default 7 | 8 | EnableCodeIndexing: Yes 9 | UseSpacesForTab: Yes 10 | NumSpacesForTab: 2 11 | Encoding: UTF-8 12 | 13 | RnwWeave: Sweave 14 | LaTeX: pdfLaTeX 15 | 16 | AutoAppendNewline: Yes 17 | StripTrailingWhitespace: Yes 18 | LineEndingConversion: Posix 19 | 20 | BuildType: Package 21 | PackageUseDevtools: Yes 22 | PackageInstallArgs: --no-multiarch --with-keep.source 23 | PackageRoxygenize: rd,collate,namespace 24 | -------------------------------------------------------------------------------- /tests/spelling.R: -------------------------------------------------------------------------------- 1 | if (requireNamespace("spelling", quietly = TRUE)) { 2 | spelling::spell_check_test( 3 | vignettes = TRUE, error = FALSE, 4 | skip_on_cran = TRUE 5 | ) 6 | } 7 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | # This file is part of the standard setup for testthat. 2 | # It is recommended that you do not modify it. 3 | # 4 | # Where should you do additional test configuration? 
5 | # Learn more about the roles of various files in: 6 | # * https://r-pkgs.org/testing-design.html#sec-tests-files-overview 7 | # * https://testthat.r-lib.org/articles/special-files.html 8 | 9 | library(testthat) 10 | library(rollama) 11 | test_check("rollama") 12 | -------------------------------------------------------------------------------- /tests/testthat/setup-models.R: -------------------------------------------------------------------------------- 1 | if (ping_ollama(silent = TRUE)) { 2 | try({ 3 | delete_model("mario") 4 | delete_model("mario-copy") 5 | }, silent = TRUE) 6 | } 7 | -------------------------------------------------------------------------------- /tests/testthat/test-aaa.R: -------------------------------------------------------------------------------- 1 | # slightly out of place, but we don't want to pull unnecessary models and this 2 | # should come before the first test 3 | test_that("Auto pull model", { 4 | skip_if_not(ping_ollama(silent = TRUE)) 5 | expect_true(check_model_installed(getOption("rollama_model", default = "llama3.1"), 6 | auto_pull = TRUE)) 7 | }) 8 | -------------------------------------------------------------------------------- /tests/testthat/test-chat.R: -------------------------------------------------------------------------------- 1 | test_that("Test query", { 2 | skip_if_not(ping_ollama(silent = TRUE)) 3 | expect_message(query("test"), ".") 4 | expect_message(query("test", verbose = TRUE), ".") 5 | }) 6 | 7 | test_that("Test chat", { 8 | skip_if_not(ping_ollama(silent = TRUE)) 9 | expect_message(chat("Please only say 'yes'"), "Yes") 10 | expect_message(chat("One more time"), "Yes") 11 | expect_equal(nrow(chat_history()), 4L) 12 | # check order of history 13 | expect_equal(chat_history()$content[c(1, 3)], 14 | c("Please only say 'yes'", "One more time")) 15 | expect_equal(chat_history()$role, 16 | c("user", "assistant", "user", "assistant")) 17 | expect_error(query(q = tibble::tibble(role = "assistant", content = "Pos")), 18 | "needs.at.least.one.user.message") 19 | expect_equal({ 20 | new_chat() 21 | nrow(chat_history()) 22 | }, 0L) 23 | }) 24 | 25 | test_that("Test inputs", { 26 | skip_if_not(ping_ollama(silent = TRUE)) 27 | # text 28 | expect_message(query("test"), ".") 29 | # text + image (I don't want to pull a different model for this, the API still 30 | # works with models that can't handle images) 31 | query("test", 32 | images = system.file("extdata", "logo.png", package = "rollama")) 33 | # data.frame 34 | expect_message(query(data.frame(role = "user", content = "test")), ".") 35 | expect_error(query(data.frame(role = "system", content = "test")), 36 | "at.least.one.user.message") 37 | # list of data.frames 38 | l <- rep(list(data.frame(role = "user", content = "test")), 5) 39 | answers <- query(l, screen = FALSE) 40 | expect_length(answers, 5) 41 | expect_type(answers, "list") 42 | expect_type(answers[[1]], "list") 43 | }) 44 | 45 | test_that("Test output parameter", { 46 | skip_if_not(ping_ollama(silent = TRUE)) 47 | expect_s3_class( 48 | query("Please only say 'yes'", output = "httr2_request")[[1]], 49 | "httr2_request" 50 | ) 51 | expect_error(query("Please only say 'yes'", output = "invalid"), 52 | "should.be.one.of") 53 | 54 | skip_if_not(ping_ollama(silent = TRUE)) 55 | # "httr2_response", "text", "list", "data.frame", "httr2_request" 56 | expect_s3_class( 57 | query("Please only say 'yes'", screen = FALSE, 58 | output = "httr2_response")[[1]], 59 | "httr2_response" 60 | ) 61 | expect_equal( 62 | names(query("Please 
only say 'yes'", screen = FALSE, 63 | output = "list")[[1]]), 64 | c("request", "response") 65 | ) 66 | expect_equal( 67 | colnames(query("Please only say 'yes'", screen = FALSE, 68 | output = "data.frame")), 69 | c("model", "role", "response") 70 | ) 71 | }) 72 | 73 | test_that("Test seed", { 74 | skip_if_not(ping_ollama(silent = TRUE)) 75 | snapshot <- query("test", model_params = list(seed = 42), output = "text") 76 | expect_equal(query("test", model_params = list(seed = 42), output = "text"), 77 | snapshot) 78 | expect_equal({ 79 | withr::with_options(list(rollama_seed = 42), 80 | query("test", output = "text")) 81 | }, snapshot) 82 | # different seed, different result 83 | expect_false(isTRUE(all.equal( 84 | query("test", model_params = list(seed = 1), output = "text"), 85 | snapshot 86 | ))) 87 | }) 88 | -------------------------------------------------------------------------------- /tests/testthat/test-embedding.R: -------------------------------------------------------------------------------- 1 | test_that("embedding", { 2 | skip_if_not(ping_ollama(silent = TRUE)) 3 | expect_equal(nrow(embed_text(c("Test 1", "Test 2"))), 2) 4 | }) 5 | 6 | test_that("missing model", { 7 | skip_if_not(ping_ollama(silent = TRUE)) 8 | expect_error(embed_text("test", model = "missing"), 9 | "not.installed") 10 | }) 11 | -------------------------------------------------------------------------------- /tests/testthat/test-make_query.R: -------------------------------------------------------------------------------- 1 | test_that("correct structure", { 2 | examples <- tibble::tribble( 3 | ~text, ~answer, 4 | "This movie was amazing, with great acting and story.", "positive", 5 | "The film was okay, but not particularly memorable.", "neutral", 6 | "I found this movie boring and poorly made.", "negative" 7 | ) 8 | texts <- c("A stunning visual spectacle.", "Predictable but well-acted.") 9 | queries <- make_query( 10 | text = texts, 11 | prompt = "Classify sentiment as positive, neutral, or negative.", 12 | template = "{text}", 13 | system = "Provide a sentiment classification.", 14 | prefix = "Review: ", 15 | suffix = " Please classify.", 16 | examples = examples 17 | ) 18 | expect_true(texts[2] %in% queries[[2]][["content"]]) 19 | 20 | queries <- make_query( 21 | text = texts, 22 | prompt = "Classify sentiment as positive, neutral, or negative.", 23 | template = "{prefix}{text}\n{prompt}\n{suffix}", 24 | system = "Provide a sentiment classification.", 25 | prefix = "Review: ", 26 | suffix = " Please classify.", 27 | examples = examples 28 | ) 29 | expect_length(queries, 2L) 30 | expect_type(queries, "list") 31 | expect_s3_class(queries[[1]], "tbl_df") 32 | expect_equal(nrow(queries[[1]]), 8L) 33 | 34 | }) 35 | 36 | test_that("queries work with query()", { 37 | skip_if_not(ping_ollama(silent = TRUE)) 38 | queries <- make_query( 39 | text = c("A stunning visual spectacle.", "Predictable but well-acted."), 40 | prompt = "Classify sentiment as positive, neutral, or negative.", 41 | template = "{prefix}{text}\n{prompt}\n{suffix}", 42 | system = "Provide a sentiment classification.", 43 | prefix = "Review: ", 44 | suffix = " Please classify." 
45 | ) 46 | results <- query(queries, output = "text", screen = FALSE) 47 | expect_type(results, "character") 48 | expect_length(results, 2L) 49 | }) 50 | -------------------------------------------------------------------------------- /tests/testthat/test-models.R: -------------------------------------------------------------------------------- 1 | options("rollama_verbose" = FALSE) 2 | 3 | test_that("pull model", { 4 | skip_if_not(ping_ollama(silent = TRUE)) 5 | expect_equal(nrow(pull_model()), 1L) 6 | }) 7 | 8 | test_that("show model", { 9 | skip_if_not(ping_ollama(silent = TRUE)) 10 | expect_equal(nrow(show_model()), 1L) 11 | expect_equal(ncol(list_models()), 11L) 12 | expect_s3_class(list_models(), "tbl_df") 13 | }) 14 | 15 | test_that("create model", { 16 | skip_if_not(ping_ollama(silent = TRUE)) 17 | skip_if(ping_ollama(silent = TRUE, version = TRUE) > "0.5.5") 18 | expect_equal(nrow(create_model( 19 | model = "mario", 20 | modelfile = "FROM llama3.1\nSYSTEM You are mario from Super Mario Bros." 21 | )), 1L) 22 | # also test modelfile 23 | expect_equal(nrow(create_model( 24 | model = "mario", 25 | modelfile = system.file("extdata", "modelfile.txt", package = "rollama") 26 | )), 1L) 27 | }) 28 | 29 | test_that("copy model", { 30 | skip_if_not(ping_ollama(silent = TRUE)) 31 | skip_if(ping_ollama(silent = TRUE, version = TRUE) > "0.5.5") 32 | expect_message(copy_model("mario"), 33 | "model.mario.copied.to.mario-copy") 34 | }) 35 | 36 | test_that("delete model", { 37 | skip_if_not(ping_ollama(silent = TRUE)) 38 | skip_if(ping_ollama(silent = TRUE, version = TRUE) > "0.5.5") 39 | expect_message(delete_model("mario"), 40 | "model.mario.removed") 41 | expect_message(delete_model("mario-copy"), 42 | "model.mario-copy.removed") 43 | }) 44 | 45 | test_that("model missing", { 46 | skip_if_not(ping_ollama(silent = TRUE)) 47 | skip_if(ping_ollama(silent = TRUE, version = TRUE) > "0.5.5") 48 | expect_error(check_model_installed("NOMODEL"), 49 | "Model.NOMODEL:latest.not.installed.") 50 | }) 51 | -------------------------------------------------------------------------------- /tests/testthat/test-utils.R: -------------------------------------------------------------------------------- 1 | test_that("ping", { 2 | expect_type(ping_ollama(), "logical") 3 | expect_type(ping_ollama(silent = TRUE), "logical") 4 | expect_no_message(ping_ollama(silent = TRUE)) 5 | }) 6 | 7 | test_that("verbose", { 8 | expect_no_message({ 9 | skip_if_not(ping_ollama(silent = TRUE)) 10 | op <- options("rollama_verbose" = FALSE) 11 | on.exit(options(op), add = TRUE, after = FALSE) 12 | query(q = "test", screen = FALSE) 13 | }) 14 | }) 15 | -------------------------------------------------------------------------------- /update.r: -------------------------------------------------------------------------------- 1 | devtools::document() 2 | spelling::spell_check_package() 3 | devtools::check() 4 | 5 | # re-compute vignettes 6 | setwd(here::here("vignettes")) 7 | knitr::knit("vignettes/annotation.Rmd.orig", output = "vignettes/annotation.Rmd") 8 | knitr::knit("vignettes/image-annotation.Rmd.orig", output = "vignettes/image-annotation.Rmd") 9 | knitr::knit("vignettes/text-embedding.Rmd.orig", output = "vignettes/text-embedding.Rmd") 10 | 11 | # render site to have a look 12 | setwd(here::here()) 13 | pkgdown::build_site() 14 | 15 | # submit to CRAN 16 | usethis::use_version("minor") 17 | rhub::check_for_cran() 18 | devtools::submit_cran() 19 | 20 | # once accepted by CRAN 21 | usethis::use_github_release() 22 | 
-------------------------------------------------------------------------------- /vignettes/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | *.R 3 | -------------------------------------------------------------------------------- /vignettes/README.md: -------------------------------------------------------------------------------- 1 | Build the vignette with: 2 | 3 | ```r 4 | knitr::knit("vignettes/annotation.Rmd.orig", output = "vignettes/annotation.Rmd") 5 | knitr::knit("vignettes/image-annotation.Rmd.orig", output = "vignettes/image-annotation.Rmd") 6 | knitr::knit("vignettes/text-embedding.Rmd.orig", output = "vignettes/text-embedding.Rmd") 7 | knitr::knit("vignettes/hf-gguf.Rmd.orig", output = "vignettes/hf-gguf.Rmd") 8 | # move figures to vignettes folder 9 | file.copy("figures", "vignettes/", overwrite = TRUE, recursive = TRUE) 10 | unlink("figures", recursive = TRUE) 11 | ``` 12 | 13 | The RMDs then only contain the results. 14 | -------------------------------------------------------------------------------- /vignettes/annotation.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "annotation" 3 | output: rmarkdown::html_vignette 4 | author: Maximilian Weber and Johannes B. Gruber 5 | vignette: > 6 | %\VignetteIndexEntry{annotation} 7 | %\VignetteEngine{knitr::rmarkdown} 8 | %\VignetteEncoding{UTF-8} 9 | --- 10 | 11 | # Introduction 12 | 13 | 14 | 15 | After you installed Ollama on your machine and downloaded the package rollama you can load the package and pull a model. 16 | The default model (`llama3.1`), is a good all-round chat model. 17 | For annotation, however, the instruction tuned llama models are often better suited, as they follow instructions more diligently and are less likely to trail off into a conversation. 18 | By changing the option `rollama_model`, we can change which model is used by default in the current session: 19 | 20 | 21 | ``` r 22 | library(rollama) 23 | options(rollama_model = "llama3.2:3b-instruct-q8_0") 24 | pull_model() 25 | #> ✔ model llama3.2:3b-instruct-q8_0 pulled succesfully 26 | ``` 27 | 28 | # Prompting Strategies 29 | 30 | If you want to annotate textual data, you can use various prompting strategies. 31 | For an overview of common approaches, you can read a paper by [Weber and Reichardt (2023)](https://arxiv.org/abs/2401.00284). 32 | These strategies primarily differ in whether or how many examples are given (Zero-shot, One-shot, or Few-shot) and whether reasoning is involved (Chain-of-Thought). 33 | 34 | When writing a prompt we can give the model content for the system part, user part and assistant part. 35 | The system message typically includes instructions or context that guides the interaction, setting the stage for how the user and the assistant should interact. 36 | For an annotation task we could write: 37 | "You assign texts into categories. Answer with just the correct category." 38 | The table below summarizes different prompting strategies for annotating textual data. 39 | Each strategy varies in the number of examples given and the incorporation of reasoning. 40 | 41 | | Prompting Strategy | Example Structure | 42 | |--------------------|--------------------| 43 | | Zero-shot | `{"role": "system", "content": "Text of System Prompt"},`
`{"role": "user", "content": "(Text to classify) + classification question"}` | 44 | | One-shot | `{"role": "system", "content": "Text of System Prompt"},`
`{"role": "user", "content": "(Example text) + classification question"},`
`{"role": "assistant", "content": "Example classification"},`
`{"role": "user", "content": "(Text to classify) + classification question"}` | 45 | | Few-shot | `{"role": "system", "content": "Text of System Prompt"},`
`{"role": "user", "content": "(Example text) + classification question"},`
`{"role": "assistant", "content": "Example classification"},`
`{"role": "user", "content": "(Example text) + classification question"},`
`{"role": "assistant", "content": "Example classification"},`
`. . . more examples`
`{"role": "user", "content": "(Text to classify) + classification question"}` | 46 | | Chain-of-Thought | `{"role": "system", "content": "Text of System Prompt"},`
`{"role": "user", "content": "(Text to classify) + reasoning question"},`
`{"role": "assistant", "content": "Reasoning"},`
`{"role": "user", "content": "Classification question"}` | 47 | 48 | 49 | ## Zero-shot 50 | 51 | In this approach, no prior examples are given. 52 | The structure includes a system prompt providing instructions and a user prompt with the text to classify and the classification question (in this example we only provide the categories). 53 | 54 | 55 | ``` r 56 | library(tibble) 57 | library(purrr) 58 | q <- tribble( 59 | ~role, ~content, 60 | "system", "You assign texts into categories. Answer with just the correct category.", 61 | "user", "text: the pizza tastes terrible\ncategories: positive, neutral, negative" 62 | ) 63 | query(q) 64 | #> 65 | #> ── Answer from llama3.2:3b-instruct-q8_0 ───────────────────────────── 66 | #> negative 67 | ``` 68 | 69 | 70 | ## One-shot 71 | 72 | This involves giving a single example before the actual task. 73 | The structure includes a system prompt, followed by a user prompt with an example text and classification question, the assistant's example classification, and then another user prompt with the new text to classify. 74 | 75 | 76 | ``` r 77 | q <- tribble( 78 | ~role, ~content, 79 | "system", "You assign texts into categories. Answer with just the correct category.", 80 | "user", "text: the pizza tastes terrible\ncategories: positive, neutral, negative", 81 | "assistant", "Category: Negative", 82 | "user", "text: the service is great\ncategories: positive, neutral, negative" 83 | ) 84 | query(q) 85 | #> 86 | #> ── Answer from llama3.2:3b-instruct-q8_0 ───────────────────────────── 87 | #> Category: Positive 88 | ``` 89 | 90 | A nice side effect of the one-shot strategy (and all n>0-strategies) is that you can tune the format the model uses in its replies. 91 | For example, if you want to have an output that easy to parse, you could change the assistant message to `"{'Category':'Negative'}"` 92 | 93 | 94 | ``` r 95 | q <- tribble( 96 | ~role, ~content, 97 | "system", "You assign texts into categories. Answer with just the correct category.", 98 | "user", "text: the pizza tastes terrible\ncategories: positive, neutral, negative", 99 | "assistant", "{'Category':'Negative'}", 100 | "user", "text: the service is great\ncategories: positive, neutral, negative" 101 | ) 102 | answer <- query(q) 103 | #> 104 | #> ── Answer from llama3.2:3b-instruct-q8_0 ───────────────────────────── 105 | #> {'Category':'Positive'} 106 | ``` 107 | 108 | This is a valid JSON return and can be parsed into a list with, e.g., `jsonlite::fromJSON()`. 109 | Using this logic, we could request a more informative output: 110 | 111 | 112 | ``` r 113 | q <- tribble( 114 | ~role, ~content, 115 | "system", "You assign texts into categories. Provide the following information: category, confidence, and the word that is most important for your coding decision.", 116 | "user", "text: the pizza tastes terrible\ncategories: positive, neutral, negative", 117 | "assistant", "{'Category':'Negative','Confidence':'100%','Important':'terrible'}", 118 | "user", "text: the service is great\ncategories: positive, neutral, negative" 119 | ) 120 | answer <- query(q) 121 | #> 122 | #> ── Answer from llama3.2:3b-instruct-q8_0 ───────────────────────────── 123 | #> {'Category':'Positive','Confidence':'100%','Important':'great'} 124 | ``` 125 | 126 | 127 | By using `pluck(answer, "message", "content")`, you can directly extract the result and don't need to copy it from screen. 128 | 129 | 130 | ## Few-shot 131 | 132 | This strategy includes multiple examples (more than one). 
133 | The structure is similar to one-shot but with several iterations of user and assistant messages providing examples before the final text to classify. 134 | 135 | 136 | ``` r 137 | q <- tribble( 138 | ~role, ~content, 139 | "system", "You assign texts into categories. Answer with just the correct category.", 140 | "user", "text: the pizza tastes terrible\ncategories: positive, neutral, negative", 141 | "assistant", "Category: Negative", 142 | "user", "text: the service is great\ncategories: positive, neutral, negative", 143 | "assistant", "Category: Positive", 144 | "user", "text: I once came here with my wife\ncategories: positive, neutral, negative", 145 | "assistant", "Category: Neutral", 146 | "user", "text: I once ate pizza\ncategories: positive, neutral, negative" 147 | ) 148 | query(q) 149 | #> 150 | #> ── Answer from llama3.2:3b-instruct-q8_0 ───────────────────────────── 151 | #> Category: Neutral 152 | ``` 153 | 154 | 155 | ## Chain-of-Thought 156 | 157 | This approach involves at least one reasoning step. The structure here starts with the system prompt, then a user prompt with a text to classify and a reasoning question. 158 | 159 | 160 | 161 | ``` r 162 | q_thought <- tribble( 163 | ~role, ~content, 164 | "system", "You assign texts into categories. ", 165 | "user", "text: the pizza tastes terrible\nWhat sentiment (positive, neutral, or negative) would you assign? Provide some thoughts." 166 | ) 167 | output_thought <- query(q_thought, output = "text") 168 | #> 169 | #> ── Answer from llama3.2:3b-instruct-q8_0 ───────────────────────────── 170 | #> I would assign a negative sentiment to this text. 171 | #> 172 | #> Here's why: 173 | #> 174 | #> * The word "terrible" is a strong adjective that conveys a strong 175 | #> negative opinion. 176 | #> * The tone of the sentence is also quite critical and dismissive. 177 | #> * There are no positive or neutral phrases in the text, which 178 | #> suggests that the author has a clear dislike for the pizza. 179 | #> 180 | #> Overall, the language and tone used in this text indicate a strong 181 | #> negative sentiment towards the pizza. 182 | ``` 183 | 184 | In the next step we can use the assistant's reasoning and a user prompt with the classification question. 185 | 186 | 187 | ``` r 188 | q <- tribble( 189 | ~role, ~content, 190 | "system", "You assign texts into categories. ", 191 | "user", "text: the pizza tastes terrible\nWhat sentiment (positive, neutral, or negative) would you assign? Provide some thoughts.", 192 | "assistant", output_thought, 193 | "user", "Now answer with just the correct category (positive, neutral, or negative)" 194 | ) 195 | resps <- query(q) 196 | #> 197 | #> ── Answer from llama3.2:3b-instruct-q8_0 ───────────────────────────── 198 | #> Negative 199 | ``` 200 | 201 | 202 | 203 | ## The `make_query` helper function 204 | 205 | The `make_query` function is designed to facilitate the creation of a structured query for text classification, so that you do not need to build the tibble yourself and remember the specific structure. 206 | 207 | Components: 208 | 209 | - **text** to Classify: The new text(s) to be annotated. 210 | - **prompt**: Classification question with the categories to be annotated. 211 | - **template**: Defines the structure for user messages. Defines the structure for user messages. The template can include placeholders like {text}, {prefix}, and {suffix} to dynamically format input. 
212 | - **system**: System Prompt: Provides context or instructions for the classification task (optional). 213 | - **prefix**: A string to prepend to user queries (optional). 214 | - **suffix**: A string to append to user queries (optional). 215 | - **examples**: Prior examples consisting of user messages and assistant responses (for one-shot and few-shot learning)(optional). 216 | 217 | 218 | ## Example usage 219 | 220 | ### Zero-shot example 221 | 222 | In this example, the function is used without any examples. 223 | 224 | 225 | ``` r 226 | # Call the make_query function 227 | q_zs <- make_query( 228 | template = "{text}\n{prompt}", 229 | text = "the pizza tastes terrible", 230 | prompt = "Categories: positive, neutral, negative", 231 | system = "You assign texts into categories. Answer with just the correct category.", 232 | ) 233 | 234 | # Print the query 235 | print(q_zs) 236 | #> [[1]] 237 | #> # A tibble: 2 × 2 238 | #> role content 239 | #> 240 | #> 1 system You assign texts into categories. Answer with just the corre… 241 | #> 2 user the pizza tastes terrible 242 | #> Categories: positive, neutral, nega… 243 | # Run the query 244 | query(q_zs) 245 | #> 246 | #> ── Answer from llama3.2:3b-instruct-q8_0 ───────────────────────────── 247 | #> negative 248 | ``` 249 | 250 | 251 | ### One-shot example 252 | 253 | Here, one prior example is provided to aid the classification: 254 | 255 | 256 | ``` r 257 | examples_os <- tibble::tribble( 258 | ~text, ~answer, 259 | "the pizza tastes terrible", "negative" 260 | ) 261 | 262 | q_os <- make_query( 263 | text = "the service is great", 264 | template = "{text}\n{prompt}", 265 | prompt = "Categories: positive, neutral, negative", 266 | system = "You assign texts into categories. Answer with just the correct category.", 267 | example = examples_os, 268 | ) 269 | print(q_os) 270 | #> [[1]] 271 | #> # A tibble: 4 × 2 272 | #> role content 273 | #> 274 | #> 1 system You assign texts into categories. Answer with just the co… 275 | #> 2 user the pizza tastes terrible 276 | #> Categories: positive, neutral, n… 277 | #> 3 assistant negative 278 | #> 4 user the service is great 279 | #> Categories: positive, neutral, negati… 280 | 281 | query(q_os) 282 | #> 283 | #> ── Answer from llama3.2:3b-instruct-q8_0 ───────────────────────────── 284 | #> positive 285 | ``` 286 | 287 | 288 | ### Few-shot example with multiple examples 289 | 290 | This scenario uses multiple examples to enrich the context for the new classification: 291 | 292 | 293 | ``` r 294 | examples_fs <- tibble::tribble( 295 | ~text, ~answer, 296 | "the pizza tastes terrible", "negative", 297 | "the service is great", "positive", 298 | "I once came here with my wife", "neutral" 299 | ) 300 | 301 | q_fs <- make_query( 302 | text = "I once ate pizza", 303 | prompt = "Categories: positive, neutral, negative", 304 | template = "{text}\n{prompt}", 305 | system = "You assign texts into categories. Answer with just the correct category.", 306 | examples = examples_fs 307 | ) 308 | 309 | query(q_fs) 310 | #> 311 | #> ── Answer from llama3.2:3b-instruct-q8_0 ───────────────────────────── 312 | #> neutral 313 | ``` 314 | 315 | # Batch annotation 316 | 317 | In practice, you probably never want to annotate just one text, except maybe for testing. 318 | Instead you normally have a collections of texts, which is why `make_query` takes a vector for the `text` argument. 319 | In this section, we highlight how this is useful with an example batch of texts. 
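Before the fuller worked example in the next section, here is a minimal sketch of this idea: passing a character vector to the `text` argument returns one structured query per element, and `query()` can process the whole list in one call (re-using the zero-shot setup from above).


``` r
# a minimal sketch: one structured query per input text
texts <- c("the pizza tastes terrible", "the service is great")
queries <- make_query(
  text = texts,
  prompt = "Categories: positive, neutral, negative",
  template = "{text}\n{prompt}",
  system = "You assign texts into categories. Answer with just the correct category."
)
length(queries)  # one tibble of messages per text
# query(queries, screen = FALSE, output = "text") would then return one answer per text
```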
320 | 321 | 322 | # Example using a dataframe 323 | 324 | This example demonstrates how to perform sentiment analysis on a set of movie reviews. 325 | The process involves creating a dataframe of reviews, processing each review to classify its sentiment, and appending the results as a new column in the dataframe. 326 | 327 | We create a dataframe named `movie_reviews` with two columns: 328 | 329 | 330 | ``` r 331 | # Create an example dataframe with 5 movie reviews 332 | movie_reviews <- tibble::tibble( 333 | review_id = 1:5, 334 | review = c("A stunning visual spectacle with a gripping storyline.", 335 | "The plot was predictable, but the acting was superb.", 336 | "An overrated film with underwhelming performances.", 337 | "A beautiful tale of love and adventure, beautifully shot.", 338 | "The movie lacked depth, but the special effects were incredible.") 339 | ) 340 | # Print the initial dataframe 341 | movie_reviews 342 | #> # A tibble: 5 × 2 343 | #> review_id review 344 | #> 345 | #> 1 1 A stunning visual spectacle with a gripping storyline. 346 | #> 2 2 The plot was predictable, but the acting was superb. 347 | #> 3 3 An overrated film with underwhelming performances. 348 | #> 4 4 A beautiful tale of love and adventure, beautifully shot. 349 | #> 5 5 The movie lacked depth, but the special effects were incr… 350 | ``` 351 | 352 | We can use `make_query` again to define a query for each of these reviews. 353 | What we want to do is to perform a sentiment analysis, guided by a system message and a classification question. 354 | 355 | 356 | ``` r 357 | # Process each review using make_query 358 | queries <- make_query( 359 | text = movie_reviews$review, 360 | prompt = "Categories: positive, neutral, negative", 361 | template = "{prefix}{text}\n{prompt}", 362 | system = "Classify the sentiment of the movie review. Answer with just the correct category.", 363 | prefix = "Text to classify: " 364 | ) 365 | ``` 366 | 367 | This produces a list of data.frames that have the same query format we are now familiar with. 368 | All of them have the same prompt, system message and prefix, but each has a different text that came from the movie reviews data.frame we created above. 369 | The `query` function accepts lists of queries, so we can get the annotations simply using: 370 | 371 | 372 | ``` r 373 | # Process and annotate the movie reviews 374 | movie_reviews$annotation <- query(queries, screen = FALSE, output = "text") 375 | 376 | # Print the annotated dataframe 377 | movie_reviews 378 | #> # A tibble: 5 × 3 379 | #> review_id review annotation 380 | #> 381 | #> 1 1 A stunning visual spectacle with a gripping st… Positive 382 | #> 2 2 The plot was predictable, but the acting was s… positive 383 | #> 3 3 An overrated film with underwhelming performan… Negative 384 | #> 4 4 A beautiful tale of love and adventure, beauti… Positive 385 | #> 5 5 The movie lacked depth, but the special effect… Neutral 386 | ``` 387 | 388 | We can also use this approach in a 'tidy' coding style: 389 | 390 | 391 | ``` r 392 | library(dplyr, warn.conflicts = FALSE) 393 | movie_reviews_annotated <- movie_reviews |> 394 | mutate( 395 | sentiment = make_query( 396 | text = review, 397 | prompt = "Categories: positive, neutral, negative", 398 | template = "{prefix}{text}\n{prompt}", 399 | system = "Classify the sentiment of the movie review. 
Answer with just the correct category.", 400 | prefix = "Text to classify: " 401 | ) |> 402 | query(screen = FALSE, output = "text") 403 | ) 404 | movie_reviews_annotated 405 | #> # A tibble: 5 × 4 406 | #> review_id review annotation sentiment 407 | #> 408 | #> 1 1 A stunning visual spectacle with a g… Positive Positive 409 | #> 2 2 The plot was predictable, but the ac… positive positive 410 | #> 3 3 An overrated film with underwhelming… Negative Negative 411 | #> 4 4 A beautiful tale of love and adventu… Positive Positive 412 | #> 5 5 The movie lacked depth, but the spec… Neutral Neutral 413 | ``` 414 | 415 | This takes a little longer than classic supervised machine learning or even classification with transformer models. 416 | However, the advantage is that instructions can be provided using plain English, the models need very few examples to perform surprisingly well, and the best models, like `llama3.2`, can often deal with more complex categories than other approaches. 417 | 418 | -------------------------------------------------------------------------------- /vignettes/annotation.Rmd.orig: -------------------------------------------------------------------------------- 1 | --- 2 | title: "annotation" 3 | output: rmarkdown::html_vignette 4 | author: Maximilian Weber and Johannes B. Gruber 5 | vignette: > 6 | %\VignetteIndexEntry{annotation} 7 | %\VignetteEngine{knitr::rmarkdown} 8 | %\VignetteEncoding{UTF-8} 9 | --- 10 | 11 | # Introduction 12 | 13 | ```{r, include = FALSE} 14 | knitr::opts_chunk$set( 15 | collapse = TRUE, 16 | comment = "#>" 17 | ) 18 | options(rollama_verbose = FALSE) 19 | options(rollama_seed = 42) 20 | options(width = 70) 21 | ``` 22 | 23 | After you installed Ollama on your machine and downloaded the package rollama you can load the package and pull a model. 24 | The default model (`llama3.1`), is a good all-round chat model. 25 | For annotation, however, the instruction tuned llama models are often better suited, as they follow instructions more diligently and are less likely to trail off into a conversation. 26 | By changing the option `rollama_model`, we can change which model is used by default in the current session: 27 | 28 | ```{r} 29 | library(rollama) 30 | options(rollama_model = "llama3.2:3b-instruct-q8_0") 31 | pull_model() 32 | ``` 33 | 34 | # Prompting Strategies 35 | 36 | If you want to annotate textual data, you can use various prompting strategies. 37 | For an overview of common approaches, you can read a paper by [Weber and Reichardt (2023)](https://arxiv.org/abs/2401.00284). 38 | These strategies primarily differ in whether or how many examples are given (Zero-shot, One-shot, or Few-shot) and whether reasoning is involved (Chain-of-Thought). 39 | 40 | When writing a prompt we can give the model content for the system part, user part and assistant part. 41 | The system message typically includes instructions or context that guides the interaction, setting the stage for how the user and the assistant should interact. 42 | For an annotation task we could write: 43 | "You assign texts into categories. Answer with just the correct category." 44 | The table below summarizes different prompting strategies for annotating textual data. 45 | Each strategy varies in the number of examples given and the incorporation of reasoning. 46 | 47 | | Prompting Strategy | Example Structure | 48 | |--------------------|--------------------| 49 | | Zero-shot | `{"role": "system", "content": "Text of System Prompt"},`
`{"role": "user", "content": "(Text to classify) + classification question"}` | 50 | | One-shot | `{"role": "system", "content": "Text of System Prompt"},`
`{"role": "user", "content": "(Example text) + classification question"},`
`{"role": "assistant", "content": "Example classification"},`
`{"role": "user", "content": "(Text to classify) + classification question"}` | 51 | | Few-shot | `{"role": "system", "content": "Text of System Prompt"},`
`{"role": "user", "content": "(Example text) + classification question"},`
`{"role": "assistant", "content": "Example classification"},`
`{"role": "user", "content": "(Example text) + classification question"},`
`{"role": "assistant", "content": "Example classification"},`
`. . . more examples`
`{"role": "user", "content": "(Text to classify) + classification question"}` | 52 | | Chain-of-Thought | `{"role": "system", "content": "Text of System Prompt"},`
`{"role": "user", "content": "(Text to classify) + reasoning question"},`
`{"role": "assistant", "content": "Reasoning"},`
`{"role": "user", "content": "Classification question"}` | 53 | 54 | 55 | ## Zero-shot 56 | 57 | In this approach, no prior examples are given. 58 | The structure includes a system prompt providing instructions and a user prompt with the text to classify and the classification question (in this example we only provide the categories). 59 | 60 | ```{r} 61 | library(tibble) 62 | library(purrr) 63 | q <- tribble( 64 | ~role, ~content, 65 | "system", "You assign texts into categories. Answer with just the correct category.", 66 | "user", "text: the pizza tastes terrible\ncategories: positive, neutral, negative" 67 | ) 68 | query(q) 69 | ``` 70 | 71 | 72 | ## One-shot 73 | 74 | This involves giving a single example before the actual task. 75 | The structure includes a system prompt, followed by a user prompt with an example text and classification question, the assistant's example classification, and then another user prompt with the new text to classify. 76 | 77 | ```{r} 78 | q <- tribble( 79 | ~role, ~content, 80 | "system", "You assign texts into categories. Answer with just the correct category.", 81 | "user", "text: the pizza tastes terrible\ncategories: positive, neutral, negative", 82 | "assistant", "Category: Negative", 83 | "user", "text: the service is great\ncategories: positive, neutral, negative" 84 | ) 85 | query(q) 86 | ``` 87 | 88 | A nice side effect of the one-shot strategy (and all n>0-strategies) is that you can tune the format the model uses in its replies. 89 | For example, if you want to have an output that easy to parse, you could change the assistant message to `"{'Category':'Negative'}"` 90 | 91 | ```{r} 92 | q <- tribble( 93 | ~role, ~content, 94 | "system", "You assign texts into categories. Answer with just the correct category.", 95 | "user", "text: the pizza tastes terrible\ncategories: positive, neutral, negative", 96 | "assistant", "{'Category':'Negative'}", 97 | "user", "text: the service is great\ncategories: positive, neutral, negative" 98 | ) 99 | answer <- query(q) 100 | ``` 101 | 102 | This is a valid JSON return and can be parsed into a list with, e.g., `jsonlite::fromJSON()`. 103 | Using this logic, we could request a more informative output: 104 | 105 | ```{r} 106 | q <- tribble( 107 | ~role, ~content, 108 | "system", "You assign texts into categories. Provide the following information: category, confidence, and the word that is most important for your coding decision.", 109 | "user", "text: the pizza tastes terrible\ncategories: positive, neutral, negative", 110 | "assistant", "{'Category':'Negative','Confidence':'100%','Important':'terrible'}", 111 | "user", "text: the service is great\ncategories: positive, neutral, negative" 112 | ) 113 | answer <- query(q) 114 | ``` 115 | 116 | 117 | By using `pluck(answer, "message", "content")`, you can directly extract the result and don't need to copy it from screen. 118 | 119 | 120 | ## Few-shot 121 | 122 | This strategy includes multiple examples (more than one). 123 | The structure is similar to one-shot but with several iterations of user and assistant messages providing examples before the final text to classify. 124 | 125 | ```{r} 126 | q <- tribble( 127 | ~role, ~content, 128 | "system", "You assign texts into categories. 
Answer with just the correct category.", 129 | "user", "text: the pizza tastes terrible\ncategories: positive, neutral, negative", 130 | "assistant", "Category: Negative", 131 | "user", "text: the service is great\ncategories: positive, neutral, negative", 132 | "assistant", "Category: Positive", 133 | "user", "text: I once came here with my wife\ncategories: positive, neutral, negative", 134 | "assistant", "Category: Neutral", 135 | "user", "text: I once ate pizza\ncategories: positive, neutral, negative" 136 | ) 137 | query(q) 138 | ``` 139 | 140 | 141 | ## Chain-of-Thought 142 | 143 | This approach involves at least one explicit reasoning step. The structure starts with the system prompt, followed by a user prompt with the text to classify and a reasoning question. 144 | 145 | 146 | ```{r} 147 | q_thought <- tribble( 148 | ~role, ~content, 149 | "system", "You assign texts into categories. ", 150 | "user", "text: the pizza tastes terrible\nWhat sentiment (positive, neutral, or negative) would you assign? Provide some thoughts." 151 | ) 152 | output_thought <- query(q_thought, output = "text") 153 | ``` 154 | 155 | In the next step, we feed the assistant's reasoning back in, followed by a user prompt with the classification question. 156 | 157 | ```{r} 158 | q <- tribble( 159 | ~role, ~content, 160 | "system", "You assign texts into categories. ", 161 | "user", "text: the pizza tastes terrible\nWhat sentiment (positive, neutral, or negative) would you assign? Provide some thoughts.", 162 | "assistant", output_thought, 163 | "user", "Now answer with just the correct category (positive, neutral, or negative)" 164 | ) 165 | resps <- query(q) 166 | ``` 167 | 168 | 169 | 170 | ## The `make_query` helper function 171 | 172 | The `make_query` function facilitates the creation of a structured query for text classification, so that you do not need to build the tibble yourself and remember its specific structure. 173 | 174 | Components: 175 | 176 | - **text**: The new text(s) to be annotated. 177 | - **prompt**: Classification question with the categories to be annotated. 178 | - **template**: Defines the structure of the user messages. The template can include placeholders like {text}, {prefix}, and {suffix} to dynamically format the input. 179 | - **system**: System prompt providing context or instructions for the classification task (optional). 180 | - **prefix**: A string to prepend to user queries (optional). 181 | - **suffix**: A string to append to user queries (optional). 182 | - **examples**: Prior examples consisting of user messages and assistant responses, used for one-shot and few-shot learning (optional). 183 | 184 | 185 | ## Example usage 186 | 187 | ### Zero-shot example 188 | 189 | In this example, the function is used without any examples. 190 | 191 | ```{r} 192 | # Call the make_query function 193 | q_zs <- make_query( 194 | template = "{text}\n{prompt}", 195 | text = "the pizza tastes terrible", 196 | prompt = "Categories: positive, neutral, negative", 197 | system = "You assign texts into categories. 
Answer with just the correct category." 198 | ) 199 | 200 | # Print the query 201 | print(q_zs) 202 | # Run the query 203 | query(q_zs) 204 | ``` 205 | 206 | 207 | ### One-shot example 208 | 209 | Here, one prior example is provided to aid the classification: 210 | 211 | ```{r} 212 | examples_os <- tibble::tribble( 213 | ~text, ~answer, 214 | "the pizza tastes terrible", "negative" 215 | ) 216 | 217 | q_os <- make_query( 218 | text = "the service is great", 219 | template = "{text}\n{prompt}", 220 | prompt = "Categories: positive, neutral, negative", 221 | system = "You assign texts into categories. Answer with just the correct category.", 222 | examples = examples_os 223 | ) 224 | print(q_os) 225 | 226 | query(q_os) 227 | ``` 228 | 229 | 230 | ### Few-shot example with multiple examples 231 | 232 | This scenario uses multiple examples to enrich the context for the new classification: 233 | 234 | ```{r} 235 | examples_fs <- tibble::tribble( 236 | ~text, ~answer, 237 | "the pizza tastes terrible", "negative", 238 | "the service is great", "positive", 239 | "I once came here with my wife", "neutral" 240 | ) 241 | 242 | q_fs <- make_query( 243 | text = "I once ate pizza", 244 | prompt = "Categories: positive, neutral, negative", 245 | template = "{text}\n{prompt}", 246 | system = "You assign texts into categories. Answer with just the correct category.", 247 | examples = examples_fs 248 | ) 249 | 250 | query(q_fs) 251 | ``` 252 | 253 | # Batch annotation 254 | 255 | In practice, you probably never want to annotate just one text, except maybe for testing. 256 | Instead, you normally have a collection of texts, which is why `make_query` takes a vector for the `text` argument. 257 | In this section, we highlight how this is useful with an example batch of texts. 258 | 259 | 260 | # Example using a dataframe 261 | 262 | This example demonstrates how to perform sentiment analysis on a set of movie reviews. 263 | The process involves creating a dataframe of reviews, processing each review to classify its sentiment, and appending the results as a new column in the dataframe. 264 | 265 | We create a dataframe named `movie_reviews` with two columns: 266 | 267 | ```{r} 268 | # Create an example dataframe with 5 movie reviews 269 | movie_reviews <- tibble::tibble( 270 | review_id = 1:5, 271 | review = c("A stunning visual spectacle with a gripping storyline.", 272 | "The plot was predictable, but the acting was superb.", 273 | "An overrated film with underwhelming performances.", 274 | "A beautiful tale of love and adventure, beautifully shot.", 275 | "The movie lacked depth, but the special effects were incredible.") 276 | ) 277 | # Print the initial dataframe 278 | movie_reviews 279 | ``` 280 | 281 | We can use `make_query` again to define a query for each of these reviews. 282 | We want to perform a sentiment analysis, guided by a system message and a classification question. 283 | 284 | ```{r} 285 | # Process each review using make_query 286 | queries <- make_query( 287 | text = movie_reviews$review, 288 | prompt = "Categories: positive, neutral, negative", 289 | template = "{prefix}{text}\n{prompt}", 290 | system = "Classify the sentiment of the movie review. Answer with just the correct category.", 291 | prefix = "Text to classify: " 292 | ) 293 | ``` 294 | 295 | This produces a list of data.frames that have the same query format we are now familiar with. 
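To verify, we can count the queries and inspect the first element (a quick check using base R; `queries` comes from the chunk above):

```{r}
# one query per review; each element is a message data.frame
length(queries)
queries[[1]]
```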
296 | All of them have the same prompt, system message and prefix, but each has a different text that came from the movie reviews data.frame we created above. 297 | The `query` function accepts lists of queries, so we can get the annotations simply using: 298 | 299 | ```{r} 300 | # Process and annotate the movie reviews 301 | movie_reviews$annotation <- query(queries, screen = FALSE, output = "text") 302 | 303 | # Print the annotated dataframe 304 | movie_reviews 305 | ``` 306 | 307 | We can also use this approach in a 'tidy' coding style: 308 | 309 | ```{r} 310 | library(dplyr, warn.conflicts = FALSE) 311 | movie_reviews_annotated <- movie_reviews |> 312 | mutate( 313 | sentiment = make_query( 314 | text = review, 315 | prompt = "Categories: positive, neutral, negative", 316 | template = "{prefix}{text}\n{prompt}", 317 | system = "Classify the sentiment of the movie review. Answer with just the correct category.", 318 | prefix = "Text to classify: " 319 | ) |> 320 | query(screen = FALSE, output = "text") 321 | ) 322 | movie_reviews_annotated 323 | ``` 324 | 325 | This takes a little longer than classic supervised machine learning or even classification with transformer models. 326 | However, the advantage is that instructions can be provided using plain English, the models need very few examples to perform surprisingly well, and the best models, like `llama3.2`, can often deal with more complex categories than other approaches. 327 | 328 | -------------------------------------------------------------------------------- /vignettes/figures/smldemo-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JBGruber/rollama/dc44d7879cfa6ec8f5b03ad59e7f5b51c42567f6/vignettes/figures/smldemo-1.png -------------------------------------------------------------------------------- /vignettes/hf-gguf.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Hugging Face Models" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{hf-gguf} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | 11 | 12 | If you are looking for a model to use, you should probably search for it on the [Ollama website](https://ollama.com/search). 13 | However, the models listed there are not all models that can be used in (r)ollama. 14 | Models in the GGUF format from *Hugging Face Hub*, a very popular platform for sharing machine learning models. 15 | To look for a specific model, all you need to do is visit (this is already filtered to GGUF models, other model formats are not compatible with Ollama). 16 | 17 | Once you have identified a model, you can simply pass the URL to the `pull_model` function: 18 | 19 | 20 | ``` r 21 | library(rollama) 22 | pull_model("https://huggingface.co/oxyapi/oxy-1-small-GGUF:Q2_K") 23 | #> ✔ model https://huggingface.co/oxyapi/oxy-1-small-GGUF:Q2_K pulled succesfully 24 | ``` 25 | 26 | Note that the `:Q2_K` at the end is the [quantization scheme](https://huggingface.co/docs/optimum/en/concept_guides/quantization). 27 | Q2_K is the smallest available version of the model, which gives up some performance, but is faster to run. 28 | You can find the different quantization versions when clicking the `Use this model` on a model site. 
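If you want a different trade-off between size and quality, you can pull another tag in exactly the same way. The tag below is purely illustrative; check the model page to see which quantizations are actually offered:

``` r
# hypothetical tag: only works if the repository offers a Q4_K_M build
pull_model("https://huggingface.co/oxyapi/oxy-1-small-GGUF:Q4_K_M")
```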
29 | When downloading, Ollama converts the URL automatically into a name, we need to query our model list first to see how the model is named now: 30 | 31 | 32 | ``` r 33 | grep("oxy-1-small", list_models()$name, value = TRUE) 34 | #> [1] "huggingface.co/oxyapi/oxy-1-small-GGUF:Q2_K" 35 | ``` 36 | 37 | But except for the awkward name, we can now use this model as any other one: 38 | 39 | 40 | ``` r 41 | chat("Why is the sky blue?", model = "huggingface.co/oxyapi/oxy-1-small-GGUF:Q2_K") 42 | #> 43 | #> ── Answer from huggingface.co/oxyapi/oxy-1-small-GGUF:Q2_K ─────────── 44 | #> The sky appears blue to our eyes due to a process called Rayleigh 45 | #> scattering. Sunlight entering the Earth’s atmosphere collides with 46 | #> the air molecules, causing them to scatter in all directions. 47 | #> Nitrogen and oxygen make up about 95% of the earth's atmosphere. 48 | #> 49 | #> When sunlight encounters these gases, it gets scattered widely 50 | #> throughout the sky. Shorter wavelengths of visible light fall within 51 | #> blue’s wavelength range; thus, when sunlight scatters into our eyes 52 | #> as blue light, we perceive a blue sky. 53 | #> 54 | #> However, at sunrise and sunset hours, the sun is lower on the horizon 55 | #> compared to noon time. As such the path of sunlight passes through 56 | #> more atmosphere than usual, which causes the longer wavelengths (red) 57 | #> to dominate over shorter ones, giving us the shades of red hues like 58 | #> pink or crimson skies. 59 | ``` 60 | 61 | Note that this also works with [text embedding models](https://jbgruber.github.io/rollama/articles/text-embedding.html). 62 | Hugging Face Hub has some nice filters with which you can pre-select appropriate models and then use full text search to find more. 63 | This search looks for embedding models with the correct model type, for example: 64 | 65 | 66 | 67 | The trending models are often quite good for general tasks, but more information is available in leaderboards and blog posts. 
68 | For no particular reason, let's use Snowflake's Arctic-embed-m-v1.5 embed for demonstration purposes here: 69 | 70 | 71 | ``` r 72 | pull_model("https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5:BF16") 73 | #> ✔ model https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5:BF16 pulled succesfully 74 | embed_text(c("Why is the sky blue?", "I am pretty happy we can work with GGUF models in R"), 75 | model = "huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5:BF16") 76 | #> # A tibble: 2 × 768 77 | #> dim_1 dim_2 dim_3 dim_4 dim_5 dim_6 dim_7 dim_8 dim_9 dim_10 78 | #> 79 | #> 1 0.221 0.128 0.698 0.0980 0.891 -0.145 0.168 0.722 -0.308 -0.674 80 | #> 2 0.239 -0.332 0.699 -0.125 0.856 0.345 -0.0625 0.361 -0.309 -0.657 81 | #> # ℹ 758 more variables: dim_11 , dim_12 , dim_13 , 82 | #> # dim_14 , dim_15 , dim_16 , dim_17 , 83 | #> # dim_18 , dim_19 , dim_20 , dim_21 , 84 | #> # dim_22 , dim_23 , dim_24 , dim_25 , 85 | #> # dim_26 , dim_27 , dim_28 , dim_29 , 86 | #> # dim_30 , dim_31 , dim_32 , dim_33 , 87 | #> # dim_34 , dim_35 , dim_36 , dim_37 , … 88 | ``` 89 | 90 | 91 | -------------------------------------------------------------------------------- /vignettes/hf-gguf.Rmd.orig: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Hugging Face Models" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{hf-gguf} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | ```{r, include = FALSE} 11 | knitr::opts_chunk$set( 12 | collapse = TRUE, 13 | comment = "#>" 14 | ) 15 | options(rollama_verbose = FALSE) 16 | options(rollama_seed = 42) 17 | options(width = 70) 18 | ``` 19 | 20 | If you are looking for a model to use, you should probably search for it on the [Ollama website](https://ollama.com/search). 21 | However, the models listed there are not all models that can be used in (r)ollama. 22 | Models in the GGUF format from *Hugging Face Hub*, a very popular platform for sharing machine learning models. 23 | To look for a specific model, all you need to do is visit (this is already filtered to GGUF models, other model formats are not compatible with Ollama). 24 | 25 | Once you have identified a model, you can simply pass the URL to the `pull_model` function: 26 | 27 | ```{r} 28 | library(rollama) 29 | pull_model("https://huggingface.co/oxyapi/oxy-1-small-GGUF:Q2_K") 30 | ``` 31 | 32 | Note that the `:Q2_K` at the end is the [quantization scheme](https://huggingface.co/docs/optimum/en/concept_guides/quantization). 33 | Q2_K is the smallest available version of the model, which gives up some performance, but is faster to run. 34 | You can find the different quantization versions when clicking the `Use this model` on a model site. 35 | When downloading, Ollama converts the URL automatically into a name, we need to query our model list first to see how the model is named now: 36 | 37 | ```{r} 38 | grep("oxy-1-small", list_models()$name, value = TRUE) 39 | ``` 40 | 41 | But except for the awkward name, we can now use this model as any other one: 42 | 43 | ```{r} 44 | chat("Why is the sky blue?", model = "huggingface.co/oxyapi/oxy-1-small-GGUF:Q2_K") 45 | ``` 46 | 47 | Note that this also works with [text embedding models](https://jbgruber.github.io/rollama/articles/text-embedding.html). 48 | Hugging Face Hub has some nice filters with which you can pre-select appropriate models and then use full text search to find more. 
49 | This search looks for embedding models with the correct model type, for example: 50 | 51 | 52 | 53 | The trending models are often quite good for general tasks, but more information is available in leaderboards and blog posts. 54 | For no particular reason, let's use Snowflake's Arctic-embed-m-v1.5 embed for demonstration purposes here: 55 | 56 | ```{r} 57 | pull_model("https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5:BF16") 58 | embed_text(c("Why is the sky blue?", "I am pretty happy we can work with GGUF models in R"), 59 | model = "huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5:BF16") 60 | ``` 61 | 62 | 63 | -------------------------------------------------------------------------------- /vignettes/image-annotation.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "image-annotation" 3 | output: rmarkdown::html_vignette 4 | author: Maximilian Weber 5 | vignette: > 6 | %\VignetteIndexEntry{image-annotation} 7 | %\VignetteEngine{knitr::rmarkdown} 8 | %\VignetteEncoding{UTF-8} 9 | --- 10 | 11 | 12 | 13 | Ollama also supports multimodal models, which can interact with (but not create) images. 14 | 15 | We start by loading the package: 16 | 17 | 18 | ``` r 19 | library(rollama) 20 | ``` 21 | 22 | After loading the package, we need to pull a model that can handle images. 23 | For example, the [llava](https://llava-vl.github.io/) model. 24 | Using `pull_model("llava")` will download the model, or just load it if it has already been downloaded before. 25 | 26 | 27 | 28 | ``` r 29 | pull_model("llava") 30 | #> ✔ model llava pulled succesfully 31 | ``` 32 | 33 | We can use textual and visual input together. 34 | For instance, we can ask a question and provide a link to a picture or a local file path, such as `images = "/home/user/Pictures/IMG_4561.jpg"`. 35 | 36 | In the first example, we ask the model to describe the logo of this package: 37 | 38 | 39 | ``` r 40 | query("Excitedly desscribe this logo", model = "llava", 41 | images = "https://raw.githubusercontent.com/JBGruber/rollama/master/man/figures/logo.png") 42 | #> 43 | #> ── Answer from llava ───────────────────────────────────────────────── 44 | #> This is an image of a logo for "Rollama." The logo features a playful 45 | #> and creative design, with a cartoon-style character resting on a bed 46 | #> of green grass. The character is anthropomorphic, having arms and 47 | #> legs like a human, but it has animal-like ears and is wearing a blue 48 | #> helmet. The helmet seems to have a visor, and there's a badge 49 | #> attached to it that reads "Rollama." 50 | #> 51 | #> The background of the logo is light blue with a faint cloud pattern, 52 | #> which adds to the whimsical feel of the design. The use of bold 53 | #> colors and simple shapes gives the logo a friendly and approachable 54 | #> vibe, suggesting that whatever "Rollama" represents could be fun and 55 | #> enjoyable. 56 | #> 57 | #> Without additional context, it's not possible to determine the exact 58 | #> nature or purpose of Rollama from this image alone. However, the 59 | #> playful design and the badge suggest that it might be related to a 60 | #> game, an application, or possibly a branding for something 61 | #> entertaining and engaging. 
62 | ``` 63 | 64 | The second example asks a classification question: 65 | 66 | 67 | ``` r 68 | query("Which animal is in this image: a llama, dog, or walrus?", 69 | model = "llava", 70 | images = "https://raw.githubusercontent.com/JBGruber/rollama/master/man/figures/logo.png") 71 | #> 72 | #> ── Answer from llava ───────────────────────────────────────────────── 73 | #> The image shows an animated character that resembles a llama. It has 74 | #> distinctive features of a llama, such as the large head with two 75 | #> forward-facing horns and a long, curved neck with short, rounded ears 76 | #> at the top. 77 | ``` 78 | 79 | -------------------------------------------------------------------------------- /vignettes/image-annotation.Rmd.orig: -------------------------------------------------------------------------------- 1 | --- 2 | title: "image-annotation" 3 | output: rmarkdown::html_vignette 4 | author: Maximilian Weber 5 | vignette: > 6 | %\VignetteIndexEntry{image-annotation} 7 | %\VignetteEngine{knitr::rmarkdown} 8 | %\VignetteEncoding{UTF-8} 9 | --- 10 | 11 | ```{r, include = FALSE} 12 | knitr::opts_chunk$set( 13 | collapse = TRUE, 14 | comment = "#>" 15 | ) 16 | options(rollama_verbose = FALSE) 17 | options(rollama_seed = 42) 18 | options(width = 70) 19 | ``` 20 | 21 | Ollama also supports multimodal models, which can interact with (but not create) images. 22 | 23 | We start by loading the package: 24 | 25 | ```{r setup} 26 | library(rollama) 27 | ``` 28 | 29 | After loading the package, we need to pull a model that can handle images. 30 | For example, the [llava](https://llava-vl.github.io/) model. 31 | Using `pull_model("llava")` will download the model, or just load it if it has already been downloaded before. 32 | 33 | 34 | ```{r} 35 | pull_model("llava") 36 | ``` 37 | 38 | We can use textual and visual input together. 39 | For instance, we can ask a question and provide a link to a picture or a local file path, such as `images = "/home/user/Pictures/IMG_4561.jpg"`. 40 | 41 | In the first example, we ask the model to describe the logo of this package: 42 | 43 | ```{r} 44 | query("Excitedly desscribe this logo", model = "llava", 45 | images = "https://raw.githubusercontent.com/JBGruber/rollama/master/man/figures/logo.png") 46 | ``` 47 | 48 | The second example asks a classification question: 49 | 50 | ```{r} 51 | query("Which animal is in this image: a llama, dog, or walrus?", 52 | model = "llava", 53 | images = "https://raw.githubusercontent.com/JBGruber/rollama/master/man/figures/logo.png") 54 | ``` 55 | 56 | -------------------------------------------------------------------------------- /vignettes/text-embedding.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "text-embedding" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{text-embedding} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | 11 | 12 | Ollama, and hence `rollama`, can be used for text embedding. 13 | In short, text embedding uses the knowledge of the meaning of words inferred from the context that is saved in a large language model through its training to turn text into meaningful vectors of numbers. 14 | This technique is a powerful preprocessing step for supervised machine learning and often increases the performance of a classification model substantially. 
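To make the idea of "meaningful vectors" concrete, here is a minimal sketch (assuming the default embedding model is already pulled) that embeds two related sentences and computes their cosine similarity; texts about similar things should get similar vectors:

``` r
# sketch: embed two sentences and compare them
emb <- rollama::embed_text(c("the pizza tastes terrible",
                             "the food was awful"))
m <- as.matrix(emb)
# cosine similarity between the two embedding vectors
sum(m[1, ] * m[2, ]) / sqrt(sum(m[1, ]^2) * sum(m[2, ]^2))
```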
15 | Compared to using `rollama` directly for classification, the advantage is that converting text into embeddings and then using these embeddings for classification is usually faster and more resource efficient -- especially if you re-use embeddings for multiple tasks. 16 | 17 | 18 | ``` r 19 | library(rollama) 20 | library(tidyverse) 21 | ``` 22 | 23 | 24 | ``` r 25 | reviews_df <- read_csv("https://raw.githubusercontent.com/AFAgarap/ecommerce-reviews-analysis/master/Womens%20Clothing%20E-Commerce%20Reviews.csv", 26 | show_col_types = FALSE) 27 | glimpse(reviews_df) 28 | #> Rows: 23,486 29 | #> Columns: 11 30 | #> $ ...1 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, … 31 | #> $ `Clothing ID` 767, 1080, 1077, 1049, 847, 1080, … 32 | #> $ Age 33, 34, 60, 50, 47, 49, 39, 39, 24… 33 | #> $ Title NA, NA, "Some major design flaws",… 34 | #> $ `Review Text` "Absolutely wonderful - silky and … 35 | #> $ Rating 4, 5, 3, 5, 5, 2, 5, 4, 5, 5, 3, 5… 36 | #> $ `Recommended IND` 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1… 37 | #> $ `Positive Feedback Count` 0, 4, 0, 0, 6, 4, 1, 4, 0, 0, 14, … 38 | #> $ `Division Name` "Initmates", "General", "General",… 39 | #> $ `Department Name` "Intimate", "Dresses", "Dresses", … 40 | #> $ `Class Name` "Intimates", "Dresses", "Dresses",… 41 | ``` 42 | 43 | Now this is a rather big dataset, and I don't want to stress my GPU too much, so I only select the first 5,000 reviews for embedding. 44 | I also process the data slightly by combining the title and review text into a single column and turning the rating into a binary variable: 45 | 46 | 47 | ``` r 48 | reviews <- reviews_df |> 49 | slice_head(n = 5000) |> 50 | rename(id = ...1) |> 51 | mutate(rating = factor(Rating == 5, c(TRUE, FALSE), c("5", "<5"))) |> 52 | mutate(full_text = paste0(ifelse(is.na(Title), "", Title), `Review Text`)) 53 | ``` 54 | 55 | To turn one or multiple texts into embeddings, you can simply use `embed_text`: 56 | 57 | 58 | ``` r 59 | embed_text(text = reviews$full_text[1:3]) 60 | #> # A tibble: 3 × 3,072 61 | #> dim_1 dim_2 dim_3 dim_4 dim_5 dim_6 dim_7 dim_8 dim_9 dim_10 62 | #> 63 | #> 1 -1.30 0.277 2.73 0.608 -1.95 -1.19 0.425 -0.349 0.935 -0.664 64 | #> 2 1.44 1.13 -0.152 -0.00552 0.285 -2.49 -0.557 1.93 2.08 -1.12 65 | #> 3 -2.37 0.284 -0.904 -1.69 0.490 -0.743 -1.60 0.149 2.57 0.893 66 | #> # ℹ 3,062 more variables: dim_11 , dim_12 , dim_13 , 67 | #> # dim_14 , dim_15 , dim_16 , dim_17 , 68 | #> # dim_18 , dim_19 , dim_20 , dim_21 , 69 | #> # dim_22 , dim_23 , dim_24 , dim_25 , 70 | #> # dim_26 , dim_27 , dim_28 , dim_29 , 71 | #> # dim_30 , dim_31 , dim_32 , dim_33 , 72 | #> # dim_34 , dim_35 , dim_36 , dim_37 , … 73 | ``` 74 | 75 | To use this on the sample of reviews, I put the embeddings into a new column, before unnesting the resulting data.frame. 76 | The reason behind this is that I want to make sure the embeddings belong to the correct review ID. 77 | I also use a different model this time: [`nomic-embed-text`](https://ollama.com/library/nomic-embed-text). 78 | While models like `llama3.1` are extremely powerful at handling conversations and natural language requests, they are also computationally intensive, and hence relatively slow. 79 | As of version 0.1.26, Ollama support using dedicated embedding models, which can perform the task a lot faster and with fewer resources. 
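You can check whether the model is already on your machine before pulling it (a quick sketch using `list_models()`; the exact tag Ollama assigns may differ):

``` r
# TRUE if some version of nomic-embed-text is already installed
any(grepl("nomic-embed-text", list_models()$name))
```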
80 | Download the model with `pull_model("nomic-embed-text")` then we can run: 81 | 82 | 83 | ``` r 84 | reviews_embeddings <- reviews |> 85 | mutate(embeddings = embed_text(text = full_text, model = "nomic-embed-text")) |> 86 | select(id, rating, embeddings) |> 87 | unnest_wider(embeddings) 88 | ``` 89 | 90 | The resulting data.frame contains the ID and rating along the 768 embedding dimensions: 91 | 92 | 93 | ``` r 94 | reviews_embeddings 95 | #> # A tibble: 5,000 × 770 96 | #> id rating dim_1 dim_2 dim_3 dim_4 dim_5 dim_6 dim_7 97 | #> 98 | #> 1 0 <5 1.12 1.56 -4.48 -0.129 -0.373 0.390 -1.24 99 | #> 2 1 5 0.792 0.721 -3.14 -0.808 -1.81 1.35 0.403 100 | #> 3 2 <5 0.539 1.12 -2.58 -0.417 -0.992 1.77 0.895 101 | #> 4 3 5 -0.150 1.25 -4.12 -0.0750 -0.835 1.06 -0.0965 102 | #> 5 4 5 0.352 0.972 -3.40 -1.18 -0.686 0.489 0.127 103 | #> 6 5 <5 0.907 0.975 -2.78 -0.638 -1.48 2.21 0.373 104 | #> 7 6 5 0.523 0.321 -2.46 -0.678 -0.640 0.501 0.703 105 | #> 8 7 <5 0.224 0.694 -3.12 -0.562 -1.50 -0.0708 0.178 106 | #> 9 8 5 -0.0477 1.21 -3.70 -0.300 -0.936 0.583 0.135 107 | #> 10 9 5 -0.105 1.13 -3.22 -0.310 -1.69 0.857 -0.157 108 | #> # ℹ 4,990 more rows 109 | #> # ℹ 761 more variables: dim_8 , dim_9 , dim_10 , 110 | #> # dim_11 , dim_12 , dim_13 , dim_14 , 111 | #> # dim_15 , dim_16 , dim_17 , dim_18 , 112 | #> # dim_19 , dim_20 , dim_21 , dim_22 , 113 | #> # dim_23 , dim_24 , dim_25 , dim_26 , 114 | #> # dim_27 , dim_28 , dim_29 , dim_30 , … 115 | ``` 116 | 117 | As said above, these embeddings are often used in supervised machine learning. 118 | I use part of [a blog post by Emil Hvitfeldt](https://emilhvitfeldt.com/post/textrecipes-series-pretrained-word-embeddings/) show how this can be done using the data we embedded above in the powerful `tidymodels` collection of packages: 119 | 120 | 121 | ``` r 122 | library(tidymodels) 123 | # split data into training an test set (for validation) 124 | set.seed(1) 125 | reviews_split <- initial_split(reviews_embeddings) 126 | 127 | reviews_train <- training(reviews_split) 128 | 129 | # set up the model we want to use 130 | lasso_spec <- logistic_reg(penalty = tune(), mixture = 1) |> 131 | set_engine("glmnet") 132 | 133 | # we specify that we want to do some hyperparameter tuning and bootstrapping 134 | param_grid <- grid_regular(penalty(), levels = 50) 135 | reviews_boot <- bootstraps(reviews_train, times = 10) 136 | 137 | # and we define the model. 
Here we use the embeddings to predict the rating 138 | rec_spec <- recipe(rating ~ ., data = select(reviews_train, -id)) 139 | 140 | # bringing this together in a workflow 141 | wf_fh <- workflow() |> 142 | add_recipe(rec_spec) |> 143 | add_model(lasso_spec) 144 | 145 | # now we do the tuning 146 | set.seed(42) 147 | lasso_grid <- tune_grid( 148 | wf_fh, 149 | resamples = reviews_boot, 150 | grid = param_grid 151 | ) 152 | 153 | # select the best model 154 | wf_fh_final <- wf_fh |> 155 | finalize_workflow(parameters = select_best(lasso_grid, metric = "roc_auc")) 156 | 157 | # and train a new model + predict the classes for the test set 158 | final_res <- last_fit(wf_fh_final, reviews_split) 159 | 160 | # we extract these predictions 161 | final_pred <- final_res |> 162 | collect_predictions() 163 | 164 | # look at the results 165 | conf_mat(final_pred, truth = rating, estimate = .pred_class) 166 | #> Truth 167 | #> Prediction 5 <5 168 | #> 5 647 138 169 | #> <5 67 398 170 | 171 | # and evaluate them with a few standard metrics 172 | my_metrics <- metric_set(accuracy, precision, recall, f_meas) 173 | 174 | my_metrics(final_pred, truth = rating, estimate = .pred_class) 175 | #> # A tibble: 4 × 3 176 | #> .metric .estimator .estimate 177 | #> 178 | #> 1 accuracy binary 0.836 179 | #> 2 precision binary 0.824 180 | #> 3 recall binary 0.906 181 | #> 4 f_meas binary 0.863 182 | 183 | # and the ROC curve 184 | final_pred |> 185 | roc_curve(rating, .pred_5) |> 186 | autoplot() 187 | ``` 188 | 189 | ![plot of chunk smldemo](figures/smldemo-1.png) 190 | 191 | -------------------------------------------------------------------------------- /vignettes/text-embedding.Rmd.orig: -------------------------------------------------------------------------------- 1 | --- 2 | title: "text-embedding" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{text-embedding} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | ```{r setup, include = FALSE} 11 | knitr::opts_chunk$set( 12 | collapse = TRUE, 13 | message = FALSE, 14 | fig.path = "figures/", 15 | comment = "#>" 16 | ) 17 | options(tidyverse.quiet = TRUE) 18 | options(rollama_verbose = FALSE) 19 | options(width = 70) 20 | ``` 21 | 22 | Ollama, and hence `rollama`, can be used for text embedding. 23 | In short, text embedding uses the knowledge of the meaning of words inferred from the context that is saved in a large language model through its training to turn text into meaningful vectors of numbers. 24 | This technique is a powerful preprocessing step for supervised machine learning and often increases the performance of a classification model substantially. 25 | Compared to using `rollama` directly for classification, the advantage is that converting text into embeddings and then using these embeddings for classification is usually faster and more resource efficient -- especially if you re-use embeddings for multiple tasks. 26 | 27 | ```{r} 28 | library(rollama) 29 | library(tidyverse) 30 | ``` 31 | 32 | ```{r} 33 | reviews_df <- read_csv("https://raw.githubusercontent.com/AFAgarap/ecommerce-reviews-analysis/master/Womens%20Clothing%20E-Commerce%20Reviews.csv", 34 | show_col_types = FALSE) 35 | glimpse(reviews_df) 36 | ``` 37 | 38 | Now this is a rather big dataset, and I don't want to stress my GPU too much, so I only select the first 5,000 reviews for embedding. 
39 | I also process the data slightly by combining the title and review text into a single column and turning the rating into a binary variable: 40 | 41 | ```{r processing} 42 | reviews <- reviews_df |> 43 | slice_head(n = 5000) |> 44 | rename(id = ...1) |> 45 | mutate(rating = factor(Rating == 5, c(TRUE, FALSE), c("5", "<5"))) |> 46 | mutate(full_text = paste0(ifelse(is.na(Title), "", Title), `Review Text`)) 47 | ``` 48 | 49 | To turn one or multiple texts into embeddings, you can simply use `embed_text`: 50 | 51 | ```{r embeddingsmal, message=TRUE} 52 | embed_text(text = reviews$full_text[1:3]) 53 | ``` 54 | 55 | To use this on the sample of reviews, I put the embeddings into a new column, before unnesting the resulting data.frame. 56 | The reason behind this is that I want to make sure the embeddings belong to the correct review ID. 57 | I also use a different model this time: [`nomic-embed-text`](https://ollama.com/library/nomic-embed-text). 58 | While models like `llama3.1` are extremely powerful at handling conversations and natural language requests, they are also computationally intensive, and hence relatively slow. 59 | As of version 0.1.26, Ollama support using dedicated embedding models, which can perform the task a lot faster and with fewer resources. 60 | Download the model with `pull_model("nomic-embed-text")` then we can run: 61 | 62 | ```{r embedding, message=TRUE} 63 | reviews_embeddings <- reviews |> 64 | mutate(embeddings = embed_text(text = full_text, model = "nomic-embed-text")) |> 65 | select(id, rating, embeddings) |> 66 | unnest_wider(embeddings) 67 | ``` 68 | 69 | The resulting data.frame contains the ID and rating along the 768 embedding dimensions: 70 | 71 | ```{r} 72 | reviews_embeddings 73 | ``` 74 | 75 | As said above, these embeddings are often used in supervised machine learning. 76 | I use part of [a blog post by Emil Hvitfeldt](https://emilhvitfeldt.com/post/textrecipes-series-pretrained-word-embeddings/) show how this can be done using the data we embedded above in the powerful `tidymodels` collection of packages: 77 | 78 | ```{r smldemo} 79 | library(tidymodels) 80 | # split data into training an test set (for validation) 81 | set.seed(1) 82 | reviews_split <- initial_split(reviews_embeddings) 83 | 84 | reviews_train <- training(reviews_split) 85 | 86 | # set up the model we want to use 87 | lasso_spec <- logistic_reg(penalty = tune(), mixture = 1) |> 88 | set_engine("glmnet") 89 | 90 | # we specify that we want to do some hyperparameter tuning and bootstrapping 91 | param_grid <- grid_regular(penalty(), levels = 50) 92 | reviews_boot <- bootstraps(reviews_train, times = 10) 93 | 94 | # and we define the model. 
Here we use the embeddings to predict the rating 95 | rec_spec <- recipe(rating ~ ., data = select(reviews_train, -id)) 96 | 97 | # bringing this together in a workflow 98 | wf_fh <- workflow() |> 99 | add_recipe(rec_spec) |> 100 | add_model(lasso_spec) 101 | 102 | # now we do the tuning 103 | set.seed(42) 104 | lasso_grid <- tune_grid( 105 | wf_fh, 106 | resamples = reviews_boot, 107 | grid = param_grid 108 | ) 109 | 110 | # select the best model 111 | wf_fh_final <- wf_fh |> 112 | finalize_workflow(parameters = select_best(lasso_grid, metric = "roc_auc")) 113 | 114 | # and train a new model + predict the classes for the test set 115 | final_res <- last_fit(wf_fh_final, reviews_split) 116 | 117 | # we extract these predictions 118 | final_pred <- final_res |> 119 | collect_predictions() 120 | 121 | # look at the results 122 | conf_mat(final_pred, truth = rating, estimate = .pred_class) 123 | 124 | # and evaluate them with a few standard metrics 125 | my_metrics <- metric_set(accuracy, precision, recall, f_meas) 126 | 127 | my_metrics(final_pred, truth = rating, estimate = .pred_class) 128 | 129 | # and the ROC curve 130 | final_pred |> 131 | roc_curve(rating, .pred_5) |> 132 | autoplot() 133 | ``` 134 | 135 | --------------------------------------------------------------------------------