├── .gitignore ├── LICENSE ├── README.md ├── app ├── build.gradle ├── gradle │ └── wrapper │ │ └── gradle-wrapper.properties └── src │ ├── main │ ├── java │ │ └── gemini │ │ │ └── workshop │ │ │ ├── App.java │ │ │ ├── Conversation.java │ │ │ ├── ExtractData.java │ │ │ ├── FunctionCalling.java │ │ │ ├── FunctionCallingAssistant.java │ │ │ ├── GemmaWithOllamaContainer.java │ │ │ ├── MultiFunctionCallingAssistant.java │ │ │ ├── Multimodal.java │ │ │ ├── QA.java │ │ │ ├── RAG.java │ │ │ ├── StreamQA.java │ │ │ ├── TemplatePrompt.java │ │ │ └── TextClassification.java │ └── resources │ │ └── commons-logging.properties │ └── test │ └── java │ └── gemini │ └── workshop │ └── AppTest.java ├── attention-is-all-you-need.pdf ├── gradle.properties ├── gradle └── wrapper │ └── gradle-wrapper.properties ├── gradlew ├── gradlew.bat ├── python ├── README.md ├── conversation.py ├── extract_data.py ├── function_calling.py ├── function_calling_assistant.py ├── gemma_with_ollama_container.py ├── multi_function_calling_assistant.py ├── multimodal.py ├── qa.py ├── rag.py ├── requirements.txt ├── stream_qa.py ├── template_prompt.py └── text_classification.py └── settings.gradle /.gitignore: -------------------------------------------------------------------------------- 1 | .venv/ 2 | .gradle 3 | **/build/ 4 | !src/**/build/ 5 | app/bin 6 | 7 | # Ignore Gradle GUI config 8 | gradle-app.setting 9 | 10 | # Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored) 11 | !gradle-wrapper.jar 12 | 13 | # Avoid ignore Gradle wrappper properties 14 | !gradle-wrapper.properties 15 | 16 | # Cache of project 17 | .gradletasknamecache 18 | 19 | # Eclipse Gradle plugin generated files 20 | # Eclipse Core 21 | .project 22 | # JDT-specific (Eclipse Java Development Tools) 23 | .classpath 24 | 25 | # IntelliJ IDEA project directory 26 | .idea 27 | 28 | # Compiled class file 29 | *.class 30 | 31 | # Log file 32 | *.log 33 | 34 | # Package Files # 35 | *.jar 36 | *.war 37 | *.nar 38 | *.ear 
39 | *.zip 40 | *.tar.gz 41 | *.rar 42 | 43 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 44 | hs_err_pid* 45 | replay_pid* 46 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Gemini Vertex AI and LangChain4j (Java) 2 | 3 | > [!NOTE] 4 | > This is the code for [Gemini in Java with Vertex AI and LangChain4j](https://codelabs.developers.google.com/codelabs/gemini-java-developers) 5 | > codelab geared towards Java developers to discover [Gemini](https://deepmind.google/technologies/gemini/) 6 | > and its open-source variant [Gemma](https://ai.google.dev/gemma) Large Language Model by Google using [LangChain4j](https://docs.langchain4j.dev/) 7 | > framework. 8 | > 9 | > There's also Python versions of these samples in [python](python) folder. 10 | 11 | ## Prerequisites 12 | 13 | The code examples have been tested on the following environment: 14 | 15 | * Java 21 16 | * Gradle 8.6 17 | 18 | In order to run these examples, you need to have a Google Cloud account and 19 | project ready. 
20 | 21 | You also need to make sure the Vertex AI is enabled: 22 | 23 | ```bash 24 | gcloud services enable aiplatform.googleapis.com 25 | ``` 26 | 27 | Before running the examples, you'll need to set up two environment variables: 28 | 29 | ```bash 30 | export PROJECT_ID=YOUR_PROJECT_ID 31 | export LOCATION=us-central1 32 | ``` 33 | 34 | > [!WARNING] 35 | > Be sure to update the project ID and location to match your project. 36 | 37 | Create the Gradle wrapper: 38 | 39 | ```bash 40 | gradle wrapper 41 | ``` 42 | 43 | ## Samples 44 | 45 | These are the list of samples for different use cases: 46 | 47 | * [Simple Question & Answer](app/src/main/java/gemini/workshop/QA.java) 48 | 49 | ```bash 50 | ./gradlew run -q -DjavaMainClass=gemini.workshop.QA 51 | ``` 52 | 53 | * [Simple Question & Answer via streaming](app/src/main/java/gemini/workshop/StreamQA.java) 54 | 55 | ```bash 56 | ./gradlew run -q -DjavaMainClass=gemini.workshop.StreamQA 57 | ``` 58 | 59 | * [Hold a conversation with a chatbot](app/src/main/java/gemini/workshop/Conversation.java) 60 | 61 | ```bash 62 | ./gradlew run -q -DjavaMainClass=gemini.workshop.Conversation 63 | ``` 64 | 65 | * [Describing an image with multimodality](app/src/main/java/gemini/workshop/Multimodal.java) (text+image) 66 | 67 | ```bash 68 | ./gradlew run -q -DjavaMainClass=gemini.workshop.Multimodal 69 | ``` 70 | 71 | * [Extracting structured data from unstructured text](app/src/main/java/gemini/workshop/ExtractData.java) 72 | 73 | ```bash 74 | ./gradlew run -q -DjavaMainClass=gemini.workshop.ExtractData 75 | ``` 76 | 77 | * [Manipulating prompt templates](app/src/main/java/gemini/workshop/TemplatePrompt.java) 78 | 79 | ```bash 80 | ./gradlew run -q -DjavaMainClass=gemini.workshop.TemplatePrompt 81 | ``` 82 | 83 | * [Text classification & sentiment analysis](app/src/main/java/gemini/workshop/TextClassification.java) 84 | 85 | ```bash 86 | ./gradlew run -q -DjavaMainClass=gemini.workshop.TextClassification 87 | ``` 88 | 89 | * 
[Retrieval Augmented Generation](app/src/main/java/gemini/workshop/RAG.java) 90 | 91 | ```bash 92 | ./gradlew run -q -DjavaMainClass=gemini.workshop.RAG 93 | ``` 94 | 95 | * [Function calling](app/src/main/java/gemini/workshop/FunctionCalling.java) 96 | 97 | ```bash 98 | ./gradlew run -q -DjavaMainClass=gemini.workshop.FunctionCalling 99 | ``` 100 | 101 | * [Function calling assistant](app/src/main/java/gemini/workshop/FunctionCallingAssistant.java) 102 | 103 | ```bash 104 | ./gradlew run -q -DjavaMainClass=gemini.workshop.FunctionCallingAssistant 105 | ``` 106 | 107 | * [Multi function calling assistant](app/src/main/java/gemini/workshop/MultiFunctionCallingAssistant.java) 108 | 109 | ```bash 110 | ./gradlew run -q -DjavaMainClass=gemini.workshop.MultiFunctionCallingAssistant 111 | ``` 112 | 113 | * [Running Gemma with Ollama TestContainer](app/src/main/java/gemini/workshop/GemmaWithOllamaContainer.java) 114 | 115 | ```bash 116 | ./gradlew run -q -DjavaMainClass=gemini.workshop.GemmaWithOllamaContainer 117 | ``` 118 | 119 | --- 120 | This is not an official Google product. 121 | -------------------------------------------------------------------------------- /app/build.gradle: -------------------------------------------------------------------------------- 1 | plugins { 2 | // Apply the application plugin to add support for building a CLI application in Java. 3 | id 'application' 4 | } 5 | 6 | repositories { 7 | // Use Maven Central for resolving dependencies. 8 | mavenCentral() 9 | } 10 | 11 | dependencies { 12 | // Logging library 13 | implementation 'org.slf4j:slf4j-jdk14:2.0.16' 14 | 15 | // This dependency is used by the application. 
16 | implementation 'dev.langchain4j:langchain4j:1.0.0-alpha1' 17 | implementation 'dev.langchain4j:langchain4j-vertex-ai:1.0.0-alpha1' 18 | implementation 'dev.langchain4j:langchain4j-vertex-ai-gemini:1.0.0-alpha1' 19 | implementation 'dev.langchain4j:langchain4j-document-parser-apache-pdfbox:1.0.0-alpha1' 20 | 21 | // Gemma via Ollama and TestContainers 22 | implementation 'dev.langchain4j:langchain4j-ollama:1.0.0-alpha1' 23 | implementation 'org.testcontainers:ollama:1.20.4' 24 | } 25 | 26 | testing { 27 | suites { 28 | // Configure the built-in test suite 29 | test { 30 | // Use JUnit Jupiter test framework 31 | useJUnitJupiter('5.11.4') 32 | } 33 | } 34 | } 35 | 36 | // Apply a specific Java toolchain to ease working on different environments. 37 | java { 38 | toolchain { 39 | languageVersion = JavaLanguageVersion.of(21) 40 | } 41 | } 42 | 43 | tasks.withType(JavaCompile).configureEach { 44 | options.compilerArgs << '-parameters' 45 | } 46 | 47 | application { 48 | mainClass = providers.systemProperty('javaMainClass') 49 | .orElse('gemini.workshop.App') 50 | } -------------------------------------------------------------------------------- /app/gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | #Wed Mar 20 10:15:45 CET 2024 2 | distributionBase=GRADLE_USER_HOME 3 | distributionPath=wrapper/dists 4 | distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-bin.zip 5 | zipStoreBase=GRADLE_USER_HOME 6 | zipStorePath=wrapper/dists 7 | -------------------------------------------------------------------------------- /app/src/main/java/gemini/workshop/App.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package gemini.workshop; 17 | 18 | public class App { 19 | public String getGreeting() { 20 | return "Hello World!"; 21 | } 22 | 23 | public static void main(String[] args) { 24 | System.out.println(new App().getGreeting()); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /app/src/main/java/gemini/workshop/Conversation.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
package gemini.workshop;

import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.service.AiServices;

import java.util.List;

/**
 * Multi-turn conversation sample: an AiServices-backed chat interface keeps
 * the last 20 messages in a window memory, so follow-up questions such as
 * "that country" resolve against earlier turns.
 */
public class Conversation {
    public static void main(String[] args) {
        // Gemini chat model configured from the PROJECT_ID / LOCATION env vars.
        ChatLanguageModel geminiModel = VertexAiGeminiChatModel.builder()
            .project(System.getenv("PROJECT_ID"))
            .location(System.getenv("LOCATION"))
            .modelName("gemini-2.0-flash")
            .build();

        // Sliding-window memory: only the 20 most recent messages are re-sent.
        MessageWindowChatMemory windowMemory = MessageWindowChatMemory.builder()
            .maxMessages(20)
            .build();

        // Local interface (legal since Java 16) describing the chat contract.
        interface ConversationService {
            String chat(String message);
        }

        ConversationService chatService =
            AiServices.builder(ConversationService.class)
                .chatLanguageModel(geminiModel)
                .chatMemory(windowMemory)
                .build();

        List<String> turns = List.of(
            "Hello!",
            "What is the country where the Eiffel tower is situated?",
            "How many inhabitants are there in that country?"
        );
        for (String message : turns) {
            System.out.println("\nUser: " + message);
            System.out.println("Gemini: " + chatService.chat(message));
        }
    }
}
package gemini.workshop;

import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.SystemMessage;

import static dev.langchain4j.model.vertexai.SchemaHelper.fromClass;

/**
 * Structured-output sample: asks Gemini to extract typed data (a {@link Person}
 * record) from free-form biography text, by forcing a JSON response MIME type
 * and supplying a response schema derived from the record class itself.
 */
public class ExtractData {

    // Target shape of the extraction; also the source of the JSON response schema.
    record Person(String name, int age) { }

    // AiServices maps the model's JSON reply back onto the Person record.
    interface PersonExtractor {
        @SystemMessage("""
            Your role is to extract the name and age
            of the person described in the biography.
            """)
        Person extractPerson(String biography);
    }

    public static void main(String[] args) {
        // JSON mode + schema constrain the model to reply with a parsable Person.
        ChatLanguageModel model = VertexAiGeminiChatModel.builder()
            .project(System.getenv("PROJECT_ID"))
            .location(System.getenv("LOCATION"))
            .modelName("gemini-2.0-flash")
            .responseMimeType("application/json")
            .responseSchema(fromClass(Person.class))
            .build();

        PersonExtractor extractor = AiServices.create(PersonExtractor.class, model);

        Person person = extractor.extractPerson("""
            Anna is a 23 year old artist based in Brooklyn, New York. She was born and
            raised in the suburbs of Chicago, where she developed a love for art at a
            young age. She attended the School of the Art Institute of Chicago, where
            she studied painting and drawing. After graduating, she moved to New York
            City to pursue her art career. Anna's work is inspired by her personal
            experiences and observations of the world around her. She often uses bright
            colors and bold lines to create vibrant and energetic paintings. Her work
            has been exhibited in galleries and museums in New York City and Chicago.
            """);

        System.out.println(person.name()); // Anna
        System.out.println(person.age()); // 23
    }
}
package gemini.workshop;

import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.chat.request.json.JsonObjectSchema;
import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.agent.tool.ToolSpecification;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.data.message.ToolExecutionResultMessage;
import dev.langchain4j.model.output.Response;

import java.util.List;
import java.util.ArrayList;

/**
 * Low-level function-calling sample driving the request/response loop by hand:
 * declare a tool, let the model ask for its execution, feed the (hard-coded)
 * tool result back, and read the model's final natural-language answer.
 *
 * Fix: the message list and model responses were raw types ({@code List},
 * {@code Response}); restored the {@code ChatMessage} / {@code AiMessage}
 * type arguments so {@code content().toolExecutionRequests()} and
 * {@code content().text()} compile.
 */
public class FunctionCalling {
    public static void main(String[] args) {
        ChatLanguageModel model = VertexAiGeminiChatModel.builder()
            .project(System.getenv("PROJECT_ID"))
            .location(System.getenv("LOCATION"))
            .modelName("gemini-2.0-flash")
            .maxOutputTokens(100)
            .build();

        // Declare the getWeather tool so the model can request its execution.
        ToolSpecification weatherToolSpec = ToolSpecification.builder()
            .name("getWeather")
            .description("Get the weather forecast for a given location or city")
            .parameters(JsonObjectSchema.builder()
                .addStringProperty(
                    "location",
                    "the location or city to get the weather forecast for")
                .build())
            .build();

        // Full conversation history, re-sent to the model on every turn.
        List<ChatMessage> allMessages = new ArrayList<>();

        // 1) Ask about the weather
        UserMessage weatherQuestion = UserMessage.from("What is the weather in Paris?");
        allMessages.add(weatherQuestion);

        // 2) The model replies with a function call request
        Response<AiMessage> messageResponse = model.generate(allMessages, weatherToolSpec);
        ToolExecutionRequest toolExecutionRequest =
            messageResponse.content().toolExecutionRequests().get(0);
        System.out.println("Tool execution request: " + toolExecutionRequest);
        allMessages.add(messageResponse.content());

        // Here, we would call a real weather forecast service

        // 3) We send back the result of the function call
        ToolExecutionResultMessage toolExecResMsg = ToolExecutionResultMessage.from(
            toolExecutionRequest,
            "{\"location\":\"Paris\",\"forecast\":\"sunny\", \"temperature\": 20}");
        allMessages.add(toolExecResMsg);

        // 4) The model answers with a sentence describing the weather
        Response<AiMessage> weatherResponse = model.generate(allMessages);
        System.out.println("Answer: " + weatherResponse.content().text());
    }
}
15 | */ 16 | package gemini.workshop; 17 | 18 | import dev.langchain4j.agent.tool.P; 19 | import dev.langchain4j.agent.tool.Tool; 20 | import dev.langchain4j.memory.chat.MessageWindowChatMemory; 21 | import dev.langchain4j.model.chat.ChatLanguageModel; 22 | import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel; 23 | import dev.langchain4j.service.AiServices; 24 | 25 | public class FunctionCallingAssistant { 26 | 27 | record WeatherForecast(String location, String forecast, int temperature) {} 28 | 29 | static class WeatherForecastService { 30 | @Tool("Get the weather forecast for a location") 31 | WeatherForecast getForecast(@P("Location to get the forecast for") String location) { 32 | if (location.equals("Paris")) { 33 | return new WeatherForecast("Paris", "sunny", 20); 34 | } else if (location.equals("London")) { 35 | return new WeatherForecast("London", "rainy", 15); 36 | } else { 37 | return new WeatherForecast("Unknown", "unknown", 0); 38 | } 39 | } 40 | } 41 | 42 | interface WeatherAssistant { 43 | String chat(String userMessage); 44 | } 45 | 46 | public static void main(String[] args) { 47 | ChatLanguageModel model = VertexAiGeminiChatModel.builder() 48 | .project(System.getenv("PROJECT_ID")) 49 | .location(System.getenv("LOCATION")) 50 | .modelName("gemini-2.0-flash") 51 | .build(); 52 | 53 | WeatherForecastService weatherForecastService = new WeatherForecastService(); 54 | 55 | WeatherAssistant assistant = AiServices.builder(WeatherAssistant.class) 56 | .chatLanguageModel(model) 57 | .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) 58 | .tools(weatherForecastService) 59 | .build(); 60 | 61 | System.out.println(assistant.chat("What is the weather in Paris?")); 62 | System.out.println(assistant.chat("Is it warmer in London or in Paris?")); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /app/src/main/java/gemini/workshop/GemmaWithOllamaContainer.java: 
/*
 * Copyright 2024 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package gemini.workshop;

import com.github.dockerjava.api.model.Image;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.ollama.OllamaChatModel;
import org.testcontainers.DockerClientFactory;
import org.testcontainers.ollama.OllamaContainer;
import org.testcontainers.utility.DockerImageName;

import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
import java.util.List;

/**
 * Runs the Gemma 3 model locally through an Ollama Testcontainer.
 * On first use, a base Ollama container pulls the model and is committed to a
 * local image so subsequent runs start instantly from that cached image.
 *
 * Make sure you have Docker installed and running locally before running this sample.
 */
public class GemmaWithOllamaContainer {

    /** Name of the locally committed Docker image that already contains Gemma 3. */
    private static final String TC_OLLAMA_GEMMA3 = "tc-ollama-gemma3-1b";
    /** Ollama model tag to pull and query. */
    public static final String GEMMA_3 = "gemma3:1b";

    /**
     * Returns an Ollama container that has the Gemma 3 model available.
     * Creates and caches the custom image on first use; reuses it afterwards.
     *
     * @throws IOException          if talking to the container fails
     * @throws InterruptedException if the in-container `ollama pull` is interrupted
     */
    private static OllamaContainer createGemmaOllamaContainer() throws IOException, InterruptedException {

        // Check if the custom Gemma Ollama image exists already.
        // (Typed as List<Image>: listImagesCmd().exec() returns Docker image descriptors.)
        List<Image> existingImages = DockerClientFactory.lazyClient()
            .listImagesCmd()
            .withImageNameFilter(TC_OLLAMA_GEMMA3)
            .exec();

        if (existingImages.isEmpty()) {
            System.out.println("Creating a new Ollama container with Gemma 3 image...");
            OllamaContainer ollama = new OllamaContainer("ollama/ollama:0.7.1");
            System.out.println("Starting Ollama...");
            ollama.start();
            System.out.println("Pulling model...");
            ollama.execInContainer("ollama", "pull", GEMMA_3);
            System.out.println("Committing to image...");
            // Snapshot the container (model included) as a reusable local image.
            ollama.commitToImage(TC_OLLAMA_GEMMA3);
            // Already started; the later start() call in main() is a no-op for it.
            return ollama;
        }

        System.out.println("Ollama image substitution...");
        // Substitute the default Ollama image with our Gemma variant
        return new OllamaContainer(
            DockerImageName.parse(TC_OLLAMA_GEMMA3)
                .asCompatibleSubstituteFor("ollama/ollama"));
    }

    public static void main(String[] args) throws IOException, InterruptedException {
        Instant start = Instant.now();

        OllamaContainer ollama = createGemmaOllamaContainer();
        System.out.printf("Container created in %ds %n", Duration.between(start, Instant.now()).getSeconds());
        start = Instant.now();

        ollama.start();
        System.out.printf("Ollama container started in %ds %n", Duration.between(start, Instant.now()).getSeconds());
        start = Instant.now();

        // Point the LangChain4j Ollama client at the container's mapped port.
        ChatLanguageModel model = OllamaChatModel.builder()
            .baseUrl(String.format("http://%s:%d", ollama.getHost(), ollama.getFirstMappedPort()))
            .modelName(GEMMA_3)
            .timeout(Duration.ofMinutes(2))
            .build();

        System.out.printf("Model ready in %ds %n", Duration.between(start, Instant.now()).getSeconds());
        start = Instant.now();

        System.out.println(model.generate("Why is the sky blue?"));
        System.out.printf("First response: %ds %n", Duration.between(start, Instant.now()).getSeconds());
        start = Instant.now();

        System.out.println(model.generate("Who was the first cat who stepped on the moon?"));
        System.out.printf("Second response: %ds %n", Duration.between(start, Instant.now()).getSeconds());
        start = Instant.now();

        System.out.println(model.generate("What are the differences between the Gemini model and the Gemma models?"));
        System.out.printf("Third response: %ds %n", Duration.between(start, Instant.now()).getSeconds());
    }
}
15 | */ 16 | package gemini.workshop; 17 | 18 | import dev.langchain4j.agent.tool.P; 19 | import dev.langchain4j.agent.tool.Tool; 20 | import dev.langchain4j.model.chat.ChatLanguageModel; 21 | import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel; 22 | import dev.langchain4j.service.AiServices; 23 | 24 | import java.util.Random; 25 | 26 | import static dev.langchain4j.memory.chat.MessageWindowChatMemory.*; 27 | 28 | public class MultiFunctionCallingAssistant { 29 | static class MultiTools { 30 | @Tool("Convert amounts between two currencies") 31 | double convertCurrency( 32 | @P("Currency to convert from") String fromCurrency, 33 | @P("Currency to convert to") String toCurrency, 34 | @P("Amount to convert") double amount) { 35 | 36 | double result = amount; 37 | 38 | if (fromCurrency.equals("USD") && toCurrency.equals("EUR")) { 39 | result = amount * 0.93; 40 | } else if (fromCurrency.equals("USD") && toCurrency.equals("GBP")) { 41 | result = amount * 0.79; 42 | } 43 | 44 | System.out.println( 45 | "convertCurrency(fromCurrency = " + fromCurrency + 46 | ", toCurrency = " + toCurrency + 47 | ", amount = " + amount + ") == " + result); 48 | 49 | return result; 50 | } 51 | 52 | @Tool("Get the current value of a stock in US dollars") 53 | double getStockPrice(@P("Stock symbol") String symbol) { 54 | double result = 170.0 + 10 * new Random().nextDouble(); 55 | 56 | System.out.println("getStockPrice(symbol = " + symbol + ") == " + result); 57 | 58 | return result; 59 | } 60 | 61 | @Tool("Apply a percentage to a given amount") 62 | double applyPercentage(@P("Initial amount") double amount, @P("Percentage between 0-100 to apply") double percentage) { 63 | double result = amount * (percentage / 100); 64 | 65 | System.out.println("applyPercentage(amount = " + amount + ", percentage = " + percentage + ") == " + result); 66 | 67 | return result; 68 | } 69 | } 70 | 71 | interface MultiToolsAssistant { 72 | String chat(String userMessage); 73 | } 74 | 75 | public static 
void main(String[] args) { 76 | ChatLanguageModel model = VertexAiGeminiChatModel.builder() 77 | .project(System.getenv("PROJECT_ID")) 78 | .location(System.getenv("LOCATION")) 79 | .modelName("gemini-2.0-flash") 80 | .maxOutputTokens(100) 81 | .build(); 82 | 83 | MultiTools multiTools = new MultiTools(); 84 | 85 | MultiToolsAssistant assistant = AiServices.builder(MultiToolsAssistant.class) 86 | .chatLanguageModel(model) 87 | .chatMemory(withMaxMessages(10)) 88 | .tools(multiTools) 89 | .build(); 90 | 91 | System.out.println(assistant.chat( 92 | "What is 10% of the AAPL stock price converted from USD to EUR?")); 93 | } 94 | } -------------------------------------------------------------------------------- /app/src/main/java/gemini/workshop/Multimodal.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package gemini.workshop; 17 | 18 | import dev.langchain4j.model.chat.ChatLanguageModel; 19 | import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel; 20 | import dev.langchain4j.data.message.AiMessage; 21 | import dev.langchain4j.data.message.ImageContent; 22 | import dev.langchain4j.data.message.TextContent; 23 | import dev.langchain4j.data.message.UserMessage; 24 | import dev.langchain4j.model.output.Response; 25 | 26 | public class Multimodal { 27 | 28 | static final String CAT_IMAGE_URL = 29 | "https://upload.wikimedia.org/wikipedia/commons/b/b6/Felis_catus-cat_on_snow.jpg"; 30 | 31 | public static void main(String[] args) { 32 | ChatLanguageModel model = VertexAiGeminiChatModel.builder() 33 | .project(System.getenv("PROJECT_ID")) 34 | .location(System.getenv("LOCATION")) 35 | .modelName("gemini-2.0-flash") 36 | .build(); 37 | 38 | UserMessage userMessage = UserMessage.from( 39 | ImageContent.from(CAT_IMAGE_URL), 40 | TextContent.from("Describe the picture") 41 | ); 42 | 43 | Response response = model.generate(userMessage); 44 | 45 | System.out.println(response.content().text()); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /app/src/main/java/gemini/workshop/QA.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package gemini.workshop; 17 | 18 | import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel; 19 | import dev.langchain4j.model.chat.ChatLanguageModel; 20 | 21 | public class QA { 22 | public static void main(String[] args) { 23 | ChatLanguageModel model = VertexAiGeminiChatModel.builder() 24 | .project(System.getenv("PROJECT_ID")) 25 | .location(System.getenv("LOCATION")) 26 | .modelName("gemini-2.0-flash") 27 | .build(); 28 | 29 | System.out.println( 30 | model.generate("Why is the sky blue?") 31 | ); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /app/src/main/java/gemini/workshop/RAG.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package gemini.workshop; 17 | 18 | import dev.langchain4j.memory.chat.MessageWindowChatMemory; 19 | import dev.langchain4j.model.chat.ChatLanguageModel; 20 | import dev.langchain4j.data.document.Document; 21 | import dev.langchain4j.data.document.parser.apache.pdfbox.ApachePdfBoxDocumentParser; 22 | import dev.langchain4j.data.document.splitter.DocumentSplitters; 23 | import dev.langchain4j.data.segment.TextSegment; 24 | import dev.langchain4j.model.vertexai.VertexAiEmbeddingModel; 25 | import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever; 26 | import dev.langchain4j.service.AiServices; 27 | import dev.langchain4j.service.Result; 28 | import dev.langchain4j.store.embedding.EmbeddingStoreIngestor; 29 | import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore; 30 | import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel; 31 | 32 | import java.io.IOException; 33 | import java.net.URI; 34 | import java.net.URISyntaxException; 35 | import java.net.URL; 36 | import java.util.List; 37 | 38 | public class RAG { 39 | 40 | public static void main(String[] args) throws IOException, URISyntaxException { 41 | 42 | // =============== 43 | // INGESTION PHASE 44 | 45 | URL url = new URI("https://raw.githubusercontent.com/meteatamel/genai-beyond-basics/main/samples/grounding/vertexai-search/cymbal-starlight-2024.pdf").toURL(); 46 | ApachePdfBoxDocumentParser pdfParser = new ApachePdfBoxDocumentParser(); 47 | Document document = pdfParser.parse(url.openStream()); 48 | 49 | VertexAiEmbeddingModel embeddingModel = VertexAiEmbeddingModel.builder() 50 | .endpoint(System.getenv("LOCATION") + "-aiplatform.googleapis.com:443") 51 | .project(System.getenv("PROJECT_ID")) 52 | .location(System.getenv("LOCATION")) 53 | .publisher("google") 54 | .modelName("text-embedding-005") 55 | .maxRetries(3) 56 | .build(); 57 | 58 | InMemoryEmbeddingStore embeddingStore = 59 | new InMemoryEmbeddingStore<>(); 60 | 61 | EmbeddingStoreIngestor 
storeIngestor = EmbeddingStoreIngestor.builder() 62 | .documentSplitter(DocumentSplitters.recursive(500, 100)) 63 | .embeddingModel(embeddingModel) 64 | .embeddingStore(embeddingStore) 65 | .build(); 66 | System.out.println("Chunking and embedding PDF..."); 67 | storeIngestor.ingest(document); 68 | 69 | // =============== 70 | // RETRIEVAL PHASE 71 | 72 | ChatLanguageModel model = VertexAiGeminiChatModel.builder() 73 | .project(System.getenv("PROJECT_ID")) 74 | .location(System.getenv("LOCATION")) 75 | .modelName("gemini-2.0-flash") 76 | .maxOutputTokens(1000) 77 | .build(); 78 | 79 | EmbeddingStoreContentRetriever retriever = 80 | new EmbeddingStoreContentRetriever(embeddingStore, embeddingModel); 81 | 82 | interface CarExpert { 83 | Result ask(String question); 84 | } 85 | 86 | CarExpert expert = AiServices.builder(CarExpert.class) 87 | .chatLanguageModel(model) 88 | .chatMemory(MessageWindowChatMemory.withMaxMessages(10)) 89 | .contentRetriever(retriever) 90 | /* 91 | .retrievalAugmentor(DefaultRetrievalAugmentor.builder() 92 | .contentInjector(DefaultContentInjector.builder() 93 | .promptTemplate(PromptTemplate.from(""" 94 | You are an expert in car automotive, and you answer concisely. 95 | 96 | Here is the question: {{userMessage}} 97 | 98 | Answer using the following information: 99 | {{contents}} 100 | """)) 101 | .build()) 102 | .contentRetriever(retriever) 103 | .build()) 104 | */ 105 | .build(); 106 | 107 | System.out.println("Ready!\n"); 108 | List.of( 109 | "What is the cargo capacity of Cymbal Starlight?", 110 | "What's the emergency roadside assistance phone number?", 111 | "Are there some special kits available on that car?" 
112 | ).forEach(query -> { 113 | Result response = expert.ask(query); 114 | System.out.printf("%n=== %s === %n%n %s %n%n", query, response.content()); 115 | System.out.println("SOURCE: " + response.sources().getFirst().textSegment().text()); 116 | }); 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /app/src/main/java/gemini/workshop/StreamQA.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package gemini.workshop; 17 | 18 | import dev.langchain4j.model.chat.StreamingChatLanguageModel; 19 | import dev.langchain4j.model.vertexai.VertexAiGeminiStreamingChatModel; 20 | 21 | import static dev.langchain4j.model.LambdaStreamingResponseHandler.onNext; 22 | 23 | public class StreamQA { 24 | public static void main(String[] args) { 25 | StreamingChatLanguageModel model = VertexAiGeminiStreamingChatModel.builder() 26 | .project(System.getenv("PROJECT_ID")) 27 | .location(System.getenv("LOCATION")) 28 | .modelName("gemini-2.0-flash") 29 | .maxOutputTokens(4000) 30 | .build(); 31 | 32 | model.generate("Why is the sky blue?", onNext(System.out::println)); 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /app/src/main/java/gemini/workshop/TemplatePrompt.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package gemini.workshop; 17 | 18 | import dev.langchain4j.model.chat.ChatLanguageModel; 19 | import dev.langchain4j.model.input.PromptTemplate; 20 | import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel; 21 | import dev.langchain4j.data.message.AiMessage; 22 | import dev.langchain4j.model.input.Prompt; 23 | import dev.langchain4j.model.output.Response; 24 | 25 | import java.util.HashMap; 26 | import java.util.Map; 27 | 28 | public class TemplatePrompt { 29 | public static void main(String[] args) { 30 | ChatLanguageModel model = VertexAiGeminiChatModel.builder() 31 | .project(System.getenv("PROJECT_ID")) 32 | .location(System.getenv("LOCATION")) 33 | .modelName("gemini-2.0-flash") 34 | .maxOutputTokens(4000) 35 | .temperature(1.0f) 36 | .topK(40) 37 | .topP(0.95f) 38 | .maxRetries(3) 39 | .build(); 40 | 41 | PromptTemplate promptTemplate = PromptTemplate.from(""" 42 | You're a friendly chef with a lot of cooking experience. 43 | Create a recipe for a {{dish}} with the following ingredients: \ 44 | {{ingredients}}, and give it a name. 45 | """ 46 | ); 47 | 48 | Map variables = new HashMap<>(); 49 | variables.put("dish", "dessert"); 50 | variables.put("ingredients", "strawberries, chocolate, and whipped cream"); 51 | 52 | Prompt prompt = promptTemplate.apply(variables); 53 | 54 | Response response = model.generate(prompt.toUserMessage()); 55 | 56 | System.out.println(response.content().text()); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /app/src/main/java/gemini/workshop/TextClassification.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package gemini.workshop; 17 | 18 | import com.google.cloud.vertexai.api.Schema; 19 | import com.google.cloud.vertexai.api.Type; 20 | import dev.langchain4j.data.message.UserMessage; 21 | import dev.langchain4j.memory.chat.MessageWindowChatMemory; 22 | import dev.langchain4j.model.chat.ChatLanguageModel; 23 | import dev.langchain4j.model.vertexai.VertexAiGeminiChatModel; 24 | import dev.langchain4j.data.message.AiMessage; 25 | import dev.langchain4j.service.AiServices; 26 | import dev.langchain4j.service.SystemMessage; 27 | 28 | import java.util.List; 29 | 30 | public class TextClassification { 31 | 32 | enum Sentiment { POSITIVE, NEUTRAL, NEGATIVE } 33 | 34 | public static void main(String[] args) { 35 | ChatLanguageModel model = VertexAiGeminiChatModel.builder() 36 | .project(System.getenv("PROJECT_ID")) 37 | .location(System.getenv("LOCATION")) 38 | .modelName("gemini-2.0-flash") 39 | .maxOutputTokens(10) 40 | .maxRetries(3) 41 | .responseSchema(Schema.newBuilder() 42 | .setType(Type.STRING) 43 | .addAllEnum(List.of("POSITIVE", "NEUTRAL", "NEGATIVE")) 44 | .build()) 45 | .build(); 46 | 47 | 48 | interface SentimentAnalysis { 49 | @SystemMessage(""" 50 | Analyze the sentiment of the text below. 51 | Respond only with one word to describe the sentiment. 
52 | """) 53 | Sentiment analyze(String text); 54 | } 55 | 56 | MessageWindowChatMemory memory = MessageWindowChatMemory.withMaxMessages(10); 57 | memory.add(UserMessage.from("This is fantastic news!")); 58 | memory.add(AiMessage.from(Sentiment.POSITIVE.name())); 59 | 60 | memory.add(UserMessage.from("Pi is roughly equal to 3.14")); 61 | memory.add(AiMessage.from(Sentiment.NEUTRAL.name())); 62 | 63 | memory.add(UserMessage.from("I really disliked the pizza. Who would use pineapples as a pizza topping?")); 64 | memory.add(AiMessage.from(Sentiment.NEGATIVE.name())); 65 | 66 | SentimentAnalysis sentimentAnalysis = 67 | AiServices.builder(SentimentAnalysis.class) 68 | .chatLanguageModel(model) 69 | .chatMemory(memory) 70 | .build(); 71 | 72 | System.out.println(sentimentAnalysis.analyze("I love strawberries!")); 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /app/src/main/resources/commons-logging.properties: -------------------------------------------------------------------------------- 1 | org.apache.commons.logging.Log=org.apache.commons.logging.impl.NoOpLog -------------------------------------------------------------------------------- /app/src/test/java/gemini/workshop/AppTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2024 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * https://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
15 | */ 16 | package gemini.workshop; 17 | 18 | import org.junit.jupiter.api.Test; 19 | import static org.junit.jupiter.api.Assertions.*; 20 | 21 | class AppTest { 22 | @Test void appHasAGreeting() { 23 | App classUnderTest = new App(); 24 | assertNotNull(classUnderTest.getGreeting(), "app should have a greeting"); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /attention-is-all-you-need.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/glaforge/gemini-workshop-for-java-developers/14c0c5793f7f293dce0817f95f4ab5232f8a132c/attention-is-all-you-need.pdf -------------------------------------------------------------------------------- /gradle.properties: -------------------------------------------------------------------------------- 1 | # This file was generated by the Gradle 'init' task. 2 | # https://docs.gradle.org/current/userguide/build_environment.html#sec:gradle_configuration_properties 3 | 4 | org.gradle.parallel=true 5 | org.gradle.caching=true 6 | 7 | -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionBase=GRADLE_USER_HOME 2 | distributionPath=wrapper/dists 3 | distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-bin.zip 4 | networkTimeout=10000 5 | validateDistributionUrl=true 6 | zipStoreBase=GRADLE_USER_HOME 7 | zipStorePath=wrapper/dists 8 | -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # 4 | # Copyright © 2015-2021 the original authors. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # https://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ############################################################################## 20 | # 21 | # Gradle start up script for POSIX generated by Gradle. 22 | # 23 | # Important for running: 24 | # 25 | # (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is 26 | # noncompliant, but you have some other compliant shell such as ksh or 27 | # bash, then to run this script, type that shell name before the whole 28 | # command line, like: 29 | # 30 | # ksh Gradle 31 | # 32 | # Busybox and similar reduced shells will NOT work, because this script 33 | # requires all of these POSIX shell features: 34 | # * functions; 35 | # * expansions «$var», «${var}», «${var:-default}», «${var+SET}», 36 | # «${var#prefix}», «${var%suffix}», and «$( cmd )»; 37 | # * compound commands having a testable exit status, especially «case»; 38 | # * various built-in commands including «command», «set», and «ulimit». 39 | # 40 | # Important for patching: 41 | # 42 | # (2) This script targets any POSIX shell, so it avoids extensions provided 43 | # by Bash, Ksh, etc; in particular arrays are avoided. 44 | # 45 | # The "traditional" practice of packing multiple parameters into a 46 | # space-separated string is a well documented source of bugs and security 47 | # problems, so this is (mostly) avoided, by progressively accumulating 48 | # options in "$@", and eventually passing that to Java. 
49 | # 50 | # Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, 51 | # and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; 52 | # see the in-line comments for details. 53 | # 54 | # There are tweaks for specific operating systems such as AIX, CygWin, 55 | # Darwin, MinGW, and NonStop. 56 | # 57 | # (3) This script is generated from the Groovy template 58 | # https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt 59 | # within the Gradle project. 60 | # 61 | # You can find Gradle at https://github.com/gradle/gradle/. 62 | # 63 | ############################################################################## 64 | 65 | # Attempt to set APP_HOME 66 | 67 | # Resolve links: $0 may be a link 68 | app_path=$0 69 | 70 | # Need this for daisy-chained symlinks. 71 | while 72 | APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path 73 | [ -h "$app_path" ] 74 | do 75 | ls=$( ls -ld "$app_path" ) 76 | link=${ls#*' -> '} 77 | case $link in #( 78 | /*) app_path=$link ;; #( 79 | *) app_path=$APP_HOME$link ;; 80 | esac 81 | done 82 | 83 | # This is normally unused 84 | # shellcheck disable=SC2034 85 | APP_BASE_NAME=${0##*/} 86 | # Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) 87 | APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit 88 | 89 | # Use the maximum available, or set MAX_FD != -1 to use that value. 90 | MAX_FD=maximum 91 | 92 | warn () { 93 | echo "$*" 94 | } >&2 95 | 96 | die () { 97 | echo 98 | echo "$*" 99 | echo 100 | exit 1 101 | } >&2 102 | 103 | # OS specific support (must be 'true' or 'false'). 
104 | cygwin=false 105 | msys=false 106 | darwin=false 107 | nonstop=false 108 | case "$( uname )" in #( 109 | CYGWIN* ) cygwin=true ;; #( 110 | Darwin* ) darwin=true ;; #( 111 | MSYS* | MINGW* ) msys=true ;; #( 112 | NONSTOP* ) nonstop=true ;; 113 | esac 114 | 115 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 116 | 117 | 118 | # Determine the Java command to use to start the JVM. 119 | if [ -n "$JAVA_HOME" ] ; then 120 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 121 | # IBM's JDK on AIX uses strange locations for the executables 122 | JAVACMD=$JAVA_HOME/jre/sh/java 123 | else 124 | JAVACMD=$JAVA_HOME/bin/java 125 | fi 126 | if [ ! -x "$JAVACMD" ] ; then 127 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 128 | 129 | Please set the JAVA_HOME variable in your environment to match the 130 | location of your Java installation." 131 | fi 132 | else 133 | JAVACMD=java 134 | if ! command -v java >/dev/null 2>&1 135 | then 136 | die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 137 | 138 | Please set the JAVA_HOME variable in your environment to match the 139 | location of your Java installation." 140 | fi 141 | fi 142 | 143 | # Increase the maximum file descriptors if we can. 144 | if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then 145 | case $MAX_FD in #( 146 | max*) 147 | # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. 148 | # shellcheck disable=SC2039,SC3045 149 | MAX_FD=$( ulimit -H -n ) || 150 | warn "Could not query maximum file descriptor limit" 151 | esac 152 | case $MAX_FD in #( 153 | '' | soft) :;; #( 154 | *) 155 | # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. 
156 | # shellcheck disable=SC2039,SC3045 157 | ulimit -n "$MAX_FD" || 158 | warn "Could not set maximum file descriptor limit to $MAX_FD" 159 | esac 160 | fi 161 | 162 | # Collect all arguments for the java command, stacking in reverse order: 163 | # * args from the command line 164 | # * the main class name 165 | # * -classpath 166 | # * -D...appname settings 167 | # * --module-path (only if needed) 168 | # * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 169 | 170 | # For Cygwin or MSYS, switch paths to Windows format before running java 171 | if "$cygwin" || "$msys" ; then 172 | APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) 173 | CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) 174 | 175 | JAVACMD=$( cygpath --unix "$JAVACMD" ) 176 | 177 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 178 | for arg do 179 | if 180 | case $arg in #( 181 | -*) false ;; # don't mess with options #( 182 | /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath 183 | [ -e "$t" ] ;; #( 184 | *) false ;; 185 | esac 186 | then 187 | arg=$( cygpath --path --ignore --mixed "$arg" ) 188 | fi 189 | # Roll the args list around exactly as many times as the number of 190 | # args, so each arg winds up back in the position where it started, but 191 | # possibly modified. 192 | # 193 | # NB: a `for` loop captures its iteration list before it begins, so 194 | # changing the positional parameters here affects neither the number of 195 | # iterations, nor the values presented in `arg`. 196 | shift # remove old arg 197 | set -- "$@" "$arg" # push replacement arg 198 | done 199 | fi 200 | 201 | 202 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
203 | DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' 204 | 205 | # Collect all arguments for the java command: 206 | # * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, 207 | # and any embedded shellness will be escaped. 208 | # * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be 209 | # treated as '${Hostname}' itself on the command line. 210 | 211 | set -- \ 212 | "-Dorg.gradle.appname=$APP_BASE_NAME" \ 213 | -classpath "$CLASSPATH" \ 214 | org.gradle.wrapper.GradleWrapperMain \ 215 | "$@" 216 | 217 | # Stop when "xargs" is not available. 218 | if ! command -v xargs >/dev/null 2>&1 219 | then 220 | die "xargs is not available" 221 | fi 222 | 223 | # Use "xargs" to parse quoted args. 224 | # 225 | # With -n1 it outputs one arg per line, with the quotes and backslashes removed. 226 | # 227 | # In Bash we could simply go: 228 | # 229 | # readarray ARGS < <( xargs -n1 <<<"$var" ) && 230 | # set -- "${ARGS[@]}" "$@" 231 | # 232 | # but POSIX shell has neither arrays nor command substitution, so instead we 233 | # post-process each arg (as a line of input to sed) to backslash-escape any 234 | # character that might be a shell metacharacter, then use eval to reverse 235 | # that process (while maintaining the separation between arguments), and wrap 236 | # the whole thing up as a single "set" statement. 237 | # 238 | # This will of course break if any of these variables contains a newline or 239 | # an unmatched quote. 
240 | # 241 | 242 | eval "set -- $( 243 | printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | 244 | xargs -n1 | 245 | sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | 246 | tr '\n' ' ' 247 | )" '"$@"' 248 | 249 | exec "$JAVACMD" "$@" 250 | -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @rem 2 | @rem Copyright 2015 the original author or authors. 3 | @rem 4 | @rem Licensed under the Apache License, Version 2.0 (the "License"); 5 | @rem you may not use this file except in compliance with the License. 6 | @rem You may obtain a copy of the License at 7 | @rem 8 | @rem https://www.apache.org/licenses/LICENSE-2.0 9 | @rem 10 | @rem Unless required by applicable law or agreed to in writing, software 11 | @rem distributed under the License is distributed on an "AS IS" BASIS, 12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | @rem See the License for the specific language governing permissions and 14 | @rem limitations under the License. 15 | @rem 16 | 17 | @if "%DEBUG%"=="" @echo off 18 | @rem ########################################################################## 19 | @rem 20 | @rem Gradle startup script for Windows 21 | @rem 22 | @rem ########################################################################## 23 | 24 | @rem Set local scope for the variables with windows NT shell 25 | if "%OS%"=="Windows_NT" setlocal 26 | 27 | set DIRNAME=%~dp0 28 | if "%DIRNAME%"=="" set DIRNAME=. 29 | @rem This is normally unused 30 | set APP_BASE_NAME=%~n0 31 | set APP_HOME=%DIRNAME% 32 | 33 | @rem Resolve any "." and ".." in APP_HOME to make it shorter. 34 | for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi 35 | 36 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
37 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" 38 | 39 | @rem Find java.exe 40 | if defined JAVA_HOME goto findJavaFromJavaHome 41 | 42 | set JAVA_EXE=java.exe 43 | %JAVA_EXE% -version >NUL 2>&1 44 | if %ERRORLEVEL% equ 0 goto execute 45 | 46 | echo. 1>&2 47 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 48 | echo. 1>&2 49 | echo Please set the JAVA_HOME variable in your environment to match the 1>&2 50 | echo location of your Java installation. 1>&2 51 | 52 | goto fail 53 | 54 | :findJavaFromJavaHome 55 | set JAVA_HOME=%JAVA_HOME:"=% 56 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 57 | 58 | if exist "%JAVA_EXE%" goto execute 59 | 60 | echo. 1>&2 61 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 62 | echo. 1>&2 63 | echo Please set the JAVA_HOME variable in your environment to match the 1>&2 64 | echo location of your Java installation. 1>&2 65 | 66 | goto fail 67 | 68 | :execute 69 | @rem Setup the command line 70 | 71 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 72 | 73 | 74 | @rem Execute Gradle 75 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* 76 | 77 | :end 78 | @rem End local scope for the variables with windows NT shell 79 | if %ERRORLEVEL% equ 0 goto mainEnd 80 | 81 | :fail 82 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 83 | rem the _cmd.exe /c_ return code! 
84 | set EXIT_CODE=%ERRORLEVEL% 85 | if %EXIT_CODE% equ 0 set EXIT_CODE=1 86 | if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% 87 | exit /b %EXIT_CODE% 88 | 89 | :mainEnd 90 | if "%OS%"=="Windows_NT" endlocal 91 | 92 | :omega 93 | -------------------------------------------------------------------------------- /python/README.md: -------------------------------------------------------------------------------- 1 | # Gemini with Vertex AI and LangChain (Python) 2 | 3 | > [!NOTE] 4 | > This is the Python code for [Gemini in Java with Vertex AI and LangChain4j](https://codelabs.developers.google.com/codelabs/gemini-java-developers) 5 | > codelab geared towards Python developers to discover [Gemini](https://deepmind.google/technologies/gemini/) 6 | > Large Language Model by Google using [LangChain](https://www.langchain.com/) framework. 7 | 8 | ## Prerequisites 9 | 10 | Before running the samples, it's a good idea to create a Python virtual environment and activate it: 11 | 12 | ```shell 13 | python -m venv .venv 14 | source .venv/bin/activate 15 | ``` 16 | 17 | Install the dependencies: 18 | 19 | ```shell 20 | pip install -r requirements.txt 21 | ``` 22 | 23 | You also need to have a Google Cloud account and project ready and set up two environment variables: 24 | 25 | ```shell 26 | export PROJECT_ID=YOUR_PROJECT_ID 27 | export LOCATION=us-central1 28 | ``` 29 | 30 | ## Samples 31 | 32 | These are the list of samples for different use cases: 33 | 34 | * [Simple Question & Answer](qa.py) 35 | 36 | ```shell 37 | python qa.py 38 | ``` 39 | 40 | * [Simple Question & Answer via streaming](stream_qa.py) 41 | 42 | ```shell 43 | python stream_qa.py 44 | ``` 45 | 46 | * [Hold a conversation with a chatbot](conversation.py) 47 | 48 | ```shell 49 | python conversation.py 50 | ``` 51 | 52 | * [Describing an image with multimodality](multimodal.py) 53 | 54 | ```shell 55 | python multimodal.py 56 | ``` 57 | 58 | * [Extracting structured data from unstructured 
text](extract_data.py) 59 | 60 | ```shell 61 | python extract_data.py 62 | ``` 63 | 64 | * [Manipulating prompt templates](template_prompt.py) 65 | 66 | ```shell 67 | python template_prompt.py 68 | ``` 69 | 70 | * [Text classification & sentiment analysis](text_classification.py) 71 | 72 | ```shell 73 | python text_classification.py 74 | ``` 75 | 76 | * [Retrieval Augmented Generation](rag.py) 77 | 78 | ```shell 79 | python rag.py 80 | ``` 81 | 82 | * [Function calling](function_calling.py) 83 | 84 | ```shell 85 | python function_calling.py 86 | ``` 87 | 88 | * [Function calling assistant](function_calling_assistant.py) 89 | 90 | ```shell 91 | python function_calling_assistant.py 92 | ``` 93 | 94 | * [Multi function calling assistant](multi_function_calling_assistant.py) 95 | 96 | ```shell 97 | python multi_function_calling_assistant.py 98 | ``` 99 | 100 | --- 101 | This is not an official Google product. 102 | -------------------------------------------------------------------------------- /python/conversation.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from langchain_community.chat_message_histories import ChatMessageHistory 4 | from langchain_core.chat_history import BaseChatMessageHistory 5 | from langchain_core.prompts import ChatPromptTemplate 6 | from langchain_core.runnables import RunnableWithMessageHistory 7 | from langchain_google_vertexai import ChatVertexAI 8 | 9 | store = {} # memory is maintained outside the chain 10 | 11 | 12 | def get_session_history(session_id: str) -> BaseChatMessageHistory: 13 | if session_id not in store: 14 | store[session_id] = ChatMessageHistory() 15 | return store[session_id] 16 | 17 | 18 | if __name__ == "__main__": 19 | llm = ChatVertexAI( 20 | project=os.environ["PROJECT_ID"], 21 | location="us-central1", 22 | model="gemini-2.0-flash" 23 | ) 24 | 25 | prompt = ChatPromptTemplate.from_messages( 26 | [ 27 | ("system", "You are a helpful assistant."), 28 | 
("placeholder", "{history}"), 29 | ("human", "{input}"), 30 | ] 31 | ) 32 | 33 | chain = prompt | llm 34 | 35 | with_message_history = RunnableWithMessageHistory( 36 | chain, 37 | get_session_history, 38 | input_messages_key="input", 39 | history_messages_key="history", 40 | ) 41 | 42 | messages = [ 43 | "Hello!", 44 | "What is the country where the Eiffel tower is situated?", 45 | "How many inhabitants are there in that country?" 46 | ] 47 | 48 | for message in messages: 49 | print(f"User: {message}") 50 | print("Gemini: ", end="") 51 | for chunk in with_message_history.stream( 52 | {"input": message}, 53 | config={"configurable": {"session_id": "abc123"}}, 54 | ): 55 | print(chunk.content, end="", flush=True) 56 | -------------------------------------------------------------------------------- /python/extract_data.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from langchain_core.prompts import ChatPromptTemplate 4 | from langchain_google_vertexai import ChatVertexAI 5 | from pydantic import BaseModel, Field 6 | 7 | # Reference: https://python.langchain.com/v0.2/docs/how_to/structured_output/ 8 | 9 | 10 | class Person(BaseModel): 11 | """Information about a person with name and age""" 12 | 13 | name: str = Field(description="The name of the person") 14 | age: int = Field(description="The age of the person") 15 | 16 | 17 | if __name__ == "__main__": 18 | 19 | prompt = ChatPromptTemplate.from_messages( 20 | [ 21 | ( 22 | "system", 23 | "You're an expert extractor. Extract the relevant information from the text" 24 | ), 25 | # MessagesPlaceholder('examples'), 26 | ("human", "{text}"), 27 | ] 28 | ) 29 | 30 | llm = ChatVertexAI( 31 | project=os.environ["PROJECT_ID"], 32 | location="us-central1", 33 | model="gemini-2.0-flash", 34 | temperature=0 35 | ) 36 | 37 | runnable = prompt | llm.with_structured_output(Person) 38 | 39 | text = """Anna is a 23 year old artist based in Brooklyn, New York. 
She was born and 40 | raised in the suburbs of Chicago, where she developed a love for art at a 41 | young age. She attended the School of the Art Institute of Chicago, where 42 | she studied painting and drawing. After graduating, she moved to New York 43 | City to pursue her art career. Anna's work is inspired by her personal 44 | experiences and observations of the world around her. She often uses bright 45 | colors and bold lines to create vibrant and energetic paintings. Her work 46 | has been exhibited in galleries and museums in New York City and Chicago.""" 47 | 48 | response = runnable.invoke({"text": text}) 49 | print(response) 50 | -------------------------------------------------------------------------------- /python/function_calling.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | from langchain_core.messages import HumanMessage, ToolMessage 5 | from langchain_core.tools import tool 6 | from langchain_google_vertexai import ChatVertexAI 7 | 8 | 9 | # Reference: https://python.langchain.com/v0.2/docs/how_to/function_calling/ 10 | 11 | @tool 12 | def get_weather_forecast(location: str) -> str: 13 | """Get the weather forecast for a given location or city""" 14 | data = { 15 | "location": location, 16 | "forecast": "sunny", 17 | "temperature": 20 18 | } 19 | json_data = json.dumps(data) 20 | return json_data 21 | 22 | 23 | if __name__ == "__main__": 24 | tools = [get_weather_forecast] 25 | 26 | llm = ChatVertexAI( 27 | project=os.environ["PROJECT_ID"], 28 | location="us-central1", 29 | model="gemini-2.0-flash" 30 | ) 31 | 32 | llm_with_tools = llm.bind_tools(tools) 33 | 34 | # Ask about the weather 35 | query = "How's the weather in Paris?" 
36 | print(f"User: {query}") 37 | messages = [HumanMessage(query)] 38 | response = llm_with_tools.invoke(messages) 39 | messages.append(response) 40 | 41 | # The model replies with a function call request 42 | print(f"Response: {response.tool_calls}") 43 | 44 | for tool_call in response.tool_calls: 45 | selected_tool = {"get_weather_forecast": get_weather_forecast}[tool_call["name"].lower()] 46 | tool_output = selected_tool.invoke(tool_call["args"]) 47 | # Send back the result of the function call 48 | messages.append(ToolMessage(tool_output, tool_call_id=tool_call["id"])) 49 | 50 | # Invoke the model again with function call response 51 | response = llm_with_tools.invoke(messages) 52 | print(f"Response: {response.content}") 53 | 54 | -------------------------------------------------------------------------------- /python/function_calling_assistant.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | from langchain import hub 5 | from langchain.agents import create_tool_calling_agent, AgentExecutor 6 | from langchain_core.tools import tool 7 | from langchain_google_vertexai import ChatVertexAI 8 | 9 | 10 | # Reference: 11 | # https://python.langchain.com/v0.2/docs/how_to/function_calling/ 12 | # https://python.langchain.com/v0.1/docs/use_cases/tool_use/agents/ 13 | 14 | @tool 15 | def get_weather_forecast(location: str) -> str: 16 | """Get the weather forecast and temperature for a location""" 17 | if location == "Paris": 18 | return json.dumps({ 19 | "location": location, 20 | "forecast": "sunny", 21 | "temperature": 20 22 | }) 23 | elif location == "London": 24 | return json.dumps({ 25 | "location": location, 26 | "forecast": "rainy", 27 | "temperature": 15 28 | }) 29 | 30 | return json.dumps({ 31 | "location": "unknown", 32 | "forecast": "unknown", 33 | "temperature": 0 34 | }) 35 | 36 | 37 | if __name__ == "__main__": 38 | tools = [get_weather_forecast] 39 | 40 | prompt = 
hub.pull("hwchase17/openai-tools-agent") 41 | prompt.pretty_print() 42 | 43 | llm = ChatVertexAI( 44 | project=os.environ["PROJECT_ID"], 45 | location="us-central1", 46 | model="gemini-1.5-pro-002" 47 | ) 48 | 49 | # Construct the tool calling agent 50 | agent = create_tool_calling_agent(llm, tools, prompt) 51 | 52 | # Create an agent executor by passing in the agent and tools 53 | agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) 54 | 55 | query = "How is the weather in Paris?" 56 | print(f"User: {query}") 57 | response = agent_executor.invoke({"input": query}) 58 | print(f"Response: {response['output']}") 59 | 60 | query = "Is it warmer in London or in Paris?" 61 | print(f"User: {query}") 62 | response = agent_executor.invoke({"input": query}) 63 | print(f"Response: {response['output']}") 64 | -------------------------------------------------------------------------------- /python/gemma_with_ollama_container.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright 2024 Google LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | https://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | """ 16 | 17 | # Requires you to install Ollama 18 | # https://ollama.com/download/linux 19 | 20 | import docker 21 | import time 22 | import ollama 23 | from testcontainers.ollama import OllamaContainer 24 | from docker.models.containers import Container 25 | 26 | # Make sure you have Docker installed and running locally before running this sample 27 | 28 | TC_OLLAMA_GEMMA2 = "tc-ollama-gemma2-2b" 29 | GEMMA_2 = "gemma2:2b" 30 | OLLAMA_VERSION = "ollama/ollama:0.3.12" 31 | 32 | 33 | def create_gemma_ollama_container(): 34 | """Creates an Ollama container with Gemma 2 if it doesn't exist.""" 35 | 36 | client = docker.from_env() 37 | api_client = docker.APIClient() # add an api client to use the commit 38 | 39 | try: 40 | # Check if the custom Gemma Ollama image exists already 41 | images = client.images.list(filters={"reference": TC_OLLAMA_GEMMA2}) 42 | if not images: 43 | print("Creating a new Ollama container with Gemma 2 image...") 44 | # Create a default Ollama Container, we will need it to download the model 45 | ollama_container_pull = OllamaContainer(OLLAMA_VERSION) 46 | print("Starting Ollama...") 47 | ollama_container_pull.start() 48 | print("Pulling model...") 49 | ollama_container_pull.exec(["ollama", "pull", GEMMA_2]) 50 | print("Committing to image...") 51 | # Commit the changes to the new image 52 | # Get the low level container object 53 | low_level_container : Container = ollama_container_pull._container 54 | container_id = low_level_container.id 55 | 56 | api_client.commit(container=container_id, repository=TC_OLLAMA_GEMMA2) #fixed 57 | 58 | ollama_container_pull.stop() # stop the container that pulled the model 59 | else: 60 | print("Ollama image already exists") 61 | 62 | print("Ollama image substitution...") 63 | # Substitute the default Ollama image with our Gemma variant 64 | return OllamaContainer(TC_OLLAMA_GEMMA2) 65 | 66 | except Exception as e: 67 | print(f"An error occurred: {e}") 68 | raise 69 | 70 | 71 | def main(): 72 | start = 
time.time() 73 | ollama_container = create_gemma_ollama_container() # We are calling it ollama_container instead of ollama 74 | print(f"Container created in {time.time() - start:.0f}s") 75 | start = time.time() 76 | 77 | ollama_container.start() # We are calling it ollama_container instead of ollama 78 | print(f"Ollama container started in {time.time() - start:.0f}s") 79 | start = time.time() 80 | 81 | base_url = f"http://{ollama_container.get_container_host_ip()}:{ollama_container.get_exposed_port(11434)}" # We are calling it ollama_container instead of ollama 82 | # The following line was commented because the library ollama changed and it does not have the ChatLanguageModel class anymore. 83 | # model = OllamaChatModel.builder().base_url(base_url).model_name(GEMMA_2).timeout(120).build() 84 | 85 | print(f"Model ready in {time.time() - start:.0f}s") 86 | start = time.time() 87 | 88 | # The library changed, so now you have to use client.chat instead of model.generate 89 | client = ollama.Client(host=base_url) # This is ok, we are calling the ollama client here 90 | 91 | print(client.chat(model=GEMMA_2, messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])['message'][ 92 | 'content']) 93 | print(f"First response: {time.time() - start:.0f}s") 94 | start = time.time() 95 | 96 | print(client.chat(model=GEMMA_2, messages=[{'role': 'user', 'content': 'Who was the first cat who stepped on the moon?'}])['message'][ 97 | 'content']) 98 | print(f"Second response: {time.time() - start:.0f}s") 99 | start = time.time() 100 | 101 | print(client.chat(model=GEMMA_2, messages=[ 102 | {'role': 'user', 'content': 'What are the differences between the Gemini model and the Gemma models?'}])['message'][ 103 | 'content']) 104 | print(f"Third response: {time.time() - start:.0f}s") 105 | 106 | ollama_container.stop() # We are calling it ollama_container instead of ollama 107 | 108 | 109 | if __name__ == "__main__": 110 | main() 111 | 
-------------------------------------------------------------------------------- /python/multi_function_calling_assistant.py: -------------------------------------------------------------------------------- 1 | import os 2 | from random import random 3 | 4 | from langchain import hub 5 | from langchain.agents import create_tool_calling_agent, AgentExecutor 6 | from langchain_core.tools import tool 7 | from langchain_google_vertexai import ChatVertexAI 8 | 9 | 10 | # Reference: 11 | # https://python.langchain.com/v0.2/docs/how_to/function_calling/ 12 | # https://python.langchain.com/v0.1/docs/use_cases/tool_use/agents/ 13 | 14 | 15 | @tool 16 | def convert_currency(from_currency: str, to_currency: str, amount: float) -> float: 17 | """Convert from from_currency to to_currency with the specified amount""" 18 | 19 | result = amount 20 | if from_currency == "USD" and to_currency == "EUR": 21 | result = amount * 0.93 22 | elif from_currency == "USD" and to_currency == "GBP": 23 | result = amount * 0.79 24 | 25 | print(f"convertCurrency(fromCurrency = {from_currency}, toCurrency = {to_currency}, amount = {amount}) == {result}") 26 | 27 | return result 28 | 29 | @tool 30 | def get_stock_price(symbol: str) -> float: 31 | """"Get the current value of a stock in US dollars""" 32 | result = 170.0 + 10 * random() 33 | print(f"get_stock_price(symbol = {symbol}) == {result}") 34 | return result 35 | 36 | 37 | @tool 38 | def apply_percentage(amount: float, percentage: float) -> float: 39 | """ Applies a percentage to a given amount""" 40 | result = amount * (percentage / 100) 41 | print(f"applyPercentage(amount = {amount}, percentage = {percentage}) == {result}") 42 | return result 43 | 44 | 45 | if __name__ == "__main__": 46 | tools = [convert_currency, get_stock_price, apply_percentage] 47 | 48 | prompt = hub.pull("hwchase17/openai-tools-agent") 49 | prompt.pretty_print() 50 | 51 | llm = ChatVertexAI( 52 | project=os.environ["PROJECT_ID"], 53 | location="us-central1", 54 | 
model="gemini-2.0-flash" 55 | ) 56 | 57 | # Construct the tool calling agent 58 | agent = create_tool_calling_agent(llm, tools, prompt) 59 | 60 | # Create an agent executor by passing in the agent and tools 61 | agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) 62 | 63 | query = "What is 10% of the AAPL stock price converted from USD to EUR?" 64 | print(f"User: {query}") 65 | response = agent_executor.invoke({"input": query}) 66 | print(f"Response: {response['output']}") 67 | -------------------------------------------------------------------------------- /python/multimodal.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from langchain_core.messages import HumanMessage 4 | from langchain_google_vertexai import ChatVertexAI 5 | 6 | CLOUD_NEXT_URL = "https://storage.googleapis.com/github-repo/img/vision/google-cloud-next.jpeg" 7 | 8 | if __name__ == "__main__": 9 | llm = ChatVertexAI( 10 | project=os.environ["PROJECT_ID"], 11 | location="us-central1", 12 | model="gemini-2.0-flash" 13 | ) 14 | 15 | image_message = { 16 | "type": "image_url", 17 | "image_url": {"url": CLOUD_NEXT_URL}, 18 | } 19 | text_message = { 20 | "type": "text", 21 | "text": "Describe the picture", 22 | } 23 | 24 | message = HumanMessage(content=[text_message, image_message]) 25 | 26 | response = llm.invoke([message]) 27 | print(response.content) 28 | -------------------------------------------------------------------------------- /python/qa.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from langchain_google_vertexai import ChatVertexAI 4 | 5 | if __name__ == "__main__": 6 | llm = ChatVertexAI( 7 | project=os.environ["PROJECT_ID"], 8 | location="us-central1", 9 | model="gemini-2.0-flash" 10 | ) 11 | 12 | response = llm.invoke("Why is the sky blue?") 13 | print(response.content) 14 | 
-------------------------------------------------------------------------------- /python/rag.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from langchain.chains.combine_documents import create_stuff_documents_chain 4 | from langchain.chains.retrieval import create_retrieval_chain 5 | from langchain_community.document_loaders import PyPDFLoader 6 | from langchain_community.vectorstores import Annoy 7 | from langchain_core.prompts import ChatPromptTemplate 8 | from langchain_google_vertexai import VertexAIEmbeddings, ChatVertexAI 9 | from langchain_text_splitters import RecursiveCharacterTextSplitter 10 | 11 | # Reference: https://python.langchain.com/v0.2/docs/tutorials/pdf_qa/ 12 | 13 | if __name__ == "__main__": 14 | 15 | print("Load and parse the PDF") 16 | loader = PyPDFLoader( 17 | "https://raw.githubusercontent.com/meteatamel/genai-beyond-basics/main/samples/grounding/vertexai-search/cymbal-starlight-2024.pdf") 18 | documents = loader.load() 19 | 20 | print("Split the document into chunks") 21 | text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100) 22 | texts = text_splitter.split_documents(documents) 23 | 24 | print("Initialize the embedding model") 25 | embeddingsLlm = VertexAIEmbeddings( 26 | project=os.environ["PROJECT_ID"], 27 | location="us-central1", 28 | model_name="text-embedding-005" 29 | ) 30 | 31 | print("Create a vector store") 32 | vector_store = Annoy.from_documents(texts, embeddingsLlm) 33 | 34 | retriever = vector_store.as_retriever() 35 | 36 | print("Initialize the chat model") 37 | llm = ChatVertexAI( 38 | project=os.environ["PROJECT_ID"], 39 | location="us-central1", 40 | model="gemini-2.0-flash" 41 | ) 42 | 43 | system_prompt = ( 44 | "You are an assistant for question-answering tasks. " 45 | "Use the following pieces of retrieved context to answer " 46 | "the question. If you don't know the answer, say that you " 47 | "don't know. 
Use three sentences maximum and keep the " 48 | "answer concise." 49 | "\n\n" 50 | "{context}" 51 | ) 52 | 53 | prompt = ChatPromptTemplate.from_messages( 54 | [ 55 | ("system", system_prompt), 56 | ("human", "{input}"), 57 | ] 58 | ) 59 | 60 | print("Create RAG chain") 61 | question_answer_chain = create_stuff_documents_chain(llm, prompt) 62 | rag_chain = create_retrieval_chain(retriever, question_answer_chain) 63 | 64 | print("Ready!") 65 | 66 | questions = [ 67 | "What is the cargo capacity of Cymbal Starlight?", 68 | "What's the emergency roadside assistance phone number?", 69 | "Are there some special kits available on that car?" 70 | ] 71 | 72 | for question in questions: 73 | print(f"\n=== {question} ===") 74 | response = rag_chain.invoke({"input": question}) 75 | print(response['answer']) 76 | -------------------------------------------------------------------------------- /python/requirements.txt: -------------------------------------------------------------------------------- 1 | # Automatically generated by https://github.com/damnever/pigar. 
2 | annoy==1.17.3 3 | langchain==0.3.18 4 | langchainhub==0.1.21 5 | langchain-community==0.3.17 6 | langchain-core==0.3.35 7 | langchain-google-vertexai==2.0.13 8 | langchain-text-splitters==0.3.6 9 | pydantic==2.10.6 10 | pypdf==5.3.0 11 | docker==7.1.0 12 | testcontainers==4.9.2 13 | ollama==0.4.7 -------------------------------------------------------------------------------- /python/stream_qa.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from langchain_google_vertexai import ChatVertexAI 4 | 5 | if __name__ == "__main__": 6 | llm = ChatVertexAI( 7 | project=os.environ["PROJECT_ID"], 8 | location="us-central1", 9 | model="gemini-2.0-flash", 10 | max_output_tokens=4000 11 | ) 12 | 13 | for chunk in llm.stream("Why is the sky blue?"): 14 | print(chunk.content, end="", flush=True) 15 | -------------------------------------------------------------------------------- /python/template_prompt.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from langchain_core.prompts import PromptTemplate 4 | from langchain_google_vertexai import ChatVertexAI 5 | 6 | if __name__ == "__main__": 7 | llm = ChatVertexAI( 8 | project=os.environ["PROJECT_ID"], 9 | location="us-central1", 10 | model="gemini-2.0-flash" 11 | ) 12 | 13 | prompt_template = PromptTemplate.from_template(""" 14 | You're a friendly chef with a lot of cooking experience. 15 | Create a recipe for a {dish} with the following ingredients: {ingredients}, and give it a name. 
16 | """) 17 | 18 | prompt = prompt_template.format(dish="dessert", ingredients="strawberries, chocolate, and whipped cream") 19 | 20 | response = llm.invoke(prompt) 21 | print(response.content) 22 | -------------------------------------------------------------------------------- /python/text_classification.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from langchain_core.prompts import PromptTemplate 4 | from langchain_google_vertexai import ChatVertexAI 5 | 6 | if __name__ == "__main__": 7 | llm = ChatVertexAI( 8 | project=os.environ["PROJECT_ID"], 9 | location="us-central1", 10 | model="gemini-2.0-flash" 11 | ) 12 | 13 | prompt_template = PromptTemplate.from_template(""" 14 | Analyze the sentiment of the text below. Respond only with one word to describe the sentiment. 15 | 16 | INPUT: This is fantastic news! 17 | OUTPUT: POSITIVE 18 | 19 | INPUT: Pi is roughly equal to 3.14 20 | OUTPUT: NEUTRAL 21 | 22 | INPUT: I really disliked the pizza. Who would use pineapples as a pizza topping? 23 | OUTPUT: NEGATIVE 24 | 25 | INPUT: {text} 26 | OUTPUT: 27 | """) 28 | 29 | prompt = prompt_template.format(text="I love strawberries!") 30 | 31 | response = llm.invoke(prompt) 32 | print(response.content) 33 | -------------------------------------------------------------------------------- /settings.gradle: -------------------------------------------------------------------------------- 1 | /* 2 | * This file was generated by the Gradle 'init' task. 3 | * 4 | * The settings file is used to specify which projects to include in your build. 5 | * For more detailed information on multi-project builds, please refer to https://docs.gradle.org/8.4/userguide/building_swift_projects.html in the Gradle documentation. 6 | * This project uses @Incubating APIs which are subject to change. 
7 | */ 8 | 9 | plugins { 10 | // Apply the foojay-resolver plugin to allow automatic download of JDKs 11 | id 'org.gradle.toolchains.foojay-resolver-convention' version '0.7.0' 12 | } 13 | 14 | rootProject.name = 'gemini-workshop' 15 | include('app') 16 | --------------------------------------------------------------------------------