├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── auth
│   ├── authenticated
│   │   ├── Controllers
│   │   │   └── DefaultController.cs
│   │   ├── Dockerfile
│   │   ├── Program.cs
│   │   ├── Startup.cs
│   │   └── authenticated.csproj
│   ├── calling
│   │   ├── Controllers
│   │   │   └── DefaultController.cs
│   │   ├── Dockerfile
│   │   ├── Program.cs
│   │   ├── Startup.cs
│   │   └── calling.csproj
│   └── receiving
│       ├── Dockerfile
│       ├── Program.cs
│       ├── Startup.cs
│       └── receiving.csproj
├── dbt-job
│   ├── Dockerfile
│   ├── README.md
│   ├── jaffle-shop
│   │   ├── .gitignore
│   │   ├── .user.yml
│   │   ├── README.md
│   │   ├── analysis
│   │   │   └── .gitkeep
│   │   ├── data
│   │   │   └── .gitkeep
│   │   ├── dbt_project.yml
│   │   ├── macros
│   │   │   └── .gitkeep
│   │   ├── models
│   │   │   └── customers.sql
│   │   ├── profiles.yml
│   │   ├── snapshots
│   │   │   └── .gitkeep
│   │   └── tests
│   │       └── .gitkeep
│   ├── messagebody.json
│   └── script.sh
├── dbt
│   ├── Dockerfile
│   ├── invoke.go
│   ├── jaffle-shop
│   │   ├── .gitignore
│   │   ├── .user.yml
│   │   ├── README.md
│   │   ├── analysis
│   │   │   └── .gitkeep
│   │   ├── data
│   │   │   └── .gitkeep
│   │   ├── dbt_project.yml
│   │   ├── macros
│   │   │   └── .gitkeep
│   │   ├── models
│   │   │   └── customers.sql
│   │   ├── profiles.yml
│   │   ├── snapshots
│   │   │   └── .gitkeep
│   │   └── tests
│   │       └── .gitkeep
│   └── script.sh
├── docs
│   ├── auth.md
│   ├── configure.md
│   ├── deploy-from-source.md
│   ├── images
│   │   ├── cloud-run-console-private.png
│   │   ├── cloud-run-console.png
│   │   ├── cloud-run-pubsub.png
│   │   ├── cloud-run-schedule.png
│   │   ├── cloud-run-storage.png
│   │   ├── cloud-run-tasks.png
│   │   ├── dbt-customers-table.png
│   │   ├── dbt-customers-table2.png
│   │   ├── jaffleshop-dataset.png
│   │   ├── serverless-containers-with-cloud-run.png
│   │   └── serverless-on-google-cloud.png
│   ├── private.md
│   ├── public.md
│   ├── pubsub.md
│   ├── scheduled-dbt-service-bigquery.md
│   ├── scheduled.md
│   ├── storage.md
│   └── tasks.md
├── event-display
│   └── csharp
│       ├── Dockerfile
│       ├── Program.cs
│       ├── Startup.cs
│       └── event-display.csproj
├── health-checks
│   ├── README.md
│   ├── index.js
│   ├── package.json
│   └── service.yaml
└── helloworld
    └── csharp
        ├── 3.1
        │   ├── Program.cs
        │   ├── Startup.cs
        │   └── helloworld.csproj
        ├── 5.0
        │   ├── Dockerfile
        │   ├── Program.cs
        │   ├── Startup.cs
        │   └── helloworld.csproj
        ├── 6.0
        │   ├── Dockerfile
        │   ├── Program.cs
        │   └── helloworld.csproj
        └── 7.0
            ├── Program.cs
            └── helloworld.csproj
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 | ##
4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
5 |
6 | # User-specific files
7 | *.vscode/
8 | *.suo
9 | *.user
10 | *.userosscache
11 | *.sln.docstates
12 | appsettings.*
13 |
14 | # User-specific files (MonoDevelop/Xamarin Studio)
15 | *.userprefs
16 |
17 | # Build results
18 | [Dd]ebug/
19 | [Dd]ebugPublic/
20 | [Rr]elease/
21 | [Rr]eleases/
22 | x64/
23 | x86/
24 | bld/
25 | [Bb]in/
26 | [Oo]bj/
27 | [Ll]og/
28 |
29 | # Visual Studio 2015/2017 cache/options directory
30 | .vs/
31 | # Uncomment if you have tasks that create the project's static files in wwwroot
32 | #wwwroot/
33 |
34 | # Visual Studio 2017 auto generated files
35 | Generated\ Files/
36 |
37 | # MSTest test Results
38 | [Tt]est[Rr]esult*/
39 | [Bb]uild[Ll]og.*
40 |
41 | # NUNIT
42 | *.VisualState.xml
43 | TestResult.xml
44 |
45 | # Build Results of an ATL Project
46 | [Dd]ebugPS/
47 | [Rr]eleasePS/
48 | dlldata.c
49 |
50 | # Benchmark Results
51 | BenchmarkDotNet.Artifacts/
52 |
53 | # .NET Core
54 | project.lock.json
55 | project.fragment.lock.json
56 | artifacts/
57 | **/Properties/launchSettings.json
58 |
59 | # StyleCop
60 | StyleCopReport.xml
61 |
62 | # Files built by Visual Studio
63 | *_i.c
64 | *_p.c
65 | *_i.h
66 | *.ilk
67 | *.meta
68 | *.obj
69 | *.iobj
70 | *.pch
71 | *.pdb
72 | *.ipdb
73 | *.pgc
74 | *.pgd
75 | *.rsp
76 | *.sbr
77 | *.tlb
78 | *.tli
79 | *.tlh
80 | *.tmp
81 | *.tmp_proj
82 | *.log
83 | *.vspscc
84 | *.vssscc
85 | .builds
86 | *.pidb
87 | *.svclog
88 | *.scc
89 |
90 | # Chutzpah Test files
91 | _Chutzpah*
92 |
93 | # Visual C++ cache files
94 | ipch/
95 | *.aps
96 | *.ncb
97 | *.opendb
98 | *.opensdf
99 | *.sdf
100 | *.cachefile
101 | *.VC.db
102 | *.VC.VC.opendb
103 |
104 | # Visual Studio profiler
105 | *.psess
106 | *.vsp
107 | *.vspx
108 | *.sap
109 |
110 | # Visual Studio Trace Files
111 | *.e2e
112 |
113 | # TFS 2012 Local Workspace
114 | $tf/
115 |
116 | # Guidance Automation Toolkit
117 | *.gpState
118 |
119 | # ReSharper is a .NET coding add-in
120 | _ReSharper*/
121 | *.[Rr]e[Ss]harper
122 | *.DotSettings.user
123 |
124 | # JustCode is a .NET coding add-in
125 | .JustCode
126 |
127 | # TeamCity is a build add-in
128 | _TeamCity*
129 |
130 | # DotCover is a Code Coverage Tool
131 | *.dotCover
132 |
133 | # AxoCover is a Code Coverage Tool
134 | .axoCover/*
135 | !.axoCover/settings.json
136 |
137 | # Visual Studio code coverage results
138 | *.coverage
139 | *.coveragexml
140 |
141 | # NCrunch
142 | _NCrunch_*
143 | .*crunch*.local.xml
144 | nCrunchTemp_*
145 |
146 | # MightyMoose
147 | *.mm.*
148 | AutoTest.Net/
149 |
150 | # Web workbench (sass)
151 | .sass-cache/
152 |
153 | # Installshield output folder
154 | [Ee]xpress/
155 |
156 | # DocProject is a documentation generator add-in
157 | DocProject/buildhelp/
158 | DocProject/Help/*.HxT
159 | DocProject/Help/*.HxC
160 | DocProject/Help/*.hhc
161 | DocProject/Help/*.hhk
162 | DocProject/Help/*.hhp
163 | DocProject/Help/Html2
164 | DocProject/Help/html
165 |
166 | # Click-Once directory
167 | publish/
168 |
169 | # Publish Web Output
170 | *.[Pp]ublish.xml
171 | *.azurePubxml
172 | # Note: Comment the next line if you want to checkin your web deploy settings,
173 | # but database connection strings (with potential passwords) will be unencrypted
174 | *.pubxml
175 | *.publishproj
176 |
177 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
178 | # checkin your Azure Web App publish settings, but sensitive information contained
179 | # in these scripts will be unencrypted
180 | PublishScripts/
181 |
182 | # NuGet Packages
183 | *.nupkg
184 | # The packages folder can be ignored because of Package Restore
185 | **/[Pp]ackages/*
186 | # except build/, which is used as an MSBuild target.
187 | !**/[Pp]ackages/build/
188 | # Uncomment if necessary however generally it will be regenerated when needed
189 | #!**/[Pp]ackages/repositories.config
190 | # NuGet v3's project.json files produces more ignorable files
191 | *.nuget.props
192 | *.nuget.targets
193 |
194 | # Microsoft Azure Build Output
195 | csx/
196 | *.build.csdef
197 |
198 | # Microsoft Azure Emulator
199 | ecf/
200 | rcf/
201 |
202 | # Windows Store app package directories and files
203 | AppPackages/
204 | BundleArtifacts/
205 | Package.StoreAssociation.xml
206 | _pkginfo.txt
207 | *.appx
208 |
209 | # Visual Studio cache files
210 | # files ending in .cache can be ignored
211 | *.[Cc]ache
212 | # but keep track of directories ending in .cache
213 | !*.[Cc]ache/
214 |
215 | # Others
216 | ClientBin/
217 | ~$*
218 | *~
219 | *.dbmdl
220 | *.dbproj.schemaview
221 | *.jfm
222 | *.pfx
223 | *.publishsettings
224 | orleans.codegen.cs
225 |
226 | # Including strong name files can present a security risk
227 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
228 | #*.snk
229 |
230 | # Since there are multiple workflows, uncomment next line to ignore bower_components
231 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
232 | #bower_components/
233 |
234 | # RIA/Silverlight projects
235 | Generated_Code/
236 |
237 | # Backup & report files from converting an old project file
238 | # to a newer Visual Studio version. Backup files are not needed,
239 | # because we have git ;-)
240 | _UpgradeReport_Files/
241 | Backup*/
242 | UpgradeLog*.XML
243 | UpgradeLog*.htm
244 | ServiceFabricBackup/
245 | *.rptproj.bak
246 |
247 | # SQL Server files
248 | *.mdf
249 | *.ldf
250 | *.ndf
251 |
252 | # Business Intelligence projects
253 | *.rdl.data
254 | *.bim.layout
255 | *.bim_*.settings
256 | *.rptproj.rsuser
257 |
258 | # Microsoft Fakes
259 | FakesAssemblies/
260 |
261 | # GhostDoc plugin setting file
262 | *.GhostDoc.xml
263 |
264 | # Node.js Tools for Visual Studio
265 | .ntvs_analysis.dat
266 | node_modules/
267 |
268 | # Visual Studio 6 build log
269 | *.plg
270 |
271 | # Visual Studio 6 workspace options file
272 | *.opt
273 |
274 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
275 | *.vbw
276 |
277 | # Visual Studio LightSwitch build output
278 | **/*.HTMLClient/GeneratedArtifacts
279 | **/*.DesktopClient/GeneratedArtifacts
280 | **/*.DesktopClient/ModelManifest.xml
281 | **/*.Server/GeneratedArtifacts
282 | **/*.Server/ModelManifest.xml
283 | _Pvt_Extensions
284 |
285 | # Paket dependency manager
286 | .paket/paket.exe
287 | paket-files/
288 |
289 | # FAKE - F# Make
290 | .fake/
291 |
292 | # JetBrains Rider
293 | .idea/
294 | *.sln.iml
295 |
296 | # CodeRush
297 | .cr/
298 |
299 | # Python Tools for Visual Studio (PTVS)
300 | __pycache__/
301 | *.pyc
302 |
303 | # Cake - Uncomment if you are using it
304 | # tools/**
305 | # !tools/packages.config
306 |
307 | # Tabs Studio
308 | *.tss
309 |
310 | # Telerik's JustMock configuration file
311 | *.jmconfig
312 |
313 | # BizTalk build output
314 | *.btp.cs
315 | *.btm.cs
316 | *.odx.cs
317 | *.xsd.cs
318 |
319 | # OpenCover UI analysis results
320 | OpenCover/
321 |
322 | # Azure Stream Analytics local run output
323 | ASALocalRun/
324 |
325 | # MSBuild Binary and Structured Log
326 | *.binlog
327 |
328 | # NVidia Nsight GPU debugger configuration file
329 | *.nvuser
330 |
331 | # MFractors (Xamarin productivity tool) working folder
332 | .mfractor/
333 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute
2 |
3 | We'd love to accept your patches and contributions to this project. There are
4 | just a few small guidelines you need to follow.
5 |
6 | ## Contributor License Agreement
7 |
8 | Contributions to this project must be accompanied by a Contributor License
9 | Agreement. You (or your employer) retain the copyright to your contribution;
10 | this simply gives us permission to use and redistribute your contributions as
11 | part of the project. Head over to <https://cla.developers.google.com/> to see
12 | your current agreements on file or to sign a new one.
13 |
14 | You generally only need to submit a CLA once, so if you've already submitted one
15 | (even if it was for a different project), you probably don't need to do it
16 | again.
17 |
18 | ## Code reviews
19 |
20 | All submissions, including submissions by project members, require review. We
21 | use GitHub pull requests for this purpose. Consult
22 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
23 | information on using pull requests.
24 |
25 | ## Community Guidelines
26 |
27 | This project follows
28 | [Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
29 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Cloud Run Tutorial
2 |
3 | 
4 |
5 | [Cloud Run](https://cloud.google.com/run/) is a managed serverless platform that enables you to run stateless containers invocable via HTTP requests on Google Cloud.
6 |
7 | Cloud Run is built from the open-source [Knative](https://knative.dev/) project, letting you choose to run your containers fully managed with Cloud Run, in your Google Kubernetes Engine cluster with Cloud Run for Anthos, or on any Kubernetes cluster running Knative.
8 |
9 | ## Slides
10 |
11 | There's a [presentation](https://speakerdeck.com/meteatamel/serverless-containers-with-cloud-run) that accompanies the tutorial.
12 |
13 |
14 |
15 |
16 |
17 | ## Setup
18 |
19 | [Cloud Run](https://cloud.google.com/run/) is a fully managed service, so
20 | there's no setup other than enabling Cloud Run and Cloud Build.
21 |
22 | [Cloud Run for
23 | Anthos](https://cloud.google.com/run/docs/quickstarts/prebuilt-deploy-gke) runs
24 | on GKE on the Anthos platform.
25 |
26 | Set up the project id and number that we'll need throughout the samples:
27 |
28 | ```bash
29 | export PROJECT_ID="$(gcloud config get-value core/project)"
30 | export PROJECT_NUMBER="$(gcloud projects list --filter=${PROJECT_ID} --format='value(PROJECT_NUMBER)')"
31 | ```
32 |
33 | Enable Cloud Build and Cloud Run:
34 |
35 | ```bash
36 | gcloud services enable --project ${PROJECT_ID} \
37 | cloudbuild.googleapis.com \
38 | run.googleapis.com
39 | ```
40 |
41 | ## Samples
42 |
43 | Cloud Run Serving
44 |
45 | * [Public service](docs/public.md)
46 | * [Configure service](docs/configure.md)
47 | * [Private service](docs/private.md)
48 | * [Pub/Sub triggered service](docs/pubsub.md)
49 | * [Storage triggered service](docs/storage.md)
50 | * [Scheduled service](docs/scheduled.md)
51 | * [Task triggered service](docs/tasks.md)
52 | * [Service to service authentication](docs/auth.md)
53 | * [Cloud Run Healthchecks](health-checks)
54 |
55 | Cloud Run Eventing
56 | * [Image processing pipeline v1 - Eventarc (AuditLog-Cloud Storage) + Cloud Run](https://github.com/GoogleCloudPlatform/eventarc-samples/tree/main/processing-pipelines/image-v1)
57 | * [Image processing pipeline v2 - Eventarc (Cloud Storage) + Cloud Run + Workflows](https://github.com/GoogleCloudPlatform/eventarc-samples/blob/main/processing-pipelines/image-v2)
58 | * [Image processing pipeline v3 - Eventarc (Cloud Storage) + Workflows](https://github.com/GoogleCloudPlatform/eventarc-samples/blob/main/processing-pipelines/image-v2)
59 | * [Image processing pipeline v1 - Eventarc (AuditLog-Cloud Storage) + Cloud Run for Anthos](https://github.com/GoogleCloudPlatform/eventarc-samples/blob/main/processing-pipelines/image-v1/image-processing-pipeline-eventarc-crfa.md)
60 | * [BigQuery processing pipeline - Eventarc + Cloud Run](https://github.com/GoogleCloudPlatform/eventarc-samples/tree/main/processing-pipelines/bigquery)
61 | * [BigQuery processing pipeline - Eventarc + Cloud Run for Anthos](https://github.com/GoogleCloudPlatform/eventarc-samples/blob/main/processing-pipelines/bigquery/bigquery-processing-pipeline-eventarc-crfa.md)
62 |
63 | Other
64 |
65 | * [Scheduled dbt service with BigQuery](docs/scheduled-dbt-service-bigquery.md)
66 | * [Scheduled dbt job with BigQuery](dbt-job)
67 | * [Deploying from source code](docs/deploy-from-source.md)
68 |
69 | -------
70 |
71 | This is not an official Google product.
72 |
--------------------------------------------------------------------------------
/auth/authenticated/Controllers/DefaultController.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Net.Http;
3 | using System.Threading.Tasks;
4 | using Microsoft.AspNetCore.Mvc;
5 |
6 | namespace authenticated.Controllers
7 | {
8 |
9 | [Route("")]
10 | public class DefaultController : ControllerBase
11 | {
12 | private readonly IHttpClientFactory _clientFactory;
13 |
14 | public DefaultController(IHttpClientFactory clientFactory)
15 | {
16 | _clientFactory = clientFactory;
17 | }
18 |
19 | [HttpGet]
20 | public async Task<IActionResult> GetAsync()
21 | {
22 | var url = Environment.GetEnvironmentVariable("URL");
23 | if (url == null)
24 | {
25 | return BadRequest("No URL defined");
26 | }
27 |
28 | var idToken = await GetIdToken(url);
29 | if (idToken == null)
30 | {
31 | return BadRequest("No id token could be fetched");
32 | }
33 |
34 | var content = await MakeAuthRequest(idToken, url);
35 |
36 | return Ok("Second service says: " + content);
37 | }
38 |
39 | private async Task<string> GetIdToken(string targetUrl)
40 | {
41 | var httpClient = _clientFactory.CreateClient();
42 |
43 | var metadataUrl = $"http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience={targetUrl}";
44 | var request = new HttpRequestMessage(HttpMethod.Get, metadataUrl);
45 | request.Headers.Add("Metadata-Flavor", "Google");
46 |
47 | var response = await httpClient.SendAsync(request);
48 |
49 | return await response.Content.ReadAsStringAsync();
50 | }
51 |
52 | private async Task<string> MakeAuthRequest(string idToken, string url)
53 | {
54 | var httpClient = _clientFactory.CreateClient();
55 |
56 | var request = new HttpRequestMessage(HttpMethod.Get, url);
57 | request.Headers.Add("Authorization", "Bearer " + idToken);
58 |
59 | var response = await httpClient.SendAsync(request);
60 |
61 | return await response.Content.ReadAsStringAsync();
62 | }
63 | }
64 | }
--------------------------------------------------------------------------------
/auth/authenticated/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use Microsoft's official build .NET image.
2 | # https://hub.docker.com/_/microsoft-dotnet-core-sdk/
3 | FROM mcr.microsoft.com/dotnet/core/sdk:3.1-alpine AS build
4 | WORKDIR /app
5 |
6 | # Install production dependencies.
7 | # Copy csproj and restore as distinct layers.
8 | COPY *.csproj ./
9 | RUN dotnet restore
10 |
11 | # Copy local code to the container image.
12 | COPY . ./
13 | WORKDIR /app
14 |
15 | # Build a release artifact.
16 | RUN dotnet publish -c Release -o out
17 |
18 |
19 | # Use Microsoft's official runtime .NET image.
20 | # https://hub.docker.com/_/microsoft-dotnet-core-aspnet/
21 | FROM mcr.microsoft.com/dotnet/core/aspnet:3.1-alpine AS runtime
22 | WORKDIR /app
23 | COPY --from=build /app/out ./
24 |
25 | # Run the web service on container startup.
26 | ENTRYPOINT ["dotnet", "authenticated.dll"]
27 |
28 |
--------------------------------------------------------------------------------
/auth/authenticated/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using Microsoft.AspNetCore.Hosting;
3 | using Microsoft.Extensions.Hosting;
4 |
5 | namespace authenticated
6 | {
7 | public class Program
8 | {
9 | public static void Main(string[] args)
10 | {
11 | CreateHostBuilder(args).Build().Run();
12 | }
13 |
14 | public static IHostBuilder CreateHostBuilder(string[] args)
15 | {
16 | var port = Environment.GetEnvironmentVariable("PORT") ?? "8080";
17 | var url = String.Concat("http://0.0.0.0:", port);
18 |
19 | return Host.CreateDefaultBuilder(args)
20 | .ConfigureWebHostDefaults(webBuilder =>
21 | {
22 | webBuilder.UseStartup<Startup>().UseUrls(url);
23 | });
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/auth/authenticated/Startup.cs:
--------------------------------------------------------------------------------
1 | using Microsoft.AspNetCore.Builder;
2 | using Microsoft.AspNetCore.Hosting;
3 | using Microsoft.Extensions.DependencyInjection;
4 | using Microsoft.Extensions.Hosting;
5 |
6 | namespace authenticated
7 | {
8 | public class Startup
9 | {
10 | public void ConfigureServices(IServiceCollection services)
11 | {
12 | services.AddHttpClient();
13 | services.AddControllers();
14 | }
15 |
16 | public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
17 | {
18 | if (env.IsDevelopment())
19 | {
20 | app.UseDeveloperExceptionPage();
21 | }
22 |
23 | app.UseRouting();
24 |
25 | app.UseEndpoints(endpoints =>
26 | {
27 | endpoints.MapControllers();
28 | });
29 | }
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/auth/authenticated/authenticated.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk.Web">
2 |
3 |   <PropertyGroup>
4 |     <TargetFramework>netcoreapp3.1</TargetFramework>
5 |   </PropertyGroup>
6 |
7 | </Project>
8 |
--------------------------------------------------------------------------------
/auth/calling/Controllers/DefaultController.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Net.Http;
3 | using System.Threading.Tasks;
4 | using Microsoft.AspNetCore.Mvc;
5 |
6 | namespace calling.Controllers
7 | {
8 |
9 | [Route("")]
10 | public class DefaultController : ControllerBase
11 | {
12 | private readonly IHttpClientFactory _clientFactory;
13 |
14 | public DefaultController(IHttpClientFactory clientFactory)
15 | {
16 | _clientFactory = clientFactory;
17 | }
18 |
19 | [HttpGet]
20 | public async Task<IActionResult> GetAsync()
21 | {
22 | var httpClient = _clientFactory.CreateClient();
23 |
24 | var url = Environment.GetEnvironmentVariable("URL");
25 | if (url == null)
26 | {
27 | return BadRequest("No URL defined");
28 | }
29 |
30 | var request = new HttpRequestMessage(HttpMethod.Get, url);
31 |
32 | var response = await httpClient.SendAsync(request);
33 |
34 | var content = await response.Content.ReadAsStringAsync();
35 |
36 | return Ok("Second service says: " + content);
37 | }
38 | }
39 | }
--------------------------------------------------------------------------------
/auth/calling/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use Microsoft's official build .NET image.
2 | # https://hub.docker.com/_/microsoft-dotnet-core-sdk/
3 | FROM mcr.microsoft.com/dotnet/core/sdk:3.1-alpine AS build
4 | WORKDIR /app
5 |
6 | # Install production dependencies.
7 | # Copy csproj and restore as distinct layers.
8 | COPY *.csproj ./
9 | RUN dotnet restore
10 |
11 | # Copy local code to the container image.
12 | COPY . ./
13 | WORKDIR /app
14 |
15 | # Build a release artifact.
16 | RUN dotnet publish -c Release -o out
17 |
18 |
19 | # Use Microsoft's official runtime .NET image.
20 | # https://hub.docker.com/_/microsoft-dotnet-core-aspnet/
21 | FROM mcr.microsoft.com/dotnet/core/aspnet:3.1-alpine AS runtime
22 | WORKDIR /app
23 | COPY --from=build /app/out ./
24 |
25 | # Run the web service on container startup.
26 | ENTRYPOINT ["dotnet", "calling.dll"]
27 |
28 |
--------------------------------------------------------------------------------
/auth/calling/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using Microsoft.AspNetCore.Hosting;
3 | using Microsoft.Extensions.Hosting;
4 |
5 | namespace calling
6 | {
7 | public class Program
8 | {
9 | public static void Main(string[] args)
10 | {
11 | CreateHostBuilder(args).Build().Run();
12 | }
13 |
14 | public static IHostBuilder CreateHostBuilder(string[] args)
15 | {
16 | var port = Environment.GetEnvironmentVariable("PORT") ?? "8080";
17 | var url = String.Concat("http://0.0.0.0:", port);
18 |
19 | return Host.CreateDefaultBuilder(args)
20 | .ConfigureWebHostDefaults(webBuilder =>
21 | {
22 | webBuilder.UseStartup<Startup>().UseUrls(url);
23 | });
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/auth/calling/Startup.cs:
--------------------------------------------------------------------------------
1 | using Microsoft.AspNetCore.Builder;
2 | using Microsoft.AspNetCore.Hosting;
3 | using Microsoft.Extensions.DependencyInjection;
4 | using Microsoft.Extensions.Hosting;
5 |
6 | namespace calling
7 | {
8 | public class Startup
9 | {
10 | public void ConfigureServices(IServiceCollection services)
11 | {
12 | services.AddHttpClient();
13 | services.AddControllers();
14 | }
15 |
16 | public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
17 | {
18 | if (env.IsDevelopment())
19 | {
20 | app.UseDeveloperExceptionPage();
21 | }
22 |
23 | app.UseRouting();
24 |
25 | app.UseEndpoints(endpoints =>
26 | {
27 | endpoints.MapControllers();
28 | });
29 | }
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/auth/calling/calling.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk.Web">
2 |
3 |   <PropertyGroup>
4 |     <TargetFramework>netcoreapp3.1</TargetFramework>
5 |   </PropertyGroup>
6 |
7 | </Project>
8 |
--------------------------------------------------------------------------------
/auth/receiving/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use Microsoft's official build .NET image.
2 | # https://hub.docker.com/_/microsoft-dotnet-core-sdk/
3 | FROM mcr.microsoft.com/dotnet/core/sdk:3.1-alpine AS build
4 | WORKDIR /app
5 |
6 | # Install production dependencies.
7 | # Copy csproj and restore as distinct layers.
8 | COPY *.csproj ./
9 | RUN dotnet restore
10 |
11 | # Copy local code to the container image.
12 | COPY . ./
13 | WORKDIR /app
14 |
15 | # Build a release artifact.
16 | RUN dotnet publish -c Release -o out
17 |
18 |
19 | # Use Microsoft's official runtime .NET image.
20 | # https://hub.docker.com/_/microsoft-dotnet-core-aspnet/
21 | FROM mcr.microsoft.com/dotnet/core/aspnet:3.1-alpine AS runtime
22 | WORKDIR /app
23 | COPY --from=build /app/out ./
24 |
25 | # Run the web service on container startup.
26 | ENTRYPOINT ["dotnet", "receiving.dll"]
27 |
28 |
--------------------------------------------------------------------------------
/auth/receiving/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using Microsoft.AspNetCore.Hosting;
3 | using Microsoft.Extensions.Hosting;
4 |
5 | namespace receiving
6 | {
7 | public class Program
8 | {
9 | public static void Main(string[] args)
10 | {
11 | CreateHostBuilder(args).Build().Run();
12 | }
13 |
14 | public static IHostBuilder CreateHostBuilder(string[] args)
15 | {
16 | var port = Environment.GetEnvironmentVariable("PORT") ?? "8080";
17 | var url = String.Concat("http://0.0.0.0:", port);
18 |
19 | return Host.CreateDefaultBuilder(args)
20 | .ConfigureWebHostDefaults(webBuilder =>
21 | {
22 | webBuilder.UseStartup<Startup>().UseUrls(url);
23 | });
24 | }
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/auth/receiving/Startup.cs:
--------------------------------------------------------------------------------
1 | using Microsoft.AspNetCore.Builder;
2 | using Microsoft.AspNetCore.Hosting;
3 | using Microsoft.AspNetCore.Http;
4 | using Microsoft.Extensions.DependencyInjection;
5 | using Microsoft.Extensions.Hosting;
6 |
7 | namespace receiving
8 | {
9 | public class Startup
10 | {
11 | public void ConfigureServices(IServiceCollection services)
12 | {
13 | }
14 |
15 | public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
16 | {
17 | if (env.IsDevelopment())
18 | {
19 | app.UseDeveloperExceptionPage();
20 | }
21 |
22 | app.UseRouting();
23 |
24 | app.UseEndpoints(endpoints =>
25 | {
26 | endpoints.MapGet("/", async context =>
27 | {
28 | await context.Response.WriteAsync("Hello World!");
29 | });
30 | });
31 | }
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/auth/receiving/receiving.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk.Web">
2 |
3 |   <PropertyGroup>
4 |     <TargetFramework>netcoreapp3.1</TargetFramework>
5 |   </PropertyGroup>
6 |
7 | </Project>
8 |
--------------------------------------------------------------------------------
/dbt-job/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM fishtownanalytics/dbt:0.19.1
2 | USER root
3 | WORKDIR /dbt
4 | COPY script.sh ./
5 | COPY jaffle-shop ./
6 |
7 | ENTRYPOINT "./script.sh"
--------------------------------------------------------------------------------
/dbt-job/README.md:
--------------------------------------------------------------------------------
1 | # Scheduled Cloud Run dbt job with BigQuery
2 |
3 | > **Note:** Cloud Run jobs is a feature in *private preview*.
4 | > Only allow-listed projects can currently take advantage of it.
5 |
6 | [dbt](https://docs.getdbt.com/) is an open source project to build data
7 | transformation pipelines with supported databases such as BigQuery, Postgres,
8 | Redshift and more.
9 |
10 | In this sample, I want to show you how to set up a scheduled Cloud Run job
11 | that uses [dbt](https://docs.getdbt.com/) with a BigQuery backend.
12 |
13 | I'm assuming that you already have a Google Cloud project set up with BigQuery
14 | enabled, that `gcloud` is configured to use that project, and that you have `dbt`
15 | installed locally.
16 |
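As an optional quick check before continuing, you can verify these prerequisites from the command line. This is a minimal sketch; it assumes `gcloud`, `bq`, and `dbt` are on your PATH:

```sh
# Confirm gcloud points at the intended project.
gcloud config get-value core/project

# List datasets in the project; this confirms the BigQuery API is reachable.
bq ls

# Confirm dbt is installed locally.
dbt --version
```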
17 | ## Jaffle Shop
18 |
19 | For the sample dbt project, we will use
20 | [jaffle-shop](https://github.com/fishtown-analytics/jaffle_shop). `jaffle_shop`
21 | is a fictional ecommerce store with the following tables:
22 |
23 | 
24 |
25 | There is already a public project `dbt-tutorial` with a `jaffle_shop` dataset
26 | in BigQuery:
27 |
28 | 
29 |
30 | There is also a [tutorial](https://docs.getdbt.com/tutorial/setting-up) in the dbt
31 | documentation showing how to transform this dataset with dbt. We will turn
32 | that tutorial into a scheduled Cloud Run job.
33 |
34 | ## Run dbt locally with BigQuery
35 |
36 | We've already set up the sample project in the [jaffle-shop](jaffle-shop)
37 | folder. Feel free to explore it in detail; we'll highlight a few things.
38 |
39 | First, the [dbt_project.yml](jaffle-shop/dbt_project.yml) file sets the project name and
40 | profile to `jaffle_shop`. It also configures the `jaffle_shop` models to be materialized as `table`.
41 |
42 | Second, [profiles.yml](jaffle-shop/profiles.yml) defines the BigQuery backend for dbt
43 | to connect to. This profile uses OAuth for authentication and creates a BigQuery
44 | dataset in your project.
45 |
46 | Third, [customers.sql](jaffle-shop/models/customers.sql) defines the
47 | model for dbt. It reads from `dbt-tutorial` project's `jaffle_shop` dataset and
48 | creates a new transformed customers table.
49 |
50 | Inside the `jaffle-shop` folder, run dbt with this profile:
51 |
52 | ```sh
53 | $ dbt run --profiles-dir .
54 |
55 | Running with dbt=0.17.2
56 | Found 1 model, 0 tests, 0 snapshots, 0 analyses, 147 macros, 0 operations, 0 seed files, 0 sources
57 |
58 | 16:16:10 | Concurrency: 1 threads (target='dev')
59 | 16:16:10 |
60 | 16:16:10 | 1 of 1 START table model dbt_atamel_dataset.customers................ [RUN]
61 | 16:16:15 | 1 of 1 OK created table model dbt_atamel_dataset.customers........... [CREATE TABLE (100) in 4.84s]
62 | 16:16:15 |
63 | 16:16:15 | Finished running 1 table model in 9.96s.
64 |
65 | Completed successfully
66 |
67 | Done. PASS=1 WARN=0 ERROR=0 SKIP=0 TOTAL=1
68 | ```
69 |
70 | You should see a new dataset and a customers table created in BigQuery:
71 |
72 | 
73 |
74 | ## Run dbt as a Cloud Run Job
75 |
76 | Running dbt as a Cloud Run job requires that you run dbt in a container.
77 |
78 | dbt has some [base images](https://hub.docker.com/r/fishtownanalytics/dbt/tags)
79 | that you can rely on (although the documentation is pretty much non-existent).
80 | This is a sample [Dockerfile](Dockerfile) that works:
81 |
82 | ```dockerfile
83 | FROM fishtownanalytics/dbt:0.19.1
84 | USER root
85 | WORKDIR /dbt
86 | COPY script.sh ./
87 | COPY jaffle-shop ./
88 |
89 | ENTRYPOINT "./script.sh"
90 | ```
91 |
92 | In this `Dockerfile`, we use the dbt base image and copy in our dbt project along with
93 | the script that runs it against the local profile (shown below).
94 |
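For reference, [script.sh](script.sh) (included in this folder and shown in full later in this archive) is a one-liner that runs dbt with the profile bundled next to it:

```sh
#!/bin/sh
dbt run --profiles-dir .
```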
95 | Enable the Cloud Build and Run APIs:
96 |
97 | ```sh
98 | gcloud services enable run.googleapis.com
99 | gcloud services enable cloudbuild.googleapis.com
100 | ```
101 |
102 | Build the container:
103 |
104 | ```sh
105 | JOB_NAME=dbt-job
106 | PROJECT_ID=$(gcloud config get-value core/project)
107 |
108 | gcloud builds submit --tag gcr.io/$PROJECT_ID/$JOB_NAME
109 | ```
110 |
111 | To test, first create the job:
112 |
113 | ```sh
114 | REGION=europe-west1
115 | gcloud config set run/region ${REGION}
116 |
117 | gcloud alpha run jobs create dbt-job \
118 | --image=gcr.io/$PROJECT_ID/$JOB_NAME
119 | ```
120 |
121 | Run the job:
122 |
123 | ```sh
124 | gcloud alpha run jobs run dbt-job
125 | ```
126 |
127 | You can see the progress of the execution:
128 |
129 | ```sh
130 | gcloud alpha run executions describe dbt-job-lfwc5
131 |
132 | ✔ Execution dbt-job-lfwc5 in region europe-west1
133 | 1 task completed successfully
134 | ```
135 |
136 | And, you should see the dataset created with a new
137 | customers table in BigQuery:
138 |
139 | 
140 |
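If you prefer the command line over the console, a quick way to verify is to list the tables in the dataset. This assumes the `dbt_atamel_dataset` name configured in [profiles.yml](jaffle-shop/profiles.yml); adjust it if you changed the profile:

```sh
# List tables in the dataset created by dbt; you should see `customers`.
bq ls dbt_atamel_dataset
```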
141 | ## Setup Cloud Scheduler
142 |
143 | The final step is to call the Cloud Run job on a schedule. You can do this
144 | with Cloud Scheduler.
145 |
146 | First, enable the Cloud Scheduler API:
147 |
148 | ```sh
149 | gcloud services enable cloudscheduler.googleapis.com
150 | ```
151 |
152 | Replace the placeholders in `messagebody.json` with your project's values:
153 |
154 | ```sh
155 | sed -i -e "s/PROJECT_ID/$PROJECT_ID/" ./messagebody.json
156 | sed -i -e "s/JOB_NAME/$JOB_NAME/" ./messagebody.json
157 | ```
158 |
159 | Create a Cloud Scheduler job to run the Cloud Run job every day at 9:00:
160 |
161 | ```sh
162 | PROJECT_NUMBER="$(gcloud projects describe $(gcloud config get-value project) --format='value(projectNumber)')"
163 |
164 | gcloud scheduler jobs create http $JOB_NAME-run --schedule "0 9 * * *" \
165 | --http-method=POST \
166 | --uri=https://$REGION-run.googleapis.com/apis/run.googleapis.com/v1alpha1/namespaces/$PROJECT_ID/jobs \
167 | --oauth-service-account-email=$PROJECT_NUMBER-compute@developer.gserviceaccount.com \
168 | --message-body-from-file=messagebody.json
169 | ```
170 |
171 | You can test the scheduler job by invoking it manually:
172 |
173 | ```sh
174 | gcloud scheduler jobs run $JOB_NAME-run
175 | ```
176 |
177 | After a few seconds, you should see the dataset created with a new
178 | customers table in BigQuery:
179 |
180 | 
181 |
--------------------------------------------------------------------------------
/dbt-job/jaffle-shop/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | target/
3 | dbt_modules/
4 | logs/
5 |
--------------------------------------------------------------------------------
/dbt-job/jaffle-shop/.user.yml:
--------------------------------------------------------------------------------
1 | id: 8c904cc6-ec98-485c-8b1b-a1f9ca872050
2 |
--------------------------------------------------------------------------------
/dbt-job/jaffle-shop/README.md:
--------------------------------------------------------------------------------
1 | Welcome to your new dbt project!
2 |
3 | ### Using the starter project
4 |
5 | Try running the following commands:
6 | - dbt run
7 | - dbt test
8 |
9 |
10 | ### Resources:
11 | - Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
12 | - Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
13 | - Join the [chat](http://slack.getdbt.com/) on Slack for live discussions and support
14 | - Find [dbt events](https://events.getdbt.com) near you
15 | - Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
16 |
--------------------------------------------------------------------------------
/dbt-job/jaffle-shop/analysis/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/dbt-job/jaffle-shop/analysis/.gitkeep
--------------------------------------------------------------------------------
/dbt-job/jaffle-shop/data/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/dbt-job/jaffle-shop/data/.gitkeep
--------------------------------------------------------------------------------
/dbt-job/jaffle-shop/dbt_project.yml:
--------------------------------------------------------------------------------
1 |
2 | # Name your project! Project names should contain only lowercase characters
3 | # and underscores. A good package name should reflect your organization's
4 | # name or the intended use of these models
5 | name: 'jaffle_shop'
6 | version: '1.0.0'
7 | config-version: 2
8 |
9 | # This setting configures which "profile" dbt uses for this project.
10 | profile: 'jaffle_shop'
11 |
12 | # These configurations specify where dbt should look for different types of files.
13 | # The `source-paths` config, for example, states that models in this project can be
14 | # found in the "models/" directory. You probably won't need to change these!
15 | source-paths: ["models"]
16 | analysis-paths: ["analysis"]
17 | test-paths: ["tests"]
18 | data-paths: ["data"]
19 | macro-paths: ["macros"]
20 | snapshot-paths: ["snapshots"]
21 |
22 | target-path: "target" # directory which will store compiled SQL files
23 | clean-targets: # directories to be removed by `dbt clean`
24 | - "target"
25 | - "dbt_modules"
26 |
27 |
28 | # Configuring models
29 | # Full documentation: https://docs.getdbt.com/docs/configuring-models
30 |
31 | # In this example config, we tell dbt to build all models in the example/ directory
32 | # as tables. These settings can be overridden in the individual model files
33 | # using the `{{ config(...) }}` macro.
34 | models:
35 | jaffle_shop:
36 | materialized: table
--------------------------------------------------------------------------------
/dbt-job/jaffle-shop/macros/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/dbt-job/jaffle-shop/macros/.gitkeep
--------------------------------------------------------------------------------
/dbt-job/jaffle-shop/models/customers.sql:
--------------------------------------------------------------------------------
1 | with customers as (
2 |
3 | select
4 | id as customer_id,
5 | first_name,
6 | last_name
7 |
8 | from `dbt-tutorial`.jaffle_shop.customers
9 |
10 | ),
11 |
12 | orders as (
13 |
14 | select
15 | id as order_id,
16 | user_id as customer_id,
17 | order_date,
18 | status
19 |
20 | from `dbt-tutorial`.jaffle_shop.orders
21 |
22 | ),
23 |
24 | customer_orders as (
25 |
26 | select
27 | customer_id,
28 |
29 | min(order_date) as first_order_date,
30 | max(order_date) as most_recent_order_date,
31 | count(order_id) as number_of_orders
32 |
33 | from orders
34 |
35 | group by 1
36 |
37 | ),
38 |
39 |
40 | final as (
41 |
42 | select
43 | customers.customer_id,
44 | customers.first_name,
45 | customers.last_name,
46 | customer_orders.first_order_date,
47 | customer_orders.most_recent_order_date,
48 | coalesce(customer_orders.number_of_orders, 0) as number_of_orders
49 |
50 | from customers
51 |
52 | left join customer_orders using (customer_id)
53 |
54 | )
55 |
56 | select * from final
--------------------------------------------------------------------------------
/dbt-job/jaffle-shop/profiles.yml:
--------------------------------------------------------------------------------
1 | jaffle_shop:
2 | target: dev
3 | outputs:
4 | dev:
5 | type: bigquery
6 | method: oauth
7 | project: knative-atamel
8 | dataset: dbt_atamel_dataset
9 |
--------------------------------------------------------------------------------
/dbt-job/jaffle-shop/snapshots/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/dbt-job/jaffle-shop/snapshots/.gitkeep
--------------------------------------------------------------------------------
/dbt-job/jaffle-shop/tests/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/dbt-job/jaffle-shop/tests/.gitkeep
--------------------------------------------------------------------------------
/dbt-job/messagebody.json:
--------------------------------------------------------------------------------
1 | {
2 | "apiVersion": "run.googleapis.com/v1alpha1",
3 | "kind": "Job",
4 | "metadata": {
5 | "annotations": {
6 | "run.googleapis.com/launch-stage": "ALPHA"
7 | },
8 | "name": "JOB_NAME",
9 | "namespace": "PROJECT_ID"
10 | },
11 | "spec": {
12 | "completions": 1,
13 | "template": {
14 | "spec": {
15 | "containers": [
16 | {
17 | "image": "gcr.io/PROJECT_ID/JOB_NAME"
18 | }
19 | ]
20 | }
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/dbt-job/script.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | dbt run --profiles-dir .
3 |
4 |
--------------------------------------------------------------------------------
/dbt/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.13 as builder
2 | WORKDIR /app
3 | COPY invoke.go ./
4 | RUN CGO_ENABLED=0 GOOS=linux go build -v -o server
5 |
6 | FROM fishtownanalytics/dbt:0.17.0
7 | USER root
8 | WORKDIR /dbt
9 | COPY --from=builder /app/server ./
10 | COPY script.sh ./
11 | COPY jaffle-shop ./
12 |
13 | ENTRYPOINT "./server"
--------------------------------------------------------------------------------
/dbt/invoke.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "net/http"
7 | "os"
8 | "os/exec"
9 | )
10 |
11 | func handler(w http.ResponseWriter, r *http.Request) {
12 | log.Print("helloworld: received a request")
13 |
14 | cmd := exec.CommandContext(r.Context(), "/bin/sh", "script.sh")
15 | cmd.Stderr = os.Stderr
16 | out, err := cmd.Output()
17 | if err != nil {
18 | w.WriteHeader(500)
19 | }
20 | w.Write(out)
21 | }
22 |
23 | func main() {
24 | log.Print("helloworld: starting server...")
25 |
26 | http.HandleFunc("/", handler)
27 |
28 | port := os.Getenv("PORT")
29 | if port == "" {
30 | port = "8080"
31 | }
32 |
33 | log.Printf("helloworld: listening on %s", port)
34 | log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil))
35 | }
36 |
--------------------------------------------------------------------------------
/dbt/jaffle-shop/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | target/
3 | dbt_modules/
4 | logs/
5 |
--------------------------------------------------------------------------------
/dbt/jaffle-shop/.user.yml:
--------------------------------------------------------------------------------
1 | id: 8c904cc6-ec98-485c-8b1b-a1f9ca872050
2 |
--------------------------------------------------------------------------------
/dbt/jaffle-shop/README.md:
--------------------------------------------------------------------------------
1 | Welcome to your new dbt project!
2 |
3 | ### Using the starter project
4 |
5 | Try running the following commands:
6 | - dbt run
7 | - dbt test
8 |
9 |
10 | ### Resources:
11 | - Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
12 | - Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
13 | - Join the [chat](http://slack.getdbt.com/) on Slack for live discussions and support
14 | - Find [dbt events](https://events.getdbt.com) near you
15 | - Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
16 |
--------------------------------------------------------------------------------
/dbt/jaffle-shop/analysis/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/dbt/jaffle-shop/analysis/.gitkeep
--------------------------------------------------------------------------------
/dbt/jaffle-shop/data/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/dbt/jaffle-shop/data/.gitkeep
--------------------------------------------------------------------------------
/dbt/jaffle-shop/dbt_project.yml:
--------------------------------------------------------------------------------
1 |
2 | # Name your project! Project names should contain only lowercase characters
3 | # and underscores. A good package name should reflect your organization's
4 | # name or the intended use of these models
5 | name: 'jaffle_shop'
6 | version: '1.0.0'
7 | config-version: 2
8 |
9 | # This setting configures which "profile" dbt uses for this project.
10 | profile: 'jaffle_shop'
11 |
12 | # These configurations specify where dbt should look for different types of files.
13 | # The `source-paths` config, for example, states that models in this project can be
14 | # found in the "models/" directory. You probably won't need to change these!
15 | source-paths: ["models"]
16 | analysis-paths: ["analysis"]
17 | test-paths: ["tests"]
18 | data-paths: ["data"]
19 | macro-paths: ["macros"]
20 | snapshot-paths: ["snapshots"]
21 |
22 | target-path: "target" # directory which will store compiled SQL files
23 | clean-targets: # directories to be removed by `dbt clean`
24 | - "target"
25 | - "dbt_modules"
26 |
27 |
28 | # Configuring models
29 | # Full documentation: https://docs.getdbt.com/docs/configuring-models
30 |
31 | # In this example config, we tell dbt to build all models in the example/ directory
32 | # as tables. These settings can be overridden in the individual model files
33 | # using the `{{ config(...) }}` macro.
34 | models:
35 | jaffle_shop:
36 | materialized: table
--------------------------------------------------------------------------------
/dbt/jaffle-shop/macros/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/dbt/jaffle-shop/macros/.gitkeep
--------------------------------------------------------------------------------
/dbt/jaffle-shop/models/customers.sql:
--------------------------------------------------------------------------------
1 | with customers as (
2 |
3 | select
4 | id as customer_id,
5 | first_name,
6 | last_name
7 |
8 | from `dbt-tutorial`.jaffle_shop.customers
9 |
10 | ),
11 |
12 | orders as (
13 |
14 | select
15 | id as order_id,
16 | user_id as customer_id,
17 | order_date,
18 | status
19 |
20 | from `dbt-tutorial`.jaffle_shop.orders
21 |
22 | ),
23 |
24 | customer_orders as (
25 |
26 | select
27 | customer_id,
28 |
29 | min(order_date) as first_order_date,
30 | max(order_date) as most_recent_order_date,
31 | count(order_id) as number_of_orders
32 |
33 | from orders
34 |
35 | group by 1
36 |
37 | ),
38 |
39 |
40 | final as (
41 |
42 | select
43 | customers.customer_id,
44 | customers.first_name,
45 | customers.last_name,
46 | customer_orders.first_order_date,
47 | customer_orders.most_recent_order_date,
48 | coalesce(customer_orders.number_of_orders, 0) as number_of_orders
49 |
50 | from customers
51 |
52 | left join customer_orders using (customer_id)
53 |
54 | )
55 |
56 | select * from final
--------------------------------------------------------------------------------
/dbt/jaffle-shop/profiles.yml:
--------------------------------------------------------------------------------
1 | jaffle_shop:
2 | target: dev
3 | outputs:
4 | dev:
5 | type: bigquery
6 | method: oauth
7 | project: dbt-atamel
8 | dataset: dbt_atamel_dataset
9 |
--------------------------------------------------------------------------------
/dbt/jaffle-shop/snapshots/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/dbt/jaffle-shop/snapshots/.gitkeep
--------------------------------------------------------------------------------
/dbt/jaffle-shop/tests/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/dbt/jaffle-shop/tests/.gitkeep
--------------------------------------------------------------------------------
/dbt/script.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | dbt run --profiles-dir .
3 |
4 |
--------------------------------------------------------------------------------
/docs/auth.md:
--------------------------------------------------------------------------------
1 | # Service to service authentication
2 |
3 | In this example, we'll see how to set up service-to-service auth between two Cloud Run services. More specifically:
4 |
5 | * We'll deploy a service that makes a call to another service and see that it fails to make the call.
6 | * We'll set up service-to-service auth the wrong way just to make the call work.
7 | * We'll give our service an identity and do the service-to-service authentication the right way.
8 |
9 | ## Deploy a receiving service
10 |
11 | First, let's build and deploy a *private* Cloud Run service that will receive calls. You can check out the code in [auth/receiving](../auth/receiving); it's a service that simply echoes back `Hello World`.
12 |
13 | Deploy the service with the `--no-allow-unauthenticated` flag:
14 |
15 | ```sh
16 | PROJECT_ID=$(gcloud config get-value project)
17 | SERVICE_NAME=receiving
18 | REGION=us-central1
19 |
20 | gcloud builds submit \
21 | --tag gcr.io/$PROJECT_ID/receiving
22 |
23 | gcloud run deploy $SERVICE_NAME \
24 | --image gcr.io/$PROJECT_ID/receiving \
25 | --platform managed \
26 | --no-allow-unauthenticated \
27 | --region $REGION
28 | ```
29 |
30 | ## Deploy a calling service
31 |
32 | Second, let's build and deploy a Cloud Run service that calls the receiving Cloud Run service. The code is in the [auth/calling](../auth/calling) folder. When it receives a request, it makes an HTTP GET request to the URL passed in via the `URL` environment variable.
33 |
34 | Get the URL of the receiving service and deploy the public calling service pointing to that URL:
35 |
36 | ```sh
37 | RECEIVING_SERVICE_URL=$(gcloud run services describe $SERVICE_NAME --region $REGION --format 'value(status.url)')
38 | SERVICE_NAME=calling
39 |
40 | gcloud builds submit \
41 | --tag gcr.io/$PROJECT_ID/calling
42 |
43 | gcloud run deploy $SERVICE_NAME \
44 | --image gcr.io/$PROJECT_ID/calling \
45 | --platform managed \
46 | --allow-unauthenticated \
47 | --region $REGION \
48 | --set-env-vars URL=$RECEIVING_SERVICE_URL
49 | ```
50 |
51 | ## Test the calling service
52 |
53 | Test the calling service:
54 |
55 | ```sh
56 | CALLING_SERVICE_URL=$(gcloud run services describe $SERVICE_NAME --region $REGION --format 'value(status.url)')
57 |
58 | curl $CALLING_SERVICE_URL
59 |
60 | Second service says:
61 |
62 |
63 | 403 Forbidden
64 |
65 |
66 | Error: Forbidden
67 | Your client does not have permission to get URL / from this server.
68 |
69 |
70 | ```
71 |
72 | As expected, the calling service gets a `Forbidden` error from the receiving service because it's not authenticated.
73 |
74 | ## Service-to-service auth overview
75 |
76 | For service-to-service auth to work, two things must happen:
77 |
78 | 1. The receiving service must be configured to accept requests from the calling service.
79 | 2. The calling service must identify itself to the receiving service.
80 |
81 | We'll set #1 aside for now and focus on #2 in the next section.
82 |
83 | ## Identify calling service to the receiving service
84 |
85 | A Cloud Run service can use an identity token to identify itself to another Cloud Run service. But where do you get an identity token? You can use the Compute Metadata Server to fetch identity tokens with a specific audience (i.e. the URL of another Cloud Run service) as follows:
86 |
87 | ```sh
88 | curl "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/identity?audience=[AUDIENCE]" \
89 | -H "Metadata-Flavor: Google"
90 | ```
91 |
92 | In the modified version of our calling service in [auth/authenticated](../auth/authenticated) folder, we get an id token in `GetIdToken` method and then pass that token as an authorization header. Check out [DefaultController.cs](../auth/authenticated/Controllers/DefaultController.cs) for details.
93 |
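In sketch form, the flow looks like this. This is only a minimal illustration, assuming the `URL` environment variable holds the receiving service's URL; the real code is in the controller linked above:

```csharp
// Minimal sketch: fetch an ID token from the metadata server and call the private service.
// Assumes the URL env var holds the receiving service's URL (also used as the token audience).
using System;
using System.Net.Http;
using System.Threading.Tasks;

public class AuthenticatedCaller
{
    private static readonly HttpClient Http = new HttpClient();

    public static async Task<string> CallReceivingServiceAsync()
    {
        var url = Environment.GetEnvironmentVariable("URL");

        // Ask the Compute Metadata Server for an identity token with the receiving
        // service's URL as the audience.
        var tokenRequest = new HttpRequestMessage(HttpMethod.Get,
            $"http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/identity?audience={url}");
        tokenRequest.Headers.Add("Metadata-Flavor", "Google");
        var tokenResponse = await Http.SendAsync(tokenRequest);
        var idToken = await tokenResponse.Content.ReadAsStringAsync();

        // Pass the token as a Bearer token in the Authorization header.
        var request = new HttpRequestMessage(HttpMethod.Get, url);
        request.Headers.Add("Authorization", $"Bearer {idToken}");
        var response = await Http.SendAsync(request);
        return await response.Content.ReadAsStringAsync();
    }
}
```
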
94 | Let's build and deploy the modified calling service from the [auth/authenticated](../auth/authenticated) folder, still pointing the `URL` env var at the private receiving service:
95 |
96 | ```sh
97 | SERVICE_NAME=authenticated
98 |
99 | gcloud builds submit \
100 | --tag gcr.io/$PROJECT_ID/authenticated
101 |
102 | gcloud run deploy $SERVICE_NAME \
103 | --image gcr.io/$PROJECT_ID/authenticated \
104 | --platform managed \
105 | --allow-unauthenticated \
106 | --region $REGION \
107 | --set-env-vars URL=$RECEIVING_SERVICE_URL
108 | ```
109 |
110 | ## Test the calling service
111 |
112 | Test the modified calling service:
113 |
114 | ```sh
115 | AUTH_SERVICE_URL=$(gcloud run services describe $SERVICE_NAME --region $REGION --format='value(status.url)')
116 |
117 | curl $AUTH_SERVICE_URL
118 |
119 | Second service says: Hello World!
120 | ```
121 |
122 | But why does it work? Even though we identified our calling service to the receiving service, we didn't configure the receiving service to accept requests from the calling service (we ignored #1).
123 |
124 | As explained in [service identity docs](https://cloud.google.com/run/docs/securing/service-identity), by default, Cloud Run services use the Compute Engine default service account (`PROJECT_NUMBER-compute@developer.gserviceaccount.com`), which has the Project > Editor IAM role. This means that by default, your Cloud Run revisions have read and write access to all resources in your GCP project.
125 |
126 | This is why the call worked, but relying on such a powerful default identity is not ideal.
127 |
128 | ## Give calling service an identity
129 |
130 | Let's now give the calling service an identity. First, create a Service Account:
131 |
132 | ```sh
133 | SERVICE_ACCOUNT=cloudrun-authenticated-sa
134 |
135 | gcloud iam service-accounts create $SERVICE_ACCOUNT \
136 | --display-name "Cloud Run Authenticated Service Account"
137 | ```
138 |
139 | Deploy the calling service with the new identity:
140 |
141 | ```sh
142 | SERVICE_NAME=authenticated
143 |
144 | gcloud run services update $SERVICE_NAME \
145 | --service-account $SERVICE_ACCOUNT@$PROJECT_ID.iam.gserviceaccount.com \
146 | --platform managed \
147 | --region $REGION
148 | ```
149 |
150 | At this point, if you try the calling service, it'll get a `Forbidden` error from the receiving service: it no longer runs as the privileged default Compute Engine service account, and its new service account hasn't been granted access yet.
151 |
152 | ## Configure the receiving service to accept requests from the calling service
153 |
154 | We need to configure the receiving service to accept requests from the calling service. This is done by giving the calling service's service account the Cloud Run Invoker role (`roles/run.invoker`) on the receiving service:
155 |
156 | ```sh
157 | SERVICE_NAME=receiving
158 |
159 | gcloud run services add-iam-policy-binding $SERVICE_NAME \
160 | --member=serviceAccount:$SERVICE_ACCOUNT@$PROJECT_ID.iam.gserviceaccount.com \
161 | --role=roles/run.invoker \
162 | --platform managed
163 | ```
164 |
165 | Finally, everything works as expected:
166 |
167 | ```sh
168 | curl $AUTH_SERVICE_URL
169 |
170 | Second service says: Hello World!
171 | ```
172 |
--------------------------------------------------------------------------------
/docs/configure.md:
--------------------------------------------------------------------------------
1 | # Configure
2 |
3 | Let's see how to configure a service. We can use the service from the [previous](public.md) step.
4 |
5 | ## Environment variables
6 |
7 | The service in the [helloworld](../helloworld) folder looks for an environment variable `TARGET` to print, but it's not set yet. Let's set that variable in Cloud Run:
8 |
9 | ```sh
10 | gcloud run services update $SERVICE_NAME \
11 | --platform managed \
12 | --update-env-vars TARGET=v1
13 | ```
14 |
15 | Now, the service reads the environment variable:
16 |
17 | ```sh
18 | curl $SERVICE_URL
19 |
20 | Hello v1!
21 | ```
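
On the service side, reading the variable is a one-liner. Here is a self-contained, .NET 6-style sketch of the idea; the repo's 3.1 sample does the equivalent in its [Startup.cs](../helloworld/csharp/3.1/Startup.cs):

```csharp
// Minimal sketch of a service that reads TARGET and falls back to "World" when unset.
using System;
using Microsoft.AspNetCore.Builder;

var app = WebApplication.Create(args);

app.MapGet("/", () =>
{
    var target = Environment.GetEnvironmentVariable("TARGET") ?? "World";
    return $"Hello {target}!\n";
});

// Cloud Run provides the port to listen on via the PORT env var.
app.Run($"http://0.0.0.0:{Environment.GetEnvironmentVariable("PORT") ?? "8080"}");
```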
22 |
23 | ## CPU
24 |
25 | By default, 1 CPU is allocated for each container instance. This cannot be changed for managed Cloud Run but can be changed on Cloud Run on Anthos.
26 |
27 | ## Memory
28 |
29 | By default, the service gets 256Mi of memory. Let's set it to the maximum of 2Gi:
30 |
31 | ```sh
32 | gcloud run services update $SERVICE_NAME \
33 | --platform managed \
34 | --memory 2Gi
35 | ```
36 |
37 | ## Concurrency
38 |
39 | By default, each container instance can handle up to 80 concurrent requests. Let's halve that to 40:
40 |
41 | ```sh
42 | gcloud run services update $SERVICE_NAME \
43 | --platform managed \
44 | --concurrency 40
45 | ```
46 |
47 | ## Request timeout
48 |
49 | By default, the request timeout is 300 seconds. Let's set it to the maximum of 900 seconds:
50 |
51 | ```sh
52 | gcloud run services update $SERVICE_NAME \
53 | --platform managed \
54 | --timeout 900
55 | ```
56 |
57 | ## Autoscaling
58 |
59 | By default, Cloud Run autoscales up to 1000 container instances. Let's cap it at 500:
60 |
61 | ```sh
62 | gcloud run services update $SERVICE_NAME \
63 | --platform managed \
64 | --max-instances 500
65 | ```
66 |
--------------------------------------------------------------------------------
/docs/deploy-from-source.md:
--------------------------------------------------------------------------------
1 | # Deploy from source
2 |
3 | Cloud Run now supports deploying directly from source with a single CLI command `gcloud run deploy`. Source code is uploaded to Cloud Build, which uses GCP Buildpacks or an included Dockerfile to build a container. The result is pushed to Artifact Registry and deployed to Cloud Run. You can read more about it in [deploying from source code](https://cloud.google.com/run/docs/deploying-source-code) docs.
4 |
5 | ## 'Hello World' service
6 |
7 | Take a look at the service we already created in
8 | [helloworld/csharp/7.0](../helloworld/csharp/7.0) folder. It's a .NET app and it
9 | doesn't have a `Dockerfile`.
10 |
11 | ## Deploy to Cloud Run
12 |
13 | Inside the source folder:
14 |
15 | ```sh
16 | SERVICE_NAME=helloworld-dotnet
17 | REGION=us-central1
18 |
19 | gcloud run deploy $SERVICE_NAME \
20 | --source . \
21 | --allow-unauthenticated \
22 | --region $REGION
23 | ```
24 |
25 | This uploads the source to Cloud Build, uses Buildpacks to build a container, and then deploys it to a Cloud Run service.
26 |
27 | ## Test the service
28 |
29 | You can test the service by visiting the URL printed during deployment and shown in the Cloud Run console.
30 |
31 | ```sh
32 | SERVICE_URL=$(gcloud run services describe $SERVICE_NAME --region $REGION --format 'value(status.url)')
33 |
34 | curl $SERVICE_URL
35 |
36 | Hello World from .NET 7.0!
37 | ```
38 |
--------------------------------------------------------------------------------
/docs/images/cloud-run-console-private.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/docs/images/cloud-run-console-private.png
--------------------------------------------------------------------------------
/docs/images/cloud-run-console.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/docs/images/cloud-run-console.png
--------------------------------------------------------------------------------
/docs/images/cloud-run-pubsub.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/docs/images/cloud-run-pubsub.png
--------------------------------------------------------------------------------
/docs/images/cloud-run-schedule.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/docs/images/cloud-run-schedule.png
--------------------------------------------------------------------------------
/docs/images/cloud-run-storage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/docs/images/cloud-run-storage.png
--------------------------------------------------------------------------------
/docs/images/cloud-run-tasks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/docs/images/cloud-run-tasks.png
--------------------------------------------------------------------------------
/docs/images/dbt-customers-table.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/docs/images/dbt-customers-table.png
--------------------------------------------------------------------------------
/docs/images/dbt-customers-table2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/docs/images/dbt-customers-table2.png
--------------------------------------------------------------------------------
/docs/images/jaffleshop-dataset.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/docs/images/jaffleshop-dataset.png
--------------------------------------------------------------------------------
/docs/images/serverless-containers-with-cloud-run.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/docs/images/serverless-containers-with-cloud-run.png
--------------------------------------------------------------------------------
/docs/images/serverless-on-google-cloud.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/meteatamel/cloudrun-tutorial/a155eaea5c1af12f85a05eccc8489d9c7e211689/docs/images/serverless-on-google-cloud.png
--------------------------------------------------------------------------------
/docs/private.md:
--------------------------------------------------------------------------------
1 | # Private service
2 |
3 | Let's deploy a container as a non-publicly-accessible Cloud Run service. We can use the service in the [helloworld](../helloworld) folder from before.
4 |
5 | ## Deploy to Cloud Run
6 |
7 | ```sh
8 | PROJECT_ID=$(gcloud config get-value project)
9 | SERVICE_NAME=helloworld-private
10 | REGION=us-central1
11 |
12 | gcloud run deploy $SERVICE_NAME \
13 | --image gcr.io/$PROJECT_ID/helloworld \
14 | --no-allow-unauthenticated \
15 | --platform managed \
16 | --region $REGION
17 | ```
18 |
19 | This creates a private Cloud Run service.
20 |
21 | ## Test the service
22 |
23 | If you test the service by visiting its URL, you get a 403 Forbidden error:
24 |
25 | ```sh
26 | SERVICE_URL=$(gcloud run services describe $SERVICE_NAME --region $REGION --format 'value(status.url)')
27 |
28 | curl ${SERVICE_URL}
29 |
30 |
31 |
32 | 403 Forbidden
33 |
34 |
35 | Error: Forbidden
36 | Your client does not have permission to get URL / from this server.
37 |
38 |
39 | ```
40 |
41 | There's an [Authenticate](https://cloud.google.com/run/docs/authenticating/overview) section in Cloud Run docs that shows how to authenticate for different use cases.
42 |
43 | For testing, you can go to the Cloud Run console; under the service URL, there's an example of how to call the service with an Authorization header:
44 |
45 | 
46 |
47 | Try again:
48 |
49 | ```sh
50 | curl -H \
51 | "Authorization: Bearer $(gcloud auth print-identity-token)" \
52 | ${SERVICE_URL}
53 |
54 | Hello World!
55 | ```
56 |
--------------------------------------------------------------------------------
/docs/public.md:
--------------------------------------------------------------------------------
1 | # Public service
2 |
3 | Let's deploy a container to a public Cloud Run service.
4 |
5 | ## Cloud Run Button
6 |
7 | [Cloud Run Button](https://github.com/GoogleCloudPlatform/cloud-run-button) is a fun and easy way of running your code on Cloud Run. You can try it out here to deploy the service in [helloworld](../helloworld) folder:
8 |
9 | [](https://deploy.cloud.run?git_url=https://github.com/meteatamel/cloudrun-tutorial.git&dir=helloworld/csharp)
10 |
11 | Let's go through the steps involved in actually creating and deploying the 'Hello World' service.
12 |
13 | ## 'Hello World' service
14 |
15 | Take a look at the service we already created in [helloworld/csharp/6.0](../helloworld/csharp/6.0) folder. It's a .NET app with a `Dockerfile`.
16 |
17 | ## Build the container
18 |
19 | In the folder where the `Dockerfile` resides, build the container using Cloud Build and push it to Container Registry:
20 |
21 | ```sh
22 | PROJECT_ID=$(gcloud config get-value project)
23 | SERVICE_NAME=hello-http-container-dotnet50
24 |
25 | gcloud builds submit \
26 | --tag gcr.io/$PROJECT_ID/$SERVICE_NAME
27 | ```
28 |
29 | ## Deploy to Cloud Run
30 |
31 | ```sh
32 | REGION=us-central1
33 |
34 | gcloud run deploy $SERVICE_NAME \
35 | --image gcr.io/$PROJECT_ID/$SERVICE_NAME \
36 | --allow-unauthenticated \
37 | --platform managed \
38 | --region $REGION
39 | ```
40 |
41 | This creates a Cloud Run service and a revision for the current configuration. In the end, you get a URL that you can browse to.
42 |
43 | You can also see the service in Cloud Run console:
44 |
45 | 
46 |
47 | ## Test the service
48 |
49 | You can test the service by visiting the URL printed during deployment and shown in the Cloud Run console.
50 |
51 | ```sh
52 | SERVICE_URL=$(gcloud run services describe $SERVICE_NAME --region $REGION --format 'value(status.url)')
53 |
54 | curl $SERVICE_URL
55 |
56 | Hello World from .NET 6.0!
57 | ```
58 |
--------------------------------------------------------------------------------
/docs/pubsub.md:
--------------------------------------------------------------------------------
1 | # Pub/Sub triggered service
2 |
3 | So far, we've deployed HTTP-triggered public services. However, that's not the only way to trigger Cloud Run services. In this tutorial, let's see how a Cloud Pub/Sub message can trigger an internal service. You can read more about this in Cloud Run [docs](https://cloud.google.com/run/docs/events/pubsub-push).
4 |
5 | 
6 |
7 | ## Create an 'Event Display' service
8 |
9 | Take a look at the service we already created in [event-display](../event-display) folder. It simply logs out the HTTP request body. We'll use it to display the received messages.
10 |
11 | ## Build the container
12 |
13 | In the folder where the `Dockerfile` resides, build the container using Cloud Build and push it to Container Registry:
14 |
15 | ```sh
16 | PROJECT_ID=$(gcloud config get-value project)
17 | SERVICE_NAME=event-display
18 |
19 | gcloud builds submit \
20 | --tag gcr.io/$PROJECT_ID/$SERVICE_NAME
21 | ```
22 |
23 | ## Deploy to Cloud Run
24 |
25 | Note that we're deploying with the `--no-allow-unauthenticated` flag. We only want Pub/Sub to trigger the service:
26 |
27 | ```sh
28 | REGION=us-central1
29 |
30 | gcloud run deploy $SERVICE_NAME \
31 | --image gcr.io/$PROJECT_ID/event-display \
32 | --no-allow-unauthenticated \
33 | --platform managed \
34 | --region $REGION
35 | ```
36 |
37 | ## Setup Pub/Sub to trigger Cloud Run
38 |
39 | Create a Pub/Sub topic:
40 |
41 | ```sh
42 | TOPIC_NAME=cloudrun-pubsub
43 |
44 | gcloud pubsub topics create $TOPIC_NAME
45 | ```
46 |
47 | Create a service account:
48 |
49 | ```sh
50 | SERVICE_ACCOUNT=$TOPIC_NAME-sa
51 |
52 | gcloud iam service-accounts create $SERVICE_ACCOUNT \
53 | --display-name "Cloud Run Pub/Sub Service Account"
54 | ```
55 |
56 | Give service account permission to invoke the Cloud Run service:
57 |
58 | ```sh
59 | gcloud run services add-iam-policy-binding $SERVICE_NAME \
60 | --member=serviceAccount:$SERVICE_ACCOUNT@$PROJECT_ID.iam.gserviceaccount.com \
61 | --role=roles/run.invoker \
62 | --platform managed
63 | ```
64 |
65 | Enable your project to create Cloud Pub/Sub authentication tokens (`$PROJECT_NUMBER` is your project number, which you can get with `gcloud projects describe $PROJECT_ID --format 'value(projectNumber)'`):
66 |
67 | ```sh
68 | gcloud projects add-iam-policy-binding $PROJECT_ID \
69 | --member=serviceAccount:service-$PROJECT_NUMBER@gcp-sa-pubsub.iam.gserviceaccount.com \
70 | --role=roles/iam.serviceAccountTokenCreator
71 | ```
72 |
73 | Create a Cloud Pub/Sub subscription with the service account:
74 |
75 | ```sh
76 | SERVICE_URL=$(gcloud run services describe $SERVICE_NAME --region $REGION --format 'value(status.url)')
77 |
78 | gcloud pubsub subscriptions create $TOPIC_NAME-subscription --topic $TOPIC_NAME \
79 | --push-endpoint=$SERVICE_URL \
80 | --push-auth-service-account=$SERVICE_ACCOUNT@$PROJECT_ID.iam.gserviceaccount.com
81 | ```
82 |
83 | ## Test the service
84 |
85 | You can test the service by sending a message to the topic:
86 |
87 | ```sh
88 | gcloud pubsub topics publish $TOPIC_NAME --message "Hello World"
89 | ```
90 |
91 | If you check the logs of the service in Cloud Run console, you should see the event:
92 |
93 | ```sh
94 | Event Display received event: {"message":{"data":"SGVsbG8gV29ybGQ=","messageId":"849662793093263","message_id":"849662793093263","publishTime":"2019-11-12T16:12:51.296Z","publish_time":"2019-11-12T16:12:51.296Z"},"subscription":"projects/knative-atamel/subscriptions/cloudrun-topic-subscription"}
95 | ```
96 |
97 | The message is base64-encoded in the `data` field:
98 |
99 | ```sh
100 | echo SGVsbG8gV29ybGQ= | base64 -d   # use -D on macOS
101 |
102 | Hello World
103 | ```
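
If you wanted the service to decode the message itself rather than just log the raw body (which is all the event-display sample does), a hypothetical C# sketch of unwrapping the push envelope could look like this:

```csharp
// Hypothetical sketch: unwrap a Pub/Sub push envelope and base64-decode message.data.
// The event-display sample simply logs the raw request body instead.
using System;
using System.Text;
using System.Text.Json;

public static class PubSubPush
{
    public static string DecodeData(string requestBody)
    {
        using var doc = JsonDocument.Parse(requestBody);
        var data = doc.RootElement
            .GetProperty("message")
            .GetProperty("data")
            .GetString();
        return Encoding.UTF8.GetString(Convert.FromBase64String(data));
    }
}
```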
104 |
--------------------------------------------------------------------------------
/docs/scheduled-dbt-service-bigquery.md:
--------------------------------------------------------------------------------
1 | # Scheduled Cloud Run dbt service with BigQuery
2 |
3 | [dbt](https://docs.getdbt.com/) is an open source project to build data
4 | transformation pipelines with supported databases such as BigQuery, Postgres,
5 | Redshift and more.
6 |
7 | In this sample, I want to show you how to set up a scheduled Cloud Run service
8 | that uses [dbt](https://docs.getdbt.com/) with a BigQuery backend.
9 |
10 | I'm assuming that you already have a Google Cloud project set up with BigQuery
11 | enabled, `gcloud` configured to use that project, and `dbt`
12 | installed locally.
13 |
14 | ## Jaffle Shop
15 |
16 | For the sample dbt service, we will use
17 | [jaffle-shop](https://github.com/fishtown-analytics/jaffle_shop). `jaffle_shop`
18 | is a fictional ecommerce store with the following tables:
19 |
20 | 
21 |
22 | There is already a public project `dbt-tutorial` with a `jaffle_shop` dataset
23 | in BigQuery:
24 |
25 | 
26 |
27 | There is also a [tutorial](https://docs.getdbt.com/tutorial/setting-up) in the dbt
28 | documentation showing how to transform this dataset with dbt. We will turn
29 | this tutorial into a scheduled service.
30 |
31 | ## Run dbt locally with BigQuery
32 |
33 | We already set up the sample project in the [jaffle-shop](../dbt/jaffle-shop)
34 | folder. Feel free to explore it in detail; we'll highlight a few things.
35 |
36 | First, the [dbt_project.yml](../dbt/jaffle-shop/dbt_project.yml) file sets the `jaffle_shop` name and
37 | profile. It also configures the `jaffle_shop` models to be materialized as `table`.
38 |
39 | Second, [profiles.yml](../dbt/jaffle-shop/profiles.yml) defines the BigQuery backend for dbt
40 | to connect to. This profile uses oauth for authentication to create a BigQuery
41 | dataset in your project.
42 |
43 | Third, [customers.sql](../dbt/jaffle-shop/models/customers.sql) defines the
44 | model for dbt. It reads from `dbt-tutorial` project's `jaffle_shop` dataset and
45 | creates a new transformed customers table.
46 |
47 | Run dbt with this new profile:
48 |
49 | ```sh
50 | $ dbt run --profiles-dir .
51 |
52 | Running with dbt=0.17.2
53 | Found 1 model, 0 tests, 0 snapshots, 0 analyses, 147 macros, 0 operations, 0 seed files, 0 sources
54 |
55 | 16:16:10 | Concurrency: 1 threads (target='dev')
56 | 16:16:10 |
57 | 16:16:10 | 1 of 1 START table model dbt_atamel_dataset.customers................ [RUN]
58 | 16:16:15 | 1 of 1 OK created table model dbt_atamel_dataset.customers........... [CREATE TABLE (100) in 4.84s]
59 | 16:16:15 |
60 | 16:16:15 | Finished running 1 table model in 9.96s.
61 |
62 | Completed successfully
63 |
64 | Done. PASS=1 WARN=0 ERROR=0 SKIP=0 TOTAL=1
65 | ```
66 |
67 | You should see a new dataset and a customers table created in BigQuery:
68 |
69 | 
70 |
71 | ## Run dbt with Cloud Run
72 |
73 | Running dbt on Cloud Run has a few challenges, namely:
74 |
75 | 1. dbt is mainly a command line tool whereas Cloud Run expects HTTP requests.
76 | How do you call dbt command from a Cloud Run service?
77 | 2. Cloud Run runs containers. How do you run dbt in a container?
78 | 3. How do you authenticate dbt with BigQuery? OAuth works for end users but for
79 | services running in the cloud, it's probably not the right solution.
80 |
81 | For #1, Cloud Run has [an
82 | example](https://cloud.google.com/run/docs/quickstarts/build-and-deploy#shell)
83 | of how to run a shell command from an HTTP server deployed to Cloud Run. It involves
84 | setting up a Go-based HTTP server that simply calls a shell script upon receiving a GET
85 | request. You can copy that server as [invoke.go](../dbt/invoke.go). In our case, the
86 | shell script, [script.sh](../dbt/script.sh), calls dbt with the profiles directory.
87 |
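The same pattern is easy to sketch in other languages too. For illustration only (the repo's actual server is the Go program in [invoke.go](../dbt/invoke.go)), here is a minimal C# equivalent that shells out to `script.sh` on each request:

```csharp
// Illustration only: a minimal HTTP server that runs script.sh on each request.
// The repo's actual implementation is the Go server in invoke.go.
using System;
using System.Diagnostics;
using Microsoft.AspNetCore.Builder;

var app = WebApplication.Create(args);

app.MapGet("/", async () =>
{
    var startInfo = new ProcessStartInfo("/bin/sh", "script.sh")
    {
        RedirectStandardOutput = true,
        RedirectStandardError = true,
    };
    using var process = Process.Start(startInfo);
    // Capture the script's output and wait for dbt to finish.
    var output = await process.StandardOutput.ReadToEndAsync();
    await process.WaitForExitAsync();
    return output;
});

// Cloud Run provides the port to listen on via the PORT env var.
app.Run($"http://0.0.0.0:{Environment.GetEnvironmentVariable("PORT") ?? "8080"}");
```
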
88 | For #2, dbt has some [base
89 | images](https://hub.docker.com/r/fishtownanalytics/dbt/tags) that you can rely
90 | on (although the documentation is pretty much non-existent). This is a sample
91 | [Dockerfile](../dbt/Dockerfile) that works:
92 |
93 | ```dockerfile
94 | FROM golang:1.13 as builder
95 | WORKDIR /app
96 | COPY invoke.go ./
97 | RUN CGO_ENABLED=0 GOOS=linux go build -v -o server
98 |
99 | FROM fishtownanalytics/dbt:0.17.0
100 | USER root
101 | WORKDIR /dbt
102 | COPY --from=builder /app/server ./
103 | COPY script.sh ./
104 | COPY jaffle-shop ./
105 |
106 | ENTRYPOINT "./server"
107 | ```
108 |
109 | In this Dockerfile, we first build the HTTP server. Then, we use the dbt base
110 | image, copy our dbt project and also the script to call that project with the
111 | profile. Finally, we start the HTTP server to receive requests.
112 |
113 | For #3, Cloud Run, by default, uses the Compute Engine default service account,
114 | which should be able to make BigQuery calls. However, it's best practice to give
115 | your Cloud Run service more granular permissions by assigning it a
116 | dedicated service account with more restricted IAM roles.
117 |
118 | In this case, create a service account with the `bigquery.admin` role (you probably
119 | want an even finer-grained role in production):
120 |
121 | ```sh
122 | export SERVICE_ACCOUNT=dbt-sa
123 | gcloud iam service-accounts create ${SERVICE_ACCOUNT} \
124 | --display-name "DBT BigQuery Service Account"
125 | gcloud projects add-iam-policy-binding \
126 | $(gcloud config get-value project) \
127 | --member=serviceAccount:${SERVICE_ACCOUNT}@$(gcloud config get-value project).iam.gserviceaccount.com \
128 | --role=roles/bigquery.admin
129 | ```
130 |
131 | Enable the Cloud Build and Cloud Run APIs:
132 |
133 | ```sh
134 | gcloud services enable run.googleapis.com
135 | gcloud services enable cloudbuild.googleapis.com
136 | ```
137 |
138 | Build the container:
139 |
140 | ```sh
141 | export SERVICE_NAME=dbt-service
142 | gcloud builds submit \
143 | --tag gcr.io/$(gcloud config get-value project)/${SERVICE_NAME}
144 | ```
145 |
146 | Deploy to Cloud Run with the service account created earlier and the
147 | `--no-allow-unauthenticated` flag to make it a private service:
148 |
149 | ```sh
150 | gcloud run deploy ${SERVICE_NAME} \
151 | --image gcr.io/$(gcloud config get-value project)/${SERVICE_NAME} \
152 | --service-account ${SERVICE_ACCOUNT}@$(gcloud config get-value project).iam.gserviceaccount.com \
153 | --platform managed \
154 | --no-allow-unauthenticated
155 | ```
156 |
157 | ## Setup Cloud Scheduler
158 |
159 | The final step is to call the Cloud Run service on a schedule. You can do this
160 | with Cloud Scheduler.
161 |
162 | Enable the Cloud Scheduler API:
163 |
164 | ```sh
165 | gcloud services enable cloudscheduler.googleapis.com
166 | ```
167 |
168 | Create a service account for Cloud Scheduler with `run.invoker` role:
169 |
170 | ```sh
171 | export SERVICE_ACCOUNT=dbt-scheduler-sa
172 | gcloud iam service-accounts create ${SERVICE_ACCOUNT} \
173 | --display-name "DBT Scheduler Service Account"
174 | gcloud run services add-iam-policy-binding ${SERVICE_NAME} \
175 | --member=serviceAccount:${SERVICE_ACCOUNT}@$(gcloud config get-value project).iam.gserviceaccount.com \
176 | --role=roles/run.invoker \
177 | --platform managed
178 | ```
179 |
180 | Create a Cloud Scheduler job to call the service every 5 minutes:
181 |
182 | ```sh
183 | export SERVICE_URL="$(gcloud run services list --platform managed --filter=${SERVICE_NAME} --format='value(URL)')"
184 | gcloud scheduler jobs create http ${SERVICE_NAME}-job --schedule "*/5 * * * *" \
185 | --http-method=GET \
186 | --uri=${SERVICE_URL} \
187 | --oidc-service-account-email=${SERVICE_ACCOUNT}@$(gcloud config get-value project).iam.gserviceaccount.com \
188 | --oidc-token-audience=${SERVICE_URL}
189 | ```
190 |
191 | You can test the service by manually invoking the job:
192 |
193 | ```sh
194 | gcloud scheduler jobs run ${SERVICE_NAME}-job
195 | ```
196 |
197 | After a few seconds, you should see the dataset created with a new
198 | customers table in BigQuery:
199 |
200 | 
201 |
--------------------------------------------------------------------------------
/docs/scheduled.md:
--------------------------------------------------------------------------------
1 | # Scheduled service
2 |
3 | You can use Cloud Scheduler to securely trigger a Cloud Run service on a schedule, similar to cron jobs. You can read more about this in Cloud Run [docs](https://cloud.google.com/run/docs/events/using-scheduler).
4 |
5 | 
6 |
7 | ## Enable Cloud Scheduler
8 |
9 | First, make sure the Cloud Scheduler service is enabled in your project:
10 |
11 | ```bash
12 | gcloud services enable cloudscheduler.googleapis.com
13 | ```
14 |
15 | ## Create an 'Event Display' service
16 |
17 | Take a look at the service we already created in [event-display](../event-display) folder. It simply logs out the HTTP request body. We'll use it to display the received messages.
18 |
19 | ## Build the container
20 |
21 | In the folder where the `Dockerfile` resides, build the container using Cloud Build and push it to Container Registry (this assumes `PROJECT_ID` is set to your project ID):
22 |
23 | ```bash
24 | gcloud builds submit \
25 | --project ${PROJECT_ID} \
26 | --tag gcr.io/${PROJECT_ID}/event-display
27 | ```
28 |
29 | ## Deploy to Cloud Run
30 |
31 | Note that we're deploying with the `--no-allow-unauthenticated` flag. We only want Cloud Scheduler to trigger the service:
32 |
33 | ```bash
34 | export SERVICE_NAME=event-display-scheduled
35 |
36 | gcloud run deploy ${SERVICE_NAME} \
37 | --image gcr.io/${PROJECT_ID}/event-display \
38 | --platform managed \
39 | --no-allow-unauthenticated
40 | ```
41 |
42 | ## Setup Cloud Scheduler to trigger Cloud Run
43 |
44 | Create a service account:
45 |
46 | ```bash
47 | export SERVICE_ACCOUNT=cloudrun-scheduler-sa
48 |
49 | gcloud iam service-accounts create ${SERVICE_ACCOUNT} \
50 | --display-name "Cloud Run Scheduler Service Account"
51 | ```
52 |
53 | Give service account permission to invoke the Cloud Run service:
54 |
55 | ```bash
56 | gcloud run services add-iam-policy-binding event-display-scheduled \
57 | --member=serviceAccount:${SERVICE_ACCOUNT}@${PROJECT_ID}.iam.gserviceaccount.com \
58 | --role=roles/run.invoker
59 | ```
60 |
61 | ## Create a job
62 |
63 | Create a Cloud Scheduler job to execute every 5 minutes:
64 |
65 | ```bash
66 | export SERVICE_URL="$(gcloud run services list --platform managed --filter=${SERVICE_NAME} --format='value(URL)')"
67 |
68 | gcloud beta scheduler jobs create http cloudrun-job --schedule "*/5 * * * *" \
69 | --http-method=POST \
70 | --uri=${SERVICE_URL} \
71 | --oidc-service-account-email=${SERVICE_ACCOUNT}@${PROJECT_ID}.iam.gserviceaccount.com \
72 | --oidc-token-audience=${SERVICE_URL}
73 | ```
74 |
75 | ## Test the service
76 |
77 | You can check the logs of the service to see that it's been triggered by Cloud Scheduler every 5 minutes:
78 |
79 | ```
80 | 12:15:00.578 GMT POST 200 188 B 100 ms Google-Cloud-Scheduler https://event-display-scheduled-pbelpl5x6a-ew.a.run.app/
81 |
82 | 12:20:00.641 GMT POST 200 188 B 32 ms Google-Cloud-Scheduler https://event-display-scheduled-pbelpl5x6a-ew.a.run.app/
83 | ```
--------------------------------------------------------------------------------
/docs/storage.md:
--------------------------------------------------------------------------------
1 | # Storage triggered service
2 |
3 | In the [Pub/Sub triggered service](pubsub.md) example, you can see how a Pub/Sub message triggers an internal Cloud Run service. This sample is similar, except the Pub/Sub message will be coming from Cloud Storage.
4 |
5 | [Cloud Storage](https://cloud.google.com/storage/docs/) is a highly scalable object storage service. You can configure a Cloud Storage bucket to publish a Pub/Sub message when an object is uploaded. That Pub/Sub message can in turn be handled by a Cloud Run service:
6 |
7 | 
8 |
9 | ## Create an 'Event Display' service
10 |
11 | Take a look at the service we already created in [event-display](../event-display) folder. It simply logs out the HTTP request body. We'll use it to display the received messages.
12 |
13 | ## Build the container
14 |
15 | In the folder where the `Dockerfile` resides, build the container using Cloud Build and push it to Container Registry (this assumes `PROJECT_ID` is set to your project ID):
16 |
17 | ```bash
18 | gcloud builds submit \
19 | --project ${PROJECT_ID} \
20 | --tag gcr.io/${PROJECT_ID}/event-display
21 | ```
22 |
23 | ## Deploy to Cloud Run
24 |
25 | Note that we're deploying with the `--no-allow-unauthenticated` flag. We only want Cloud Storage (via Pub/Sub) to trigger the service:
26 |
27 | ```bash
28 | export SERVICE_NAME=event-display-storage
29 |
30 | gcloud run deploy ${SERVICE_NAME} \
31 | --image gcr.io/${PROJECT_ID}/event-display \
32 | --platform managed \
33 | --no-allow-unauthenticated
34 | ```
35 |
36 | ## Setup Pub/Sub to trigger Cloud Run
37 |
38 | Create a Pub/Sub topic:
39 |
40 | ```bash
41 | export TOPIC_NAME=cloudrun-storage
42 |
43 | gcloud pubsub topics create ${TOPIC_NAME}
44 | ```
45 |
46 | Create a service account:
47 |
48 | ```bash
49 | export SERVICE_ACCOUNT=${TOPIC_NAME}-sa
50 |
51 | gcloud iam service-accounts create ${SERVICE_ACCOUNT} \
52 | --display-name "Cloud Run Storage Service Account"
53 | ```
54 |
55 | Give service account permission to invoke the Cloud Run service:
56 |
57 | ```bash
58 | gcloud run services add-iam-policy-binding ${SERVICE_NAME} \
59 | --member=serviceAccount:${SERVICE_ACCOUNT}@${PROJECT_ID}.iam.gserviceaccount.com \
60 | --role=roles/run.invoker \
61 | --platform managed
62 | ```
63 |
64 | Enable your project to create Cloud Pub/Sub authentication tokens (`PROJECT_NUMBER` is your project number, which you can get with `gcloud projects describe ${PROJECT_ID} --format 'value(projectNumber)'`):
65 |
66 | ```bash
67 | gcloud projects add-iam-policy-binding ${PROJECT_ID} \
68 | --member=serviceAccount:service-${PROJECT_NUMBER}@gcp-sa-pubsub.iam.gserviceaccount.com \
69 | --role=roles/iam.serviceAccountTokenCreator
70 | ```
71 |
72 | Create a Cloud Pub/Sub subscription with the service account:
73 |
74 | ```bash
75 | export SERVICE_URL="$(gcloud run services list --platform managed --filter=${SERVICE_NAME} --format='value(URL)')"
76 |
77 | gcloud beta pubsub subscriptions create ${TOPIC_NAME}-subscription --topic ${TOPIC_NAME} \
78 | --push-endpoint=${SERVICE_URL} \
79 | --push-auth-service-account=${TOPIC_NAME}-sa@${PROJECT_ID}.iam.gserviceaccount.com
80 | ```
81 |
82 | ## Create a bucket and enable notifications
83 |
84 | Create a Cloud Storage bucket to store files (bucket names are global, so pick a unique name):
85 |
86 | ```bash
87 | export BUCKET_NAME=cloudrun-bucket
88 |
89 | gsutil mb gs://${BUCKET_NAME}
90 | ```
91 |
92 | Enable Pub/Sub notifications on the bucket and link to the previously created topic:
93 |
94 | ```bash
95 | gsutil notification create -t ${TOPIC_NAME} -f json gs://${BUCKET_NAME}
96 | ```
97 |
98 | ## Test the service
99 |
100 | You can test the service by saving a file to the bucket:
101 |
102 | ```bash
103 | echo "Hello from Storage" > random.txt
104 |
105 | gsutil cp random.txt gs://${BUCKET_NAME}
106 | ```
107 |
108 | Check the logs of the service in the Cloud Run console; you should see the Pub/Sub message for the storage event:
109 |
110 | ```
111 | 2019-11-28 14:20:31.756 GMT Event Display received event: {"message":{"attributes":{"bucketId":"cloudrun-bucket","eventTime":"2019-11-28T14:20:30.345244Z","eventType":"OBJECT_FINALIZE","notificationConfig":"projects/_/buckets/cloudrun-bucket/notificationConfigs/1","objectGeneration":"1574950830345472","objectId":"random.txt","payloadFormat":"JSON_API_V1"},"data":"...","messageId":"795659161806846","message_id":"795659161806846","publishTime":"2019-11-28T14:20:30.859Z","publish_time":"2019-11-28T14:20:30.859Z"},"subscription":"projects/knative-atamel/subscriptions/cloudrun-storage-subscription"}
112 | ```
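
For a storage-triggered handler, the interesting fields are the message attributes such as `bucketId`, `objectId` and `eventType`. A hypothetical C# sketch of pulling them out of the push envelope (the event-display sample just logs the raw body):

```csharp
// Hypothetical sketch: read Cloud Storage notification attributes from the
// Pub/Sub push envelope. The event-display sample just logs the raw body.
using System.Text.Json;

public static class StorageNotification
{
    public static (string Bucket, string ObjectId, string EventType) Parse(string requestBody)
    {
        using var doc = JsonDocument.Parse(requestBody);
        var attributes = doc.RootElement
            .GetProperty("message")
            .GetProperty("attributes");
        return (
            attributes.GetProperty("bucketId").GetString(),
            attributes.GetProperty("objectId").GetString(),
            attributes.GetProperty("eventType").GetString());
    }
}
```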
113 |
--------------------------------------------------------------------------------
/docs/tasks.md:
--------------------------------------------------------------------------------
1 | # Task triggered service
2 |
3 | You can use [Cloud Tasks](https://cloud.google.com/tasks/) for async task execution with useful features such as task de-duplication, rate and retry controls, future scheduling and more. You can read more in Cloud Tasks [docs](https://cloud.google.com/tasks/docs/). The following blog posts are also useful in understanding use cases for Cloud Tasks:
4 |
5 | * [Asynchronous Code Execution with Google Cloud Tasks](https://medium.com/google-cloud/asynchronous-code-execution-with-google-cloud-tasks-9b73ceaf48c3) by Grant Timmerman.
6 | * [Cloud Tasks is a little stateful](https://medium.com/google-cloud/cloud-tasks-is-a-little-stateful-7ef39aad7d00) by Adam Ross.
7 |
8 | You can use Cloud Run as an HTTP target handler for Cloud Tasks:
9 |
10 | 
11 |
12 | ## Enable Cloud Tasks
13 |
14 | First, make sure the Cloud Tasks API is enabled in your project:
15 |
16 | ```bash
17 | gcloud services enable cloudtasks.googleapis.com
18 | ```
19 |
20 | ## Create an 'Event Display' service
21 |
22 | Take a look at the service we already created in [event-display](../event-display) folder. It simply logs out the HTTP request body. We'll use it to display the received messages.
23 |
24 | ## Build the container
25 |
26 | In the folder where the `Dockerfile` resides, build the container using Cloud Build and push it to Container Registry (this assumes `PROJECT_ID` is set to your project ID):
27 |
28 | ```bash
29 | gcloud builds submit \
30 | --project ${PROJECT_ID} \
31 | --tag gcr.io/${PROJECT_ID}/event-display
32 | ```
33 |
34 | ## Deploy to Cloud Run
35 |
36 | Note that we're deploying with the `--no-allow-unauthenticated` flag. We only want Cloud Tasks to trigger the service:
37 |
38 | ```bash
39 | export SERVICE_NAME=event-display-tasks
40 |
41 | gcloud run deploy ${SERVICE_NAME} \
42 | --image gcr.io/${PROJECT_ID}/event-display \
43 | --platform managed \
44 | --no-allow-unauthenticated
45 | ```
46 |
47 | ## Setup Cloud Tasks to trigger Cloud Run
48 |
49 | Create a service account:
50 |
51 | ```bash
52 | export SERVICE_ACCOUNT=cloudrun-tasks-sa
53 |
54 | gcloud iam service-accounts create ${SERVICE_ACCOUNT} \
55 | --display-name "Cloud Run Tasks Service Account"
56 | ```
57 |
58 | Give service account permission to invoke the Cloud Run service:
59 |
60 | ```bash
61 | gcloud run services add-iam-policy-binding event-display-tasks \
62 | --member=serviceAccount:${SERVICE_ACCOUNT}@${PROJECT_ID}.iam.gserviceaccount.com \
63 | --role=roles/run.invoker
64 | ```
65 |
66 | ## Create a task queue
67 |
68 | Create a Cloud Tasks queue:
69 |
70 | ```bash
71 | export QUEUE_NAME=cloudrun-queue
72 |
73 | gcloud tasks queues create ${QUEUE_NAME}
74 | ```
75 |
76 | ## Create a task
77 |
78 | You can create tasks programmatically or with gcloud. Here, we'll use gcloud for simplicity; the URL of the Cloud Run service you deployed earlier is looked up automatically:
79 |
80 | ```bash
81 | export SERVICE_URL="$(gcloud run services list --platform managed --filter=${SERVICE_NAME} --format='value(URL)')"
82 |
83 | gcloud tasks create-http-task --queue ${QUEUE_NAME} \
84 | --url ${SERVICE_URL} \
85 | --body-content "Hello World from Cloud Tasks" \
86 | --oidc-service-account-email=${SERVICE_ACCOUNT}@${PROJECT_ID}.iam.gserviceaccount.com \
87 | --oidc-token-audience=${SERVICE_URL}
88 | ```
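
If you'd rather create the task programmatically, the `Google.Cloud.Tasks.V2` client library can do the same thing. A rough sketch (the project, queue, URL and service account values below are placeholders to replace with your own):

```csharp
// Sketch: create an HTTP task targeting the Cloud Run service with an OIDC token.
// Requires the Google.Cloud.Tasks.V2 NuGet package; all names below are placeholders.
using Google.Cloud.Tasks.V2;
using Google.Protobuf;

var client = CloudTasksClient.Create();
var queue = new QueueName("your-project-id", "us-central1", "cloudrun-queue");

var task = new Google.Cloud.Tasks.V2.Task
{
    HttpRequest = new Google.Cloud.Tasks.V2.HttpRequest
    {
        HttpMethod = Google.Cloud.Tasks.V2.HttpMethod.Post,
        Url = "https://your-cloud-run-service-url",
        Body = ByteString.CopyFromUtf8("Hello World from Cloud Tasks"),
        OidcToken = new OidcToken
        {
            ServiceAccountEmail = "cloudrun-tasks-sa@your-project-id.iam.gserviceaccount.com",
        },
    },
};

// Enqueue the task; Cloud Tasks will POST it to the Cloud Run service.
client.CreateTask(queue, task);
```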
89 |
90 | ## Test the service
91 |
92 | You can check the logs of the service to see that it's been triggered by Cloud Tasks:
93 |
94 | ```
95 | 2019-11-28 09:55:09.946 GMT Event Display received event: Hello World from Cloud Tasks
96 | ```
--------------------------------------------------------------------------------
/event-display/csharp/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use Microsoft's official build .NET image.
2 | # https://hub.docker.com/_/microsoft-dotnet-core-sdk/
3 | FROM mcr.microsoft.com/dotnet/core/sdk:3.1-alpine AS build
4 | WORKDIR /app
5 |
6 | # Install production dependencies.
7 | # Copy csproj and restore as distinct layers.
8 | COPY *.csproj ./
9 | RUN dotnet restore
10 |
11 | # Copy local code to the container image.
12 | COPY . ./
13 | WORKDIR /app
14 |
15 | # Build a release artifact.
16 | RUN dotnet publish -c Release -o out
17 |
18 |
19 | # Use Microsoft's official runtime .NET image.
20 | # https://hub.docker.com/_/microsoft-dotnet-core-aspnet/
21 | FROM mcr.microsoft.com/dotnet/core/aspnet:3.1-alpine AS runtime
22 | WORKDIR /app
23 | COPY --from=build /app/out ./
24 |
25 | # Run the web service on container startup.
26 | ENTRYPOINT ["dotnet", "event-display.dll"]
--------------------------------------------------------------------------------
/event-display/csharp/Program.cs:
--------------------------------------------------------------------------------
1 | // Copyright 2021 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // https://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | using System;
15 | using Microsoft.AspNetCore.Hosting;
16 | using Microsoft.Extensions.Hosting;
17 |
18 | namespace event_display
19 | {
20 | public class Program
21 | {
22 | public static void Main(string[] args)
23 | {
24 | CreateHostBuilder(args).Build().Run();
25 | }
26 |
27 | public static IHostBuilder CreateHostBuilder(string[] args)
28 | {
29 | var port = Environment.GetEnvironmentVariable("PORT") ?? "8080";
30 | var url = $"http://0.0.0.0:{port}";
31 |
32 | return Host.CreateDefaultBuilder(args)
33 | .ConfigureWebHostDefaults(webBuilder =>
34 | {
35 | webBuilder.UseStartup<Startup>().UseUrls(url);
36 | });
37 | }
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/event-display/csharp/Startup.cs:
--------------------------------------------------------------------------------
1 | // Copyright 2021 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // https://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | using System.IO;
15 | using Microsoft.AspNetCore.Builder;
16 | using Microsoft.AspNetCore.Hosting;
17 | using Microsoft.AspNetCore.Http;
18 | using Microsoft.Extensions.DependencyInjection;
19 | using Microsoft.Extensions.Hosting;
20 | using Microsoft.Extensions.Logging;
21 |
22 | namespace event_display
23 | {
24 | public class Startup
25 | {
26 | public void ConfigureServices(IServiceCollection services)
27 | {
28 | }
29 |
30 | public void Configure(IApplicationBuilder app, IWebHostEnvironment env, ILogger<Startup> logger)
31 | {
32 | if (env.IsDevelopment())
33 | {
34 | app.UseDeveloperExceptionPage();
35 | }
36 |
37 | logger.LogInformation("Event Display is starting...");
38 |
39 | app.UseRouting();
40 |
41 | app.UseEndpoints(endpoints =>
42 | {
43 | endpoints.MapPost("/", async context =>
44 | {
45 | using (var reader = new StreamReader(context.Request.Body))
46 | {
47 | var content = await reader.ReadToEndAsync();
48 | logger.LogInformation("Event Display received event: " + content);
49 | await context.Response.WriteAsync(content);
50 | }
51 | });
52 | });
53 | }
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/event-display/csharp/event-display.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk.Web">
2 | 
3 | <PropertyGroup>
4 | <TargetFramework>netcoreapp3.1</TargetFramework>
5 | </PropertyGroup>
6 | 
7 | </Project>
8 | 
--------------------------------------------------------------------------------
/health-checks/README.md:
--------------------------------------------------------------------------------
1 | # Cloud Run Healthchecks
2 |
3 | > **Note:** Cloud Run Healthchecks is a feature in *preview*.
4 | > Only allow-listed projects can currently take advantage of it. Please fill in the
5 | > following [form](https://docs.google.com/forms/d/e/1FAIpQLScWCZiOrwGuEUYJXSvP_-ostVUreKt_Pq_8K53DwStr7q_w8g/viewform)
6 | > to get your project allow-listed before attempting this sample.
7 |
8 | In this sample, you will see how to use startup and liveness probes for fully
9 | managed Cloud Run.
10 |
11 | ## Startup and liveness probes
12 |
13 | You can configure startup probes to know when a container has started and is
14 | ready to start accepting traffic. If such a probe is configured, liveness checks
15 | are disabled until it succeeds, making sure those probes don't interfere with
16 | application startup. This makes it possible to use liveness checks on slow-starting
17 | containers without having them killed by Cloud Run before they
18 | are up and running.
19 |
20 | You can also configure liveness probes to know when to restart a container. For
21 | example, liveness probes could catch a deadlock, where an application is
22 | running, but unable to make progress. Restarting a container in such a state can
23 | help to make the application more available despite bugs.
24 |
25 | ## Deploy a Cloud Run service
26 |
27 | To showcase the startup and liveness probe, you will deploy a sample Node.js
28 | application to Cloud Run. You can check the source code in [index.js](index.js).
29 | It has two endpoints:
30 |
31 | * `/started` endpoint will be used in the startup probe and it artificially
32 | waits for 20 seconds before reporting that the container started running.
33 | * `/health` endpoint will be used in the liveness probe and it simply reports
34 | healthy all the time.
35 |
36 | Deploy the service to Cloud Run in your preferred region with the
37 | `--allow-unauthenticated` flag:
38 |
39 | ```sh
40 | gcloud run deploy
41 | ```
42 |
43 | ## Configure the service definition file
44 |
45 | Once the service is deployed, download its service definition file:
46 |
47 | ```sh
48 | gcloud run services describe health-checks --format export > service.yaml
49 | ```
50 |
51 | Add the `launch-stage` annotation to `service.yaml`. This will enable you to
52 | deploy the Cloud Run service with alpha features:
53 |
54 | ```yaml
55 | kind: Service
56 | metadata:
57 | annotations:
58 | ...
59 | run.googleapis.com/launch-stage: ALPHA
60 | ```
61 |
62 | Also, change the revision name from `00001` to `00002`. This will allow you to
63 | deploy the service with a new revision name later:
64 |
65 | ```yaml
66 | spec:
67 | template:
68 | metadata:
69 | annotations:
70 | ...
71 | name: health-checks-00002-siq
72 | ```
73 |
74 | ## Configure startup and liveness probes
75 |
76 | Let's add a startup probe to the `/started` endpoint and liveness probe to the
77 | `/health` endpoint by editing the `service.yaml` file:
78 |
79 | ```yaml
80 | containers:
81 | - image: ...
82 | startupProbe:
83 | httpGet:
84 | path: /started
85 | failureThreshold: 30
86 | periodSeconds: 10
87 | livenessProbe:
88 | httpGet:
89 | path: /health
90 | failureThreshold: 30
91 | periodSeconds: 10
92 | ```
93 |
94 | Now you can update the service:
95 |
96 | ```sh
97 | gcloud run services replace service.yaml
98 | ```
99 |
100 | ## Test
101 |
102 | As the service is deploying, you can check the logs of the Cloud Run
103 | service to see that the probes are working:
104 |
105 | ```sh
106 | Default
107 | 2022-05-24 17:00:37.899 BST Listening on port 8080
108 | 2022-05-24 17:00:45.234 BST /started: false
109 | 2022-05-24 17:00:55.242 BST /started: false
110 | 2022-05-24 17:01:05.245 BST /started: true
111 | 2022-05-24 17:01:05.250 BST /health: true
112 | 2022-05-24 17:01:15.252 BST /health: true
113 | 2022-05-24 17:01:25.255 BST /health: true
114 | 2022-05-24 17:01:35.259 BST /health: true
115 | ```
116 |
117 | The startup probe waits for the `/started` endpoint to report
118 | true before it concludes that the container is running.
119 |
120 | The liveness probe pings the `/health` endpoint every 10 seconds to see that the
121 | container is alive.
122 |
--------------------------------------------------------------------------------
/health-checks/index.js:
--------------------------------------------------------------------------------
1 | // Copyright 2022 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // https://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 |
14 | const express = require('express');
15 | const app = express();
16 |
17 | app.get('/', (req, res) => {
18 | res.send("Hello World!");
19 | });
20 |
21 | // This endpoint will be used in the startup probe and it artificially
22 | // waits for 20 seconds before reporting that the container started running.
23 | app.get('/started', (req, res) => {
24 | var now = Math.floor(Date.now() / 1000);
25 | var started = (now - startedTime) > 20;
26 | console.log(`/started: ${started}`);
27 | if (started) {
28 | res.status(200).send('OK: Service started');
29 | } else {
30 | res.status(503).send('Error: Service not started');
31 | }
32 | });
33 |
34 |
35 | // This endpoint will be used in the liveness probe and it simply reports
36 | // healthy all the time.
37 | app.get('/health', (req, res) => {
38 | console.log(`/health: ${healthy}`);
39 | if (healthy) {
40 | res.status(200).send('OK: Service is healthy');
41 | }
42 | else {
43 | res.status(503).send('Error: Service is not healthy');
44 | }
45 | });
46 |
47 | const port = parseInt(process.env.PORT) || 8080;
48 | const startedTime = Math.floor(Date.now() / 1000);
49 | var healthy = true;
50 |
51 | app.listen(port, () => {
52 | console.log(`Listening on port ${port}`);
53 | });
--------------------------------------------------------------------------------
/health-checks/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "helloworld",
3 | "description": "Simple hello world sample in Node",
4 | "version": "1.0.0",
5 | "private": true,
6 | "main": "index.js",
7 | "scripts": {
8 | "start": "node index.js"
9 | },
10 | "engines": {
11 | "node": ">=12.0.0"
12 | },
13 | "author": "Google LLC",
14 | "license": "Apache-2.0",
15 | "dependencies": {
16 | "express": "^4.17.1"
17 | }
18 | }
--------------------------------------------------------------------------------
/health-checks/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: serving.knative.dev/v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | client.knative.dev/user-image: europe-west1-docker.pkg.dev/serverless-atamel/cloud-run-source-deploy/health-checks
6 | run.googleapis.com/ingress: all
7 | run.googleapis.com/ingress-status: all
8 | run.googleapis.com/launch-stage: ALPHA
9 | labels:
10 | cloud.googleapis.com/location: europe-west1
11 | name: health-checks
12 | namespace: '422012409783'
13 | spec:
14 | template:
15 | metadata:
16 | annotations:
17 | autoscaling.knative.dev/maxScale: '100'
18 | client.knative.dev/user-image: europe-west1-docker.pkg.dev/serverless-atamel/cloud-run-source-deploy/health-checks
19 | run.googleapis.com/client-name: gcloud
20 | run.googleapis.com/client-version: 386.0.0
21 | name: health-checks-00003-wut
22 | spec:
23 | containerConcurrency: 80
24 | containers:
25 | - image: europe-west1-docker.pkg.dev/serverless-atamel/cloud-run-source-deploy/health-checks@sha256:2f6d69bb7366ccc43e7d2947a5b35803d5a314ff371d0a645cbdae6091cafc85
26 | startupProbe:
27 | httpGet:
28 | path: /started
29 | failureThreshold: 30
30 | periodSeconds: 10
31 | livenessProbe:
32 | httpGet:
33 | path: /health
34 | failureThreshold: 30
35 | periodSeconds: 10
36 | ports:
37 | - containerPort: 8080
38 | name: http1
39 | resources:
40 | limits:
41 | cpu: 1000m
42 | memory: 512Mi
43 | serviceAccountName: 422012409783-compute@developer.gserviceaccount.com
44 | timeoutSeconds: 300
45 | traffic:
46 | - latestRevision: true
47 | percent: 100
48 |
--------------------------------------------------------------------------------
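One way to apply this revision spec is gcloud's replace command, shown here as a sketch that assumes gcloud is already authenticated against the project owning the health-checks service in europe-west1:

    gcloud run services replace service.yaml --region europe-west1
    gcloud run services describe health-checks --region europe-west1

With failureThreshold: 30 and periodSeconds: 10, the startup probe gives the container roughly 300 seconds to answer /started with a 200 before Cloud Run shuts it down; the liveness probe then keeps polling /health on the running instance.
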
/helloworld/csharp/3.1/Program.cs:
--------------------------------------------------------------------------------
1 | // Copyright 2021 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // https://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | using System;
15 | using Microsoft.AspNetCore.Hosting;
16 | using Microsoft.Extensions.Hosting;
17 |
18 | namespace helloworld
19 | {
20 | public class Program
21 | {
22 | public static void Main(string[] args)
23 | {
24 | CreateHostBuilder(args).Build().Run();
25 | }
26 |
27 | public static IHostBuilder CreateHostBuilder(string[] args)
28 | {
29 | var port = Environment.GetEnvironmentVariable("PORT") ?? "8080";
30 | var url = $"http://0.0.0.0:{port}";
31 |
32 | return Host.CreateDefaultBuilder(args)
33 | .ConfigureWebHostDefaults(webBuilder =>
34 | {
35 | webBuilder.UseStartup<Startup>().UseUrls(url);
36 | });
37 | }
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/helloworld/csharp/3.1/Startup.cs:
--------------------------------------------------------------------------------
1 | // Copyright 2021 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // https://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | using System;
15 | using Microsoft.AspNetCore.Builder;
16 | using Microsoft.AspNetCore.Hosting;
17 | using Microsoft.AspNetCore.Http;
18 | using Microsoft.Extensions.DependencyInjection;
19 | using Microsoft.Extensions.Hosting;
20 |
21 | namespace helloworld
22 | {
23 | public class Startup
24 | {
25 | public void ConfigureServices(IServiceCollection services)
26 | {
27 | }
28 |
29 | public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
30 | {
31 | if (env.IsDevelopment())
32 | {
33 | app.UseDeveloperExceptionPage();
34 | }
35 |
36 | app.UseRouting();
37 |
38 | app.UseEndpoints(endpoints =>
39 | {
40 | endpoints.MapGet("/", async context =>
41 | {
42 | var target = Environment.GetEnvironmentVariable("TARGET") ?? "World";
43 | await context.Response.WriteAsync($"Hello {target} from .NET Core 3.1!\n");
44 | });
45 | });
46 | }
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/helloworld/csharp/3.1/helloworld.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk.Web">
2 | 
3 |   <PropertyGroup>
4 |     <TargetFramework>netcoreapp3.1</TargetFramework>
5 |   </PropertyGroup>
6 | 
7 | </Project>
8 | 
--------------------------------------------------------------------------------
/helloworld/csharp/5.0/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use Microsoft's official .NET SDK image to build the app.
2 | # https://hub.docker.com/_/microsoft-dotnet-core-sdk/
3 | FROM mcr.microsoft.com/dotnet/sdk:5.0-alpine AS build
4 | WORKDIR /app
5 |
6 | # Install production dependencies.
7 | # Copy csproj and restore as distinct layers.
8 | COPY *.csproj ./
9 | RUN dotnet restore
10 |
11 | # Copy local code to the container image.
12 | COPY . ./
13 | WORKDIR /app
14 |
15 | # Build a release artifact.
16 | RUN dotnet publish -c Release -o out
17 |
18 |
19 | # Use Microsoft's official ASP.NET runtime image.
20 | # https://hub.docker.com/_/microsoft-dotnet-core-aspnet/
21 | FROM mcr.microsoft.com/dotnet/aspnet:5.0-alpine AS runtime
22 | WORKDIR /app
23 | COPY --from=build /app/out ./
24 |
25 | # Run the web service on container startup.
26 | ENTRYPOINT ["dotnet", "helloworld.dll"]
27 |
28 |
--------------------------------------------------------------------------------
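To verify the two-stage image locally before pushing it anywhere, a plausible sequence (the tag helloworld-csharp is just an illustrative name; run from the 5.0 directory with Docker installed) is:

    docker build -t helloworld-csharp .
    docker run -p 8080:8080 -e TARGET="Cloud Run" helloworld-csharp
    # in another terminal:
    curl localhost:8080   # Hello Cloud Run from .NET 5.0!
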
/helloworld/csharp/5.0/Program.cs:
--------------------------------------------------------------------------------
1 | // Copyright 2021 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // https://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | using System;
15 | using Microsoft.AspNetCore.Hosting;
16 | using Microsoft.Extensions.Hosting;
17 |
18 | namespace helloworld
19 | {
20 | public class Program
21 | {
22 | public static void Main(string[] args)
23 | {
24 | CreateHostBuilder(args).Build().Run();
25 | }
26 |
27 | public static IHostBuilder CreateHostBuilder(string[] args)
28 | {
29 | var port = Environment.GetEnvironmentVariable("PORT") ?? "8080";
30 | var url = $"http://0.0.0.0:{port}";
31 |
32 | return Host.CreateDefaultBuilder(args)
33 | .ConfigureWebHostDefaults(webBuilder =>
34 | {
35 | webBuilder.UseStartup<Startup>().UseUrls(url);
36 | });
37 | }
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/helloworld/csharp/5.0/Startup.cs:
--------------------------------------------------------------------------------
1 | // Copyright 2021 Google LLC
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // https://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | using System;
15 | using Microsoft.AspNetCore.Builder;
16 | using Microsoft.AspNetCore.Hosting;
17 | using Microsoft.AspNetCore.Http;
18 | using Microsoft.Extensions.DependencyInjection;
19 | using Microsoft.Extensions.Hosting;
20 |
21 | namespace helloworld
22 | {
23 | public class Startup
24 | {
25 | public void ConfigureServices(IServiceCollection services)
26 | {
27 | }
28 |
29 | public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
30 | {
31 | if (env.IsDevelopment())
32 | {
33 | app.UseDeveloperExceptionPage();
34 | }
35 |
36 | app.UseRouting();
37 |
38 | app.UseEndpoints(endpoints =>
39 | {
40 | endpoints.MapGet("/", async context =>
41 | {
42 | var target = Environment.GetEnvironmentVariable("TARGET") ?? "World";
43 | await context.Response.WriteAsync($"Hello {target} from .NET 5.0!\n");
44 | });
45 | });
46 | }
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/helloworld/csharp/5.0/helloworld.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk.Web">
2 | 
3 |   <PropertyGroup>
4 |     <TargetFramework>net5.0</TargetFramework>
5 |   </PropertyGroup>
6 | 
7 | </Project>
8 | 
--------------------------------------------------------------------------------
/helloworld/csharp/6.0/Dockerfile:
--------------------------------------------------------------------------------
1 | # syntax=docker/dockerfile:1
2 | FROM mcr.microsoft.com/dotnet/sdk:6.0 AS build-env
3 | WORKDIR /app
4 |
5 | # Copy csproj and restore as distinct layers
6 | COPY *.csproj ./
7 | RUN dotnet restore
8 |
9 | # Copy everything else and build
10 | COPY . ./
11 | RUN dotnet publish -c Release -o out
12 |
13 | # Build runtime image
14 | FROM mcr.microsoft.com/dotnet/aspnet:6.0
15 | WORKDIR /app
16 | COPY --from=build-env /app/out .
17 | ENTRYPOINT ["dotnet", "helloworld.dll"]
--------------------------------------------------------------------------------
/helloworld/csharp/6.0/Program.cs:
--------------------------------------------------------------------------------
1 | var builder = WebApplication.CreateBuilder(args);
2 |
3 | var port = Environment.GetEnvironmentVariable("PORT") ?? "8080";
4 | var url = $"http://0.0.0.0:{port}";
5 | // builder.WebHost.UseUrls(url) doesn't work here due to https://github.com/dotnet/aspnetcore/issues/38185,
6 | // so the URL is passed to app.Run(url) at the bottom instead.
7 |
8 | var app = builder.Build();
9 |
10 | var target = Environment.GetEnvironmentVariable("TARGET") ?? "World";
11 |
12 | app.MapGet("/", () => $"Hello {target} from .NET 6.0!");
13 |
14 | app.Run(url);
15 |
--------------------------------------------------------------------------------
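Because the URL built from PORT is handed to app.Run(url), the service still binds to the requested port when run straight from the SDK; a quick local check (port 7070 and the TARGET value are illustrative, and this assumes the .NET 6 SDK with the 6.0 directory as the working directory):

    PORT=7070 TARGET=Developer dotnet run
    # in another terminal:
    curl localhost:7070   # Hello Developer from .NET 6.0!
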
/helloworld/csharp/6.0/helloworld.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk.Web">
2 | 
3 |   <PropertyGroup>
4 |     <TargetFramework>net6.0</TargetFramework>
5 |     <Nullable>enable</Nullable>
6 |     <ImplicitUsings>enable</ImplicitUsings>
7 |   </PropertyGroup>
8 | 
9 | </Project>
10 | 
--------------------------------------------------------------------------------
/helloworld/csharp/7.0/Program.cs:
--------------------------------------------------------------------------------
1 | var builder = WebApplication.CreateBuilder(args);
2 |
3 | var port = Environment.GetEnvironmentVariable("PORT") ?? "8080";
4 | var url = $"http://0.0.0.0:{port}";
5 | builder.WebHost.UseUrls(url);
6 |
7 | var app = builder.Build();
8 |
9 | app.MapGet("/", () => "Hello World from .NET 7.0!");
10 |
11 | app.Run();
12 |
--------------------------------------------------------------------------------
/helloworld/csharp/7.0/helloworld.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk.Web">
2 | 
3 |   <PropertyGroup>
4 |     <TargetFramework>net7.0</TargetFramework>
5 |     <Nullable>enable</Nullable>
6 |     <ImplicitUsings>enable</ImplicitUsings>
7 |   </PropertyGroup>
8 | 
9 | </Project>
10 | 
--------------------------------------------------------------------------------