├── handler.js
├── .gitignore
├── serverless-gcp.yml
├── package.json
├── serverless-aws.yml
├── serverless-azure.yml
├── README.md
├── app.js
└── blogpost.md

/handler.js:
--------------------------------------------------------------------------------
const handler = require('serverless-express/handler')
const app = require('./app')

exports.handler = handler(app)
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
node_modules
jspm_packages
.serverless
serverless.yml
app-function.json
id-function.json
getall-function.json
postnew-function.json
mongo
serverless.yml.aws
serverless.yml.azure
serverless.yml.gcp
--------------------------------------------------------------------------------
/serverless-gcp.yml:
--------------------------------------------------------------------------------
service: serverless-multicloud-example
provider:
  name: google
  stage: dev
  runtime: nodejs8
  region: asia-northeast1
  project: serverless-multicloud-example
  credentials: ~/.gcloud/keyfile.json
  environment:
    MONGO_URL: 'mongodb://username:password@mongo:27017/todolist'
plugins:
  - serverless-google-cloudfunctions
  - serverless-express
package:
  exclude:
    - .gitignore
    - .git/**
functions:
  app:
    handler: handler
    events:
      - http: path
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "serverless-multicloud-example",
  "version": "1.0.0",
  "description": "Example multicloud serverless express app",
  "main": "handler.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "author": "Jason Umiker",
  "license": "MIT",
  "dependencies": {
    "body-parser": "^1.19.0",
    "mongodb": "^3.3.0",
    "serverless-express": "^2.0.11"
  },
  "devDependencies": {
    "serverless-azure-functions": "^0.7.0",
    "serverless-google-cloudfunctions": "^2.3.2",
    "serverless-offline": "^5.10.1"
  }
}
--------------------------------------------------------------------------------
/serverless-aws.yml:
--------------------------------------------------------------------------------
service: serverless-multicloud-example
provider:
  name: aws
  runtime: nodejs10.x
  stage: dev
  region: ap-southeast-2
  environment:
    MONGO_URL: 'mongodb://username:password@mongo:27017/todolist'
package:
  exclude:
    - .gitignore
    - .git/**
functions:
  app:
    handler: handler.handler
    events:
      - http:
          path: /
          method: GET
      - http:
          path: /documents/all
          method: GET
      - http:
          path: /documents/id
          method: GET
      - http:
          path: /documents/new
          method: POST
      - http:
          path: /documents/id
          method: DELETE
      - http:
          path: /documents/id
          method: PATCH
plugins:
  - serverless-express
  - serverless-offline
--------------------------------------------------------------------------------
/serverless-azure.yml:
--------------------------------------------------------------------------------
service: serverless-multicloud-example
provider:
  name: azure
  location: Australia East
  environment:
    MONGO_URL: 'mongodb://username:password@mongo:27017/todolist'
plugins:
  - serverless-azure-functions
  - serverless-express
package:
  exclude:
    - local.settings.json
    - .gitignore
    - .git/**
functions:
  app:
    handler: handler.handler
    events:
      - http: true
        x-azure-settings:
          authLevel: anonymous
          name: req
          methods:
            - get
      - http: true
        x-azure-settings:
          direction: out
          name: res
  getall:
    handler: handler.handler
    events:
      - http: true
        x-azure-settings:
          authLevel: anonymous
          name: req
          methods:
            - get
          route: app/documents/all
      - http: true
        x-azure-settings:
          direction: out
          name: res
  id:
    handler: handler.handler
    events:
      - http: true
        x-azure-settings:
          authLevel: anonymous
          name: req
          methods:
            - get
            - delete
            - patch
          route: app/documents/id
      - http: true
        x-azure-settings:
          direction: out
          name: res
  postnew:
    handler: handler.handler
    events:
      - http: true
        x-azure-settings:
          authLevel: anonymous
          name: req
          methods:
            - post
          route: app/documents/new
      - http: true
        x-azure-settings:
          direction: out
          name: res
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# serverless-multicloud-example

This is the example for my blog post - [Blog Post](blogpost.md)

An example Node Express app that can be deployed to any of the major clouds by the Serverless framework (https://serverless.com). The differences in the handler for each serverless service are abstracted by the serverless-express plugin (https://www.npmjs.com/package/serverless-express) - which has been isolated in `handler.js` - leaving `app.js` for our cloud-agnostic business logic.

I intend for it to be a working example of the serverless-without-lock-in concepts discussed here - https://www.thoughtworks.com/insights/blog/mitigating-serverless-lock-fears

To use it, copy the relevant serverless-xxx.yml file for the cloud you want (AWS, GCP or Azure) to serverless.yml and do a `sls deploy`.

You also need to provide an Internet-facing Mongo URL via an environment variable in the serverless-xxx.yml file to use the Mongo CRUD functionality (all the `/documents` APIs).

Note that for GCP some initial setup is required as described here - https://serverless.com/framework/docs/providers/google/guide/credentials/. For the other clouds you just need valid login details and configured CLIs to proceed.

Note that for Azure the URIs have to start with `/api/app` and in Google they have to start with `/handler`. I am investigating how to have them start at the root like in AWS.

For the AWS side you can use the offline testing functionality by running `sls offline`.

The example app is a todo list application with a description and due date field for each task.

The APIs are:
* `GET` - `/` - App description and version
* `POST` - `/documents/new` - Create a task/document (takes two parameters - `description` and `duedate`)
* `GET` - `/documents/all` - Return all tasks/documents
* `GET` - `/documents/id` - Return a particular document by ID (via the `id` parameter)
* `DELETE` - `/documents/id` - Delete a particular document by ID (via the `id` parameter)
* `PATCH` - `/documents/id` - Update a particular document by ID (via the `id` parameter; takes `description` and/or `duedate` as the fields to update)
--------------------------------------------------------------------------------
/app.js:
--------------------------------------------------------------------------------
//const express = require('express')
const express = require('serverless-express/express')
const app = express()
const bodyParser = require('body-parser')
//const port = 3000

app.use(bodyParser.json())
app.use(bodyParser.urlencoded({ extended: true }))

// Setup Mongo
const MongoClient = require('mongodb').MongoClient;
const assert = require('assert')
const url = process.env.MONGO_URL
const dbName = 'todolist'
const client = new MongoClient(url)
var ObjectID = require('mongodb').ObjectID;

// Get all records
app.get(['/documents/all', '/api/app/documents/all'], (req, res, next) => {
  client.connect(function (err) {
    console.log("Responding to a GET /documents/all")
    assert.equal(null, err)
    const db = client.db(dbName)
    const collection = db.collection('items');

    collection.find({}).toArray((err, result) => {
      if (err) {
        return res.status(400).send({ 'error': err })
      }
      if (result === undefined || result.length === 0) {
        res.status(400).send({ 'error': 'No documents in database' })
      } else {
        res.status(200).send(result)
        console.log(result)
      }
    })
  })
})

// Get specific record
app.get(['/documents/id', '/api/app/documents/id'], (req, res, next) => {
  client.connect(function (err) {
    console.log("Responding to a GET /documents/id")
    console.log(req.query)
    assert.equal(null, err)
    const db = client.db(dbName)
    const collection = db.collection('items');

    if (req.query.id != undefined) {
      var o_id = new ObjectID(req.query.id)
      collection.findOne({ _id: o_id }, (err, result) => {
        if (err) {
          return res.status(400).send({ 'error': err })
        }
        if (result === undefined || result === null) {
          res.status(400).send({ 'error': 'No document matching that id was found' })
        } else {
          res.status(200).send(result)
          console.log(result)
        }
      })
    }
    else {
      res.status(400).send({ 'error': 'No id parameter specified' })
    }
  })
})

//Insert a document
app.post(['/documents/new', '/api/app/documents/new'], (req, res, next) => {
  console.log("Responding to a POST /documents/new")
  console.log(req.body)
  client.connect(function (err) {
    assert.equal(null, err)
    const db = client.db(dbName)
    const collection = db.collection('items');

    collection.insertOne({
      "created": Date.now(), "updated": Date.now(), "description": req.body.description, "duedate": req.body.duedate
    }, (err, result) => {
      if (err) {
        return res.status(400).send({ 'error': err })
      }
      res.status(200).send(result)
      console.log(result)
    })
  })
})

//Delete a document
app.delete(['/documents/id', '/api/app/documents/id'], (req, res, next) => {
  client.connect(function (err) {
    console.log("Responding to a DELETE /documents/id")
    console.log(req.query)
    assert.equal(null, err)
    const db = client.db(dbName)
    const collection = db.collection('items');

    if (req.query.id != undefined) {
      var o_id = new ObjectID(req.query.id)
      collection.deleteOne({ _id: o_id }, (err, result) => {
        if (err) {
          return res.status(400).send({ 'error': err })
        }
        res.status(200).send(result)
        console.log(result)
      })
    }
    else {
      res.status(400).send({ 'error': 'No id parameter specified' })
    }
  })
})

//Update a document
app.patch(['/documents/id', '/api/app/documents/id'], (req, res, next) => {
  client.connect(function (err) {
    console.log("Responding to a PATCH /documents/id")
    console.log(req.query)
    assert.equal(null, err)
    const db = client.db(dbName)
    const collection = db.collection('items');

    if (req.query.id != undefined) {
      var o_id = new ObjectID(req.query.id)
      collection.updateOne({ _id: o_id },
        {
          $set:
          {
            updated: Date.now(),
            description: req.query.description,
            duedate: req.query.duedate
          }
        },
        (err, result) => {
          if (err) {
            return res.status(400).send({ 'error': err })
          }
          res.status(200).send(result)
          console.log(result)
        })
    }
    else {
      res.status(400).send({ 'error': 'No id parameter specified' })
    }
  })
})

// Azure's default URI path for this is /api/app and it sends that through so we need to cater for that
app.get(['/', '/api/app'], (req, res) => res.send('todolist v1.0.0'))

//app.listen(port, () => console.log(`Example app listening on port ${port}!`))
module.exports = app
--------------------------------------------------------------------------------
/blogpost.md:
--------------------------------------------------------------------------------
# Approaches for Serverless without Vendor Lock-in
By Jason Umiker

Both containers and serverless have been hot topics in cloud architecture circles for the last couple of years. One of the main justifications I’ve heard for containerising, especially on Kubernetes, has been that it allows for more portability of a workload across clouds – more so than VMs and especially more so than serverless architectures. About half the time I hear this, the portability is about “avoiding vendor lock-in”; the other half, it is based on the belief that you can build a more reliable service by facilitating failover of the workload between clouds as a type of disaster recovery. I’ve been thinking quite a bit about these statements and want to challenge some of the assumptions that underpin them.

The key question here is whether you need to containerise a workload for it to be portable. I had often heard this statement and not challenged it, instead tending to question whether that portability was really necessary versus all the benefits of centralising on one cloud platform. Then I read this blog post by Wisen Tanasa of ThoughtWorks - https://www.thoughtworks.com/insights/blog/mitigating-serverless-lock-fears.
![Lock-in Equation](https://insights-images.thoughtworks.com/Insights20Diagram05_78362c25d5be66120fb7ff5373c64396.png)
![Adapter Diagram](https://insights-images.thoughtworks.com/Insights20Diagram_123030303_fad81aa4bf183563a8b4b42eba770d85.png)

It is a thought-provoking discussion on software architectural approaches and possible trade-offs to balance or mitigate lock-in fears - while still realising the benefits of serverless and cloud. (The first diagram above captures the post’s central equation, which I refer to throughout: in essence, lock-in cost = migration cost minus opportunity gain.) It made me seriously consider whether multi-cloud serverless is not just possible but practical. But it didn’t have a concrete example or demo of doing it in practice. So, I set out to build one to see for myself just how hard it would be and what kind of limitations I’d encounter.

Working through it, there were three considerations:
1. Can your function run in another cloud?
   1. Can you abstract any differences in how it’ll be invoked in the other cloud (the handler)?
   1. Does the other cloud’s FaaS support the language/runtime (e.g. nodejs or golang)?
   1. Does it use services/features not in the other cloud (e.g. AWS DynamoDB or GCP BigTable)?
1. Do you need to substantially change your build/deployment pipeline or tooling to deploy it to another cloud?
1. Can you migrate any/all required data between clouds – ideally between equivalent managed databases?
   1. And to what extent can/should this be done to facilitate a DR failover between clouds?

## Can your function run in another cloud?

While each major cloud has a roughly comparable Function-as-a-Service (FaaS) offering (AWS Lambda, GCP Cloud Functions, Azure Functions, etc.), they all invoke your function with a slightly different JSON payload. The part of your function that deals with parsing this payload and getting things going is called the handler – and running it across multiple clouds means making this handler work with any of those payloads. You can either abstract the handler from the business logic and have a pluggable one for each cloud, or write one handler that can cope with every format. The good news is that for many languages/runtimes others have already done this work for you! This npm package is one such abstracted handler for a Node Express app that makes it support the payload of all three major clouds - https://www.npmjs.com/package/serverless-express.

When it comes to languages and runtimes, the major clouds support several, and finding one that works across two of them should be, if not easy, at least possible. In the example we’ll look at below I use the most recent version of Node.js with long-term support, and all three major clouds support it.

So that takes us to the most interesting of the lock-in discussions – depending on a service only available in one of the clouds. This can often be appealing – cloud-managed services for databases, queues, streams etc. can be very cost-effective and save you much of the operational effort, yet are often only available in one of the clouds. There are two approaches to dealing with this:
* You use them but abstract that usage into a separate file/function, what Tanasa calls an adapter, to facilitate swapping it out without needing to rewrite the core business logic of your microservice.
* The other approach is choosing to use only those services that are open-source or available in the other cloud, so you don’t need to write such adapters.

And, these are not mutually exclusive – you can do both where appropriate.
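
To make the adapter idea concrete, here is a minimal sketch of what one could look like for this app’s data layer. The file, table and collection names are hypothetical and nothing like this exists in the demo repo (which takes the second approach – Mongo everywhere – so no database adapter was needed), and it assumes the aws-sdk (bundled in the Lambda runtime) plus the mongodb driver are available. The point is simply that the Express routes would only ever call `saveTask`/`getTask`, so changing clouds means swapping the adapter rather than rewriting the business logic:

```javascript
// task-store.js – hypothetical adapter sketch, not part of the demo repo.
// The Express routes would depend only on this interface: saveTask(task), getTask(id).

// AWS-specific implementation (this is the piece you would rewrite on migration)
const AWS = require('aws-sdk')
const docClient = new AWS.DynamoDB.DocumentClient()

const dynamoTaskStore = {
  saveTask: (task) => docClient.put({ TableName: 'tasks', Item: task }).promise(),
  getTask: (id) => docClient.get({ TableName: 'tasks', Key: { id } }).promise()
    .then((data) => data.Item)
}

// Portable implementation – runs against any Mongo-compatible service in any cloud
const { MongoClient, ObjectID } = require('mongodb')
const mongoClient = new MongoClient(process.env.MONGO_URL)

const mongoTaskStore = {
  saveTask: async (task) => {
    if (!mongoClient.isConnected()) await mongoClient.connect()
    return mongoClient.db('todolist').collection('tasks').insertOne(task)
  },
  getTask: async (id) => {
    if (!mongoClient.isConnected()) await mongoClient.connect()
    return mongoClient.db('todolist').collection('tasks').findOne({ _id: new ObjectID(id) })
  }
}

// Pick the adapter once per deployment target; the routes never know the difference
module.exports = process.env.TASK_STORE === 'dynamo' ? dynamoTaskStore : mongoTaskStore
```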

When deciding which approach to take, it is important to understand which services are available on your chosen clouds and what their APIs/SDKs are like. Often the clouds will have comparable services that just need to be called via different SDKs – and as long as they both meet your requirements and are similar, writing two adapters from the start might not be that difficult to do together as you go. If it does look like it’ll be difficult, you could start with just the one adapter and flag the risk that moving means writing another adapter to something else as part of that migration effort. In the equation above, this one-adapter approach increases future migration cost in exchange for more short-term opportunity gain from just using the potentially cheaper and easier cloud service for now. But with a clear design and risk register, that risk is documented and quantifiable.

The other way of thinking is to choose either open-source or commercial services that can be run on both providers/platforms. A good example is using MongoDB instead of AWS DynamoDB (as we do in the example). Both AWS and Azure have a managed Mongo-compatible database - and you can even run it in a VM instead if you wanted to. If we choose this, then we don’t even have to abstract the database functionality out to an adapter because it won’t hinder our migration efforts. In the equation above, this approach decreases migration cost in exchange for less opportunity gain from the potentially cheaper and easier cloud service.

## Do you need to substantially change your build/deployment pipeline or tooling to deploy it to another cloud?

Each cloud has infrastructure-as-code deployment tooling that is specific to it – e.g. AWS has CloudFormation and SAM, Azure has Resource Manager and Google has Cloud Deployment Manager. If you fully standardise on that tooling, you’ll need to change tooling to migrate between clouds. There are at least two tools in this space that support all of the major clouds, though – Serverless and Terraform. While the templates you’d make in these tools will vary a bit between clouds, standardising on a tool like that can help make migration more seamless. In the equation above, this is the trade-off between migration cost and the opportunity gain of having one vendor support both the cloud and its associated deployment tooling for you as a managed service.

## Can you migrate any required data between clouds – ideally between managed databases?

This question will depend on the type of datastore or databases that you use. If you can run the same database in both clouds, then it stands to reason that you can take a backup in one cloud and restore it in the other. And, in some areas, the providers have rallied around a common standard – AWS’ S3 API is supported for object storage across all three major clouds, for example. Even if the database engines vary, tools like AWS’ Database Migration Service and Schema Conversion Tool can migrate from things like Mongo to DynamoDB or Oracle to PostgreSQL – but this adds more complexity and uncertainty to the migration effort.

### Can, and should, we continually replicate the data to the other cloud for DR?

If you were to use the same database in both clouds, many of them support replication – so it is possible to enable replication, either synchronous or eventually consistent, from a primary to a secondary cloud.
The assumption that underpins this is that you can achieve better availability by being able to flip between clouds – and that assumption is worth digging into.

In the case of a cloud like AWS, the infrastructure is architected to allow easy active/active high availability between Availability Zones, where nearly all of their managed services, such as databases, will automatically fail over to another zone for you. This is often more than enough to meet the availability/uptime goals of your application – and you add exponentially more complexity by architecting for such replication and automatic failover between cloud providers. Not to mention that none of the managed database services in any of the cloud providers facilitate managed cross-cloud replication like they do within their own platforms - so this is something you’d have to build and run yourself.

If the main issue is showing that you could move between clouds if a business relationship with one goes sour, or you decide strategically to back another horse, then you would often have at least weeks, if not months, to migrate. So investing in a capability to do it in minutes at the drop of a hat is likely overkill.

## My suggested approach and example app

Given all of this, the approach that I would go with is to choose a primary cloud provider and run my application there in the most managed and highly available way. Then I can decide, based on my situation and the above equation balancing migration effort vs. opportunity gain, whether to leverage adapters in my code around single-cloud options or instead to use services I can get across multiple clouds, as appropriate.

Once you have either written the adapters to accommodate both clouds and/or chosen options available in both, you can prove via an integration test in your continuous integration (CI) pipeline that the microservice can be deployed to two providers – by actually doing it on each build. This proves to you and your stakeholders, in a very simple and affordable way, that you could migrate if you ever want or need to.

### Demo
I have written a demo of a Node.js Express app that can be deployed to all three major clouds via Serverless, backed by MongoDB. This means that the deployment tooling is the same regardless of the cloud, so my pipelines, at least for deploying this app, would not need to change if I switched – and it helps facilitate an integration test where I deploy to both clouds with the same tooling.

As for the database, I have a choice of running a separate Mongo development environment in each cloud or just calling the Mongo in one from the other (perhaps over a VPN link) – because I know that I could run the Mongo in the secondary cloud if I ever needed to, and that isn’t what I am really testing in my integration test. I am testing my handler and the underlying FaaS service executing it.
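
As a sketch of what that per-build proof could look like, the snippet below is a hypothetical post-deploy smoke test (it is not part of the repo, and the `AWS_BASE_URL`/`AZURE_BASE_URL` variables are assumed to be exported by the pipeline after each `sls deploy`). It hits the root route on both clouds, which exercises the handler and the underlying FaaS service without depending on Mongo:

```javascript
// smoke-test.js – hypothetical post-deploy check, not part of the demo repo.
// Assumes the pipeline sets AWS_BASE_URL and AZURE_BASE_URL after each `sls deploy`.
const https = require('https')

const get = (url) => new Promise((resolve, reject) => {
  https.get(url, (res) => {
    let body = ''
    res.on('data', (chunk) => { body += chunk })
    res.on('end', () => resolve({ status: res.statusCode, body }))
  }).on('error', reject)
})

// The root route returns the app name/version on every cloud (see app.js),
// so a 200 with that body proves the handler ran on that provider's FaaS.
const targets = [
  `${process.env.AWS_BASE_URL}/`,
  `${process.env.AZURE_BASE_URL}/api/app`
]

Promise.all(targets.map(get)).then((responses) => {
  responses.forEach((r, i) => {
    if (r.status !== 200 || !r.body.startsWith('todolist')) {
      console.error(`FAIL: ${targets[i]} returned ${r.status}`)
      process.exitCode = 1
    } else {
      console.log(`OK: ${targets[i]} -> ${r.body}`)
    }
  })
}).catch((err) => {
  console.error(err)
  process.exitCode = 1
})
```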

The code is available here - https://github.com/jasonumiker/serverless-multicloud-example

This is an example of the service running in each cloud:
* AWS - https://sesallvxkb.execute-api.ap-southeast-2.amazonaws.com/dev/documents/all
* Azure - https://serverless-multicloud-example.azurewebsites.net/api/app/documents/all
* Google - https://asia-northeast1-serverless-multicloud-example.cloudfunctions.net/handler/documents/all

## Conclusion

I now believe that it is possible to architect applications that are serverless yet portable – and to prove that portability on every build with a deployment to both clouds and a subsequent integration test. What isn’t necessary, or perhaps even possible, is attempting to achieve better availability through continual replication or automatic failover between clouds – you can likely achieve the availability you want at much lower cost and complexity within one. This is because the simpler something is, and the fewer moving parts and parties involved, the less likely you are to have availability issues and the easier they’ll be to troubleshoot if you ever do.

In the conversations I’ve had recently, people are building very complex and expensive Kubernetes container environments, with associated container CI/CD pipelines, mainly for portability. It is worth at least considering whether it is possible to move up one level of abstraction to the major providers’ serverless Function-as-a-Service (FaaS) offerings and still be portable – but with lower cost and cognitive load, letting you focus more on your business logic and customer outcomes. A few simple yet deliberate software architecture decisions around which services to use and when to decouple functions and add adapters can help pave the way for portability in the future if/when you do need it – while keeping things inexpensive and simple for you today.

--------------------------------------------------------------------------------