├── .gitignore ├── images ├── aws │ ├── ec2.png │ ├── repl.png │ ├── ssh-0.png │ ├── ec2-list.png │ ├── launch-0.png │ ├── launch-1.png │ ├── launch-2.png │ ├── launch-3.png │ ├── launch-4.png │ ├── launch-5.png │ ├── launch-6.png │ ├── launch-7.png │ ├── services.png │ ├── terminate.png │ ├── aws-fargate-vapor-mongo.png │ ├── aws-fargate-vapor-mongo-atlas-connection.png │ └── aws-fargate-vapor-mongo-atlas-network-address.png ├── gcp-cloud-run.png ├── gcp-cloud-build.png ├── gcp-connect-repo.png ├── gcp-cloud-build-settings.png ├── digital-ocean-create-droplet.png ├── digital-ocean-droplet-list.png ├── swift-download-ubuntu-18-copy-link.png ├── digital-ocean-distributions-ubuntu-18.png └── perf-issues-flamegraph.svg ├── docs ├── llvm-sanitizers.md ├── deployment.md ├── setup-and-ide-alternatives.md ├── digital-ocean.md ├── testing.md ├── building.md ├── heroku.md ├── performance.md ├── linux-perf.md ├── ubuntu.md ├── packaging.md ├── aws.md ├── gcp.md ├── memory-leaks-and-usage.md ├── allocations.md ├── concurrency-adoption-guidelines.md ├── libs │ └── log-levels.md └── aws-copilot-fargate-vapor-mongo.md └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | 3 | -------------------------------------------------------------------------------- /images/aws/ec2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/ec2.png -------------------------------------------------------------------------------- /images/aws/repl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/repl.png -------------------------------------------------------------------------------- /images/aws/ssh-0.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/ssh-0.png -------------------------------------------------------------------------------- /images/aws/ec2-list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/ec2-list.png -------------------------------------------------------------------------------- /images/aws/launch-0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/launch-0.png -------------------------------------------------------------------------------- /images/aws/launch-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/launch-1.png -------------------------------------------------------------------------------- /images/aws/launch-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/launch-2.png -------------------------------------------------------------------------------- /images/aws/launch-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/launch-3.png -------------------------------------------------------------------------------- /images/aws/launch-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/launch-4.png -------------------------------------------------------------------------------- /images/aws/launch-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/launch-5.png 
-------------------------------------------------------------------------------- /images/aws/launch-6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/launch-6.png -------------------------------------------------------------------------------- /images/aws/launch-7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/launch-7.png -------------------------------------------------------------------------------- /images/aws/services.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/services.png -------------------------------------------------------------------------------- /images/aws/terminate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/terminate.png -------------------------------------------------------------------------------- /images/gcp-cloud-run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/gcp-cloud-run.png -------------------------------------------------------------------------------- /images/gcp-cloud-build.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/gcp-cloud-build.png -------------------------------------------------------------------------------- /images/gcp-connect-repo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/gcp-connect-repo.png -------------------------------------------------------------------------------- 
/images/gcp-cloud-build-settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/gcp-cloud-build-settings.png -------------------------------------------------------------------------------- /images/aws/aws-fargate-vapor-mongo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/aws-fargate-vapor-mongo.png -------------------------------------------------------------------------------- /images/digital-ocean-create-droplet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/digital-ocean-create-droplet.png -------------------------------------------------------------------------------- /images/digital-ocean-droplet-list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/digital-ocean-droplet-list.png -------------------------------------------------------------------------------- /images/swift-download-ubuntu-18-copy-link.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/swift-download-ubuntu-18-copy-link.png -------------------------------------------------------------------------------- /images/digital-ocean-distributions-ubuntu-18.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/digital-ocean-distributions-ubuntu-18.png -------------------------------------------------------------------------------- /images/aws/aws-fargate-vapor-mongo-atlas-connection.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/aws-fargate-vapor-mongo-atlas-connection.png -------------------------------------------------------------------------------- /images/aws/aws-fargate-vapor-mongo-atlas-network-address.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/swift-server/guides/HEAD/images/aws/aws-fargate-vapor-mongo-atlas-network-address.png -------------------------------------------------------------------------------- /docs/llvm-sanitizers.md: -------------------------------------------------------------------------------- 1 | # LLVM TSAN / ASAN 2 | 3 | For multithreaded and low-level unsafe interfacing server code, the ability to use LLVM's [ThreadSanitizer](https://clang.llvm.org/docs/ThreadSanitizer.html) and 4 | [AddressSanitizer](https://clang.llvm.org/docs/AddressSanitizer.html) can help troubleshoot invalid thread usage and invalid usage/access of memory. 5 | 6 | There is a [blog post](https://swift.org/blog/tsan-support-on-linux/) outlining the usage of TSAN. 7 | 8 | The short story is to use the swiftc command line options `-sanitize=address` and `-sanitize=thread` to each respective tool. 
9 | 10 | Also for Swift Package Manager projects you can use `--sanitize` at the command line, e.g.: 11 | 12 | ``` 13 | swift build --sanitize=address 14 | ``` 15 | 16 | or 17 | 18 | ``` 19 | swift build --sanitize=thread 20 | ``` 21 | 22 | and it can be used for the tests too: 23 | 24 | ``` 25 | swift test --sanitize=address 26 | ``` 27 | 28 | or 29 | 30 | ``` 31 | swift test --sanitize=thread 32 | ``` 33 | -------------------------------------------------------------------------------- /docs/deployment.md: -------------------------------------------------------------------------------- 1 | 2 | ## Deployment to Servers or Public Cloud 3 | 4 | The following guides can help with the deployment to public cloud providers: 5 | * [AWS](aws.md) 6 | * [DigitalOcean](digital-ocean.md) 7 | * [Heroku](heroku.md) 8 | * [Kubernetes & Docker](packaging.md#docker) 9 | * [GCP](gcp.md) 10 | * _Have a guide for other popular public clouds like Azure? Add it here!_ 11 | 12 | If you are deploying to your own servers (e.g. bare metal, VMs or Docker) there are several strategies for packaging Swift applications for deployment, see the [Packaging Guide](packaging.md) for more information. 13 | 14 | ### Deploying a Debuggable Configuration (Production on Linux) 15 | 16 | - If you have `--privileged`/`--security-opt seccomp=unconfined` containers or are running in VMs or even bare metal, you can run your binary with 17 | 18 | lldb --batch -o "break set -n main --auto-continue 1 -C \"process handle SIGPIPE -s 0\"" -o run -k "image list" -k "register read" -k "bt all" -k "exit 134" ./my-program 19 | 20 | instead of `./my-program` to get something akin to a 'crash report' on crash. 21 | 22 | - If you don't have `--privileged` (or `--security-opt seccomp=unconfined`) containers (meaning you won't be able to use `lldb`) or you don't want to use lldb, consider using a library like [`swift-backtrace`](https://github.com/swift-server/swift-backtrace) to get stack traces on crash. 
23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > **Warning** 2 | > The guides are now part of Swift.org and will continue to be evolved there. This repo is now archived 3 | 4 | # Swift on Server Development Guide 5 | 6 | ## Introduction 7 | 8 | This guide is designed to help teams and individuals running Swift Server applications on Linux and to provide orientation for those who want to start with such development. 9 | It focuses on how to build, test, deploy and debug such application and provides tips in those areas. 10 | 11 | ## Contents 12 | 13 | - [Setup and code editing](docs/setup-and-ide-alternatives.md) 14 | - [Building](docs/building.md) 15 | - [Testing](docs/testing.md) 16 | - [Debugging Memory leaks](docs/memory-leaks-and-usage.md) 17 | - [Performance troubleshooting and analysis](docs/performance.md) 18 | - [Debugging multithreading issues and memory checks](docs/llvm-sanitizers.md) 19 | - [Deployment](docs/deployment.md) 20 | 21 | ### Server-side library development 22 | 23 | Server-side libraries should, to the best of their ability, play well with established patterns in the SSWG library ecosystem. 24 | They should also utilize the core observability libraries, such as: logging, metrics and distributed tracing, where applicable. 25 | 26 | The below guidelines are aimed to help library developers with some of the typical questions and challenges they might face when designing server-side focused libraries with Swift: 27 | 28 | - [SwiftLog: Log level guidelines](docs/libs/log-levels.md) 29 | - [Swift Concurrency Adoption Guidelines](docs/concurrency-adoption-guidelines.md) 30 | 31 | _The guide is a community effort, and all are invited to share their tips and know-how. 
Please provide a PR if you have ideas for improving this guide!_ 32 | -------------------------------------------------------------------------------- /docs/setup-and-ide-alternatives.md: -------------------------------------------------------------------------------- 1 | ## Installing Swift 2 | 3 | The [supported platforms](https://swift.org/platform-support/) for running Swift on the server and the [ready-built tools packages](https://swift.org/download/) are all hosted on swift.org together with installation instructions. There you also can find the [language reference documentation](https://swift.org/documentation/). 4 | 5 | ## IDEs/Editors with Swift Support 6 | 7 | A number of editors you may already be familiar with have support for writing Swift code. Here we provide a non-exhaustive list of such editors and relevant plugins/extensions, sorted alphabetically. 8 | 9 | * [Atom IDE support](https://atom.io/packages/ide-swift) 10 | * [Atomic Blonde](https://atom.io/packages/atomic-blonde) a SourceKit based syntax highlighter for Atom. 11 | 12 | * [CLion](https://www.jetbrains.com/help/clion/swift.html) 13 | 14 | * [Emacs plugin](https://github.com/swift-emacs/swift-mode) 15 | 16 | * [VIM plugin](https://github.com/keith/swift.vim) 17 | 18 | * [Visual Studio Code](https://code.visualstudio.com) 19 | * [Swift for Visual Studio Code extension](https://marketplace.visualstudio.com/items?itemName=sswg.swift-lang) 20 | 21 | * [Xcode](https://developer.apple.com/xcode/ide/) 22 | 23 | ## Language Server Protocol (LSP) Support 24 | 25 | The [SourceKit-LSP project](https://github.com/apple/sourcekit-lsp) provides a Swift implementation of the [Language Server Protocol](https://microsoft.github.io/language-server-protocol/), which provides features such as code completion and jump-to-definition in supported editors. 
26 | 27 | The project has both an [extensive list of editors that support it](https://github.com/apple/sourcekit-lsp/tree/main/Editors) and setup instructions for those editors, including many of those listed above. 28 | 29 | _Do you know about another IDE or IDE plugin that is missing? Please submit a PR to add them here!_ 30 | -------------------------------------------------------------------------------- /docs/digital-ocean.md: -------------------------------------------------------------------------------- 1 | # Deploying to DigitalOcean 2 | 3 | This guide will walk you through setting up an Ubuntu virtual machine on a DigitalOcean [Droplet](https://www.digitalocean.com/products/droplets/). To follow this guide, you will need to have a [DigitalOcean](https://www.digitalocean.com) account with billing configured. 4 | 5 | ## Create Server 6 | 7 | Use the create menu to create a new Droplet. 8 | 9 | ![Create Droplet](../images/digital-ocean-create-droplet.png) 10 | 11 | Under distributions, select Ubuntu 18.04 LTS. 12 | 13 | ![Ubuntu Distro](../images/digital-ocean-distributions-ubuntu-18.png) 14 | 15 | > Note: You may select any version of Linux that Swift supports. You can check which operating systems are officially supported on the [Swift Releases](https://swift.org/download/#releases) page. 16 | 17 | After selecting the distribution, choose any plan and datacenter region you prefer. Then setup an SSH key to access the server after it is created. Finally, click create Droplet and wait for the new server to spin up. 18 | 19 | Once the new server is ready, hover over the Droplet's IP address and click copy. 20 | 21 | ![Droplet List](../images/digital-ocean-droplet-list.png) 22 | 23 | ## Initial Setup 24 | 25 | Open your terminal and connect to the server as root using SSH. 
26 | 27 | ```sh 28 | ssh root@ 29 | ``` 30 | 31 | DigitalOcean has an in-depth guide for [initial server setup on Ubuntu 18.04](https://www.digitalocean.com/community/tutorials/initial-server-setup-with-ubuntu-18-04). This guide will quickly cover the basics. 32 | 33 | ### Configure Firewall 34 | 35 | Allow OpenSSH through the firewall and enable it. 36 | 37 | ```sh 38 | ufw allow OpenSSH 39 | ufw enable 40 | ``` 41 | 42 | Then enable a non-root accessible HTTP port. 43 | 44 | ```sh 45 | ufw allow 8080 46 | ``` 47 | 48 | ### Add User 49 | 50 | Create a new user besides `root` that will be responsible for running your application. This guide uses a non-root user without access to `sudo` for added security. 51 | 52 | The following guides assume the user is named `swift`. 53 | 54 | ```sh 55 | adduser swift 56 | ``` 57 | 58 | Copy the root user's authorized SSH keys to the newly created user. This will allow you to use SSH (`scp`) as the new user. 59 | 60 | ```sh 61 | rsync --archive --chown=swift:swift ~/.ssh /home/swift 62 | ``` 63 | 64 | Your DigitalOcean virtual machine is now ready. Continue using the [Ubuntu](ubuntu.md) guide. 65 | -------------------------------------------------------------------------------- /docs/testing.md: -------------------------------------------------------------------------------- 1 | # Testing 2 | 3 | SwiftPM is integrated with [XCTest, Apple’s unit test framework](https://developer.apple.com/documentation/xctest). Running `swift test` from the terminal, or triggering the test action in your IDE (Xcode or similar), will run all of your XCTest test cases. Test results will be displayed in your IDE or printed out to the terminal. 4 | 5 | A convenient way to test on Linux is using Docker. For example: 6 | 7 | `$ docker run -v "$PWD:/code" -w /code swift:latest swift test` 8 | 9 | The above command will run the tests using the latest Swift Docker image, utilizing bind mounts to the sources on your file system. 
10 | 11 | Swift supports architecture-specific code. By default, Foundation imports architecture-specific libraries like Darwin or Glibc. While developing on macOS, you may end up using APIs that are not available on Linux. Since you are most likely to deploy a cloud service on Linux, it is critical to test on Linux. 12 | 13 | A historically important detail about testing for Linux is the `Tests/LinuxMain.swift` file. 14 | 15 | - In Swift versions 5.4 and newer tests are automatically discovered on all platforms, no special file or flag needed. 16 | - In Swift versions >= 5.1 < 5.4, tests can be automatically discovered on Linux using the `swift test --enable-test-discovery` flag. 17 | - In Swift versions older than 5.1 the `Tests/LinuxMain.swift` file provides SwiftPM an index of all the tests it needs to run on Linux and it is critical to keep this file up-to-date as you add more unit tests. To regenerate this file, run `swift test --generate-linuxmain` after adding tests. It is also a good idea to include this command as part of your continuous integration setup. 18 | 19 | ### Testing for production 20 | 21 | - For Swift versions between Swift 5.1 and 5.4, always test with `--enable-test-discovery` to avoid forgetting tests on Linux. 22 | 23 | - Make use of the sanitizers. Before running code in production, and preferably as a regular part of your CI process, do the following: 24 | * Run your test suite with TSan (thread sanitizer): `swift test --sanitize=thread` 25 | * Run your test suite with ASan (address sanitizer): `swift test --sanitize=address` and `swift test --sanitize=address -c release -Xswiftc -enable-testing` 26 | 27 | - Generally, whilst testing, you may want to build using `swift build --sanitize=thread`. The binary will run slower and is not suitable for production, but you might be able to catch threading issues early - before you deploy your software. Often threading issues are really hard to debug and reproduce and also cause random problems. 
TSan helps catch them early. 28 | -------------------------------------------------------------------------------- /docs/building.md: -------------------------------------------------------------------------------- 1 | # Build system 2 | 3 | The recommended way to build server applications is with [Swift Package Manager](https://swift.org/package-manager/). SwiftPM provides a cross-platform foundation for building Swift code and works nicely for having one code base that can be edited as well as run on many Swift platforms. 4 | 5 | ## Building 6 | SwiftPM works from the command line and is also integrated within Xcode. 7 | 8 | You can build your code either by running `swift build` from the terminal, or by triggering the build action in Xcode. 9 | 10 | ### Docker Usage 11 | Swift binaries are architecture-specific, so running the build command on macOS will create a macOS binary, and similarly running the command on Linux will create a Linux binary. 12 | 13 | Many Swift developers use macOS for development, which enables taking advantage of the great tooling that comes with Xcode. However, most server applications are designed to run on Linux. 14 | 15 | If you are developing on macOS, Docker is a useful tool for building on Linux and creating Linux binaries. Apple publishes official Swift Docker images to [Docker Hub](https://hub.docker.com/_/swift). 16 | 17 | For example, to build your application using the latest Swift Docker image: 18 | 19 | `$ docker run -v "$PWD:/code" -w /code swift:latest swift build` 20 | 21 | Note, if you want to run the Swift compiler for Intel CPUs on an Apple Silicon (M1) Mac, please add `--platform linux/amd64 -e QEMU_CPU=max` to the command line. For example: 22 | 23 | `$ docker run -v "$PWD:/code" -w /code --platform linux/amd64 -e QEMU_CPU=max swift:latest swift build` 24 | 25 | The above command will run the build using the latest Swift Docker image, utilizing bind mounts to the sources on your Mac. 26 | 27 | ### Debug vs. 
Release Mode 28 | By default, SwiftPM will build a debug version of the application. Note that debug versions are not suitable for running in production as they are significantly slower. To build a release version of your app, run `swift build -c release`. 29 | 30 | ### Locating Binaries 31 | Binary artifacts that can be deployed are found under `.build/x86_64-unknown-linux` on Linux, and `.build/x86_64-apple-macosx` on macOS. 32 | 33 | SwiftPM can show you the full binary path using `swift build --show-bin-path -c release`. 34 | 35 | ### Building for production 36 | 37 | - Build production code in release mode by compiling with `swift build -c release`. Running code compiled in debug mode will hurt performance significantly. 38 | 39 | - For best performance in Swift 5.2 or later, pass `-Xswiftc -cross-module-optimization` (this won't work in Swift versions before 5.2) - enabling this should be verified with performance tests (as any optimization changes) as it may sometimes cause performance regressions. 40 | 41 | - Integrate [`swift-backtrace`](https://github.com/swift-server/swift-backtrace) into your application to make sure backtraces are printed on crash. Backtraces do not work out-of-the-box on Linux, and this library helps to fill the gap. Eventually this will become a language feature and not require a discrete library. 42 | -------------------------------------------------------------------------------- /docs/heroku.md: -------------------------------------------------------------------------------- 1 | # What is Heroku 2 | 3 | Heroku is a popular all in one hosting solution, you can find more at heroku.com 4 | 5 | ## Signing Up 6 | 7 | You'll need a heroku account, if you don't have one, please sign up here: [https://signup.heroku.com/](https://signup.heroku.com/) 8 | 9 | ## Installing CLI 10 | 11 | Make sure that you've installed the heroku cli tool. 
12 | 13 | ### HomeBrew 14 | 15 | ```bash 16 | brew install heroku/brew/heroku 17 | ``` 18 | 19 | ### Other Install Options 20 | 21 | See alternative install options here: [https://devcenter.heroku.com/articles/heroku-cli#download-and-install](https://devcenter.heroku.com/articles/heroku-cli#download-and-install). 22 | 23 | ### Logging in 24 | 25 | Once you've installed the CLI, login with the following: 26 | 27 | ```bash 28 | heroku login 29 | ``` 30 | 31 | ### Create an application 32 | 33 | Visit dashboard.heroku.com to access your account, and create a new application from the drop down in the upper right hand corner. Heroku will ask a few questions such as region and application name, just follow their prompts. 34 | 35 | ### Project 36 | 37 | Today we're going to be hosting Swift NIO's example http server, you can apply these concepts to your own project. Let's start by cloning NIO 38 | 39 | ```bash 40 | 41 | git clone https://github.com/apple/swift-nio 42 | ``` 43 | 44 | Make sure to make our newly cloned directory the working directory 45 | 46 | 47 | ```bash 48 | cd swift-nio 49 | ``` 50 | 51 | By default, Heroku deploys the **master** branch. Always make sure all changes are checked into this branch before pushing. 52 | 53 | #### Connect with Heroku 54 | 55 | Connect your app with Heroku (replace with your app's name). 56 | 57 | ```bash 58 | $ heroku git:remote -a your-apps-name-here 59 | ``` 60 | 61 | ### Set Stack 62 | 63 | As of 13 September 2018, Heroku’s default stack is Heroku 18, we need it to be 16 for swift projects. 64 | 65 | ```bash 66 | heroku stack:set heroku-16 -a your-apps-name-here 67 | ``` 68 | 69 | ### Set Buildpack 70 | 71 | Set the buildpack to teach Heroku how to deal with swift, the vapor-communnity buildpack is a good buildpack for *any swift project*. It doesn't install vapor, and it doesn't have any vapor specific setup. 
72 | 73 | 74 | ```bash 75 | heroku buildpacks:set vapor/vapor 76 | ``` 77 | 78 | ### Swift version file 79 | 80 | The buildpack we added looks for a **.swift-version** file to know which version of swift to use. 81 | 82 | ```bash 83 | echo "5.2" > .swift-version 84 | ``` 85 | 86 | This creates **.swift-version** with `5.2` as its contents. 87 | 88 | 89 | ### Procfile 90 | 91 | Heroku uses the **Procfile** to know how to run your app. This includes the executable name and any arguments necessary. You'll see `$PORT` below, this allows heroku to assign a specific port when it launches the app. 92 | 93 | ``` 94 | web: NIOHTTP1Server 0.0.0.0 $PORT 95 | ``` 96 | 97 | You can use this command in terminal to set the file 98 | 99 | 100 | ```bash 101 | echo "web: NIOHTTP1Server 0.0.0.0 $PORT" > Procfile 102 | ``` 103 | 104 | ### Commit changes 105 | 106 | We have now added the `.swift-version` file, and the `Procfile`, make sure these are committed into master or Heroku will not find them. 107 | 108 | ### Deploying to Heroku 109 | 110 | You're ready to deploy, run this from the terminal. It may take a while to build, this is normal. 111 | 112 | ```none 113 | git push heroku master 114 | ``` 115 | -------------------------------------------------------------------------------- /docs/performance.md: -------------------------------------------------------------------------------- 1 | # Debugging Performance Issues 2 | 3 | First of all, it's very important to make sure that you compiled your Swift code in _release mode_. The performance difference between debug and release builds is huge in Swift. You can compile your Swift code in release mode using 4 | 5 | swift build -c release 6 | 7 | ## Instruments 8 | 9 | If you can reproduce your performance issue on macOS, you probably want to check out Instrument's [Time Profiler](https://developer.apple.com/videos/play/wwdc2016/418/). 
10 | 11 | ## Flamegraphs 12 | 13 | [Flamegraphs](http://www.brendangregg.com/flamegraphs.html) are a nice way to visualise what stack frames were running for what percentage of the time. That often helps pinpointing the areas of your program that need improvement. Flamegraphs can be created on most platforms, in this document we will focus on Linux. 14 | 15 | ### Flamegraphs on Linux 16 | 17 | To have something to discuss, let's use a program that has a pretty big performance problem: 18 | 19 | ```swift 20 | /* a terrible data structure which has a subset of the operations that Swift's 21 | * array does: 22 | * - retrieving elements by index 23 | * --> user's reasonable performance expectation: O(1) (like Swift's Array) 24 | * --> implementation's actual performance: O(n) 25 | * - adding elements 26 | * --> user's reasonable performance expectation: amortised O(1) (like Swift's Array) 27 | * --> implementation's actual performance: O(n) 28 | * 29 | * ie. the problem I'm trying to demo here is that this is an implementation 30 | * where the user would expect (amortised) constant time access but in reality 31 | * is linear time. 32 | */ 33 | struct TerribleArray { 34 | /* this is a terrible idea: storing the index inside of the array (so we can 35 | * waste some performance later ;) 36 | */ 37 | private var storage: Array<(Int, T)> = Array() 38 | 39 | /* oh my */ 40 | private func maximumIndex() -> Int { 41 | return (self.storage.map { $0.0 }.max()) ?? -1 42 | } 43 | 44 | /* expectation: amortised O(1) but implementation is O(n) */ 45 | public mutating func append(_ value: T) { 46 | let maxIdx = self.maximumIndex() 47 | self.storage.append((maxIdx + 1, value)) 48 | assert(self.storage.count == maxIdx + 2) 49 | } 50 | 51 | /* expectation: O(1) but implementation is O(n) */ 52 | public subscript(index: Int) -> T? 
{ 53 | get { 54 | return self.storage.filter({ $0.0 == index }).first?.1 55 | } 56 | } 57 | } 58 | 59 | protocol FavouriteNumbers { 60 | func addFavouriteNumber(_ number: Int) 61 | func isFavouriteNumber(_ number: Int) -> Bool 62 | } 63 | 64 | public class MyFavouriteNumbers: FavouriteNumbers { 65 | private var storage: TerribleArray 66 | public init() { 67 | self.storage = TerribleArray() 68 | } 69 | 70 | /* - user's expectation: O(n) 71 | * - reality O(n^2) because of TerribleArray */ 72 | public func isFavouriteNumber(_ number: Int) -> Bool { 73 | var idx = 0 74 | var found = false 75 | while true { 76 | if let storageNum = self.storage[idx] { 77 | if number == storageNum { 78 | found = true 79 | break 80 | } 81 | } else { 82 | break 83 | } 84 | idx += 1 85 | } 86 | return found 87 | } 88 | 89 | /* - user's expectation: amortised O(1) 90 | * - reality O(n) because of TerribleArray */ 91 | public func addFavouriteNumber(_ number: Int) { 92 | self.storage.append(number) 93 | precondition(self.isFavouriteNumber(number)) 94 | } 95 | } 96 | 97 | let x: FavouriteNumbers = MyFavouriteNumbers() 98 | 99 | for f in 0..<2_000 { 100 | x.addFavouriteNumber(f) 101 | } 102 | ``` 103 | 104 | The above program contains the `TerribleArray` data structure which has _O(n)_ appends and not the amortised _O(1)_ that users are used to from `Array`. 105 | 106 | We will assume, that you have Linux's `perf` installed and configured, documentation on how to install `perf` can be found in [this guide](linux-perf.md). 107 | 108 | Let's assume we have compiled the above code using `swift build -c release` into a binary called `./slow`. 
We also assume that the `https://github.com/brendangregg/FlameGraph` repository is cloned in `~/FlameGraph`: 109 | 110 | ``` 111 | # Step 1: Record the stack frames with a 99 Hz sampling frequency 112 | sudo perf record -F 99 --call-graph dwarf -- ./slow 113 | # Alternatively, to attach to an existing process use 114 | # sudo perf record -F 99 --call-graph dwarf -p PID_OF_SLOW 115 | # or if you don't know the pid, you can try (assuming your binary name is "slow") 116 | # sudo perf record -F 99 --call-graph dwarf -p $(pgrep slow) 117 | 118 | # Step 2: Export the recording into `out.perf` 119 | sudo perf script > out.perf 120 | 121 | # Step 3: Aggregate the recorded stacks and demangle the symbols 122 | ~/FlameGraph/stackcollapse-perf.pl out.perf | swift demangle > out.folded 123 | 124 | # Step 4: Export the result into an SVG file. 125 | ~/FlameGraph/flamegraph.pl out.folded > out.svg # Produce 126 | ``` 127 | 128 | The resulting file will look something like: 129 | 130 | ![](../images/perf-issues-flamegraph.svg) 131 | 132 | And we can see that almost all of our runtime is spent in `isFavouriteNumber` which is invoked from `addFavouriteNumber`. That should be a very good hint to the programmer on where to look for improvements. Maybe after all, we should use `Set` to store the favourite numbers, that should get us an answer to whether a number is a favourite number in constant time (_O(1)_). 133 | 134 | ## Alternate `malloc` libraries 135 | For some workloads putting serious pressure on the memory allocation subsystem, it may be beneficial to use a custom `malloc` library. 136 | It requires no changes to the code, but needs interposing with e.g. an environment variable before running your server. 137 | It is worth benchmarking with the default and with a custom memory allocator to see how much it helps for the specific workload. 
138 | There are many `malloc` implementations out there, but a portable and well-performing one is [Microsoft's mimalloc](https://github.com/microsoft/mimalloc). 139 | 140 | Typically these are simply enabled by using LD_PRELOAD: 141 | 142 | `> LD_PRELOAD=/usr/bin/libmimalloc.so myprogram` 143 | -------------------------------------------------------------------------------- /docs/linux-perf.md: -------------------------------------------------------------------------------- 1 | # Linux `perf` 2 | 3 | ## `perf`, what's that? 4 | 5 | The Linux [`perf` tool](https://perf.wiki.kernel.org/index.php/Main_Page) is an incredibly powerful tool, that can amongst other things be used for: 6 | 7 | - Sampling CPU-bound processes (or the whole system) to analyse which part of your application is consuming the CPU time 8 | - Accessing CPU performance counters (PMU) 9 | - "user probes" (uprobes) which trigger for example when a certain function in your application runs 10 | 11 | In general, `perf` can count and/or record the call stacks of your threads when a certain event occurs. These events can be triggered by: 12 | 13 | - Time (e.g. 1000 times per second), useful for time profiling. For an example use, see the [CPU performance debugging guide](performance.md). 14 | - System calls, useful to see where your system calls are happening. 15 | - Various system events, for example if you'd like to know when context switches occur. 16 | - CPU performance counters, useful if your performance issues can be traced down to micro-architectural details of your CPU (such as branch mispredictions). For an example, see [SwiftNIO's Advanced Performance Analysis guide](https://github.com/apple/swift-nio/blob/main/docs/advanced-performance-analysis.md). 17 | - and much more 18 | 19 | ## Getting `perf` to work 20 | 21 | Unfortunately, getting `perf` to work depends on your environment. Below, please find a selection of environments and how to get `perf` to work there. 
22 | 23 | ### Installing `perf` 24 | 25 | Technically, `perf` is part of the Linux kernel sources and you'd want a `perf` version that exactly matches your Linux kernel version. In many cases however a "close-enough" `perf` version will do too. If in doubt, use a `perf` version that's slightly older than your kernel over one that's newer. 26 | 27 | - Ubuntu 28 | 29 | ``` 30 | apt-get update && apt-get -y install linux-tools-generic 31 | ``` 32 | 33 | See below for more information because Ubuntu packages a different `perf` per kernel version. 34 | - Debian 35 | 36 | ``` 37 | apt-get update && apt-get -y install linux-perf 38 | ``` 39 | 40 | - Fedora/RedHat derivates 41 | 42 | ``` 43 | yum install -y perf 44 | ``` 45 | 46 | You can confirm that your `perf` installation works using `perf stat -- sleep 0.1` (if you're already `root`) or `sudo perf stat -- sleep 0.1`. 47 | 48 | 49 | ##### `perf` on Ubuntu when you can't match the kernel version 50 | 51 | On Ubuntu (and other distributions that package `perf` per kernel version) you may see an error after installing `linux-tools-generic`. The error message will look similar to 52 | 53 | ``` 54 | $ perf stat -- sleep 0.1 55 | WARNING: perf not found for kernel 5.10.25 56 | 57 | You may need to install the following packages for this specific kernel: 58 | linux-tools-5.10.25-linuxkit 59 | linux-cloud-tools-5.10.25-linuxkit 60 | 61 | You may also want to install one of the following packages to keep up to date: 62 | linux-tools-linuxkit 63 | linux-cloud-tools-linuxkit 64 | ``` 65 | 66 | The best fix for this is to follow what `perf` says and to install one of the above packages. If you're in a Docker container, this may not be possible because you'd need to match the kernel version (which is especially difficult in Docker for Mac because it uses a VM). For example, the suggested `linux-tools-5.10.25-linuxkit` is not actually available. 
67 | 68 | As a workaround, you can try one of the following options 69 | 70 | - If you're already `root` and prefer a shell `alias` (only valid in this shell) 71 | 72 | ``` 73 | alias perf=$(find /usr/lib/linux-tools/*/perf | head -1) 74 | ``` 75 | 76 | - If you're a user and would prefer to link `/usr/local/bin/perf` 77 | 78 | ``` 79 | sudo ln -s "$(find /usr/lib/linux-tools/*/perf | head -1)" /usr/local/bin/perf 80 | ``` 81 | 82 | After this, you should be able to use `perf stat -- sleep 0.1` (if you're already `root`) or `sudo perf stat -- sleep 0.1` successfully. 83 | 84 | ### Bare metal 85 | 86 | For a bare metal Linux machine, all you need to do is to install `perf` which should then work in full fidelity. 87 | 88 | ### In Docker (running on bare-metal Linux) 89 | 90 | You will need to launch your container with `docker run --privileged` (don't run this in production) and then you should have full access to perf (including the PMU). 91 | 92 | To validate that `perf` works correctly, run for example `perf stat -- sleep 0.1`. Whether you'll see the `<not supported>` next to some information will depend on if you have access to the CPU's performance counters (the PMU). In Docker on bare metal, this should work, i.e. no `<not supported>`s should show up. 93 | 94 | ### Docker for Mac 95 | 96 | Docker for Mac is like Docker on bare metal but with some extra complexity because we're actually running the Docker containers hosted in a Linux VM. So matching the kernel version will be difficult. 97 | 98 | If you follow the above installation instructions, you should nevertheless get `perf` to work but you won't have access to the CPU's performance counters (the PMU) so you'll see a few events show up as `<not supported>`. 
99 | 100 | ``` 101 | $ perf stat -- sleep 0.1 102 | 103 | Performance counter stats for 'sleep 0.1': 104 | 105 | 0.44 msec task-clock # 0.004 CPUs utilized 106 | 1 context-switches # 0.002 M/sec 107 | 0 cpu-migrations # 0.000 K/sec 108 | 57 page-faults # 0.129 M/sec 109 | <not supported> cycles 110 | <not supported> instructions 111 | <not supported> branches 112 | <not supported> branch-misses 113 | 114 | 0.102869000 seconds time elapsed 115 | 116 | 0.000000000 seconds user 117 | 0.001069000 seconds sys 118 | ``` 119 | 120 | ### In a VM 121 | 122 | In a virtual machine, you would install `perf` just like on bare metal. And either `perf` will work just fine with all its features or it will look similarly to what you get on Docker for Mac. 123 | 124 | What you need your hypervisor to support (& allow) is "PMU passthrough" or "PMU virtualisation". VMware Fusion does support PMU virtualisation which they call vPMC (VM settings -> Processors & Memory -> Advanced -> Allow code profiling applications in this VM). If you're on a Mac this setting is unfortunately only supported up to and including macOS Catalina (and [not on Big Sur](https://kb.vmware.com/s/article/81623)). 125 | 126 | If you use `libvirt` to manage your hypervisor and VMs, you can use `sudo virsh edit your-domain` and replace the `<cpu ... />` XML tag with 127 | 128 | <cpu mode='host-passthrough'/> 129 | 130 | to allow the PMU to be passed through to the guest. For other hypervisors, an internet search will usually reveal how to enable PMU passthrough. 131 | -------------------------------------------------------------------------------- /docs/ubuntu.md: -------------------------------------------------------------------------------- 1 | # Deploying on Ubuntu 2 | 3 | Once you have your Ubuntu virtual machine ready, you can deploy your Swift app. This guide assumes you have a fresh install with a non-root user named `swift`. It also assumes both `root` and `swift` are accessible via SSH. 
For information on setting this up, check out the platform guides: 4 | 5 | - [DigitalOcean](digital-ocean.md) 6 | 7 | The [packaging](packaging.md) guide provides an overview of available deployment options. This guide takes you through each deployment option step-by-step for Ubuntu specifically. These examples will deploy SwiftNIO's [example HTTP server](https://github.com/apple/swift-nio/tree/master/Sources/NIOHTTP1Server), but you can test with your own project. 8 | 9 | - [Binary Deployment](#binary-deployment) 10 | - [Source Deployment](#source-deployment) 11 | 12 | ## Binary Deployment 13 | 14 | This section shows you how to build your app locally and deploy just the binary. 15 | 16 | ### Build Binaries 17 | 18 | The first step is to build your app locally. The easiest way to do this is with Docker. For this example, we'll be deploying SwiftNIO's demo HTTP server. Start by cloning the repository. 19 | 20 | ```sh 21 | git clone https://github.com/apple/swift-nio.git 22 | cd swift-nio 23 | ``` 24 | 25 | Once inside the project folder, use the following command to build the app through Docker and copy all build artifacts into `.build/install`. Since this example will be deploying to Ubuntu 18.04, the `-bionic` Docker image is used to build. 26 | 27 | ```sh 28 | docker run --rm \ 29 | -v "$PWD:/workspace" \ 30 | -w /workspace \ 31 | swift:5.2-bionic \ 32 | /bin/bash -cl ' \ 33 | swift build && \ 34 | rm -rf .build/install && mkdir -p .build/install && \ 35 | cp -P .build/debug/NIOHTTP1Server .build/install/ && \ 36 | cp -P /usr/lib/swift/linux/lib*so* .build/install/' 37 | ``` 38 | 39 | > Tip: If you are building this project for production, use `swift build -c release`, see [building for production](building.md#building-for-production) for more information. 40 | 41 | Notice that Swift's shared libraries are being included. This is important since Swift is not ABI stable on Linux. 
This means Swift programs must run against the shared libraries they were compiled with. 42 | 43 | After your project is built, use the following command to create an archive for easy transport to the server. 44 | 45 | ```sh 46 | tar cvzf hello-world.tar.gz -C .build/install . 47 | ``` 48 | 49 | Next, use `scp` to copy the archive to the deploy server's home folder. 50 | 51 | ```sh 52 | scp hello-world.tar.gz swift@<server-ip>:~/ 53 | ``` 54 | 55 | Once the copy is complete, login to the deploy server. 56 | 57 | ```sh 58 | ssh swift@<server-ip> 59 | ``` 60 | 61 | Create a new folder to hold the app binaries and decompress the archive. 62 | 63 | ```sh 64 | mkdir hello-world 65 | tar -xvf hello-world.tar.gz -C hello-world 66 | ``` 67 | 68 | You can now start the executable. Supply the desired IP address and port. Binding to port `80` requires sudo, so we use `8080` instead. 69 | 70 | [TODO]: <> (Link to Nginx guide once available for serving on port 80) 71 | 72 | ```sh 73 | ./hello-world/NIOHTTP1Server 8080 74 | ``` 75 | 76 | You may need to install additional system libraries like `libxml` or `tzdata` if your app uses Foundation. The system dependencies installed by Swift's slim docker images are a [good reference](https://github.com/apple/swift-docker/blob/master/5.2/ubuntu/18.04/slim/Dockerfile). 77 | 78 | Finally, visit your server's IP via browser or local terminal and you should see a response. 79 | 80 | ``` 81 | $ curl http://<server-ip>:8080 82 | Hello world! 83 | ``` 84 | 85 | Use `CTRL+C` to quit the server. 86 | 87 | Congratulations on getting your Swift server app running on Ubuntu! 88 | 89 | ## Source Deployment 90 | 91 | This section shows you how to build and run your project on the deployment server. 92 | 93 | ## Install Swift 94 | 95 | Now that you've created a new Ubuntu server you can install Swift. You must be logged in as `root` (or separate user with `sudo` access) to do this. 
96 | 97 | ```sh 98 | ssh root@ 99 | ``` 100 | 101 | ### Swift Dependencies 102 | 103 | Install Swift's required dependencies. 104 | 105 | ```sh 106 | sudo apt update 107 | sudo apt install clang libicu-dev build-essential pkg-config 108 | ``` 109 | 110 | ### Download Toolchain 111 | 112 | This guide will install Swift 5.2. Visit the [Swift Downloads](https://swift.org/download/#releases) page for a link to latest release. Copy the download link for Ubuntu 18.04. 113 | 114 | ![Download Swift](../images/swift-download-ubuntu-18-copy-link.png) 115 | 116 | Download and decompress the Swift toolchain. 117 | 118 | ```sh 119 | wget https://swift.org/builds/swift-5.2-release/ubuntu1804/swift-5.2-RELEASE/swift-5.2-RELEASE-ubuntu18.04.tar.gz 120 | tar xzf swift-5.2-RELEASE-ubuntu18.04.tar.gz 121 | ``` 122 | 123 | > Note: Swift's [Using Downloads](https://swift.org/download/#using-downloads) guide includes information on how to verify downloads using PGP signatures. 124 | 125 | ### Install Toolchain 126 | 127 | Move Swift somewhere easy to acess. This guide will use `/swift` with each compiler version in a subfolder. 128 | 129 | ```sh 130 | sudo mkdir /swift 131 | sudo mv swift-5.2-RELEASE-ubuntu18.04 /swift/5.2.0 132 | ``` 133 | 134 | Add Swift to `/usr/bin` so it can be executed by `swift` and `root`. 135 | 136 | ```sh 137 | sudo ln -s /swift/5.2.0/usr/bin/swift /usr/bin/swift 138 | ``` 139 | 140 | Verify that Swift was installed correctly. 141 | 142 | ```sh 143 | swift --version 144 | ``` 145 | 146 | ## Setup Project 147 | 148 | Now that Swift is installed, let's clone and compile your project. For this example, we'll be using SwiftNIO's [example HTTP server](https://github.com/apple/swift-nio/tree/master/Sources/NIOHTTP1Server). 149 | 150 | First let's install SwiftNIO's system dependencies. 
151 | 152 | ```sh 153 | sudo apt-get install zlib1g-dev 154 | ``` 155 | 156 | ### Clone & Build 157 | 158 | Now that we're done installing things, we can switch to a non-root user to build and run our application. 159 | 160 | ```sh 161 | su swift 162 | cd ~ 163 | ``` 164 | 165 | Clone the project, then use `swift build` to compile it. 166 | 167 | ```sh 168 | git clone https://github.com/apple/swift-nio.git 169 | cd swift-nio 170 | swift build 171 | ``` 172 | 173 | > Tip: If you are building this project for production, use `swift build -c release`, see [building for production](building.md#building-for-production) for more information. 174 | 175 | ### Run 176 | 177 | Once the project has finished compiling, run it on your server's IP at port `8080`. 178 | 179 | ```sh 180 | .build/debug/NIOHTTP1Server 8080 181 | ``` 182 | 183 | If you used `swift build -c release`, then you need to run: 184 | 185 | ```sh 186 | .build/release/NIOHTTP1Server 8080 187 | ``` 188 | 189 | Visit your server's IP via browser or local terminal and you should see a response. 190 | 191 | ``` 192 | $ curl http://:8080 193 | Hello world! 194 | ``` 195 | 196 | Use `CTRL+C` to quit the server. 197 | 198 | Congratulations on getting your Swift server app running on Ubuntu! 199 | -------------------------------------------------------------------------------- /docs/packaging.md: -------------------------------------------------------------------------------- 1 | # Packaging Applications for Deployment 2 | 3 | Once an application is built for production, it still needs to be packaged before it can be deployed to servers. There are several strategies for packaging Swift applications for deployment. 4 | 5 | ## Docker 6 | 7 | One of the most popular ways to package applications these days is using container technologies such as [Docker](https://www.docker.com). 
8 | 9 | Using Docker's tooling, we can build and package the application as a Docker image, publish it to a Docker repository, and later launch it directly on a server or on a platform that supports Docker deployments such as [Kubernetes](https://kubernetes.io). Many public cloud providers including AWS, GCP, Azure, IBM and others encourage this kind of deployment. 10 | 11 | Here is an example `Dockerfile` that builds and packages the application on top of CentOS: 12 | 13 | ```Dockerfile 14 | #------- build ------- 15 | FROM swift:centos8 as builder 16 | 17 | # set up the workspace 18 | RUN mkdir /workspace 19 | WORKDIR /workspace 20 | 21 | # copy the source to the docker image 22 | COPY . /workspace 23 | 24 | RUN swift build -c release --static-swift-stdlib 25 | 26 | #------- package ------- 27 | FROM centos 28 | # copy executables 29 | COPY --from=builder /workspace/.build/release/ / 30 | 31 | # set the entry point (application name) 32 | CMD [""] 33 | ``` 34 | 35 | To create a local Docker image from the `Dockerfile` use the `docker build` command from the application's source location, e.g.: 36 | 37 | ```bash 38 | $ docker build . -t : 39 | ``` 40 | 41 | To test the local image use the `docker run` command, e.g.: 42 | 43 | ```bash 44 | $ docker run : 45 | ``` 46 | 47 | Finally, use the `docker push` command to publish the application's Docker image to a Docker repository of your choice, e.g.: 48 | 49 | ```bash 50 | $ docker tag : /: 51 | $ docker push /: 52 | ``` 53 | 54 | At this point, the application's Docker image is ready to be deployed to the server hosts (which need to run docker), or to one of the platforms that supports Docker deployments. 55 | 56 | See [Docker's documentation](https://docs.docker.com/engine/reference/commandline/) for more complete information about Docker. 
57 | 58 | ### Distroless 59 | 60 | [Distroless](https://github.com/GoogleContainerTools/distroless) is a project by Google that attempts to create minimal images containing only the application and its runtime dependencies. They do not contain package managers, shells or any other programs you would expect to find in a standard Linux distribution. 61 | 62 | Since distroless supports Docker and is based on Debian, packaging a Swift application on it is fairly similar to the Docker process above. Here is an example `Dockerfile` that builds and packages the application on top of a distroless's C++ base image: 63 | 64 | ```Dockerfile 65 | #------- build ------- 66 | # Building using Ubuntu Bionic since its compatible with Debian runtime 67 | FROM swift:bionic as builder 68 | 69 | # set up the workspace 70 | RUN mkdir /workspace 71 | WORKDIR /workspace 72 | 73 | # copy the source to the docker image 74 | COPY . /workspace 75 | 76 | RUN swift build -c release --static-swift-stdlib 77 | 78 | #------- package ------- 79 | # Running on distroless C++ since it includes 80 | # all(*) the runtime dependencies Swift programs need 81 | FROM gcr.io/distroless/cc-debian10 82 | # copy executables 83 | COPY --from=builder /workspace/.build/release/ / 84 | 85 | # set the entry point (application name) 86 | CMD [""] 87 | ``` 88 | 89 | Note the above uses `gcr.io/distroless/cc-debian10` as the runtime image which should work for Swift programs that do not use `FoundationNetworking` or `FoundationXML`. In order to provide more complete support we (the community) could put in a PR into distroless to introduce a base image for Swift that includes `libcurl` and `libxml` which are required for `FoundationNetworking` and `FoundationXML` respectively. 90 | 91 | ## Archive (Tarball, ZIP file, etc.) 
92 | 93 | Since cross-compiling Swift for Linux is not (yet) supported on Mac or Windows, we need to use virtualization technologies like Docker to compile applications we are targeting to run on Linux. 94 | 95 | That said, this does not mean we must also package the applications as Docker images in order to deploy them. While using Docker images for deployment is convenient and popular, an application can also be packaged using a simple and lightweight archive format like tarball or ZIP file, then uploaded to the server where it can be extracted and run. 96 | 97 | Here is an example of using Docker and `tar` to build and package the application for deployment on Ubuntu servers: 98 | 99 | First, use the `docker run` command from the application's source location to build it: 100 | 101 | ```bash 102 | $ docker run --rm \ 103 | -v "$PWD:/workspace" \ 104 | -w /workspace \ 105 | swift:bionic \ 106 | /bin/bash -cl "swift build -c release --static-swift-stdlib" 107 | ``` 108 | 109 | Note we are bind mounting the source directory so that the build writes the build artifacts to the local drive from which we will package them later. 110 | 111 | Next we can create a staging area with the application's executable: 112 | 113 | ```bash 114 | $ docker run --rm \ 115 | -v "$PWD:/workspace" \ 116 | -w /workspace \ 117 | swift:bionic \ 118 | /bin/bash -cl ' \ 119 | rm -rf .build/install && mkdir -p .build/install && \ 120 | cp -P .build/release/ .build/install/' 121 | ``` 122 | 123 | Note this command could be combined with the build command above--we separated them to make the example more readable. 124 | 125 | Finally, create a tarball from the staging directory: 126 | 127 | ```bash 128 | $ tar cvzf -.tar.gz -C .build/install . 
129 | ``` 130 | 131 | We can test the integrity of the tarball by extracting it to a directory and running the application in a Docker runtime container: 132 | 133 | ```bash 134 | $ cd 135 | $ docker run -v "$PWD:/app" -w /app bionic ./ 136 | ``` 137 | 138 | Deploying the application's tarball to the target server can be done using utilities like `scp`, or in a more sophisticated setup using configuration management system like `chef`, `puppet`, `ansible`, etc. 139 | 140 | 141 | ## Source Distribution 142 | 143 | Another distribution technique popular with dynamic languages like Ruby or Javascript is distributing the source to the server, then compiling it on the server itself. 144 | 145 | To build Swift applications directly on the server, the server must have the correct Swift toolchain installed. [Swift.org](https://swift.org/download/#linux) publishes toolchains for a variety of Linux distributions, make sure to use the one matching your server Linux version and desired Swift version. 146 | 147 | The main advantage of this approach is that it is easy. Additional advantage is the server has the full toolchain (e.g. debugger) that can help troubleshoot issues "live" on the server. 148 | 149 | The main disadvantage of this approach that the server has the full toolchain (e.g. compiler) which means a sophisticated attacker can potentially find ways to execute code. They can also potentially gain access to the source code which might be sensitive. If the application code needs to be cloned from a private or protected repository, the server needs access to credentials which adds additional attack surface area. 150 | 151 | In most cases, source distribution is not advised due to these security concerns. 152 | 153 | ## Static linking and Curl/XML 154 | 155 | **Note:** if you are compiling with `-static-stdlib` and using Curl with FoundationNetworking or XML with FoundationXML you must have libcurl and/or libxml2 installed on the target system for it to work. 
-------------------------------------------------------------------------------- /docs/aws.md: -------------------------------------------------------------------------------- 1 | # Deploying to AWS on Amazon Linux 2 2 | 3 | This guide describes how to launch an AWS instance running Amazon Linux 2 and configure it to run Swift. The approach taken here is a step by step approach through the console. This is a great way to learn, but for a more mature approach we recommend using Infrastructure as Code tools such as AWS Cloudformation, and the instances are created and managed through automated tools such as Autoscaling Groups. For one approach using those tools see this blog article: https://aws.amazon.com/blogs/opensource/continuous-delivery-with-server-side-swift-on-amazon-linux-2/ 4 | 5 | ## Launch AWS Instance 6 | 7 | Use the Service menu to select the EC2 service. 8 | 9 | ![Select EC2 service](../images/aws/services.png) 10 | 11 | Click on "Instances" in the "Instances" menu 12 | 13 | ![Select Instances](../images/aws/ec2.png) 14 | 15 | Click on "Launch Instance", either on the top of the screen, or if this is the first instance you have created in the region, in the main section of the screen. 16 | 17 | ![Launch instance](../images/aws/launch-0.png) 18 | 19 | Choose an Amazon Machine Image (AMI). In this case the guide is assuming that we will be using Amazon Linux 2, so select that AMI type. 20 | 21 | ![Choose AMI](../images/aws/launch-1.png) 22 | 23 | Choose an instance type. Larger instances types will have more memory and CPU, but will be more expensive. A smaller instance type will be sufficient to experiment. In this case I have a `t2.micro` instance type selected. 24 | 25 | ![Choose Instance type](../images/aws/launch-2.png) 26 | 27 | Configure instance details. If you want to access this instance directly to the internet, ensure that the subnet that you select is auto-assigns a public IP. 
It is assumed that the VPC has internet connectivity, which means that it needs to have a Internet Gateway (IGW) and the correct networking rules, but this is the case for the default VPC. If you wish to set this instance up in a private (non-internet accessible) VPC you will need to set up a bastion host, AWS Systems Manager Session Manager, or some other mechanism to connect to the instance. 28 | 29 | ![Choose Instance details](../images/aws/launch-3.png) 30 | 31 | Add storage. The AWS EC2 launch wizard will suggest some form of storage by default. For our testing purposes this should be fine, but if you know that you need more storage, or a different storage performance requirements, then you can change the size and volume type here. 32 | 33 | ![Choose Instance storage](../images/aws/launch-4.png) 34 | 35 | Add tags. It is recommended you add as many tags as you need to correctly identify this server later. Especially if you have many servers, it can be difficult to remember which one was used for which purpose. At a very minimum, add a `Name` tag with something memorable. 36 | 37 | ![Add tags](../images/aws/launch-5.png) 38 | 39 | Configure security group. The security group is a stateful firewall that limits the traffic that is accepted by your instance. It is recommended to limit this as much as possible. In this case we are configuring it to only allow traffic on port 22 (ssh). It is recommended to restrict the source as well. To limit it to your workstation's current IP, click on the dropdown under "Source" and select "My IP". 40 | 41 | ![Configure security group](../images/aws/launch-6.png) 42 | 43 | Launch instance. Click on "Launch", and select a key pair that you will use to connect to the instance. If you already have a keypair that you have used previously, you can reuse it here by selecting "Choose an existing key pair". Otherwise you can create a keypair now by selecting "Create a new key pair". 
44 | 45 | ![Launch instance](../images/aws/launch-7.png) 46 | 47 | Wait for instance to launch. When it is ready it will show as "running" under "Instance state", and "2/2 checks pass" under "Status Checks". Click on the instance to view the details on the bottom pane of the window, and look for the "IPv4 Public IP". 48 | 49 | ![Wait for instance launch and view details](../images/aws/ec2-list.png) 50 | 51 | Connect to instance. Using the keypair that you used or created in the launch step and the IP in the previous step, run ssh. Be sure to use the `-A` option with ssh so that in a future step we will be able to use the same key to connect to a second instance. 52 | 53 | ![Connect to instance](../images/aws/ssh-0.png) 54 | 55 | We have two options to compile the binary: either directly on the instance or using Docker. We will go through both options here. 56 | 57 | ## Compile on instance 58 | There are two alternative ways to compile code on the instance, either by: 59 | 60 | - [downloading and using the toolchain directly on the instance](#compile-using-a-downloaded-toolchain), 61 | - or by [using docker, and compiling inside a docker container](#compile-with-docker) 62 | 63 | ### Compile using a downloaded toolchain 64 | Run the following command in the SSH terminal. Note that there may be a more up to date version of the swift toolchain. Check https://swift.org/download/#releases for the latest available toolchain url for Amazon Linux 2. 
65 | 66 | ``` 67 | SwiftToolchainUrl="https://swift.org/builds/swift-5.4.1-release/amazonlinux2/swift-5.4.1-RELEASE/swift-5.4.1-RELEASE-amazonlinux2.tar.gz" 68 | sudo yum install ruby binutils gcc git glibc-static gzip libbsd libcurl libedit libicu libsqlite libstdc++-static libuuid libxml2 tar tzdata ruby -y 69 | cd $(mktemp -d) 70 | wget ${SwiftToolchainUrl} -O swift.tar.gz 71 | gunzip < swift.tar.gz | sudo tar -C / -xv --strip-components 1 72 | ``` 73 | 74 | Finally, check that Swift is correctly installed by running the Swift REPL: `swift`. 75 | 76 | ![Invoke REPL](../images/aws/repl.png) 77 | 78 | Let's now download and build an test application. We will use the `--static-swift-stdlib` option so that it can be deployed to a different server without the Swift toolchain installed. These examples will deploy SwiftNIO's [example HTTP server](https://github.com/apple/swift-nio/tree/master/Sources/NIOHTTP1Server), but you can test with your own project. 79 | 80 | ``` 81 | git clone https://github.com/apple/swift-nio.git 82 | cd swift-nio 83 | swift build -v --static-swift-stdlib -c release 84 | ``` 85 | 86 | ## Compile with Docker 87 | 88 | Ensure that Docker and git are installed on the instance: 89 | 90 | ``` 91 | sudo yum install docker git 92 | sudo usermod -a -G docker ec2-user 93 | sudo systemctl start docker 94 | ``` 95 | 96 | You may have to log out and log back in to be able to use Docker. Check by running `docker ps`, and ensure that it runs without errors. 97 | 98 | Download and compile SwiftNIO's [example HTTP server](https://github.com/apple/swift-nio/tree/master/Sources/NIOHTTP1Server): 99 | 100 | ``` 101 | docker run --rm -v "$PWD:/workspace" -w /workspace swift:5.4-amazonlinux2 /bin/bash -cl ' \ 102 | swift build -v --static-swift-stdlib -c release 103 | ``` 104 | ## Test binary 105 | Using the same steps as above, launch a second instance (but don't run any of the bash commands above!). Be sure to use the same SSH keypair. 
106 | 107 | From within the AWS management console, navigate to the EC2 service and find the instance that you just launched. Click on the instance to see the details, and find the internal IP. In my example, the internal IP is `172.31.3.29` 108 | 109 | From the original build instance, copy the binary to the new server instance: 110 | ```scp .build/release/NIOHTTP1Server ec2-user@172.31.3.29``` 111 | 112 | Now connect to the new instance: 113 | ```ssh ec2-user@172.31.3.29``` 114 | 115 | From within the new instance, test the Swift binary: 116 | ``` 117 | NIOHTTP1Server localhost 8080 & 118 | curl localhost:8080 119 | ``` 120 | 121 | From here, options are endless and will depend on your application of Swift. If you wish to run a web service be sure to open the Security Group to the correct port and from the correct source. When you are done testing Swift, shut down the instance to avoid paying for unneeded compute. From the EC2 dashboard, select both instances, select "Actions" from the menu, then select "Instance state" and then finally "terminate". 122 | 123 | ![Terminate Instance](../images/aws/terminate.png) 124 | -------------------------------------------------------------------------------- /docs/gcp.md: -------------------------------------------------------------------------------- 1 | # Deploying to Google Cloud Platform (GCP) 2 | 3 | This guide describes how to build and run your Swift Server on serverless 4 | architecture with [Google Cloud Build](https://cloud.google.com/build) and 5 | [Google Cloud Run](https://cloud.google.com/run). We'll use 6 | [Artifact Registry](https://cloud.google.com/artifact-registry/docs/docker/quickstart) 7 | to store the Docker images. 8 | 9 | ## Google Cloud Platform Setup 10 | 11 | You can read about 12 | [Getting Started with GCP](https://cloud.google.com/gcp/getting-started/) in 13 | more detail. 
In order to run Swift Server applications, we need to: 14 | 15 | - enable [Billing](https://console.cloud.google.com/billing) (requires a credit 16 | card). Note that when creating a new account, GCP provides you with $300 of 17 | free credit to use in the first 90 days. You can follow this guide for free 18 | for a new account. Everything in this guide should fall into the "Free Tier" 19 | category at GCP (120 build minutes per day, 2 million Cloud Run requests per 20 | month 21 | [Free Tier Usage Limits](https://cloud.google.com/free/docs/gcp-free-tier#free-tier-usage-limits)) 22 | - enable the 23 | [Cloud Build API](https://console.cloud.google.com/apis/api/cloudbuild.googleapis.com/overview) 24 | - enable the 25 | [Cloud Run Admin API](https://console.cloud.google.com/apis/api/run.googleapis.com/overview) 26 | - enable the 27 | [Artifact Registry API](https://console.cloud.google.com/apis/api/artifactregistry.googleapis.com/overview) 28 | - [create a Repository in the Artifact Registry](https://console.cloud.google.com/artifacts/create-repo) 29 | (Format: Docker, Region: your choice) 30 | 31 | ## Project Requirements 32 | 33 | Please verify that your server listens on `0.0.0.0`, not `127.0.0.1` and it's 34 | recommended to use the environment variable `$PORT` instead of a hard-coded 35 | value. For the workflow to pass, two files are essential, both need to be in the 36 | project root: 37 | 38 | 1. Dockerfile 39 | 2. cloudbuild.yaml 40 | 41 | ### `Dockerfile` 42 | 43 | You should test your Dockerfile with `docker build . -t test` and 44 | `docker run -p 8080:8080 test` and make sure it builds and runs locally. 45 | 46 | The _Dockerfile_ is the same as in the [packaging guide](./packaging.md#docker). 47 | Replace `` with your `executableTarget` (ie. 
"Server"): 48 | 49 | ```Dockerfile 50 | #------- build ------- 51 | FROM swift:centos as builder 52 | 53 | # set up the workspace 54 | RUN mkdir /workspace 55 | WORKDIR /workspace 56 | 57 | # copy the source to the docker image 58 | COPY . /workspace 59 | 60 | RUN swift build -c release --static-swift-stdlib 61 | 62 | #------- package ------- 63 | FROM centos:8 64 | # copy executable 65 | COPY --from=builder /workspace/.build/release/ / 66 | 67 | # set the entry point (application name) 68 | CMD [""] 69 | ``` 70 | 71 | ### `cloudbuild.yaml` 72 | 73 | The `cloudbuild.yaml` files contains a set of steps to build the server image 74 | directly in the cloud and deploy a new Cloud Run instance after the successful 75 | build. `${_VAR}` are 76 | ["substitution variables"](https://cloud.google.com/cloud-build/docs/configuring-builds/substitute-variable-values) 77 | that are available during build time and can be passed on into the runtime 78 | environment in the "deploy" phase. We will set the variables later when we 79 | configure the [Build Trigger](#deployment) (Step 5). 80 | 81 | ```yaml 82 | steps: 83 | - name: 'gcr.io/cloud-builders/docker' 84 | entrypoint: 'bash' 85 | args: 86 | - '-c' 87 | - | 88 | docker pull ${_REGION}-docker.pkg.dev/$PROJECT_ID/${_REPOSITORY_NAME}/${_SERVICE_NAME}:latest || exit 0 89 | - name: 'gcr.io/cloud-builders/docker' 90 | args: 91 | - build 92 | - -t 93 | - ${_REGION}-docker.pkg.dev/$PROJECT_ID/${_REPOSITORY_NAME}/${_SERVICE_NAME}:$SHORT_SHA 94 | - -t 95 | - ${_REGION}-docker.pkg.dev/$PROJECT_ID/${_REPOSITORY_NAME}/${_SERVICE_NAME}:latest 96 | - . 
97 | - --cache-from 98 | - ${_REGION}-docker.pkg.dev/$PROJECT_ID/${_REPOSITORY_NAME}/${_SERVICE_NAME}:latest 99 | - name: 'gcr.io/cloud-builders/docker' 100 | args: 101 | [ 102 | 'push', 103 | '${_REGION}-docker.pkg.dev/$PROJECT_ID/${_REPOSITORY_NAME}/${_SERVICE_NAME}:$SHORT_SHA' 104 | ] 105 | - name: 'gcr.io/cloud-builders/gcloud' 106 | args: 107 | - run 108 | - deploy 109 | - swift-service 110 | - --image=${_REGION}-docker.pkg.dev/$PROJECT_ID/${_REPOSITORY_NAME}/${_SERVICE_NAME}:$SHORT_SHA 111 | - --port=8080 112 | - --region=${_REGION} 113 | - --memory=512Mi 114 | - --platform=managed 115 | - --allow-unauthenticated 116 | - --min-instances=0 117 | - --max-instances=5 118 | images: 119 | - '${_REGION}-docker.pkg.dev/$PROJECT_ID/${_REPOSITORY_NAME}/${_SERVICE_NAME}:$SHORT_SHA' 120 | - '${_REGION}-docker.pkg.dev/$PROJECT_ID/${_REPOSITORY_NAME}/${_SERVICE_NAME}:latest' 121 | timeout: 1800s 122 | ``` 123 | 124 | ### The steps in detail 125 | 126 | 1. Pull the latest image from the Artifact Registry to retrieve cached layers 127 | 2. Build the image with `$SHORT_SHA` and `latest` tag 128 | 3. Push the image to the Artifact Registry 129 | 4. Deploy the image to Cloud Run 130 | 131 | `images` specifies the build images to store in the registry. The default 132 | `timeout` is 10 minutes, so we'll need to increase it for Swift builds. We use 133 | `8080` as the default port here, though it's recommended to remove this line and 134 | have the server listen on `$PORT`. 135 | 136 | ## Deployment 137 | 138 | ![cloud build trigger settings and how to connect a code repository](../images/gcp-connect-repo.png) 139 | 140 | Push all files to a remote repository. Cloud Build currently supports GitHub, 141 | Bitbucket and GitLab. Now head to 142 | [Cloud Build Triggers](https://console.cloud.google.com/cloud-build/triggers) 143 | and click "Create Trigger": 144 | 145 | 1. Add a name and description 146 | 2. Event: "Push to a branch" is active 147 | 3. 
Source: "Connect New Repository" and authorize with your code provider, and 148 | add the repository where your Swift server code is hosted. Note that you need 149 | to configure 150 | [GitHub](https://cloud.google.com/build/docs/automating-builds/build-repos-from-github), 151 | [GitLab](https://cloud.google.com/build/docs/automating-builds/build-repos-from-gitlab) 152 | or 153 | [Bitbucket](https://cloud.google.com/build/docs/automating-builds/build-repos-from-bitbucket-cloud) 154 | to allow GCP access first. 155 | 4. Configuration: "Cloud Build configuration file" / Location: Repository 156 | 5. Advanced: 157 | [Substitution variables](https://cloud.google.com/cloud-build/docs/configuring-builds/substitute-variable-values): 158 | You need to set the variables for region, repository name and service name 159 | here. You can pick a 160 | [region of your choice](https://cloud.google.com/about/locations/) (ie. 161 | `us-central1`). All custom variables must start with an underscore 162 | (`_REGION`). `_REPOSITORY_NAME` and `_SERVICE_NAME` are up to you. If you use 163 | environment variables for example to connect to a database or 3rd party 164 | services, you can set the values here too. 165 | 6. "Create" 166 | 167 | As a last step before deploying the new service, go to the 168 | [Cloud Build Settings](https://console.cloud.google.com/cloud-build/settings) 169 | and make sure "Cloud Run" is enabled. This gives Cloud Build the necessary IAM 170 | permissions to deploy Cloud Run services. 171 | 172 | ![cloud build settings](../images/gcp-cloud-build-settings.png) 173 | 174 | In the Trigger overview page, you should see your new "swift-service" trigger. 175 | Click on "RUN" on the right to start the trigger manually from the `main` 176 | branch. With a simple Hummingbird project the build takes about 7-8 minutes. 177 | Vapor takes about 25 minutes on the standard/small build machines, which are 178 | fairly slow. 
"Jordane" from the Vapor Discord community 179 | [recommends using `machineType: E2_HIGHCPU_8`](https://discord.com/channels/431917998102675485/447893851374616576/915819735738888222) 180 | in the `cloudbuild.yaml` to speed up deployments: 181 | 182 | ```yaml 183 | options: 184 | machineType: 'E2_HIGHCPU_8' 185 | ``` 186 | 187 | After a successful build you should see the service URL in the build logs: 188 | 189 | ![successful build and deployment to cloud run](../images/gcp-cloud-build.png) 190 | 191 | You can head over to Cloud Run and see your service running there: 192 | 193 | ![cloud run overview](../images/gcp-cloud-run.png) 194 | 195 | The trigger will deploy every new commit on `main`. You can also enable Pull 196 | Request triggers for feature-driven workflows. Cloud Build also allows 197 | blue/green builds, auto-scaling and much more. 198 | 199 | You can now connect your custom domain to the new service and go live. 200 | 201 | ## Cleanup 202 | 203 | - delete the Cloud Run service 204 | - delete the Cloud Build trigger 205 | - delete the Artifact Registry repository 206 | -------------------------------------------------------------------------------- /docs/memory-leaks-and-usage.md: -------------------------------------------------------------------------------- 1 | # Debugging Memory Leaks and Usage 2 | 3 | There are many different tools for troubleshooting memory leaks both on Linux and macOS, each with different strengths and ease-of-use. One excellent tool is the Xcode's [Memory Graph Debugger](https://developer.apple.com/library/archive/documentation/DeveloperTools/Conceptual/debugging_with_xcode/chapters/special_debugging_workflows.html#//apple_ref/doc/uid/TP40015022-CH9-DontLinkElementID_1). 4 | [Instruments](https://help.apple.com/instruments/mac/10.0/#/dev022f987b) and `leaks` can also be very useful. If you cannot run or reproduce the problem on macOS, there are a number of server-side alternatives below. 
5 | 6 | ## Example program 7 | 8 | The following program doesn't do anything useful but leaks memory so will serve as the example: 9 | 10 | ```swift 11 | public class MemoryLeaker { 12 | var closure: () -> Void = { () } 13 | 14 | public init() {} 15 | 16 | public func doNothing() {} 17 | 18 | public func doSomethingThatLeaks() { 19 | self.closure = { 20 | // This will leak as it'll create a permanent reference cycle: 21 | // 22 | // self -> self.closure -> self 23 | self.doNothing() 24 | } 25 | } 26 | } 27 | 28 | @inline(never) // just to be sure to get this in a stack trace 29 | func myFunctionDoingTheAllocation() { 30 | let thing = MemoryLeaker() 31 | thing.doSomethingThatLeaks() 32 | } 33 | 34 | myFunctionDoingTheAllocation() 35 | ``` 36 | 37 | ## Debugging leaks with `valgrind` 38 | 39 | If you run your program using 40 | 41 | valgrind --leak-check=full ./test 42 | 43 | then `valgrind` will output 44 | 45 | ``` 46 | ==1== Memcheck, a memory error detector 47 | ==1== Copyright (C) 2002-2017, and GNU GPL'd, by Julian Seward et al. 
48 | ==1== Using Valgrind-3.13.0 and LibVEX; rerun with -h for copyright info 49 | ==1== Command: ./test 50 | ==1== 51 | ==1== 52 | ==1== HEAP SUMMARY: 53 | ==1== in use at exit: 824 bytes in 4 blocks 54 | ==1== total heap usage: 5 allocs, 1 frees, 73,528 bytes allocated 55 | ==1== 56 | ==1== 32 bytes in 1 blocks are definitely lost in loss record 1 of 4 57 | ==1== at 0x4C2FB0F: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so) 58 | ==1== by 0x52076B1: swift_slowAlloc (in /usr/lib/swift/linux/libswiftCore.so) 59 | ==1== by 0x5207721: swift_allocObject (in /usr/lib/swift/linux/libswiftCore.so) 60 | ==1== by 0x108E58: $s4test12MemoryLeakerCACycfC (in /tmp/test) 61 | ==1== by 0x10900E: $s4test28myFunctionDoingTheAllocationyyF (in /tmp/test) 62 | ==1== by 0x108CA3: main (in /tmp/test) 63 | ==1== 64 | ==1== LEAK SUMMARY: 65 | ==1== definitely lost: 32 bytes in 1 blocks 66 | ==1== indirectly lost: 0 bytes in 0 blocks 67 | ==1== possibly lost: 0 bytes in 0 blocks 68 | ==1== still reachable: 792 bytes in 3 blocks 69 | ==1== suppressed: 0 bytes in 0 blocks 70 | ==1== Reachable blocks (those to which a pointer was found) are not shown. 
71 | ==1== To see them, rerun with: --leak-check=full --show-leak-kinds=all 72 | ==1== 73 | ==1== For counts of detected and suppressed errors, rerun with: -v 74 | ==1== ERROR SUMMARY: 1 errors from 1 contexts (suppressed: 0 from 0) 75 | ``` 76 | 77 | The important part is 78 | 79 | ``` 80 | ==1== 32 bytes in 1 blocks are definitely lost in loss record 1 of 4 81 | ==1== at 0x4C2FB0F: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so) 82 | ==1== by 0x52076B1: swift_slowAlloc (in /usr/lib/swift/linux/libswiftCore.so) 83 | ==1== by 0x5207721: swift_allocObject (in /usr/lib/swift/linux/libswiftCore.so) 84 | ==1== by 0x108E58: $s4test12MemoryLeakerCACycfC (in /tmp/test) 85 | ==1== by 0x10900E: $s4test28myFunctionDoingTheAllocationyyF (in /tmp/test) 86 | ==1== by 0x108CA3: main (in /tmp/test) 87 | ``` 88 | 89 | which can demangled by pasting it into `swift demangle`: 90 | 91 | ``` 92 | ==1== 32 bytes in 1 blocks are definitely lost in loss record 1 of 4 93 | ==1== at 0x4C2FB0F: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so) 94 | ==1== by 0x52076B1: swift_slowAlloc (in /usr/lib/swift/linux/libswiftCore.so) 95 | ==1== by 0x5207721: swift_allocObject (in /usr/lib/swift/linux/libswiftCore.so) 96 | ==1== by 0x108E58: test.MemoryLeaker.__allocating_init() -> test.MemoryLeaker (in /tmp/test) 97 | ==1== by 0x10900E: test.myFunctionDoingTheAllocation() -> () (in /tmp/test) 98 | ==1== by 0x108CA3: main (in /tmp/test) 99 | ``` 100 | 101 | So valgrind is telling us that the allocation that eventually leaked is coming from `test.myFunctionDoingTheAllocation` calling `test.MemoryLeaker.__allocating_init()` which is correct. 102 | 103 | ### Limitations 104 | 105 | - `valgrind` doesn't understand the bit packing that is used in many Swift data types (like `String`) or when you create `enum`s with associated values. 
Therefore `valgrind` sometimes claims a certain allocation was leaked even though it might not have 106 | - `valgrind` will make your program run _very slow_ (possibly 100x slower) which might stop you from even getting far enough to reproduce the issue. 107 | 108 | ## Debugging leaks with `Leak Sanitizer` 109 | 110 | If you build your application using 111 | 112 | swift build --sanitize=address 113 | 114 | it will be built with [Address Sanitizer](https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer) enabled. Address Sanitizer also automatically tries to find leaked memory blocks, just like `valgrind`. 115 | 116 | The output from the above example program would be 117 | 118 | ``` 119 | ================================================================= 120 | ==478==ERROR: LeakSanitizer: detected memory leaks 121 | 122 | Direct leak of 32 byte(s) in 1 object(s) allocated from: 123 | #0 0x55f72c21ac8d (/tmp/test+0x95c8d) 124 | #1 0x7f7e44e686b1 (/usr/lib/swift/linux/libswiftCore.so+0x3cb6b1) 125 | #2 0x55f72c24b2ce (/tmp/test+0xc62ce) 126 | #3 0x55f72c24a4c3 (/tmp/test+0xc54c3) 127 | #4 0x7f7e43aecb96 (/lib/x86_64-linux-gnu/libc.so.6+0x21b96) 128 | 129 | SUMMARY: AddressSanitizer: 32 byte(s) leaked in 1 allocation(s). 130 | ``` 131 | 132 | which shows the same information as `valgrind`, unfortunately however not symbolicated due to [SR-12601](https://bugs.swift.org/browse/SR-12601). 133 | 134 | You can symbolicate it using `llvm-symbolizer` or `addr2line` if you have `binutils` installed like so: 135 | 136 | ``` 137 | # /tmp/test+0xc62ce 138 | addr2line -e /tmp/test -a 0xc62ce -ipf | swift demangle 139 | 0x00000000000c62ce: test.myFunctionDoingTheAllocation() -> () at crtstuff.c:? 
140 | ``` 141 | ## Debugging transient memory usage with `heaptrack` 142 | [Heaptrack](https://github.com/KDE/heaptrack) is very useful for analyzing memory leaks/usage with less overhead than `valgrind` - but more importantly it also allows for analyzing transient memory usage which may significantly impact performance by putting too much pressure on the allocator. 143 | 144 | In addition to command line access, there is a graphical front-end `heaptrack_gui`. 145 | 146 | A key feature is that it allows for diffing between two different runs of your application, making it fairly easy to troubleshoot differences in `malloc` behavior between e.g. feature branches and main. 147 | 148 | A short how-to run on Ubuntu 20.04 (using a different example than above, as we look at transient usage in this example), install `heaptrack` with: 149 | 150 | ``` 151 | sudo apt-get install heaptrack 152 | ``` 153 | 154 | Then run the binary with `heaptrack` two times — first we do it for `main` to get a baseline: 155 | ``` 156 | > heaptrack .build/x86_64-unknown-linux-gnu/release/test_1000_autoReadGetAndSet 157 | heaptrack output will be written to "/tmp/.nio_alloc_counter_tests_GRusAy/heaptrack.test_1000_autoReadGetAndSet.84341.gz" 158 | starting application, this might take some time... 159 | ... 160 | heaptrack stats: 161 | allocations: 319347 162 | leaked allocations: 107 163 | temporary allocations: 68 164 | Heaptrack finished! Now run the following to investigate the data: 165 | 166 | heaptrack --analyze "/tmp/.nio_alloc_counter_tests_GRusAy/heaptrack.test_1000_autoReadGetAndSet.84341.gz" 167 | ``` 168 | Then run it a second time for the feature branch: 169 | ``` 170 | > heaptrack .build/x86_64-unknown-linux-gnu/release/test_1000_autoReadGetAndSet 171 | heaptrack output will be written to "/tmp/.nio_alloc_counter_tests_GRusAy/heaptrack.test_1000_autoReadGetAndSet.84372.gz" 172 | starting application, this might take some time... 173 | ...
174 | heaptrack stats: 175 | allocations: 673989 176 | leaked allocations: 117 177 | temporary allocations: 341011 178 | Heaptrack finished! Now run the following to investigate the data: 179 | 180 | heaptrack --analyze "/tmp/.nio_alloc_counter_tests_GRusAy/heaptrack.test_1000_autoReadGetAndSet.84372.gz" 181 | ubuntu@ip-172-31-25-161 /t/.nio_alloc_counter_tests_GRusAy> 182 | ``` 183 | Here we could see that we had 673989 allocations in the feature branch version and 319347 in `main`, so clearly a regression. 184 | 185 | Finally, we can analyze the output as a diff from these runs using `heaptrack_print` and pipe it through `swift demangle` for readability: 186 | 187 | ``` 188 | heaptrack_print -T -d heaptrack.test_1000_autoReadGetAndSet.84341.gz heaptrack.test_1000_autoReadGetAndSet.84372.gz | swift demangle 189 | ``` 190 | `-T` gives us the temporary allocations (as it in this case was not a leak, but a transient alloaction - if you have leaks remove `-T`). 191 | 192 | The output can be quite long, but in this case as we look for transient allocations, scroll down to: 193 | 194 | ``` 195 | MOST TEMPORARY ALLOCATIONS 196 | 307740 temporary allocations of 290324 allocations in total (106.00%) from 197 | swift_slowAlloc 198 | in /home/ubuntu/bin/usr/lib/swift/linux/libswiftCore.so 199 | 43623 temporary allocations of 44553 allocations in total (97.91%) from: 200 | swift_allocObject 201 | in /home/ubuntu/bin/usr/lib/swift/linux/libswiftCore.so 202 | NIO.ServerBootstrap.(bind0 in _C131C0126670CF68D8B594DDFAE0CE57)(makeServerChannel: (NIO.SelectableEventLoop, NIO.EventLoopGroup) throws -> NIO.ServerSocketChannel, _: (NIO.EventLoop, NIO.ServerSocketChannel) -> NIO.EventLoopFuture<()>) -> NIO.EventLoopFuture 203 | at /home/ubuntu/swiftnio/swift-nio/Sources/NIO/Bootstrap.swift:295 204 | in /tmp/.nio_alloc_counter_tests_GRusAy/.build/x86_64-unknown-linux-gnu/release/test_1000_autoReadGetAndSet 205 | merged NIO.ServerBootstrap.bind(host: Swift.String, port: Swift.Int) -> 
NIO.EventLoopFuture 206 | in /tmp/.nio_alloc_counter_tests_GRusAy/.build/x86_64-unknown-linux-gnu/release/test_1000_autoReadGetAndSet 207 | NIO.ServerBootstrap.bind(host: Swift.String, port: Swift.Int) -> NIO.EventLoopFuture 208 | in /tmp/.nio_alloc_counter_tests_GRusAy/.build/x86_64-unknown-linux-gnu/release/test_1000_autoReadGetAndSet 209 | Test_test_1000_autoReadGetAndSet.run(identifier: Swift.String) -> () 210 | at /tmp/.nio_alloc_counter_tests_GRusAy/Sources/Test_test_1000_autoReadGetAndSet/file.swift:24 211 | in /tmp/.nio_alloc_counter_tests_GRusAy/.build/x86_64-unknown-linux-gnu/release/test_1000_autoReadGetAndSet 212 | main 213 | at Sources/bootstrap_test_1000_autoReadGetAndSet/main.c:18 214 | in /tmp/.nio_alloc_counter_tests_GRusAy/.build/x86_64-unknown-linux-gnu/release/test_1000_autoReadGetAndSet 215 | 22208 temporary allocations of 22276 allocations in total (99.69%) from: 216 | swift_allocObject 217 | in /home/ubuntu/bin/usr/lib/swift/linux/libswiftCore.so 218 | generic specialization > of Swift._copyCollectionToContiguousArray(A) -> Swift.ContiguousArray 219 | in /home/ubuntu/bin/usr/lib/swift/linux/libswiftCore.so 220 | Swift.String.utf8CString.getter : Swift.ContiguousArray 221 | in /home/ubuntu/bin/usr/lib/swift/linux/libswiftCore.so 222 | NIO.URing.getEnvironmentVar(Swift.String) -> Swift.String? 223 | at /home/ubuntu/swiftnio/swift-nio/Sources/NIO/LinuxURing.swift:291 224 | in /tmp/.nio_alloc_counter_tests_GRusAy/.build/x86_64-unknown-linux-gnu/release/test_1000_autoReadGetAndSet 225 | NIO.URing._debugPrint(@autoclosure () -> Swift.String) -> () 226 | at /home/ubuntu/swiftnio/swift-nio/Sources/NIO/LinuxURing.swift:297 227 | ... 
228 | 22196 temporary allocations of 22276 allocations in total (99.64%) from: 229 | ``` 230 | 231 | And here we could fairly quickly see that the transient extra allocations was due to extra debug printing and querying of environment variables: 232 | 233 | ``` 234 | NIO.URing.getEnvironmentVar(Swift.String) -> Swift.String? 235 | at /home/ubuntu/swiftnio/swift-nio/Sources/NIO/LinuxURing.swift:291 236 | in /tmp/.nio_alloc_counter_tests_GRusAy/.build/x86_64-unknown-linux-gnu/release/test_1000_autoReadGetAndSet 237 | NIO.URing._debugPrint(@autoclosure () -> Swift.String) -> () 238 | ``` 239 | 240 | And this code will be removed before final integration of the feature branch, so the diff will go away. 241 | -------------------------------------------------------------------------------- /docs/allocations.md: -------------------------------------------------------------------------------- 1 | # Allocations 2 | 3 | For high-performance software in Swift, it's often important to understand where your heap allocations are coming from. The next step can then be to reduce the number of allocations your software makes. 4 | 5 | This is very similar to other performance questions: Before you can optimise performance you need to understand where you spend your resources. And resources can be CPU time, as well as memory, or heap allocations. 6 | In this document we will solely focus on the number of heap allocations, not their size. 7 | 8 | On macOS, you can use Instruments's "Allocations" instrument. The Allocations instrument shows you two sets of values: The live allocations (i.e. allocated and not freed) as well as the transient allocations (all allocations made). 9 | 10 | Your production workloads however will likely run on Linux and depending on your setup the number of allocations can differ significantly between macOS and Linux. 11 | 12 | ## Preparation 13 | 14 | To not waste your time, be sure to do any profiling in _release mode_. 
Swift's optimiser will produce significantly faster code which will also allocate less in release mode. Usually this means you need to run 15 | 16 | swift run -c release 17 | 18 | #### Install `perf` 19 | 20 | Follow the [installation instructions](linux-perf.md) in the Linux `perf` utility guide. 21 | 22 | #### Clone the `FlameGraph` project 23 | 24 | To see some pretty graphs, clone the [`FlameGraph`](https://github.com/brendangregg/FlameGraph) repository on the machine/container where you need it. The rest of this guide will assume that it's available at `/FlameGraph`: 25 | 26 | ``` 27 | git clone https://github.com/brendangregg/FlameGraph 28 | ``` 29 | 30 | Tip: With Docker, you may want to bind mount the `FlameGraph` repository into the container using 31 | 32 | ``` 33 | docker run -it --rm \ 34 | --privileged \ 35 | -v "/path/to/FlameGraphOnYourMachine:/FlameGraph:ro" \ 36 | -v "$PWD:PWD" -w "$PWD" \ 37 | swift:latest 38 | ``` 39 | 40 | or similar. 41 | 42 | 43 | ## Tools 44 | 45 | In this guide, we will be using the [Linux `perf`](https://perf.wiki.kernel.org/index.php/Main_Page) tool. If you're struggling to get `perf` to work, have a look at our [information regarding `perf`](linux-perf.md). If you're running in a Docker container, don't forget that you'll need a privileged container. And generally, you will need `root` access, so you may need to prefix the commands with `sudo`. 46 | 47 | ## Getting a `perf` user probe 48 | 49 | In this guide, we will be counting the number of allocations. Most allocations from a Swift program (on Linux) will be done through the `malloc` function. 50 | 51 | To get information about when an allocation function is called, we will install a `perf` "user probes" on the allocation functions. Because Swift also uses other allocation functions such as `calloc` and `posix_memalign`, we'll install a user probe for them all. 
From then on, there will be an event in `perf` that will fire whenever one of the allocation functions is called. 52 | 53 | ```bash 54 | # figures out the path to libc 55 | libc_path=$(readlink -e /lib64/libc.so.6 /lib/x86_64-linux-gnu/libc.so.6) 56 | 57 | # delete all existing user probes on libc (instead of * you can also list them individually) 58 | perf probe --del 'probe_libc:*' 59 | 60 | # installs a probe on `malloc`, `calloc`, and `posix_memalign` 61 | perf probe -x "$libc_path" --add malloc --add calloc --add posix_memalign 62 | ``` 63 | 64 | The result (hopefully) looks somewhat like this: 65 | 66 | ``` 67 | Added new events: 68 | probe_libc:malloc (on malloc in /usr/lib/x86_64-linux-gnu/libc-2.31.so) 69 | probe_libc:calloc (on calloc in /usr/lib/x86_64-linux-gnu/libc-2.31.so) 70 | probe_libc:posix_memalign (on posix_memalign in /usr/lib/x86_64-linux-gnu/libc-2.31.so) 71 | 72 | [...] 73 | ``` 74 | 75 | What `perf` is telling you here is that it added a new events called `probe_libc:malloc`, `probe_libc:calloc`, ... which will fire every time the respective function is called. 76 | 77 | Let's confirm that our `probe_libc:malloc` probe actually works by running: 78 | 79 | perf stat -e probe_libc:malloc -- bash -c 'echo Hello World' 80 | 81 | which should output something like 82 | 83 | ``` 84 | Hello World 85 | 86 | Performance counter stats for 'bash -c echo Hello World': 87 | 88 | 1021 probe_libc:malloc 89 | 90 | 0.003840500 seconds time elapsed 91 | 92 | 0.000000000 seconds user 93 | 0.003867000 seconds sys 94 | ``` 95 | 96 | Which seems to have allocated 1021 times, great. If that probe fired 0 times, something went wrong. 97 | 98 | ## Running the allocation analysis 99 | 100 | After we have confirmed that our user probe on `malloc` works in general, let's dial it up a little. The first thing we'll need is a program that we'd like to analyse the allocations of. 
101 | 102 | For example, we could analyse a program which does 10 subsequent HTTP requests using [AsyncHTTPClient](https://github.com/swift-server/async-http-client). If you're interested in the full source code, please expand below. 103 | 104 |
105 | Demo program source code 106 | 107 | With the following dependencies 108 | 109 | ```swift 110 | dependencies: [ 111 | .package(url: "https://github.com/swift-server/async-http-client.git", from: "1.3.0"), 112 | .package(url: "https://github.com/apple/swift-nio.git", from: "2.29.0"), 113 | .package(url: "https://github.com/apple/swift-log.git", from: "1.4.2"), 114 | ], 115 | ``` 116 | 117 | We could write this program 118 | 119 | ```swift 120 | import AsyncHTTPClient 121 | import NIO 122 | import Logging 123 | 124 | let urls = Array(repeating:"http://httpbin.org/get", count: 10) 125 | var logger = Logger(label: "ahc-alloc-demo") 126 | 127 | logger.info("running HTTP requests", metadata: ["count": "\(urls.count)"]) 128 | MultiThreadedEventLoopGroup.withCurrentThreadAsEventLoop { eventLoop in 129 | let httpClient = HTTPClient(eventLoopGroupProvider: .shared(eventLoop), 130 | backgroundActivityLogger: logger) 131 | 132 | func doRemainingRequests(_ remaining: ArraySlice, 133 | overallResult: EventLoopPromise, 134 | eventLoop: EventLoop) { 135 | var remaining = remaining 136 | if let first = remaining.popFirst() { 137 | httpClient.get(url: first, logger: logger).map { [remaining] _ in 138 | eventLoop.execute { // for shorter stacks 139 | doRemainingRequests(remaining, overallResult: overallResult, eventLoop: eventLoop) 140 | } 141 | }.whenFailure { error in 142 | overallResult.fail(error) 143 | } 144 | } else { 145 | return overallResult.succeed(()) 146 | } 147 | } 148 | 149 | let promise = eventLoop.makePromise(of: Void.self) 150 | // Kick off the process 151 | doRemainingRequests(urls[...], 152 | overallResult: promise, 153 | eventLoop: eventLoop) 154 | 155 | promise.futureResult.whenComplete { result in 156 | switch result { 157 | case .success: 158 | logger.info("all HTTP requests succeeded") 159 | case .failure(let error): 160 | logger.error("HTTP request failure", metadata: ["error": "\(error)"]) 161 | } 162 | 163 | httpClient.shutdown { maybeError in 164 | 
if let error = maybeError { 165 | logger.error("AHC shutdown failed", metadata: ["error": "\(error)"]) 166 | } 167 | eventLoop.shutdownGracefully { maybeError in 168 | if let error = maybeError { 169 | logger.error("EventLoop shutdown failed", metadata: ["error": "\(error)"]) 170 | } 171 | } 172 | } 173 | } 174 | } 175 | 176 | logger.info("exiting") 177 | ``` 178 |
179 | 180 | Assuming you have a program as a Swift package, we should first of all compile it in release mode using `swift build -c release`. Then you should find a binary called `.build/release/your-program-name` which we can then analyse. 181 | 182 | ### Allocation counts 183 | 184 | Before we go into visualising the allocations as a flame graph, let's start with the simplest analysis: Getting the total number of allocations 185 | 186 | ``` 187 | perf stat -e 'probe_libc:*' -- .build/release/your-program-name 188 | ``` 189 | 190 | The above command instructs perf to run your program and count the number of times the `probe_libc:malloc` probe was hit. This should be the number of allocations done by your program. 191 | 192 | The output should look something like 193 | 194 | ``` 195 | Performance counter stats for '.build/release/your-program-name': 196 | 197 | 68 probe_libc:posix_memalign 198 | 35 probe_libc:calloc_1 199 | 0 probe_libc:calloc 200 | 2977 probe_libc:malloc 201 | 202 | [...] 203 | ``` 204 | 205 | In this case, my program allocated 2,977 times through `malloc` and a few more times through the other allocation functions. If you just want to compare the effects of a pull request you may just want to run this `perf stat` command twice. If you would like to find out _where_ your allocations come from, read on. 206 | 207 | Please note that in this guide we'll use `-e probe_libc:*` instead of individually listing every event like `-e probe_libc:malloc,probe_libc:calloc,probe_libc:calloc_1,probe_libc:posix_memalign`. This assumes that you have _no other_ `perf` user probes installed. If you do, please specify each event you would like to use individually. 208 | 209 | ### Collecting the raw data 210 | 211 | With `perf`, we can't really create live graphs whilst the program is running. For most analyses, we want to first record some raw data (usually with `perf record`) and later on transform the recorded data into a graph. 
212 | 213 | To get started, let's have `perf` run the program for us and collect the information using the `libc_probe:malloc` we set up before. 214 | 215 | ``` 216 | perf record --call-graph dwarf,16384 \ 217 | -m 50000 \ 218 | -e 'probe_libc:*' -- \ 219 | .build/release/your-program-name 220 | ``` 221 | 222 | Let's break down this command a little: 223 | 224 | - `perf record` instructs `perf` to `record` data, makes sense. 225 | - `--call-graph dwarf,16384` instructs `perf` to use the [DWARF](http://www.dwarfstd.org) information to create the call graphs. It also sets the maximum stack dump size to 16k which should be enough to get you full stack traces. Unfortunately, using DWARF is rather slow (see below) but it creates the best call graphs for you. 226 | - `-m 50000`: The size of the ring buffer that `perf` uses to buffer. This is given in multiples of `PAGE_SIZE` (usually 4kB) and especially with DWARF this needs to be pretty huge to prevent data loss. 227 | - `-e 'probe_libc:*'`: Record when the `malloc`/`calloc`/... probes fire 228 | 229 | What you want to see if output like this 230 | 231 | ``` 232 | 233 | [ perf record: Woken up 2 times to write data ] 234 | [ perf record: Captured and wrote 401.088 MB perf.data (49640 samples) ] 235 | ``` 236 | 237 | If perf tells you about "lost chunks" and asks you to "check the IO/CPU overhead", you should jump to the 'Overcoming "lost chunks"' section at the end of this document. 
238 | 239 | ### Flame graphs 240 | 241 | After a successful `perf record`, you can invoke the following command line to produce an SVG file with the flame graph 242 | 243 | ```bash 244 | perf script | \ 245 | /FlameGraph/stackcollapse-perf.pl - | \ 246 | swift demangle --simplified | \ 247 | /FlameGraph/flamegraph.pl --countname allocations \ 248 | --width 1600 > out.svg 249 | ``` 250 | 251 | Let's expand a little on what the above command does: 252 | 253 | - It runs `perf script` which dumps the binary information that `perf record` recorded into a textual form. 254 | - Next, we invoke `stackcollapse-perf` on it which transforms the stacks that `perf script` outputs into the right format for Flame Graphs, 255 | - then we invoke `swift demangle --simplified` which will give us nice symbol names, 256 | - and lastly we create the Flame Graph itself 257 | 258 | After this command has run (which may run for a while), you should have an SVG file that you can open in your browser. 259 | 260 | For the above example program, please see an example flame graph below. Note how you can hover over the stack frames and get more information. To focus on a sub tree, you can click any stack frame too. 261 | 262 | Generally, in flame graphs, the X axis just means "count", it does **not** mean time. In other words, whether a stack appears on the left or the right is not determined when that stack was live (this is different in flame _charts_). 263 | 264 | Note that this flame graph is _not_ a CPU flame graph, 1 sample means 1 allocation here and not time spent on the CPU. Also be aware that stack frames that appear wide don't necessarily allocate directly, it means that they or something they call has allocated a lot. For example, `BaseSocketChannel.readable` is a very wide frame, and yet, it is not a function which allocates directly. However, it calls other functions (such as other parts of SwiftNIO and AsyncHTTPClient) that do allocate a lot. 
It may take a little while to get familiar with flame graphs but there are great resources available online. 265 | 266 | ![](../images/perf-malloc-full.svg) 267 | 268 | ## Allocation flame graphs on macOS 269 | 270 | So far, this tutorial focussed on Linux and the `perf` tool. You can however create the same graphs on macOS. The process is fairly similar. 271 | 272 | First, let's collect the raw data using [DTrace](https://en.wikipedia.org/wiki/DTrace). 273 | 274 | ``` 275 | sudo dtrace -n 'pid$target::malloc:entry,pid$target::posix_memalign:entry,pid$target::calloc:entry,pid$target::malloc_zone_malloc:entry,pid$target::malloc_zone_calloc:entry,pid$target::malloc_zone_memalign:entry { @s[ustack(100)] = count(); } ::END { printa(@s); }' -c .build/release/your-program > raw.stacks 276 | ``` 277 | 278 | Similar to `perf`'s user probes, dtrace also has probes and the above command instructs DTrace to aggregate the number of calls to the allocation functions `malloc`, `posix_memalign`, `calloc`, and the `malloc_zone_*` equivalents. On Apple platforms, Swift uses a slightly larger number of allocation functions than on Linux, therefore we need to specify a few more functions. 279 | 280 | Once we collected the data, we can also create an SVG file using 281 | 282 | ```bash 283 | cat raw.stacks |\ 284 | /FlameGraph/stackcollapse.pl - | \ 285 | swift demangle --simplified | \ 286 | /FlameGraph/flamegraph.pl --countname allocations \ 287 | --width 1600 > out.svg 288 | ``` 289 | 290 | which you will notice is very similar to the `perf` invocation. 
The only differences are: 291 | 292 | - We use `cat raw.stacks` instead of `perf script` because we already have the textual data in a file with DTrace 293 | - Instead of `stackcollapse-perf.pl` (which parses `perf script` output) we use `stackcollapse.pl` (which parses DTrace aggregation output) 294 | 295 | ## Other `perf` tricks 296 | 297 | ### Prettifying Swift's allocation pattern 298 | 299 | Allocations in Swift usually have a very distinct shape: 300 | - Some code creates for example a class instance (which allocates). 301 | - This calls `swift_allocObject`, 302 | - which calls `swift_slowAlloc`, 303 | - which calls `malloc` (where we have our probe). 304 | 305 | To make our flame graphs look nicer, we can apply a small transformation after we have demangled the collapsed stacks: 306 | 307 | ``` 308 | sed -e 's/specialized //g' \ 309 | -e 's/;swift_allocObject;swift_slowAlloc;__libc_malloc/;A/g' 310 | ``` 311 | 312 | which will get rid of `"specialized "` and replaces `swift_allocObject` calling `swift_slowAlloc`, calling `malloc` with just an `A` (for allocation). The full command will then look like 313 | 314 | ``` 315 | perf script | \ 316 | /FlameGraph/stackcollapse-perf.pl - | \ 317 | swift demangle --simplified | \ 318 | sed -e 's/specialized //g' \ 319 | -e 's/;swift_allocObject;swift_slowAlloc;__libc_malloc/;A/g' | \ 320 | /FlameGraph/flamegraph.pl --countname allocations --flamechart --hash \ 321 | > out.svg 322 | ``` 323 | 324 | ### Overcoming "lost chunks" 325 | 326 | When using `perf` with the DWARF call stack unwinding, it is unfortunately easy to run into the following issue 327 | 328 | ``` 329 | [ perf record: Woken up 189 times to write data ] 330 | Warning: 331 | Processed 4346 events and lost 144 chunks! 332 | 333 | Check IO/CPU overload! 334 | 335 | [ perf record: Captured and wrote 30.868 MB perf.data (3817 samples) ] 336 | ``` 337 | 338 | When `perf` tells you that it lost a number of chunks it means that it lost data. 
If `perf` lost data, you have a few options: 339 | 340 | - Reduce the amount of work your program is doing. For every allocation, `perf` will need to record a stack trace. 341 | - Reduce the maximum "stack dump" that `perf` records by changing the `--call-graph dwarf` parameter to for example `--call-graph dwarf,2048`. The default is to record a maximum of 4096 bytes which gives you pretty deep stacks, if you don't need that you can reduce the number. The tradeoff is that the flame graph may show you `[unknown]` stack frames which means that there are missing stack frames there. The unit is bytes. 342 | - You can raise the number of the `-m` parameter which is the size of the ring buffer that `perf` uses in memory (in multiples of `PAGE_SIZE`, usually that is 4kB) 343 | - You can give up nice call graphs and replace `--call-tree dwarf` with `--call-tree fp` (`fp` stands for frame pointer). 344 | -------------------------------------------------------------------------------- /docs/concurrency-adoption-guidelines.md: -------------------------------------------------------------------------------- 1 | # Swift Concurrency adoption guidelines for Swift Server Libraries 2 | 3 | This writeup attempts to provide a set of guidelines to follow by authors of server-side Swift libraries. Specifically a lot of the discussion here revolves around what to do about existing APIs and libraries making extensive use of Swift NIO’s `EventLoopFuture` and related types. 4 | 5 | Swift Concurrency is a multi-year effort. It is very valuable for the server community to participate in this multi-year adoption of the concurrency features, one by one, and provide feedback while doing so. As such, we should not hold off adopting concurrency features until Swift 6 as we may miss valuable opportunity to improve the concurrency model. 6 | 7 | In 2021 we saw structured concurrency and actors arrive with Swift 5.5. Now is a great time to provide APIs using those primitives. 
In the future we will see fully checked Swift concurrency. This will come with breaking changes. For this reason adopting the new concurrency features can be split into two phases. 8 | 9 | 10 | ## What you can do right now 11 | 12 | ### API Design 13 | 14 | Firstly, existing libraries should strive to add `async` functions where possible to their user-facing “surface” APIs in addition to existing `*Future` based APIs wherever possible. These additive APIs can be gated on the Swift version and can be added without breaking existing users' code, for example like this: 15 | 16 | ```swift 17 | extension Worker { 18 | func work() -> EventLoopFuture { ... } 19 | 20 | #if compiler(>=5.5) && canImport(_Concurrency) 21 | @available(macOS 12.0, iOS 15.0, watchOS 8.0, tvOS 15.0, *) 22 | func work() async throws -> Value { ... } 23 | #endif 24 | } 25 | ``` 26 | 27 | If a function cannot fail but was using futures before, it should not include the `throws` keyword in its new incarnation. 28 | 29 | Such adoption can begin immediately, and should not cause any issues to existing users of existing libraries. 30 | 31 | ### SwiftNIO helper functions 32 | 33 | To allow an easy transition to async code, SwiftNIO offers a number of helper methods on `EventLoopFuture` and `-Promise`. 34 | 35 | On every `EventLoopFuture` you can call `.get()` to transition the future into an `await`-able invocation. If you want to translate async/await calls to an `EventLoopFuture` we recommend the following pattern: 36 | 37 | ```swift 38 | #if compiler(>=5.5) && canImport(_Concurrency) 39 | 40 | func yourAsyncFunctionConvertedToAFuture(on eventLoop: EventLoop) 41 | -> EventLoopFuture { 42 | let promise = context.eventLoop.makePromise(of: Out.self) 43 | promise.completeWithTask { 44 | try await yourMethod(yourInputs) 45 | } 46 | return promise.futureResult 47 | } 48 | #endif 49 | ``` 50 | 51 | Further helpers exist for `EventLoopGroup`, `Channel`, `ChannelOutboundInvoker` and `ChannelPipeline`. 
52 | 53 | 54 | ### `#if` guarding code using Concurrency 55 | 56 | In order to have code using concurrency along with code not using concurrency, you may have to `#if` guard certain pieces of code. The correct way to do so is the following: 57 | 58 | ```swift 59 | #if compiler(>=5.5) && canImport(_Concurrency) 60 | ... 61 | #endif 62 | ``` 63 | 64 | Please note that you do _not_ need to _import_ the `_Concurrency` at all, if it is present it is imported automatically. 65 | 66 | ```swift 67 | #if compiler(>=5.5) && canImport(_Concurrency) 68 | // DO NOT DO THIS. 69 | // Instead don't do any import and it'll import automatically when possible. 70 | import _Concurrency 71 | #endif 72 | ``` 73 | 74 | 75 | ### Sendable Checking 76 | 77 | > [SE-0302][SE-0302] introduced the `Sendable` protocol, which is used to indicate which types have values that can safely be copied across actors or, more generally, into any context where a copy of the value might be used concurrently with the original. Applied uniformly to all Swift code, `Sendable` checking eliminates a large class of data races caused by shared mutable state. 78 | > 79 | > -- from [Staging in Sendable checking][sendable-staging], which outlines the `Sendable` adoption plan for Swift 6. 80 | 81 | In the future we will see fully checked Swift concurrency. The language features to support this are the `Sendable` protocol and the `@Sendable` keyword for closures. Since sendable checking will break existing Swift code, a new major Swift version is required. 82 | 83 | To ease the transition to fully checked Swift code, it is possible to annotate your APIs with the `Sendable` protocol today. 
84 | 85 | You can start adopting Sendable and getting appropriate warnings in Swift 5.5 already by passing the `-warn-concurrency` flag, you can do so in SwiftPM for the entire project like so: 86 | 87 | ``` 88 | swift build -Xswiftc -Xfrontend -Xswiftc -warn-concurrency 89 | ``` 90 | 91 | 92 | #### Sendable checking today 93 | 94 | Sendable checking is currently disabled in Swift 5.5(.0) because it was causing a number of tricky situations for which we lacked the tools to resolve. 95 | 96 | Most of these issues have been resolved on today’s `main` branch of the compiler, and are expected to land in the next Swift 5.5 releases. It may be worthwhile waiting for adoption until the next version(s) after 5.5.0. 97 | 98 | For example, one of such capabilities is the ability for tuples of `Sendable` types to conform to `Sendable` as well. We recommend holding off adoption of `Sendable` until this patch lands in Swift 5.5 (which should be relatively soon). With this change, the difference between Swift 5.5 with `-warn-concurrency` enabled and Swift 6 mode should be very small, and manageable on a case by case basis. 99 | 100 | #### Backwards compatibility of declarations and “checked” Swift Concurrency 101 | 102 | Adopting Swift Concurrency will progressively cause more warnings, and eventually compile time errors in Swift 6 when sendability checks are violated, marking potentially unsafe code. 103 | 104 | It may be difficult for a library to maintain a version that is compatible with versions prior to Swift 6 while also fully embracing the new concurrency checks. For example, it may be necessary to mark generic types as `Sendable`, like so: 105 | 106 | ```swift 107 | struct Container: Sendable { ... } 108 | ``` 109 | 110 | Here, the `Value` type must be marked `Sendable` for Swift 6’s concurrency checks to work properly with such container. 
However, since the `Sendable` type does not exist in Swift prior to Swift 5.5, it would be difficult to maintain a library that supports both Swift 5.4+ as well as Swift 6. 111 | 112 | In such situations, it may be helpful to utilize the following trick to be able to share the same `Container` declaration between both Swift versions of the library: 113 | 114 | ```swift 115 | #if swift(>=5.5) && canImport(_Concurrency) 116 | public typealias MYPREFIX_Sendable = Swift.Sendable 117 | #else 118 | public typealias MYPREFIX_Sendable = Any 119 | #endif 120 | ``` 121 | 122 | > **NOTE:** Yes, we're using `swift(>=5.5)` here, while we're using `compiler(>=5.5)` to guard specific APIs using concurrency features. 123 | 124 | The `Any` alias is effectively a no-op when applied as generic constraint, and thus this way it is possible to keep the same `Container` declaration working across Swift versions. 125 | 126 | ### Task Local Values and Logging 127 | 128 | The newly introduced Task Local Values API ([SE-0311][SE-0311]) allows for implicit carrying of metadata along with `Task` execution. It is a natural fit for tracing and carrying metadata around with task execution, and e.g. including it in log messages. 129 | 130 | We are working on adjusting [SwiftLog](https://github.com/apple/swift-log) to become powerful enough to automatically pick up and log specific task local values. This change will be introduced in a source compatible way. 131 | 132 | For now libraries should continue using logger metadata, but we expect that in the future a lot of the cases where metadata is manually passed to each log statement can be replaced with setting task local values. 133 | 134 | ### Preparing for the concept of Deadlines 135 | 136 | Deadlines are another feature that closely relate to Swift Concurrency, and were originally pitched during the early versions of the Structured Concurrency proposal and later on moved out of it. 
The Swift team remains interested in introducing deadline concepts to the language and some preparation for it already has been performed inside the concurrency runtime. Right now however, there is no support for deadlines in Swift Concurrency and it is fine to continue using mechanisms like `NIODeadline` or similar mechanisms to cancel tasks after some period of time has passed. 137 | 138 | Once Swift Concurrency gains deadline support, they will manifest in being able to cancel a task (and its child tasks) once such deadline (point in time) has been exceeded. For APIs to be “ready for deadlines” they don’t have to do anything special other than preparing to be able to deal with `Task`s and their cancellation. 139 | 140 | ### Cooperatively handling Task cancellation 141 | 142 | `Task` cancellation exists today in Swift Concurrency and is something that libraries may already handle. In practice it means that any asynchronous function (or function which is expected to be called from within `Task`s), may use the [`Task.isCancelled`](https://developer.apple.com/documentation/swift/task/3814832-iscancelled) or [`try Task.checkCancellation()`](https://developer.apple.com/documentation/swift/task/3814826-checkcancellation) APIs to check if the task it is executing in was cancelled, and if so, it may cooperatively abort any operation it was currently performing. 143 | 144 | Cancellation can be useful in long running operations, or before kicking off some expensive operation. For example, an HTTP client MAY check for cancellation before it sends a request — it perhaps does not make sense to send a request if it is known the task awaiting on it does not care for the result anymore after all! 145 | 146 | Cancellation in general can be understood as “the one waiting for the result of this task is not interested in it anymore”, and it usually is best to throw a “cancelled” error when the cancellation is encountered. 
However, in some situations returning a “partial” result may also be appropriate (e.g. if a task is collecting many results, it may return those it managed to collect until now, rather than returning none or ignoring the cancellation and collecting all remaining results). 147 | 148 | ## What to expect with Swift 6 149 | 150 | ### Sendable: Global variables & imported code 151 | 152 | Today, Swift 5.5 does not yet handle global variables at all within its concurrency checking model. This will soon change but the exact semantics are not set in stone yet. In general, avoid using global properties and variables wherever possible to avoid running into issues in the future. Consider deprecating global variables if able to. 153 | 154 | Some global variables have special properties, such as `errno` which contains the error code of system calls. It is a thread local variable and therefore safe to read from any thread/`Task`. We expect to improve the importer to annotate such globals with some kind of “known to be safe” annotation, such that the Swift code using it, even in fully checked concurrency mode won’t complain about it. Having that said, using `errno` and other “thread local” APIs is very error prone in Swift Concurrency because thread-hops may occur at any suspension point, so the following snippet is very likely incorrect: 155 | 156 | ```swift 157 | sys_call(...) 158 | await ... 159 | let err = errno // BAD, we are most likely on a different thread here (!) 160 | ``` 161 | 162 | Please take care when interacting with any thread-local API from Swift Concurrency. If your library had used thread local storage before, you will want to move them to use [task-local values](https://github.com/apple/swift-evolution/blob/main/proposals/0311-task-locals.md) instead as they work correctly with Swift’s structured concurrency tasks. 163 | 164 | Another tricky situation is with imported C code. 
There may be no good way to annotate the imported types as Sendable (or it would be too troublesome to do so by hand). Swift is likely to gain improved support for imported code and potentially allow ignoring some of the concurrency safety checks on imported code. 165 | 166 | These relaxed semantics for imported code are not implemented yet, but keep it in mind when working with C APIs from Swift and trying to adopt the `-warn-concurrency` mode today. Please file any issues you hit on [bugs.swift.org](https://bugs.swift.org/secure/Dashboard.jspa) so we can inform the development of these checking heuristics based on real issues you hit. 167 | 168 | ### Custom Executors 169 | 170 | We expect that Swift Concurrency will allow custom executors in the future. A custom executor would allow the ability to run actors / tasks “on” such executor. It is possible that `EventLoop`s could become such executors, however the custom executor proposal has not been pitched yet. 171 | 172 | While we expect potential performance gains from using custom executors “on the same event loop” by avoiding asynchronous hops between calls to different actors, their introduction will not fundamentally change how NIO libraries are structured. 173 | 174 | The guidance here will evolve as Swift Evolution proposals for Custom Executors are proposed, but don’t hold off adopting Swift Concurrency until custom executors “land” - it is important to start adoption early. For most code we believe that the gains from adopting Swift Concurrency vastly outweigh the slight performance cost actor-hops might induce. 175 | 176 | 177 | ### Reduce use of SwiftNIO Futures as “Concurrency Library“ 178 | 179 | SwiftNIO currently provides a number of concurrency types for the Swift on Server ecosystem. Most notably `EventLoopFuture`s and `EventLoopPromise`s, that are used widely for asynchronous results. 
While the SSWG recommended using those at the API level in the past for easier interplay of server libraries, we advise to deprecate or remove such APIs once Swift 6 lands. The swift-server ecosystem should go all in on the structured concurrency features the languages provides. For this reason, it is crucial to provide async/await APIs today, to give your library users time to adopt the new APIs. 180 | 181 | Some NIO types will remain however in the public interfaces of Swift on server libraries. We expect that networking clients and servers continue to be initialized with `EventLoopGroup`s. The underlying transport mechanism (`NIOPosix` and `NIOTransportServices`) should become implementation details however and should not be exposed to library adopters. 182 | 183 | ### SwiftNIO 3 184 | 185 | While subject to change, it is likely that SwiftNIO will cut a 3.0 release in the months after Swift 6.0, at which point in time Swift will have enabled “full” `Sendable` checking. 186 | 187 | You should not expect NIO to suddenly become “more async”, NIO’s inherent design principles are about performing small tasks on the event loop and using Futures for any async operations. The design of NIO is not expected to change. Channel pipelines are not expected to become "async" in the Swift Concurrency meaning of the word. This is because SwiftNIO is, at its heard, an IO system, and that poses a challenge to the co-operative, shared, thread-pool used by Swift Concurrency. This thread pool must not be blocked by any operation, because doing so will starve the pool and prevent further progress of other async tasks. 188 | 189 | I/O systems however must, at some point, block a thread waiting for more I/O events, either in an I/O syscall or in something like epoll_wait. This is how NIO works: each of the event loop threads ultimately blocks on epoll_wait. 
We can’t do that inside the cooperative thread pool, as to do so would starve it for other async tasks, so we’d have to do so on a different thread. As such, SwiftNIO should not be used _on_ the cooperative threadpool, but should take ownership and full control of its threads–because it is an I/O system. 190 | 191 | It would be possible to make all NIO work happen on the co-operative pool, and thread-hop between each I/O operation and dispatching it onto the async/await pool, however this is not acceptable for high performance I/O: the context switch for _each I/O operation_ is too expensive. As a result, SwiftNIO is not planning to just adopt Swift Concurrency for the ease of use it brings, because in its specific context, the context switches are not an acceptable tradeoff. SwiftNIO could however cooperate with Swift Concurrency with the arrival of "custom executors" in the language runtime, however this has not been fully proposed yet, so we are not going to speculate about this too much. 192 | 193 | The NIO team will however use the chance to remove deprecated APIs and improve some APIs. The scope of changes should be comparable to the NIO1 → NIO2 version bump. If your SwiftNIO code compiles today without warnings, chances are high that it will continue to work without modifications in NIO3. 194 | 195 | After the release of NIO3, NIO2 will see bug fixes only. 196 | 197 | ### End-user code breakage 198 | 199 | It is expected that Swift 6 will break some code. As mentioned SwiftNIO 3 is also going to be released sometime around Swift 6 dropping. Keeping this in mind, it might be a good idea to align major version releases around the same time, along with updating version requirements to Swift 6 and NIO 3 in your libraries. 200 | 201 | Both Swift and SwiftNIO are not planning to do “vast amounts of change”, so adoption should be possible without major pains. 
202 | 203 | ### Guidance for library users 204 | 205 | As soon as Swift 6 comes out, we recommend using the latest Swift 6 toolchains, even if using the Swift 5.5.n language mode (which may yield only warnings rather than hard failures on failed Sendability checks). This will result in better warnings and compiler hints, than just using a 5.5 toolchain. 206 | 207 | [sendable-staging]: https://github.com/DougGregor/swift-evolution/blob/sendable-staging/proposals/nnnn-sendable-staging.md 208 | [SE-0302]: https://github.com/apple/swift-evolution/blob/main/proposals/0302-concurrent-value-and-concurrent-closures.md 209 | [SE-0311]: https://github.com/apple/swift-evolution/blob/main/proposals/0311-task-locals.md 210 | -------------------------------------------------------------------------------- /docs/libs/log-levels.md: -------------------------------------------------------------------------------- 1 | # Library guidelines: Log Levels 2 | 3 | This guide serves as guidelines for library authors with regard to what [SwiftLog](https://github.com/apple/swift-log) log levels are appropriate for use in libraries, and in what situations to use what level. 4 | 5 | Libraries need to be well-behaved across various use cases, and cannot assume a specific style of logging backend will be used with them. It is up to developers implementing specific applications and systems to configure those specifics of their application, and some may choose to log to disk, some to memory, or some may employ sophisticated log aggregators. In all those cases a library should behave "well", meaning that it should not overwhelm typical ("stdout") log backends by logging too much, alerting too much by over-using `error` level log statements etc. 6 | 7 | This is aimed for library authors with regards to what [SwiftLog](https://github.com/apple/swift-log) log levels are appropriate for use in libraries, and also general logging style hints. 
8 | 9 | ## Guidelines for Libraries 10 | 11 | SwiftLog defines the following 7 log levels via the [`Logger.Level` enum](https://apple.github.io/swift-log/docs/current/Logging/Structs/Logger/Level.html), ordered from least to most severe: 12 | 13 | * `trace` 14 | * `debug` 15 | * `info` 16 | * `notice` 17 | * `warning` 18 | * `error` 19 | * `critical` 20 | 21 | Out of those, only levels _less severe than_ info (exclusively) are generally okay to be used by libraries. 22 | 23 | In the following section we'll explore how to use them in practice. 24 | 25 | ### Recommended log levels 26 | 27 | It is always fine for a library to log at `trace` and `debug` levels, and these two should be the primary levels any library is logging at. 28 | 29 | `trace` is the finest log level, and end-users of a library will not usually use it unless debugging very specific issues. You should consider it as a way for library developers to "log everything we could possibly need to diagnose a hard to reproduce bug." Unrestricted logging at `trace` level may take a toll on the performance of a system, and developers can assume trace level logging will not be used in production deployments, unless enabled specifically to locate some specific issue. 30 | 31 | This is in contrast with `debug` which some users _may_ choose to run enabled on their production systems. 32 | 33 | > Debug level logging should be not "too" noisy. Developers should assume some production deployments may need to (or want to) run with debug level logging enabled. 34 | > 35 | > Debug level logging should not completely undermine the performance of a production system. 36 | 37 | As such, `debug` logging should provide a high value understanding of what is going on in the library for end users, using domain relevant language. Logging at `debug` level should not be overly noisy or dive deep into internals; this is what `trace` is intended for. 38 | 39 | Use `warning` level sparingly. 
Whenever possible, try to rather return or throw `Error` to end users that are descriptive enough so they can inspect, log them and figure out the issue. Potentially, they may then enable debug logging to find out more about the issue. 40 | 41 | It is okay to log a `warning` "once", for example on system startup. This may include some one off "more secure configuration is available, try upgrading to it!" log statement upon a server's startup. You may also log warnings from background processes, which otherwise have no other means of informing the end user about some issue. 42 | 43 | Logging on `error` level is similar to warnings: prefer to avoid doing so whenever possible. Instead, report errors via your library's API. For example, it is _not_ a good idea to log "connection failed" from an HTTP client. Perhaps the end-user intended to make this request to a known offline server to _confirm_ it is offline? From their perspective, this connection error is not a "real" error, it is just what they expected -- as such the HTTP client should return or throw such an error, but _not_ log it. 44 | 45 | Do also note that in situations when you decide to log an error, be mindful of error rates. Will this error potentially be logged for every single operation while some network failure is happening? Some teams and companies have alerting systems set up based on the rate of errors logged in a system, and if it exceeds some threshold it may start calling and paging people in the middle of the night. When logging at error level, consider if the issue indeed is something that should be waking up people at night. You may also want to consider offering configuration in your library: "at what log level should this issue be reported?" This can come in handy in clustered systems which may log network failures themselves, or depend on external systems detecting and reporting this. 
46 | 47 | Logging `critical` logs is allowed for libraries, however as the name implies - only in the most critical situations. Most often this implies that the library will *stop functioning* after such log has been issued. End users are thought to expect that a logged critical error is _very_ important, and they may have set up their systems to page people in the middle of the night to investigate the production system _right now_ when such log statements are detected. So please be careful about logging these kinds of errors. 48 | 49 | Some libraries and situations may not be entirely clear with regard to what log level is "best" for them. In such situations, it sometimes is worth it to allow the end-users of the library to be able to configure the levels of specific groups of messages. You can see this in action in the Soto library [here](https://github.com/soto-project/soto-core/pull/423/files#diff-4a8ca7e54da5b22287900dd8cf6b47ded38a94194c1f0b544119030c81a2f238R649) where an `Options` object allows end users to configure the level at which requests are logged (`options.requestLogLevel`) which is then used as `log.log(self.options.requestLogLevel)`. 50 | 51 | #### Examples 52 | 53 | `trace` level logging: 54 | 55 | - Could include various additional information about a request, such as various diagnostics about created data structures, the state of caches or similar, which are created in order to serve a request. 56 | - Could include "begin operation" and "end operation" logging statements. 57 | 58 | `debug` level logging: 59 | 60 | - May include a single log statement for opening a connection, accepting a request, and so on. 61 | - It can include a _high level_ overview of control flow in an operation. For example: "started work, processing step X, made X decision, finished work X, result code 200". This overview may consist of high cardinality structured data. 
62 | 63 | > You may also want to consider using [swift-distributed-tracing](https://github.com/apple/swift-distributed-tracing) to instrument "begin" and "end" events, as tracing may give you additional insights into your system behavior you would have missed with just manually analysing log statements. 64 | 65 | ### Log levels to avoid 66 | 67 | All these rules are only _general_ guidelines, and as such may have exceptions. Consider the following examples and rationale for why logging at high log levels by a library may not be desirable: 68 | 69 | It is generally _not acceptable_ for a service client (for example, an http client) to log an `error` when a request has failed. End-users may be using the client to probe if an endpoint is even responsive or not, and a failure to respond may be _expected_ behavior. Logging errors would only confuse and pollute their logs. 70 | 71 | Instead, libraries should either `throw`, or return an `Error` value that users of the library will have enough knowledge about if they should log or ignore it. 72 | 73 | It is even less acceptable for a library to log any successful operations. This leads to flooding server side systems, especially if, for example, one were to log every successfully handled request. In a server side application, this can easily flood and overwhelm logging systems when deployed to production where many end users are connected to the same server. Such issues are rarely found in development time, because of only a single peer requesting things from the service-under-test. 74 | 75 | #### Examples (of things to avoid) 76 | 77 | Avoid using `info` or any higher log level for: 78 | 79 | - "Normal operation" of the library, that is there is no need to log on info level "accepted a request" as this is the normal operation of a web service. 80 | 81 | Avoid using `error` or `warning`: 82 | 83 | - To report errors which the end-user of the library has the means of logging themselves. 
For example, if a database driver fails to fetch all rows of a query, it should not log an error or warning, but instead return or throw an error on the stream of values (or function, async function, or even the async sequence) that was providing the returned values. 84 | - Since the end-user is consuming these values, and has a means of reporting (or swallowing) this error, the library should not log anything on their behalf. 85 | - Never report as a warning something that is merely information. For example, "weird header detected" may look like a good idea to log as a warning at first sight, however if the "weird header" is simply a misconfigured client (or just a "weird browser") you may be accidentally completely flooding an end-user's logs with these "weird header" warnings (!) 86 | - Only log warnings about actionable things which the end-user of your library can do something about. Using the "weird header detected" log statement as an example: it would not be a good candidate to log as a warning because the server developer has no way to fix the users of their service to stop sending weird headers, so the server should not be logging this information as a warning. 87 | - It may be tempting to implement a "log as warning only once" technique for per-request style situations which may be almost important enough to be a warning, but should not be logged repeatedly after all. Authors may think of smart techniques to log a warning only once per "weird header discovered" and later on log the same issue on a different level, such as trace... Such techniques result in confusing hard to debug logs, where developers of a system unaware of the stateful nature of the logging would be left confused when trying to reproduce the issue. 88 | - For example, if a developer spots such a warning in a production system, they may attempt to reproduce it — thinking that it only happens in the production environment. 
However, if the logging system's log level choice is _stateful_ they may actually be successfully reproducing the issue but never seeing it manifest. For this, and related performance reasons (as implementing "only once per X" implies growing storage and per-request additional checking requirements), it is not recommended to apply this pattern. 89 | 90 | Exceptions to the "avoid logging warnings" rule: 91 | 92 | - "Background processes" such as tasks scheduled on a periodic timer, may not have any other means of communicating a failure or warning to the end user of the library other than through logging. 93 | - Consider offering an API that would collect errors at runtime, and then you can avoid logging errors manually. This can often take the form of a customizable "on error" hook that the library accepts when constructing the scheduled job. If the handler is not customized, we can log the errors, but if it was, it again is up to the end-user of the library to decide what to do with them. 94 | - An exception to the "log a warning only once" rule is when things do not happen very frequently. For example, if a library is warning about an outdated license or something similar during _its initialization_ this isn't necessarily a bad idea. After all, we'd rather see this warning once during initialization rather during every request made to the library. Use your best judgement and consider the developers using your library when designing how often and where from to log such information. 95 | 96 | ### Suggested logging style 97 | 98 | While libraries are free to use whichever logging message style they choose, here are some best practices to follow if you want users of your libraries to *love* the logs your library produces. 
99 | 100 | Firstly, it is important to remember that both the message of a log statement as well as the metadata in [swift-log](https://github.com/apple/swift-log) are [autoclosures](https://docs.swift.org/swift-book/LanguageGuide/Closures.html#ID543), which are only invoked if the logger has a log level set such that it must emit a message for the message given. As such, messages logged at `trace` do not "materialize" their string and metadata representation unless they are actually needed: 101 | 102 | ```swift 103 | public func debug(_ message: @autoclosure () -> Logger.Message, 104 | metadata: @autoclosure () -> Logger.Metadata? = nil, 105 | source: @autoclosure () -> String? = nil, 106 | file: String = #file, function: String = #function, line: UInt = #line) { 107 | ``` 108 | 109 | And a minor yet important hint: avoid inserting newlines and other control characters into log statements (!). Many log aggregation systems assume that a single line in a logged output is specifically "one log statement" which can accidentally break if we log not sanitized, potentially multi-line, strings. This isn't a problem for _all_ log backends. For example, some will automatically sanitize and form a JSON payload with `{message: "..."}` before emitting it to a backend service collecting the logs, but plain old stream (or file) loggers usually assume that one line equals one log statement. It also makes grepping through logs more reliable. 110 | 111 | #### Structured Logging (Semantic Logging) 112 | 113 | Libraries may want to embrace the structured logging style, which renders logs in a [semi-structured data format](https://en.wikipedia.org/wiki/Semi-structured_data). 114 | 115 | It is a fantastic pattern which makes it easier and more reliable for automated code to process logged information. 
116 | 117 | Consider the following "not structured" log statement: 118 | 119 | ```swift 120 | // NOT structured logging style 121 | log.info("Accepted connection \(connection.id) from \(connection.peer), total: \(connections.count)") 122 | ``` 123 | 124 | It contains 4 pieces of information: 125 | 126 | - We accepted a connection. 127 | - This is its string representation. 128 | - It is from this peer. 129 | - We currently have `connections.count` active connections. 130 | 131 | While this log statement contains all useful information that we meant to relay to end users, it is hard to visually and mechanically parse the detailed information it contains. For example, if we know connections start failing around the time when we reach a total of 100 concurrent connections, it is not trivial to find the specific log statement at which we hit this number. We would have to `grep 'total: 100'` for example, however perhaps there are many other `"total: "` strings present in all of our log systems. 132 | 133 | Instead, we can express the same information using the structured logging pattern, as follows: 134 | 135 | ```swift 136 | log.info("Accepted connection", metadata: [ 137 | "connection.id": "\(connection.id)", 138 | "connection.peer": "\(connection.peer)", 139 | "connections.total": "\(connections.count)" 140 | ]) 141 | 142 | // example output: 143 | // info [connection.id:?,connection.peer:?, connections.total:?] Accepted connection 144 | ``` 145 | 146 | This structured log can be formatted, depending on the logging backend, slightly differently on various systems. Even in the simple string representation of such a log, we'd be able to grep for `connections.total: 100` rather than having to guess the correct string. 147 | 148 | Also, since the message now does not contain all that much "human readable wording", it is less prone to randomly change from "Accepted" to "We have accepted" or vice versa. 
This kind of change could break alerting systems which are set up to parse and alert on specific log messages. 149 | 150 | Structured logs are very useful in combination with [swift-distributed-tracing](https://github.com/apple/swift-distributed-tracing)'s `LoggingContext`, which automatically populates the metadata with any present trace information. Thanks to this, all logs made in response to some specific request will automatically carry the same TraceID. 151 | 152 | You can see more examples of structured logging on the following pages, and example implementations thereof: 153 | 154 | - 155 | - 156 | - 157 | - 158 | 159 | #### Logging with Correlation IDs / Trace IDs 160 | 161 | A very common pattern is to log messages with some "correlation id". The best approach in general here is to use a `LoggingContext` from [swift-distributed-tracing](https://github.com/apple/swift-distributed-tracing) as then your library will be able to be traced and used with correlation contexts regardless what tracing system the end-user is using (such as open telemetry, zipkin, xray, and other tracing systems) The concept though can be explained well with just a manually logged `requestID` which we'll explain below. 162 | 163 | Consider an HTTP client as an example of a library that has a lot of metadata about some request, perhaps something like this: 164 | 165 | ```swift 166 | log.trace("Received response", metadata: [ 167 | "id": "...", 168 | "peer.host": "...", 169 | "payload.size": "...", 170 | "headers": "...", 171 | "responseCode": "...", 172 | "responseCode.text": "...", 173 | ]) 174 | ``` 175 | 176 | The exact metadata does not matter, they're just some placeholder in this example. What matters is that there's "a lot of it". 177 | 178 | > Side note on metadata keys: while there is no single right way to structure metadata keys, we recommend thinking of them as-if JSON keys: camelCased and `.` separated identifiers. 
This allows many log analysis backends to treat them as such nested structure. 179 | 180 | Now, we would like to avoid logging _all_ this information in every single log statement. Instead, we are able to just repeatedly log the `"id"` metadata, like this: 181 | 182 | ```swift 183 | // ... 184 | log.trace("Something something...", metadata: ["id": "..."]) 185 | log.trace("Finished streaming response", metadata: ["id": "..."]) // good, the same ID is propagated 186 | ``` 187 | 188 | Thanks to the correlation ID (or a tracing provided ID, in which case we'd log as `context.log.trace("...")` as the ID is propagated automatically), in each following log statement after the initial log statement we're able to correlate all those log statements. Then we know that this `"Finished streaming response"` message was about a response with a `responseCode` that we're able to look up from the `"Received response"` log message. 189 | 190 | This pattern is somewhat advanced and may not always be the right approach, but consider it in high performance code where logging the same information repeatedly can be too costly. 191 | 192 | ##### Things to avoid with Correlation ID logging 193 | 194 | When logging with correlation contexts make sure to never "drop the ID". It is easiest to get this right when using distributed tracing's `LoggingContext` since propagating it ensures the carrying of identifiers, however the same applies to any kind of correlation identifier. 195 | 196 | Specifically, avoid situations like these: 197 | 198 | ```swift 199 | debug: connection established [connection-id: 7] 200 | debug: connection closed unexpectedly [error: foobar] // BAD, the connection-id was dropped 201 | ``` 202 | 203 | On the second line, we don't know which connection had the error since the `connection-id` was dropped. Make sure to audit your logging code to ensure all relevant log statements carry necessary correlation identifiers. 
204 | 205 | ### Exceptions to the rules 206 | 207 | These are only general guidelines, and there always will be exceptions to these rules and other situations where these suggestions will be broken, for good reason. Please use your best judgement, and always consider the end-user of a system, and how they'll be interacting with your library and decide case-by-case depending on the library and situation at hand how to handle each situation. 208 | 209 | Here are a few examples of situations when logging a message on a relatively high level might still be tolerable for a library. 210 | 211 | It's permissible for a library to log at `critical` level right before a _hard_ crash of the process, as a last resort of informing the log collection systems or end-user about additional information detailing the reason for the crash. This should be _in addition to_ the message from a `fatalError` and can lead to an improved diagnosis/debugging experience for end users. 212 | 213 | Sometimes libraries may be able to detect a harmful misconfiguration of the library. For example, selecting deprecated protocol versions. In such situations it may be useful to inform users in production by issuing a `warning`. However you should ensure that the warning is not logged repeatedly! For example, it is not acceptable for an HTTP client to log a warning on every single http request using some misconfiguration of the client. It _may_ be acceptable however for the client to log such a warning, for example, _once_ at configuration time, if the library has a good way to do this. 214 | 215 | Some libraries may implement a "log this warning only once", "log this warning only at startup", "log this error only once an hour", or similar tricks to keep the noise level low but still informative enough to not be missed. This is, however, usually a pattern reserved for stateful long running libraries, rather than clients of databases and related persistent stores. 
216 | -------------------------------------------------------------------------------- /docs/aws-copilot-fargate-vapor-mongo.md: -------------------------------------------------------------------------------- 1 | # Server Side Swift on AWS with Fargate, Vapor, and MongoDB Atlas 2 | 3 | This guide illustrates how to deploy a Server-Side Swift workload on AWS. The workload is a REST API for tracking a To Do List. It uses the [Vapor](https://vapor.codes/) framework to program the API methods. The methods store and retrieve data in a [MongoDB Atlas](https://www.mongodb.com/atlas/database) cloud database. The Vapor application is containerized and deployed to AWS on AWS Fargate using the [AWS Copilot](https://aws.github.io/copilot-cli/) toolkit. 4 | 5 | ## Architecture 6 | ![Architecture](../images/aws/aws-fargate-vapor-mongo.png) 7 | 8 | - Amazon API Gateway receives API requests 9 | - API Gateway locates your application containers in AWS Fargate through internal DNS managed by AWS Cloud Map 10 | - API Gateway forwards the requests to the containers 11 | - The containers run the Vapor framework and have methods to GET and POST items 12 | - Vapor stores and retrieves items in a MongoDB Atlas cloud database which runs in a MongoDB managed AWS account 13 | 14 | ## Prerequisites 15 | To build this sample application, you need: 16 | 17 | - [AWS Account](https://console.aws.amazon.com/) 18 | - [MongoDB Atlas Database](https://www.mongodb.com/atlas/database) 19 | - [AWS Copilot](https://aws.github.io/copilot-cli/) - a command-line tool used to create containerized workloads on AWS 20 | - [Docker Desktop](https://www.docker.com/products/docker-desktop/) - to compile your Swift code into a Docker image 21 | - [Vapor](https://vapor.codes/) - to code the REST service 22 | - [AWS Command Line Interface (AWS CLI)](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) - install the CLI and 
[configure](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) it with credentials to your AWS account 23 | 24 | ## Step 1: Create Your Database 25 | If you are new to MongoDB Atlas, follow this [Getting Started Guide](https://www.mongodb.com/docs/atlas/getting-started/). You need to create the following items: 26 | - Atlas Account 27 | - Cluster 28 | - Database Username / Password 29 | - Database 30 | - Collection 31 | 32 | In subsequent steps, you provide values to these items to configure the application. 33 | 34 | ## Step 2: Initialize a New Vapor Project 35 | 36 | Create a folder for your project. 37 | 38 | ``` 39 | mkdir todo-app && cd todo-app 40 | ``` 41 | 42 | Initialize a Vapor project named *api*. 43 | 44 | ``` 45 | vapor new api -n 46 | ``` 47 | 48 | ## Step 3: Add Project Dependencies 49 | Vapor initializes a *Package.swift* file for the project dependencies. Your project requires an additional library, [MongoDBVapor](https://github.com/mongodb/mongodb-vapor). Add the MongoDBVapor library to the project and target dependencies of your *Package.swift* file. 
50 | 51 | Your updated file should look like this: 52 | 53 | **api/Package.swift** 54 | ```swift 55 | // swift-tools-version:5.6 56 | import PackageDescription 57 | 58 | let package = Package( 59 | name: "api", 60 | platforms: [ 61 | .macOS(.v12) 62 | ], 63 | dependencies: [ 64 | .package(url: "https://github.com/vapor/vapor", .upToNextMajor(from: "4.7.0")), 65 | .package(url: "https://github.com/mongodb/mongodb-vapor", .upToNextMajor(from: "1.1.0")) 66 | ], 67 | targets: [ 68 | .target( 69 | name: "App", 70 | dependencies: [ 71 | .product(name: "Vapor", package: "vapor"), 72 | .product(name: "MongoDBVapor", package: "mongodb-vapor") 73 | ], 74 | swiftSettings: [ 75 | .unsafeFlags(["-cross-module-optimization"], .when(configuration: .release)) 76 | ] 77 | ), 78 | .executableTarget(name: "Run", dependencies: [.target(name: "App")]), 79 | .testTarget(name: "AppTests", dependencies: [ 80 | .target(name: "App"), 81 | .product(name: "XCTVapor", package: "vapor"), 82 | ]) 83 | ] 84 | ) 85 | ``` 86 | 87 | ## Step 4: Update the Dockerfile 88 | You deploy your Swift Server code to AWS Fargate as a Docker image. Vapor generates an initial Dockerfile for your application. 
Your application requires a few modifications to this Dockerfile: 89 | 90 | - pull the *build* and *run* images from the [Amazon ECR Public Gallery](https://gallery.ecr.aws) container repository 91 | - install *libssl-dev* in the build image 92 | - install *libxml2* and *curl* in the run image 93 | 94 | Replace the contents of the Vapor generated Dockerfile with the following code: 95 | 96 | **api/Dockerfile** 97 | ```Dockerfile 98 | # ================================ 99 | # Build image 100 | # ================================ 101 | FROM public.ecr.aws/docker/library/swift:5.6.2-focal as build 102 | 103 | # Install OS updates 104 | RUN export DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true \ 105 | && apt-get -q update \ 106 | && apt-get -q dist-upgrade -y \ 107 | && apt-get -y install libssl-dev \ 108 | && rm -rf /var/lib/apt/lists/* 109 | 110 | # Set up a build area 111 | WORKDIR /build 112 | 113 | # First just resolve dependencies. 114 | # This creates a cached layer that can be reused 115 | # as long as your Package.swift/Package.resolved 116 | # files do not change. 117 | COPY ./Package.* ./ 118 | RUN swift package resolve 119 | 120 | # Copy entire repo into container 121 | COPY . . 122 | 123 | # Build everything, with optimizations 124 | RUN swift build -c release --static-swift-stdlib 125 | 126 | # Switch to the staging area 127 | WORKDIR /staging 128 | 129 | # Copy main executable to staging area 130 | RUN cp "$(swift build --package-path /build -c release --show-bin-path)/Run" ./ 131 | 132 | # Copy resources bundled by SPM to staging area 133 | RUN find -L "$(swift build --package-path /build -c release --show-bin-path)/" -regex '.*\.resources$' -exec cp -Ra {} ./ \; 134 | 135 | # Copy any resources from the public directory and views directory if the directories exist 136 | # Ensure that by default, neither the directory nor any of its contents are writable. 
137 | RUN [ -d /build/Public ] && { mv /build/Public ./Public && chmod -R a-w ./Public; } || true 138 | RUN [ -d /build/Resources ] && { mv /build/Resources ./Resources && chmod -R a-w ./Resources; } || true 139 | 140 | # ================================ 141 | # Run image 142 | # ================================ 143 | FROM public.ecr.aws/ubuntu/ubuntu:focal 144 | 145 | # Make sure all system packages are up to date, and install only essential packages. 146 | RUN export DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true \ 147 | && apt-get -q update \ 148 | && apt-get -q dist-upgrade -y \ 149 | && apt-get -q install -y \ 150 | ca-certificates \ 151 | tzdata \ 152 | curl \ 153 | libxml2 \ 154 | && rm -r /var/lib/apt/lists/* 155 | 156 | # Create a vapor user and group with /app as its home directory 157 | RUN useradd --user-group --create-home --system --skel /dev/null --home-dir /app vapor 158 | 159 | # Switch to the new home directory 160 | WORKDIR /app 161 | 162 | # Copy built executable and any staged resources from builder 163 | COPY --from=build --chown=vapor:vapor /staging /app 164 | 165 | # Ensure all further commands run as the vapor user 166 | USER vapor:vapor 167 | 168 | # Let Docker bind to port 8080 169 | EXPOSE 8080 170 | 171 | # Start the Vapor service when the image is run, default to listening on 8080 in production environment 172 | ENTRYPOINT ["./Run"] 173 | CMD ["serve", "--env", "production", "--hostname", "0.0.0.0", "--port", "8080"] 174 | ``` 175 | ## Step 5: Update the Vapor Source Code 176 | Vapor also generates the sample files needed to code an API. You must customize these files with code that exposes your To Do List API methods and interacts with your MongoDB database. 177 | 178 | The *configure.swift* file initializes an application-wide pool of connections to your MongoDB database. It retrieves the connection string to your MongoDB database from an environment variable at runtime. 
179 | 180 | Replace the contents of the file with the following code: 181 | 182 | **api/Sources/App/configure.swift** 183 | ```swift 184 | import MongoDBVapor 185 | import Vapor 186 | 187 | public func configure(_ app: Application) throws { 188 | 189 | let MONGODB_URI = Environment.get("MONGODB_URI") ?? "" 190 | 191 | try app.mongoDB.configure(MONGODB_URI) 192 | 193 | ContentConfiguration.global.use(encoder: ExtendedJSONEncoder(), for: .json) 194 | ContentConfiguration.global.use(decoder: ExtendedJSONDecoder(), for: .json) 195 | 196 | try routes(app) 197 | } 198 | ``` 199 | 200 | The *routes.swift* file defines the methods to your API. These include a *POST Item* method to insert a new item and a *GET Items* method to retrieve a list of all existing items. See comments in the code to understand what happens in each section. 201 | 202 | Replace the contents of the file with the following code: 203 | 204 | **api/Sources/App/routes.swift** 205 | ```swift 206 | import Vapor 207 | import MongoDBVapor 208 | 209 | // define the structure of a ToDoItem 210 | struct ToDoItem: Content { 211 | var _id: BSONObjectID? 212 | let name: String 213 | var createdOn: Date? 214 | } 215 | 216 | // import the MongoDB database and collection names from environment variables 217 | let MONGODB_DATABASE = Environment.get("MONGODB_DATABASE") ?? "" 218 | let MONGODB_COLLECTION = Environment.get("MONGODB_COLLECTION") ?? 
"" 219 | 220 | // define an extension to the Vapor Request object to interact with the database and collection 221 | extension Request { 222 | 223 | var todoCollection: MongoCollection { 224 | self.application.mongoDB.client.db(MONGODB_DATABASE).collection(MONGODB_COLLECTION, withType: ToDoItem.self) 225 | } 226 | } 227 | 228 | // define the api routes 229 | func routes(_ app: Application) throws { 230 | 231 | // a base level route used for container healthchecks 232 | app.get { req in 233 | return "OK" 234 | } 235 | 236 | // GET items returns a JSON array of all items in the database 237 | app.get("items") { req async throws -> [ToDoItem] in 238 | try await req.todoCollection.find().toArray() 239 | } 240 | 241 | // POST item inserts a new item into the database and returns the item as JSON 242 | app.post("item") { req async throws -> ToDoItem in 243 | 244 | var item = try req.content.decode(ToDoItem.self) 245 | item.createdOn = Date() 246 | 247 | let response = try await req.todoCollection.insertOne(item) 248 | item._id = response?.insertedID.objectIDValue 249 | 250 | return item 251 | } 252 | } 253 | ``` 254 | 255 | The *main.swift* file defines the startup and shutdown code for the application. Change the code to include a *defer* statement to close the connection to your MongoDB database when the application ends. 
256 | 257 | Replace the contents of the file with the following code: 258 | 259 | **api/Sources/Run/main.swift** 260 | ```swift 261 | import App 262 | import Vapor 263 | import MongoDBVapor 264 | 265 | var env = try Environment.detect() 266 | try LoggingSystem.bootstrap(from: &env) 267 | let app = Application(env) 268 | try configure(app) 269 | 270 | // shutdown and cleanup the MongoDB connection when the application terminates 271 | defer { 272 | app.mongoDB.cleanup() 273 | cleanupMongoSwift() 274 | app.shutdown() 275 | } 276 | 277 | try app.run() 278 | ``` 279 | 280 | ## Step 6: Initialize AWS Copilot 281 | [AWS Copilot](https://aws.github.io/copilot-cli/) is a command-line utility for generating a containerized application in AWS. You use Copilot to build and deploy your Vapor code as containers in Fargate. Copilot also creates and tracks an AWS Systems Manager secret parameter for the value of your MongoDB connection string. You store this value as a secret as it contains the username and password to your database. You never want to store this in your source code. Finally, Copilot creates an API Gateway to expose a public endpoint for your API. 282 | 283 | Initialize a new Copilot application. 284 | 285 | ```bash 286 | copilot app init todo 287 | ``` 288 | 289 | Add a new Copilot *Backend Service*. The service refers to the Dockerfile of your Vapor project for instructions on how to build the container. 290 | 291 | ```bash 292 | copilot svc init --name api --svc-type "Backend Service" --dockerfile ./api/Dockerfile 293 | ``` 294 | 295 | Create a Copilot environment for your application. An environment typically aligns to a phase, such as dev, test, or prod. When prompted, select the AWS credentials profile you configured with the AWS CLI. 
296 | 297 | ```bash 298 | copilot env init --name dev --app todo --default-config 299 | ``` 300 | 301 | Deploy the *dev* environment: 302 | 303 | ```bash 304 | copilot env deploy --name dev 305 | ``` 306 | 307 | ## Step 7: Create a Copilot Secret for Database Credentials 308 | 309 | Your application requires credentials to authenticate to your MongoDB Atlas database. You should never store this sensitive information in your source code. Create a Copilot *secret* to store the credentials. This stores the connection string to your MongoDB cluster in an AWS Systems Manager Secret Parameter. 310 | 311 | Determine the connection string from the MongoDB Atlas website. Select the *Connect* button on your cluster page and then select *Connect your application*. 312 | 313 | ![Architecture](../images/aws/aws-fargate-vapor-mongo-atlas-connection.png) 314 | 315 | Select *Swift version 1.2.0* as the Driver and copy the displayed connection string. It looks something like this: 316 | 317 | ```bash 318 | mongodb+srv://username:<password>@mycluster.mongodb.net/?retryWrites=true&w=majority 319 | ``` 320 | 321 | The connection string contains your database username and a placeholder for the password. Replace the **\<password\>** section with your database password. Then create a new Copilot secret named MONGODB_URI and save your connection string when prompted for the value. 322 | 323 | ```bash 324 | copilot secret init --app todo --name MONGODB_URI 325 | ``` 326 | 327 | Fargate injects the secret value as an environment variable into your container at runtime. In Step 5 above, you extracted this value in your *api/Sources/App/configure.swift* file and used it to configure your MongoDB connection. 328 | 329 | ## Step 8: Configure the Backend Service 330 | 331 | Copilot generates a *manifest.yml* file for your application that defines the attributes of your service, such as the Docker image, network, secrets, and environment variables. 
Change the manifest file generated by Copilot to add the following properties: 332 | 333 | - configure a health check for the container image 334 | - add a reference to the MONGODB_URI secret 335 | - configure the service network as *private* 336 | - add environment variables for the MONGODB_DATABASE and MONGODB_COLLECTION 337 | 338 | To implement these changes, replace the contents of the *manifest.yml* file with the following code. Update the values of MONGODB_DATABASE and MONGODB_COLLECTION to reflect the names of the database and cluster you created in MongoDB Atlas for this application. 339 | 340 | If you are building this solution on a **Mac M1/M2** machine, uncomment the **platform** property in the manifest.yml file to specify an ARM build. The default value is *linux/x86_64*. 341 | 342 | **copilot/api/manifest.yml** 343 | ```yaml 344 | # The manifest for the "api" service. 345 | # Read the full specification for the "Backend Service" type at: 346 | # https://aws.github.io/copilot-cli/docs/manifest/backend-service/ 347 | 348 | # Your service name will be used in naming your resources like log groups, ECS services, etc. 349 | name: api 350 | type: Backend Service 351 | 352 | # Your service is reachable at "http://api.${COPILOT_SERVICE_DISCOVERY_ENDPOINT}:8080" but is not public. 353 | 354 | # Configuration for your containers and service. 355 | image: 356 | # Docker build arguments. For additional overrides: https://aws.github.io/copilot-cli/docs/manifest/backend-service/#image-build 357 | build: api/Dockerfile 358 | # Port exposed through your container to route traffic to it. 
359 | port: 8080 360 | healthcheck: 361 | command: ["CMD-SHELL", "curl -f http://localhost:8080 || exit 1"] 362 | interval: 10s 363 | retries: 2 364 | timeout: 5s 365 | start_period: 0s 366 | 367 | # Mac M1/M2 users - uncomment the following platform line 368 | # the default platform is linux/x86_64 369 | 370 | # platform: linux/arm64 371 | 372 | cpu: 256 # Number of CPU units for the task. 373 | memory: 512 # Amount of memory in MiB used by the task. 374 | count: 2 # Number of tasks that should be running in your service. 375 | exec: true # Enable running commands in your container. 376 | 377 | # define the network as private. this will place Fargate in private subnets 378 | network: 379 | vpc: 380 | placement: private 381 | 382 | # Optional fields for more advanced use-cases. 383 | # 384 | # Pass environment variables as key value pairs. 385 | variables: 386 | MONGODB_DATABASE: home 387 | MONGODB_COLLECTION: todolist 388 | 389 | # Pass secrets from AWS Systems Manager (SSM) Parameter Store. 390 | secrets: 391 | MONGODB_URI: /copilot/${COPILOT_APPLICATION_NAME}/${COPILOT_ENVIRONMENT_NAME}/secrets/MONGODB_URI 392 | 393 | # You can override any of the values defined above by environment. 394 | #environments: 395 | # test: 396 | # count: 2 # Number of tasks to run for the "test" environment. 397 | # deployment: # The deployment strategy for the "test" environment. 398 | # rolling: 'recreate' # Stops existing tasks before new ones are started for faster deployments. 399 | ``` 400 | 401 | ## Step 9: Create a Copilot Addon Service for your API Gateway 402 | 403 | Copilot does not have the capability to add an API Gateway to your application. You can, however, add additional AWS resources to your application using [Copilot "Addons"](https://aws.github.io/copilot-cli/docs/developing/additional-aws-resources/#how-to-do-i-add-other-resources). 
404 | 405 | Define an addon by creating an *addons* folder under your Copilot service folder and creating a CloudFormation yaml template to define the services you wish to create. 406 | 407 | Create a folder for the addon: 408 | 409 | ```bash 410 | mkdir -p copilot/api/addons 411 | ``` 412 | 413 | Create a file to define the API Gateway: 414 | 415 | ```bash 416 | touch copilot/api/addons/apigateway.yml 417 | ``` 418 | 419 | Create a file to pass parameters from the main service into the addon service: 420 | 421 | ```bash 422 | touch copilot/api/addons/addons.parameters.yml 423 | ``` 424 | 425 | Copy the following code into the *addons.parameters.yml* file. It passes the ID of the Cloud Map service into the addon stack. 426 | 427 | **copilot/api/addons/addons.parameters.yml** 428 | ```yaml 429 | Parameters: 430 | DiscoveryServiceARN: !GetAtt DiscoveryService.Arn 431 | ``` 432 | 433 | Copy the following code into the *addons/apigateway.yml* file. It creates an API Gateway using the DiscoveryServiceARN to integrate with the Cloud Map service Copilot created for your Fargate containers. 434 | 435 | **copilot/api/addons/apigateway.yml** 436 | ```yaml 437 | Parameters: 438 | App: 439 | Type: String 440 | Description: Your application's name. 441 | Env: 442 | Type: String 443 | Description: The environment name your service, job, or workflow is being deployed to. 444 | Name: 445 | Type: String 446 | Description: The name of the service, job, or workflow being deployed. 447 | DiscoveryServiceARN: 448 | Type: String 449 | Description: The ARN of the Cloud Map discovery service. 
450 | 451 | Resources: 452 | ApiVpcLink: 453 | Type: AWS::ApiGatewayV2::VpcLink 454 | Properties: 455 | Name: !Sub "${App}-${Env}-${Name}" 456 | SubnetIds: 457 | !Split [",", Fn::ImportValue: !Sub "${App}-${Env}-PrivateSubnets"] 458 | SecurityGroupIds: 459 | - Fn::ImportValue: !Sub "${App}-${Env}-EnvironmentSecurityGroup" 460 | 461 | ApiGatewayV2Api: 462 | Type: "AWS::ApiGatewayV2::Api" 463 | Properties: 464 | Name: !Sub "${Name}.${Env}.${App}.api" 465 | ProtocolType: "HTTP" 466 | CorsConfiguration: 467 | AllowHeaders: 468 | - "*" 469 | AllowMethods: 470 | - "*" 471 | AllowOrigins: 472 | - "*" 473 | 474 | ApiGatewayV2Stage: 475 | Type: "AWS::ApiGatewayV2::Stage" 476 | Properties: 477 | StageName: "$default" 478 | ApiId: !Ref ApiGatewayV2Api 479 | AutoDeploy: true 480 | 481 | ApiGatewayV2Integration: 482 | Type: "AWS::ApiGatewayV2::Integration" 483 | Properties: 484 | ApiId: !Ref ApiGatewayV2Api 485 | ConnectionId: !Ref ApiVpcLink 486 | ConnectionType: "VPC_LINK" 487 | IntegrationMethod: "ANY" 488 | IntegrationType: "HTTP_PROXY" 489 | IntegrationUri: !Sub "${DiscoveryServiceARN}" 490 | TimeoutInMillis: 30000 491 | PayloadFormatVersion: "1.0" 492 | 493 | ApiGatewayV2Route: 494 | Type: "AWS::ApiGatewayV2::Route" 495 | Properties: 496 | ApiId: !Ref ApiGatewayV2Api 497 | RouteKey: "$default" 498 | Target: !Sub "integrations/${ApiGatewayV2Integration}" 499 | ``` 500 | 501 | ## Step 10: Deploy the Copilot Service 502 | When deploying your service, Copilot executes the following actions: 503 | 504 | - builds your Vapor Docker image 505 | - deploys the image to the Amazon Elastic Container Registry (ECR) in your AWS account 506 | - creates and deploys an AWS CloudFormation template into your AWS account. CloudFormation creates all the services defined in your application. 
507 | 508 | ```bash 509 | copilot svc deploy --name api --app todo --env dev 510 | ``` 511 | 512 | ## Step 11: Configure MongoDB Atlas Network Access 513 | MongoDB Atlas uses an IP Access List to restrict access to your database to a specific list of source IP addresses. In your application, traffic from your containers originates from the public IP addresses of the NAT Gateways in your application's network. You must configure MongoDB Atlas to allow traffic from these IP addresses. 514 | 515 | To get the IP addresses of the NAT Gateways, run the following AWS CLI command: 516 | 517 | ```bash 518 | aws ec2 describe-nat-gateways --filter "Name=tag-key, Values=copilot-application" --query 'NatGateways[?State == `available`].NatGatewayAddresses[].PublicIp' --output table 519 | ``` 520 | 521 | Output: 522 | 523 | ```bash 524 | --------------------- 525 | |DescribeNatGateways| 526 | +-------------------+ 527 | | 1.1.1.1 | 528 | | 2.2.2.2 | 529 | +-------------------+ 530 | ``` 531 | 532 | Use the IP addresses to create a Network Access rule in your MongoDB Atlas account for each address. 
533 | 534 | ![Architecture](../images/aws/aws-fargate-vapor-mongo-atlas-network-address.png) 535 | 536 | ## Step 12: Use your API 537 | 538 | To get the endpoint for your API, use the following AWS CLI command: 539 | 540 | ```bash 541 | aws apigatewayv2 get-apis --query 'Items[?Name==`api.dev.todo.api`].ApiEndpoint' --output table 542 | ``` 543 | 544 | Output: 545 | 546 | ```bash 547 | ------------------------------------------------------------ 548 | | GetApis | 549 | +----------------------------------------------------------+ 550 | | https://[your-api-endpoint] | 551 | +----------------------------------------------------------+ 552 | ``` 553 | 554 | Use cURL or a tool such as [Postman](https://www.postman.com/) to interact with your API: 555 | 556 | Add a To Do List item 557 | 558 | ```bash 559 | curl --request POST 'https://[your-api-endpoint]/item' --header 'Content-Type: application/json' --data-raw '{"name": "my todo item"}' 560 | ``` 561 | 562 | Retrieve To Do List items 563 | 564 | ```bash 565 | curl https://[your-api-endpoint]/items 566 | ``` 567 | 568 | ## Cleanup 569 | When finished with your application, use Copilot to delete it. This deletes all the services created in your AWS account. 570 | 571 | ```bash 572 | copilot app delete --name todo 573 | ``` 574 | -------------------------------------------------------------------------------- /images/perf-issues-flamegraph.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 23 | 367 | 368 | Flame Graph 369 | 370 | Reset Zoom 371 | Search 372 | ic 373 | 374 | 375 | 376 | load_elf_binary (1 samples, 0.76%) 377 | 378 | 379 | 380 | ext4_file_read_iter (4 samples, 3.05%) 381 | ext.. 382 | 383 | 384 | finish_task_switch (4 samples, 3.05%) 385 | fin.. 
386 | 387 | 388 | main (127 samples, 96.95%) 389 | main 390 | 391 | 392 | sched_clock (1 samples, 0.76%) 393 | 394 | 395 | 396 | protocol witness for main.FavouriteNumbers.addFavouriteNumber(Swift.Int) -> () in conformance main.MyFavouriteNumbers : main.FavouriteNumbers in main (120 samples, 91.60%) 397 | protocol witness for main.FavouriteNumbers.addFavouriteNumber(Swift.Int) -> () in conformance main.MyFavouriteNumbers : main.FavouriteNumbers in main 398 | 399 | 400 | generic specialization <Swift._ContiguousArrayBuffer<(Swift.Int, Swift.Int)>> of (extension in Swift):Swift._ArrayBufferProtocol._arrayOutOfPlaceUpdate(inout Swift._ContiguousArrayBuffer<A.Element>, Swift.Int, Swift.Int, (Swift.UnsafeMutablePointer<A.Element>, Swift.Int) -> ()) -> () (1 samples, 0.76%) 401 | 402 | 403 | 404 | load_elf_binary (4 samples, 3.05%) 405 | loa.. 406 | 407 | 408 | swift_deallocClassInstance (1 samples, 0.76%) 409 | 410 | 411 | 412 | swift_release (1 samples, 0.76%) 413 | 414 | 415 | 416 | open_path (4 samples, 3.05%) 417 | ope.. 418 | 419 | 420 | do_execveat_common.isra.34 (1 samples, 0.76%) 421 | 422 | 423 | 424 | read (4 samples, 3.05%) 425 | read 426 | 427 | 428 | search_binary_handler (1 samples, 0.76%) 429 | 430 | 431 | 432 | do_syscall_64 (1 samples, 0.76%) 433 | 434 | 435 | 436 | generic_file_read_iter (4 samples, 3.05%) 437 | gen.. 438 | 439 | 440 | intel_pmu_enable_all (4 samples, 3.05%) 441 | int.. 442 | 443 | 444 | sys_execve (4 samples, 3.05%) 445 | sys.. 446 | 447 | 448 | entry_SYSCALL_64_after_hwframe (4 samples, 3.05%) 449 | ent.. 450 | 451 | 452 | __sched_text_start (4 samples, 3.05%) 453 | __s.. 454 | 455 | 456 | main.MyFavouriteNumbers.addFavouriteNumber(Swift.Int) -> () (120 samples, 91.60%) 457 | main.MyFavouriteNumbers.addFavouriteNumber(Swift.Int) -> () 458 | 459 | 460 | do_execveat_common.isra.34 (4 samples, 3.05%) 461 | do_.. 
462 | 463 | 464 | generic specialization <Swift._ContiguousArrayBuffer<(Swift.Int, Swift.Int)>> of default argument 3 of (extension in Swift):Swift._ArrayBufferProtocol._arrayOutOfPlaceUpdate(inout Swift._ContiguousArrayBuffer<A.Element>, Swift.Int, Swift.Int, (Swift.UnsafeMutablePointer<A.Element>, Swift.Int) -> ()) -> () (1 samples, 0.76%) 465 | 466 | 467 | 468 | x86_pmu_enable (4 samples, 3.05%) 469 | x86.. 470 | 471 | 472 | mmap_region (1 samples, 0.76%) 473 | 474 | 475 | 476 | sys_mmap (1 samples, 0.76%) 477 | 478 | 479 | 480 | do_syscall_64 (1 samples, 0.76%) 481 | 482 | 483 | 484 | perf_event_exec (4 samples, 3.05%) 485 | per.. 486 | 487 | 488 | entry_SYSCALL_64_after_hwframe (1 samples, 0.76%) 489 | 490 | 491 | 492 | shift_arg_pages (1 samples, 0.76%) 493 | 494 | 495 | 496 | [unknown] (4 samples, 3.05%) 497 | [un.. 498 | 499 | 500 | __libc_start_main (120 samples, 91.60%) 501 | __libc_start_main 502 | 503 | 504 | swift_release (5 samples, 3.82%) 505 | swif.. 506 | 507 | 508 | vfs_read (4 samples, 3.05%) 509 | vfs.. 510 | 511 | 512 | nmi (1 samples, 0.76%) 513 | 514 | 515 | 516 | __perf_event_task_sched_in (4 samples, 3.05%) 517 | __p.. 518 | 519 | 520 | __malloc_usable_size (1 samples, 0.76%) 521 | 522 | 523 | 524 | perf_event_nmi_handler (1 samples, 0.76%) 525 | 526 | 527 | 528 | swift_release@plt (1 samples, 0.76%) 529 | 530 | 531 | 532 | do_mmap (1 samples, 0.76%) 533 | 534 | 535 | 536 | vm_mmap_pgoff (1 samples, 0.76%) 537 | 538 | 539 | 540 | main.MyFavouriteNumbers.isFavouriteNumber(Swift.Int) -> Swift.Bool (112 samples, 85.50%) 541 | main.MyFavouriteNumbers.isFavouriteNumber(Swift.Int) -> Swift.Bool 542 | 543 | 544 | intel_pmu_enable_all (4 samples, 3.05%) 545 | int.. 
546 | 547 | 548 | entry_SYSCALL_64_after_hwframe (1 samples, 0.76%) 549 | 550 | 551 | 552 | swift_retain_n (1 samples, 0.76%) 553 | 554 | 555 | 556 | all (131 samples, 100%) 557 | 558 | 559 | 560 | main (120 samples, 91.60%) 561 | main 562 | 563 | 564 | io_schedule (4 samples, 3.05%) 565 | io_.. 566 | 567 | 568 | entry_SYSCALL_64_after_hwframe (4 samples, 3.05%) 569 | ent.. 570 | 571 | 572 | search_binary_handler (4 samples, 3.05%) 573 | sea.. 574 | 575 | 576 | __intel_pmu_enable_all.constprop.23 (4 samples, 3.05%) 577 | __i.. 578 | 579 | 580 | __vfs_read (4 samples, 3.05%) 581 | __v.. 582 | 583 | 584 | schedule (4 samples, 3.05%) 585 | sch.. 586 | 587 | 588 | do_syscall_64 (4 samples, 3.05%) 589 | do_.. 590 | 591 | 592 | perf (4 samples, 3.05%) 593 | perf 594 | 595 | 596 | __intel_pmu_enable_all.constprop.23 (4 samples, 3.05%) 597 | __i.. 598 | 599 | 600 | sys_execve (1 samples, 0.76%) 601 | 602 | 603 | 604 | ctx_resched (4 samples, 3.05%) 605 | ctx.. 606 | 607 | 608 | malloc (1 samples, 0.76%) 609 | 610 | 611 | 612 | sys_mmap_pgoff (1 samples, 0.76%) 613 | 614 | 615 | 616 | new_sync_read (4 samples, 3.05%) 617 | new.. 618 | 619 | 620 | native_write_msr (3 samples, 2.29%) 621 | n.. 622 | 623 | 624 | cfree@GLIBC_2.2.5 (1 samples, 0.76%) 625 | 626 | 627 | 628 | _dl_map_object (4 samples, 3.05%) 629 | _dl.. 630 | 631 | 632 | [unknown] (121 samples, 92.37%) 633 | [unknown] 634 | 635 | 636 | mmap64 (1 samples, 0.76%) 637 | 638 | 639 | 640 | x86_pmu_enable (4 samples, 3.05%) 641 | x86.. 642 | 643 | 644 | sys_read (4 samples, 3.05%) 645 | sys.. 646 | 647 | 648 | setup_new_exec (4 samples, 3.05%) 649 | set.. 650 | 651 | 652 | setup_arg_pages (1 samples, 0.76%) 653 | 654 | 655 | 656 | swift_release_n (1 samples, 0.76%) 657 | 658 | 659 | 660 | move_page_tables (1 samples, 0.76%) 661 | 662 | 663 | 664 | do_syscall_64 (4 samples, 3.05%) 665 | do_.. 
666 | 667 | 668 | merged generic specialization <Swift.Int> of Swift.ContiguousArray._copyToNewBuffer(oldCount: Swift.Int) -> () (5 samples, 3.82%) 669 | merg.. 670 | 671 | 672 | native_write_msr (3 samples, 2.29%) 673 | n.. 674 | 675 | 676 | 677 | --------------------------------------------------------------------------------