├── .dockerignore ├── .github ├── dependabot.yml └── workflows │ ├── crds.yml │ └── rust.yml ├── .gitignore ├── Cargo.lock ├── Cargo.toml ├── DESIGN.md ├── LICENSE ├── README.md ├── client ├── Cargo.toml ├── Dockerfile └── src │ ├── http_api.rs │ ├── lib.rs │ ├── main.rs │ └── watch_ports.rs ├── controller ├── Cargo.toml ├── Dockerfile ├── core │ ├── Cargo.toml │ └── src │ │ ├── identity_match.rs │ │ ├── lib.rs │ │ └── network_match.rs ├── grpc │ ├── Cargo.toml │ └── src │ │ └── lib.rs ├── k8s │ ├── api │ │ ├── Cargo.toml │ │ └── src │ │ │ ├── labels.rs │ │ │ ├── lib.rs │ │ │ ├── polixy │ │ │ ├── authz.rs │ │ │ ├── mod.rs │ │ │ └── server.rs │ │ │ └── watch.rs │ └── index │ │ ├── Cargo.toml │ │ └── src │ │ ├── authz.rs │ │ ├── default_allow.rs │ │ ├── lib.rs │ │ ├── lookup.rs │ │ ├── namespace.rs │ │ ├── node.rs │ │ ├── pod.rs │ │ ├── server.rs │ │ └── tests.rs └── src │ ├── admin.rs │ ├── lib.rs │ └── main.rs ├── default.nix ├── deny.toml ├── img └── resources.png ├── k8s ├── client-access.yml ├── client.yml ├── controller │ ├── deploy.yml │ ├── kubeconfig.sh │ └── sa.yml ├── crds │ ├── authz.yml │ └── server.yml ├── emojivoto │ ├── emoji-server.yml │ ├── emoji.yml │ ├── ns.yml │ ├── prom-server.yml │ ├── vote-bot.yml │ ├── voting-server.yml │ ├── voting.yml │ ├── web-server.yml │ └── web.yml └── linkerd │ ├── destination.yml │ ├── identity.yml │ ├── proxy-admin.yml │ ├── proxy-injector.yml │ └── sp-validator.yml ├── rust-toolchain └── shell.nix /.dockerignore: -------------------------------------------------------------------------------- 1 | **/Dockerfile* 2 | k8s 3 | img 4 | target 5 | *.nix 6 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: / 5 | schedule: 6 | interval: daily 7 | 8 | - package-ecosystem: github-actions 9 | directory: "/" 10 | schedule: 11 | interval: daily 12 | -------------------------------------------------------------------------------- /.github/workflows/crds.yml: -------------------------------------------------------------------------------- 1 | name: Custom Resource Definitions 2 | 3 | on: 4 | pull_request: {} 5 | push: 6 | branches: 7 | - "main" 8 | 9 | env: 10 | K3D_VERSION: v4.4.4 11 | KUBECTL_VERSION: v1.21.1 12 | 13 | jobs: 14 | test: 15 | name: CRD Tests 16 | permissions: 17 | contents: read 18 | timeout-minutes: 20 19 | runs-on: ubuntu-latest 20 | 21 | steps: 22 | - name: Checkout 23 | uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # v2.3.4 24 | 25 | - run: mkdir -p ./target/bin 26 | 27 | - name: Fetch kubectl ${{ env.KUBECTL_VERSION }} 28 | run: | 29 | # Fetch kubectl 30 | curl -vsL --retry 2 \ 31 | --output ./target/bin/kubectl \ 32 | "https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl" 33 | chmod 755 ./target/bin/kubectl 34 | 35 | - name: Fetch k3d ${{ env.K3D_VERSION }} 36 | run: | 37 | # Fetch k3d 38 | curl -vsL --retry 2 \ 39 | --output ./target/bin/k3d \ 40 | "https://github.com/rancher/k3d/releases/download/${K3D_VERSION}/k3d-linux-amd64" 41 | chmod 755 ./target/bin/k3d 42 | 43 | - name: Create cluster 44 | run: | 45 | # We really just need a k8s API server without any runtime facilities. 
        ./target/bin/k3d cluster create \
          --k3s-server-arg '--disable=coredns,servicelb,traefik,local-storage,metrics-server' \
          --no-hostip --no-lb

        ./target/bin/kubectl cluster-info
        ./target/bin/kubectl version

    - name: Apply CRDs
      run: ./target/bin/kubectl apply -f ./k8s/crds

    - name: Create ns/emojivoto
      run: ./target/bin/kubectl create ns emojivoto

    - name: Apply srv/prom
      run: ./target/bin/kubectl apply -f ./k8s/emojivoto/prom-server.yml

    - name: Apply srv/emoji-grpc
      run: ./target/bin/kubectl apply -f ./k8s/emojivoto/emoji-server.yml

    - name: Apply srv/voting-grpc
      run: ./target/bin/kubectl apply -f ./k8s/emojivoto/voting-server.yml

    - name: Apply srv/web-http
      run: ./target/bin/kubectl apply -f ./k8s/emojivoto/web-server.yml
--------------------------------------------------------------------------------
/.github/workflows/rust.yml:
--------------------------------------------------------------------------------
name: Rust PR

on:
  push:
    branches:
      - main
  pull_request: {}

jobs:
  fmt:
    timeout-minutes: 5
    runs-on: ubuntu-latest
    container:
      image: docker://rust:1.52.1-buster
    steps:
      - uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f
      - run: rustup component add rustfmt
      - run: cargo fmt -- --check

  audit:
    timeout-minutes: 5
    runs-on: ubuntu-latest
    strategy:
      matrix:
        checks:
          - advisories
          - bans licenses sources
    # Prevent sudden announcement of a new advisory from failing CI.
    continue-on-error: ${{ matrix.checks == 'advisories' }}
    steps:
      - uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f
      - uses: EmbarkStudios/cargo-deny-action@0ca727bbae7b7b578b9a5f98186caac35aa2a00d
        with:
          command: check ${{ matrix.checks }}

  clippy:
    timeout-minutes: 5
    runs-on: ubuntu-latest
    container:
      image: docker://rust:1.52.1-buster
    steps:
      - uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f
      - run: rustup component add clippy
      - run: cargo clippy --all-targets

  check:
    timeout-minutes: 20
    runs-on: ubuntu-latest
    container:
      image: docker://rust:1.52.1-buster
    steps:
      - uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f
      - run: cargo check

  test:
    timeout-minutes: 15
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f
      - run: cargo test
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
/target
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[workspace]
resolver = "2"
members = [
    "client",
    "controller",
    "controller/core",
    "controller/grpc",
    "controller/k8s/api",
    "controller/k8s/index"
]
--------------------------------------------------------------------------------
/DESIGN.md:
--------------------------------------------------------------------------------
# Linkerd policy exploration

Linkerd proxies inbound (server-side) communication to pods and is well-positioned to enforce
policies that place restrictions on the clients that may
access it.

All proxy-to-proxy communication is already protected by mutually-authenticated TLS, with identities
based on each pod's `ServiceAccount`. Currently, this identity is opportunistic, and operators have
little means to require its use or to limit access based on this identity.

This document proposes a mechanism for operators to configure inbound proxies to enforce
authorization for pod-to-pod communication.

## Design

### Goals

* Define a mechanism for server operators to authorize clients;
* Integrate with Linkerd's identity system;
* Opt-in -- not required, especially when getting started;
* Can be adopted incrementally;
* Leverage existing Kubernetes primitives/patterns;
* Identify reusable components for future server configuration;
* Keep the proxy-facing API Kubernetes-agnostic;
* Support interoperability with SMI `TrafficPolicy`;
* No measurable impact to HTTP request latencies; and
* Negligible impact to proxy memory usage.

### Non-goals

#### Cluster-wide policy

Cluster-level policy control should be implemented by operators that generate or restrict
application-level policy resources. Schemes for implementing these (e.g., OPA/Gatekeeper) are out of
scope of this document.

#### Policy based on HTTP metadata

While we shouldn't preclude future support for other types of protocol-aware policy, this initial
design intends only to address connection-level policy.

### Server-centric

Access policies need to be enforced on the server side in order to provide any reasonable guarantees
against malfeasant clients. As such, we need a means for an inbound proxy to discover policies for
its local servers (ports that the pod-local containers listen on).

While it may be attractive to use Kubernetes `Service`s for this, they are really not the right tool
for the job: Kubernetes services represent a traffic target _for clients_. A server instance,
however, has no way to correlate an inbound connection with the service the client targeted; and it
may receive traffic that isn't associated with a `Service` at all.

This points to the introduction of a new custom resource type that describes a *server*--matching a
port on a set of pods.

#### Dynamic Policy Discovery

Outbound proxies perform service discovery based on the target IP:Port of a connection. Inbound
proxies will similarly need to adapt to policies at runtime (i.e., without requiring that the proxy
be restarted).

Outbound proxies are lazy and dynamic, as we cannot require applications to document all endpoints
to which they may connect; but, on the inbound side, it's reasonable to expect operators to document
the ports on which an application accepts connections. In fact, this is almost always done in each
pod's spec.

This will allow proxies to discover policies at startup time and gate the pod's readiness on their
availability.

#### Protocol hinting

Linkerd 2.10 introduced the `config.linkerd.io/opaque-ports` annotation that configures server-side
ports to skip protocol detection.
With the introduction of a server descriptor, we have an
opportunity to extend this configuration even further by allowing operators to document the protocol
being proxied, avoiding detection overhead (and its related timeouts).

Clients can similarly discover these protocols via the Destination API.

### Authorizing clients

Clients fall into two broad categories:

1. Meshed clients communicating with mutually-authenticated TLS;
2. Un-meshed clients communicating without (mesh-aware) authentication.

#### Authenticated clients

Meshed clients automatically authenticate to servers via mTLS so that the client's identity is
available to the server. An operator may restrict access to all authenticated clients or a subset of
authenticated clients.

Authenticated clients may be matched by `ServiceAccount` or, more generally, Linkerd identity names.
DNS-like identity names are encoded into each proxy's certificate, and each certificate is created
using the pod's `ServiceAccount` in the form
`<serviceaccount>.<namespace>.serviceaccount.identity.linkerd.<identity-domain>`.

It's most natural to authorize authenticated clients by referring to service accounts directly;
however, we probably also want to support matching identity names as well. For instance, when
authorizing clients to connect to multi-cluster gateways, we cannot reference service accounts in
other clusters. Instead we might want to express matches like `*.<identity-domain>` to match all
clients in an identity domain or `*.<namespace>.serviceaccount.identity.linkerd.<identity-domain>` to match
all clients in a specific namespace.

#### Lifecycle probes

In Kubernetes, _kubelet_ is a process that runs on each node and is responsible for orchestrating a
pod's lifecycle: it executes and terminates a pod's containers and, more importantly for our needs,
it may issue networked probes to know when a container is _ready_ or _live_.

It is not feasible to configure kubelet to run within the mesh, so we can only identify this traffic
by its source IP. Kubelet initiates network probes from the first address on the node's pod
network--e.g., if the node's `podCIDR` is `10.0.1.0/24`, then the kubelet will initiate connections
from `10.0.1.1`. (See [this blog post on pod networking][pod-ips] for more information.)

If a policy were to block this communication, pods would not start properly. So we need to be
careful to allow this traffic by default to minimize pain. Furthermore, there's really no benefit to
disallowing communication from the kubelet--kubelet is necessarily a privileged application that
must be trusted by a pod.

#### Default behavior

We must not require that all servers are described, as this would dramatically complicate getting
started with Linkerd. To support incremental adoption of Linkerd in general, and of policies
specifically, we need to allow traffic by default.

As soon as a `Server` is described, however, we can require that clients be explicitly authorized
to communicate with the server.

But a default-allow policy isn't desirable from a security point of view. If an operator has taken
the time to document all servers in a cluster, they may not want a subsequent misconfiguration to
expose servers without authentication. So, we probably want to support a few different default
modes:

1. Allow unauthenticated from everywhere
2. Allow unauthenticated from within the cluster
3. Allow mesh-authenticated from everywhere
4. Allow mesh-authenticated from within the cluster
5. Deny

This default setting should be configurable at the control-plane level, or per-workload via the
`polixy.linkerd.io/default-allow` annotation. The proxy injector should copy these annotations from
namespaces onto each workload so the controller only needs to track workload annotations for
discovery.

## Proposal

### Resources

We propose introducing two new `CustomResourceDefinition`s to Linkerd:

![Policy resources](./img/resources.png "Policy resources")

#### [`Server`](k8s/crds/server.yml)

Each `Server` instance:

* Selects over pods by label
* Matches ports by name or value
* Optionally indicates how the proxy should detect the protocol of these streams

##### `proxyProtocol: unknown`

If no proxy protocol is set (or `unknown` is set explicitly), the proxy's (HTTP) protocol detection
is performed. This is the default behavior in current proxy versions.

##### `proxyProtocol: opaque`

Equivalent to setting the port in `config.linkerd.io/opaque-ports` -- indicates that the server
should not do any protocol detection (and neither should meshed clients).

##### `proxyProtocol: TLS`

Indicates that the server terminates TLS. The proxy may require that all connections include a TLS
ClientHello, and it should skip HTTP-level detection.

##### `proxyProtocol: HTTP/1 | HTTP/2 | gRPC`

Indicates that the server supports the referenced HTTP variant. gRPC is provided as a special case
of HTTP/2 to support future specialization.

##### Handling conflicts

It's possible for multiple `Server` instances to conflict by matching the same workloads + port,
much in the way that it's possible for multiple `Deployment` instances to match the same pods. This
behavior is undefined. Operators must not create conflicting servers.

It should be possible to detect this situation at `Server`-creation time--at least, we should be
able to detect overlapping label selectors for the same port. It may **not** be feasible to reliably
detect servers that match the same _port_, however, as named ports may only conflict with numbered
ports at pod-creation time. So, the validating webhook could potentially prevent the creation of
these pods, or we'll need to implement CLI checks that detect this situation.

#### [`ServerAuthorization`](k8s/crds/authz.yml)

Authorizes clients to access `Server`s.

* References servers in the same namespace by name or label selector.
* Scoped to source IP networks. If no networks are specified, the authorization applies to clients
  in all networks.
* Indicates whether connections may be unauthenticated (i.e. without mesh TLS); or
* Expresses mesh TLS requirements:
  * By referencing service accounts (in arbitrary namespaces); or
  * By matching identity strings (including globbed suffix matches); or
  * Not requiring client identities at all -- only relevant for the `identity` controller that must
    serve requests to clients that have not yet obtained an identity.
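
For illustration, a `Server` and a matching `ServerAuthorization` might look like the following.
This is a sketch: the field names are inferred from the descriptions above, and the authoritative
schemas are the CRDs in [k8s/crds/server.yml](k8s/crds/server.yml) and
[k8s/crds/authz.yml](k8s/crds/authz.yml).

```yaml
apiVersion: polixy.linkerd.io/v1alpha1
kind: Server
metadata:
  namespace: emojivoto
  name: emoji-grpc
spec:
  # Select the pods and the port that this server describes.
  podSelector:
    matchLabels:
      app.kubernetes.io/name: emoji
  port: grpc          # a named (or numbered) container port
  proxyProtocol: gRPC # document the protocol to skip detection
---
apiVersion: polixy.linkerd.io/v1alpha1
kind: ServerAuthorization
metadata:
  namespace: emojivoto
  name: emoji-grpc
spec:
  server:
    name: emoji-grpc
  # Authorize only meshed clients running as the `web` ServiceAccount.
  client:
    meshTLS:
      serviceAccounts:
        - name: web
```

Because the authorization must live in the `Server`'s namespace, the server's owners retain
control over who may access it.
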
### Overview

* A new _server policy_ controller is added to the control plane, responsible for serving a gRPC API
  to proxies for discovery and for validating resources as they are created (via
  `ValidatingAdmissionWebhook`).
* The proxy injector is modified to configure proxies with:
  * The location & identity of the API server;
  * A "workload coordinate", potentially reusing the destination controller's "context token", which
    encodes at least the namespace and pod name;
  * A comma-separated list of numeric container ports for the pod.
* The proxy does not permit connections for ports that are not documented in the pod spec.
* The proxy no longer forwards inbound connections on localhost. Instead, the discovered
  configuration indicates the IPs on which connections are permitted, and the proxy only forwards
  connections targeting these IPs. This may interfere with complicated networking schemes (e.g.
  Docker-in-Docker); but we're probably better off figuring out how to support these networking
  overlays in proxy-init, etc.
* Protocol detection is informed by discovery:
  * HTTP:
    * When a connection is authorized, requests are [annotated with headers](#headers).
    * When a connection is not authorized, HTTP responses are emitted with the status `403 Forbidden`.
  * gRPC:
    * When a connection is authorized, requests are [annotated with headers](#headers).
    * When a connection is not authorized, gRPC responses are emitted with a header
      `grpc-status: PERMISSION_DENIED`.
* Unauthenticated connections are _always_ permitted from the kubelet.

#### HTTP/gRPC headers

Proxies should surface informational headers to the application describing authorized clients for
Servers with a `proxyProtocol` value of `HTTP` or `gRPC`.

Use of these headers may be disabled by setting a server annotation:

```yaml
apiVersion: polixy.linkerd.io/v1alpha1
kind: Server
metadata:
  annotations:
    polixy.linkerd.io/http-informational-headers: disabled
```

##### `l5d-connection-secure: true | false`

The `l5d-connection-secure` header indicates whether the client connected to the server via meshed
TLS. When the value is `true`, the `l5d-client-id` header may also be set to indicate the client's
identity.

This header is always set by the proxy (when informational headers are not disabled).

##### `l5d-client-id: <client-identity>`

The `l5d-client-id` header is only set when the client has been authenticated via meshed TLS. Its
value is the client's identity, e.g. `default.default.serviceaccount.identity.linkerd.cluster.local`.

##### `forwarded: for=<client-ip>;by=<proxy-ip>`

[RFC 7239](https://tools.ietf.org/html/rfc7239) standardizes use of the `forwarded` header to
replace `x-forwarded-*` headers. In order to inform the application of the client's IP address, the
proxy appends a `forwarded` entry to this header on each request.

#### Identity Controller Bootstrapping

The above scheme poses a "*Wyld Stallyns* problem" for the identity controller: the identity
controller needs to discover inbound policy in order to start issuing certificates, but the
destination controller cannot accept connections until it obtains a certificate from the identity
controller.
We want the identity controller to remain in a distinct deployment, separate from the other
controller containers, as it requires access to signing secrets that these other processes should
not be able to access.

We'll need to figure out a way for the identity controller to start up without requiring access to
the destination controller. One approach could be to serve a specialized version of the API
endpoints--only for the identity controller's proxy--from the identity controller. This is only
feasible because the identity controller's proxy has very limited discovery needs:

* It only initiates outbound connections to the Kubernetes API (on 443).
* It needs to discover policy for its local ports (identity gRPC + admin, proxy ports).
* It attempts to discover a service profile for inbound gRPC requests.

#### Control plane policies

The core control plane should ship with a set of default policies:

* The destination controller requires mutually authenticated requests.
  [[k8s/linkerd/destination.yml](./k8s/linkerd/destination.yml)]
* The identity controller requires secured connections that may not be
  authenticated (because clients have not yet received identity).
  [[k8s/linkerd/identity.yml](./k8s/linkerd/identity.yml)]
* Webhook connections must use TLS.
  [[k8s/linkerd/proxy-injector.yml](./k8s/linkerd/proxy-injector.yml)]
* Admin server connections must be authenticated or originate from the
  node-local network. [[k8s/linkerd/proxy-admin.yml](./k8s/linkerd/proxy-admin.yml)]

### Why not `access.smi-spec.org`?

There are a few things that don't... mesh ;)

#### Ports

SMI isn't port-aware. Our `Server` abstraction gives us a useful, extensible building block that
allows us to attach configuration to pod-ports. In the same way that we can attach authorizations to
a `Server`, we'll be able to extend the server API to support, for instance, HTTP routes, gRPC
services, etc.

#### Destinations

SMI binds policy to destinations as follows:

```yaml
kind: TrafficTarget
metadata:
  name: target
  namespace: default
spec:
  destination:
    kind: ServiceAccount
    name: service-a
    namespace: default
  ...
```

This is a bit awkward for a few reasons:

* These targets need not exist in the same namespace as the policies, so it appears as if policies
  can be created in unrelated namespaces (by unrelated owners), and it's not clear how these
  policies should be applied.
* While it makes sense for us to bind clients to `ServiceAccount`s--this is how we authenticate pods
  to the identity service--it's unnatural and unnecessary to do this for servers. All pods that
  share a service account need not have the same access patterns. For instance, it's common for all
  pods in a namespace to share a common (`default`) `ServiceAccount`, though the pods serve varying
  APIs to a multitude of clients.

We _really_ want to bind policies to pod-and-port pairs (as described by our `Server` resource). And
we _really_ want all authorizations to _only_ be defined in the same namespace as the server.
It makes no sense to support (inbound) policies defined in other namespaces--only a service's owners
can define its access policies.

## Open questions

* What should we call the API group? `polixy` is a placeholder (policy + olix0r). We should change
  this to something a bit more concrete. This name should probably match the controller's name.
* How do we provide interop with SMI? I.e. something that reads TrafficTarget resources and
  generates Linkerd resources (and vice-versa?). It will probably be clunky, but it seems doable.
* How will we support HTTP routes & gRPC services? How does authorization work for these?
* What Linkerd CLI tools do we need to interact with policies?
* Do we need `check`s for policies?
* How are policies reflected in metrics/tap?
* How do we restrict requests to the controller? I.e. API clients should not be able to request
  policies for servers they do not serve; but we still may need to support non-proxy clients for
  tooling.
* How do policies interact with the multi-cluster gateway?
* How do policies interact with tap servers?
* How do policies interact with admin servers?
* Do we want to stick with a [controller written in Rust](./src)? Or would it be better to
  re-implement this with `client-go`?

## Implementation

### Control-plane

#### Injector

* Set the `LINKERD2_PROXY_INBOUND_CONTEXT` env with a namespace and pod name (similarly to
  `LINKERD2_PROXY_DESTINATION_CONTEXT`).
* Set the `LINKERD2_PROXY_INBOUND_PORTS` env with a comma-separated list of all ports documented on
  the pod, including proxy ports.
* Set the `LINKERD2_PROXY_INBOUND_IPS` env to a comma-separated list of all podIPs.
* Set the `inbound.linkerd.io/default-allow` annotation when it is not set, either from the
  namespace or the cluster-wide default.
* Set the `LINKERD2_PROXY_INBOUND_DEFAULT_ALLOW` env with the same value.

(A sketch of the resulting proxy configuration appears at the end of this section.)

### Proxy

1. Modify proxy initialization to load per-connection policies. This should initially encapsulate
   the opaque-ports configuration.
2. Use the `INBOUND_IPS` setting to restrict which target IPs are permitted on inbound connections.
   * Stop rewriting the inbound target IP address to 127.0.0.1.
3. Modify controller clients to be cached like outbound clients. A proxy may or may not be
   configured independently from the inbound controller--especially the identity controller, which
   needs to be able to discover inbound policy locally before a destination pod is available. The
   controller should be a `NewService` that accepts a target that specifies the target addr/TLS.

### Controller

* Extract `linkerd-drain` into a distinct, versioned [crate](https://crates.io/crates/drain) so it
  can be used by the controller without git dependencies.
* Add indexer metrics.
* Support a mode where watches are namespace-scoped instead of cluster-scoped, so that the identity
  controller's instance need not cache the whole cluster's information.
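
As a concrete sketch of the injector changes listed above, an injected proxy container might carry
configuration like the following. All values here are hypothetical, and the context-token format
and the default-allow mode names are assumptions rather than a settled interface:

```yaml
# Hypothetical env for an injected proxy container; values are for illustration only.
env:
  - name: LINKERD2_PROXY_INBOUND_CONTEXT
    value: "ns:emojivoto;pod:web-6d7b85f4b9-xk2ph" # assumed context-token format
  - name: LINKERD2_PROXY_INBOUND_PORTS
    value: "8080,9090,4191"                        # documented pod ports + proxy ports
  - name: LINKERD2_PROXY_INBOUND_IPS
    value: "10.42.0.17"                            # all podIPs
  - name: LINKERD2_PROXY_INBOUND_DEFAULT_ALLOW
    value: "cluster-unauthenticated"               # assumed name for default mode 2
```
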
400 | 401 | ## Future work 402 | 403 | * HTTP route authorization 404 | * Egress policies 405 | * View isolation in the destination service 406 | 407 | 408 | [pod-ips]: https://web.archive.org/web/20201211005235/https://ronaknathani.com/blog/2020/08/how-a-kubernetes-pod-gets-an-ip-address/ 409 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # polixy 2 | 3 | A prototype of policy for Linkerd. 4 | 5 | See [DESIGN.md](./DESIGN.md) for details. 
## Requires

* A Kubernetes 1.16+ cluster, available via kubectl;
* [Linkerd 2.10+](https://linkerd.io)--so that workloads are labeled appropriately.

## Running

### Install `polixy.linkerd.io` CRDs

```sh
:; kubectl apply -f ./k8s/crds
```

### Run the controller locally

We create a new `polixy` namespace with a `controller` ServiceAccount that has limited cluster
access, and extract a kubeconfig to the local filesystem to use with the controller:

```sh
:; kubectl apply -f ./k8s/controller/sa.yml
:; KUBECONFIG=$(./k8s/controller/kubeconfig.sh) cargo run -p polixy-controller
```

### Install the example application (with policies)

```sh
:; kubectl apply -f ./k8s/emojivoto/ns.yml && kubectl apply -f ./k8s/emojivoto
```

### Run a client

```sh
:; pod=$(kubectl get -n emojivoto po -l app.kubernetes.io/name=web -o 'jsonpath={.items[*].metadata.name}')
:; cargo run -p polixy-client -- get -n emojivoto $pod 8080
```

```sh
:; pod=$(kubectl get -n emojivoto po -l app.kubernetes.io/name=voting -o 'jsonpath={.items[*].metadata.name}')
:; cargo run -p polixy-client -- get -n emojivoto $pod 8080
```

```sh
:; pod=$(kubectl get -n emojivoto po -l app.kubernetes.io/name=voting -o 'jsonpath={.items[*].metadata.name}')
:; cargo run -p polixy-client -- watch -n emojivoto $pod 8801
```
--------------------------------------------------------------------------------
/client/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "polixy-client"
version = "0.1.0"
authors = ["Oliver Gould <ver@buoyant.io>"]
edition = "2018"
license = "Apache-2.0"
publish = false

[features]
rustfmt = ["linkerd2-proxy-api/rustfmt"]

[dependencies]
anyhow = "1"
bytes = "1"
futures = "0.3"
hyper = { version = "0.14", features = ["client", "http1", "runtime"] }
ipnet = "2"
linkerd2-proxy-api = { git = "https://github.com/linkerd/linkerd2-proxy-api", branch = "ver/inbound", features = ["inbound", "client"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
serde_yaml = "0.8"
structopt = "0.3"
tonic = { version = "0.5", default-features = false, features = ["transport"] }
tokio = { version = "1", features = ["rt", "macros", "parking_lot", "signal", "sync"] }
tracing = { version = "0.1", features = ["attributes"] }
tracing-subscriber = "0.2"

[dev-dependencies]
quickcheck = "1"
--------------------------------------------------------------------------------
/client/Dockerfile:
--------------------------------------------------------------------------------
FROM rust:1.52.1 as build
WORKDIR /polixy
ADD . /polixy
RUN cargo build -p polixy-client --release

FROM gcr.io/distroless/cc:nonroot
COPY --from=build /polixy/target/release/polixy-client /
ENTRYPOINT ["/polixy-client"]
--------------------------------------------------------------------------------
/client/src/http_api.rs:
--------------------------------------------------------------------------------
use crate::Inbound;
use anyhow::Result;
use bytes::Bytes;
use hyper::{Body, Request, Response};
use serde::Deserialize;
use std::{collections::HashMap, net::IpAddr};
use tokio::sync::watch;

#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct Spec {
    server_port: u16,

    #[serde(rename = "clientIP")]
    client_ip: IpAddr,

    tls: Option<TlsSpec>,
}

#[derive(Clone, Debug, Deserialize)]
struct TlsSpec {
    #[serde(rename = "clientID")]
    client_id: Option<String>,
}

pub async fn serve(
    ports: &HashMap<u16, watch::Receiver<Inbound>>,
    req: Request<Body>,
) -> Result<Response<Body>> {
    if req.method() != hyper::Method::POST {
        return Ok(Response::builder()
            .status(hyper::StatusCode::METHOD_NOT_ALLOWED)
            .body(Body::default())
            .unwrap());
    }

    match req
        .headers()
        .get(hyper::header::CONTENT_TYPE)
        .and_then(|v| v.to_str().ok())
    {
        Some("application/json") => {
            let Spec {
                server_port,
                client_ip,
                tls,
            } = {
                let body = hyper::body::to_bytes(req.into_body()).await?;
                serde_json::from_slice(body.as_ref())?
            };

            match ports.get(&server_port) {
                Some(rx) => {
                    let inbound = rx.borrow();
                    let labels = match tls {
                        Some(TlsSpec { client_id }) => {
                            inbound.check_tls(client_ip, client_id.as_deref())
                        }
                        None => inbound.check_non_tls(client_ip),
                    };

                    let rsp = serde_json::json!({
                        "authorization": labels,
                    });
                    let bytes = serde_json::to_vec_pretty(&rsp).unwrap();

                    Ok(Response::builder()
                        .status(hyper::StatusCode::OK)
                        .header(hyper::header::CONTENT_TYPE, "text/plain")
                        .body(Bytes::copy_from_slice(bytes.as_slice()).into())
                        .unwrap())
                }

                None => {
                    let msg = format!(
                        "not in known ports: {:?}\n",
                        ports
                            .keys()
                            .map(|p| p.to_string())
                            .collect::<Vec<_>>()
                            .join(",")
                    );
                    Ok(Response::builder()
                        .status(hyper::StatusCode::NOT_FOUND)
                        .header(hyper::header::CONTENT_TYPE, "text/plain")
                        .body(Bytes::copy_from_slice(msg.as_bytes()).into())
                        .unwrap())
                }
            }
        }
        Some(ct) => {
            let msg = format!("unsupported content-type: {}", ct);
            Ok(Response::builder()
                .status(hyper::StatusCode::BAD_REQUEST)
                .header(hyper::header::CONTENT_TYPE, "text/plain")
                .body(Bytes::copy_from_slice(msg.as_bytes()).into())
                .unwrap())
        }
        None => Ok(Response::builder()
            .status(hyper::StatusCode::BAD_REQUEST)
            .header(hyper::header::CONTENT_TYPE, "text/plain")
            .body(Bytes::from_static(b"content-type must be set\n").into())
            .unwrap()),
    }
}
--------------------------------------------------------------------------------
/client/src/lib.rs:
--------------------------------------------------------------------------------
#![deny(warnings, rust_2018_idioms)]
#![forbid(unsafe_code)]

pub mod http_api;
mod watch_ports;

pub use self::watch_ports::{watch_ports, PortWatch};
use anyhow::{anyhow, bail, Context, Error, Result};
use futures::prelude::*;
use ipnet::IpNet;
use linkerd2_proxy_api::inbound::{
    self as proto, inbound_server_discovery_client::InboundServerDiscoveryClient,
};
use std::{
    collections::{HashMap, HashSet},
    convert::TryInto,
    net::IpAddr,
};
use tokio::time;
use tracing::{instrument, trace};

#[derive(Clone, Debug)]
pub struct Client {
    client: InboundServerDiscoveryClient<tonic::transport::Channel>,
}

#[derive(Clone, Debug)]
pub struct Inbound {
    pub authorizations: Vec<Authz>,
    pub labels: HashMap<String, String>,
    pub protocol: Protocol,
}

#[derive(Copy, Clone, Debug)]
pub enum Protocol {
    Detect { timeout: time::Duration },
    Http1,
    Http2,
    Grpc,
    Opaque,
    Tls,
}

#[derive(Clone, Debug)]
pub struct Authz {
    networks: Vec<Network>,
    authn: Authn,
    labels: HashMap<String, String>,
}

#[derive(Clone, Debug, Default)]
pub struct Network {
    net: IpNet,
    except: Vec<IpNet>,
}

#[derive(Clone, Debug)]
pub enum Authn {
    Unauthenticated,
    TlsUnauthenticated,
    TlsAuthenticated {
        identities: HashSet<String>,
        suffixes: Vec<Suffix>,
    },
}

#[derive(Clone, Debug)]
pub struct Suffix {
    ends_with: String,
}

// === impl Client ===

impl Client {
    pub async fn connect<D>(dst: D) -> Result<Self>
    where
        D: std::convert::TryInto<tonic::transport::Endpoint>,
        D::Error: Into<Box<dyn std::error::Error + Send + Sync + 'static>>,
    {
        let client = InboundServerDiscoveryClient::connect(dst).await?;
        Ok(Client { client })
    }

    #[instrument(skip(self))]
    pub async fn get_port(&mut self, workload: String, port: u16) -> Result<Inbound> {
        let req = tonic::Request::new(proto::PortSpec {
            workload,
            port: port.into(),
        });

        let proto = self.client.get_port(req).await?.into_inner();
        trace!(?proto);
        proto.try_into()
    }

    #[instrument(skip(self))]
    pub async fn watch_port(
        &mut self,
        workload: String,
        port: u16,
    ) -> Result<impl Stream<Item = Result<Inbound>>> {
        let req = tonic::Request::new(proto::PortSpec {
            workload,
            port: port.into(),
        });

        let rsp = self.client.watch_port(req).await?;

        let updates = rsp.into_inner().map_err(Into::into).and_then(|proto| {
            trace!(?proto);
            future::ready(proto.try_into())
        });

        Ok(updates)
    }
}

// === impl Inbound ===

impl Inbound {
    #[instrument(skip(self))]
    pub fn check_non_tls(&self, client_ip: IpAddr) -> Option<&HashMap<String, String>> {
        trace!(authorizations = %self.authorizations.len());
        for Authz {
            networks,
            authn,
            labels,
        } in self.authorizations.iter()
        {
            trace!(?authn);
            trace!(?networks);
            trace!(?labels);
            if matches!(authn, Authn::Unauthenticated)
                && networks.iter().any(|net| net.contains(&client_ip))
            {
                trace!("Match found");
                return Some(labels);
            }
        }

        trace!("No match found");
        None
    }

    #[instrument(skip(self))]
    pub fn check_tls(
        &self,
        client_ip: IpAddr,
        id: Option<&str>,
    ) -> Option<&HashMap<String, String>> {
        trace!(authorizations = %self.authorizations.len());
        for Authz {
            networks,
            authn,
            labels,
        } in self.authorizations.iter()
        {
            trace!(?networks);
            if networks.iter().any(|net| net.contains(&client_ip)) {
                trace!("Matches network");
                trace!(?authn);
                match authn {
                    Authn::Unauthenticated | Authn::TlsUnauthenticated => {
                        trace!("Match found");
                        trace!(?labels);
                        return Some(labels);
                    }
                    Authn::TlsAuthenticated {
                        identities,
                        suffixes,
                    } => {
                        if let Some(id) = id {
                            trace!(identities = %identities.iter().map(|i| i.to_string()).collect::<Vec<_>>().join(","));
                            if identities.contains(id)
                                || suffixes.iter().any(|sfx| sfx.contains(id))
                            {
                                trace!("Match found");
                                trace!(?labels);
                                return Some(labels);
                            }
                        }
                    }
                }
            }
        }

        trace!("No match found");
        None
    }
}

impl std::convert::TryFrom<proto::Server> for Inbound {
    type Error = Error;

    fn try_from(proto: proto::Server) -> Result<Self> {
        let protocol = match proto.protocol {
            Some(proto::ProxyProtocol { kind: Some(k) }) => match k {
                proto::proxy_protocol::Kind::Detect(proto::proxy_protocol::Detect { timeout }) => {
                    Protocol::Detect {
                        timeout: match timeout {
                            Some(t) => t
                                .try_into()
                                .map_err(|t| anyhow!("negative detect timeout: {:?}", t))?,
                            None => bail!("protocol missing detect timeout"),
                        },
                    }
                }
                proto::proxy_protocol::Kind::Http1(_) => Protocol::Http1,
                proto::proxy_protocol::Kind::Http2(_) => Protocol::Http2,
                proto::proxy_protocol::Kind::Grpc(_) => Protocol::Grpc,
                proto::proxy_protocol::Kind::Opaque(_) => Protocol::Opaque,
                proto::proxy_protocol::Kind::Tls(_) => Protocol::Tls,
            },
            _ => bail!("proxy protocol missing"),
        };

        let authorizations = proto
            .authorizations
            .into_iter()
            .map(
                |proto::Authz {
                     labels,
                     authentication,
                     networks,
                 }| {
                    if networks.is_empty() {
                        bail!("networks missing");
                    }
                    let networks = networks
                        .into_iter()
                        .map(|proto::Network { net, except }| {
                            let net = net
                                .ok_or_else(|| anyhow!("network missing"))?
                                .try_into()
                                .context("invalid network")?;
                            let except = except
                                .into_iter()
                                .map(|net| net.try_into().context("invalid network"))
                                .collect::<Result<Vec<IpNet>>>()?;
                            Ok(Network { net, except })
                        })
                        .collect::<Result<Vec<Network>>>()?;

                    let authn = match authentication.and_then(|proto::Authn { permit }| permit) {
                        Some(proto::authn::Permit::Unauthenticated(_)) => Authn::Unauthenticated,
                        Some(proto::authn::Permit::MeshTls(proto::authn::PermitMeshTls {
                            clients,
                        })) => match clients {
                            Some(proto::authn::permit_mesh_tls::Clients::Unauthenticated(_)) => {
                                Authn::TlsUnauthenticated
                            }
                            Some(proto::authn::permit_mesh_tls::Clients::Identities(
                                proto::authn::permit_mesh_tls::PermitClientIdentities {
                                    identities,
                                    suffixes,
                                },
                            )) => Authn::TlsAuthenticated {
                                identities: identities
                                    .into_iter()
                                    .map(|proto::Identity { name }| name)
                                    .collect(),
                                suffixes: suffixes
                                    .into_iter()
                                    .map(|proto::IdentitySuffix { parts }| Suffix::from(parts))
                                    .collect(),
                            },
                            None => bail!("no clients permitted"),
                        },
                        authn => bail!("no authentication provided: {:?}", authn),
                    };

                    Ok(Authz {
                        networks,
                        authn,
                        labels,
                    })
                },
            )
            .collect::<Result<Vec<_>>>()?;

        Ok(Inbound {
            labels: proto.labels,
            authorizations,
            protocol,
        })
    }
}

// === impl Network ===

impl Network {
    pub fn contains(&self, addr: &IpAddr) -> bool {
        self.net.contains(addr) && !self.except.iter().any(|net| net.contains(addr))
    }
}

// === impl Suffix ===

impl From<Vec<String>> for Suffix {
    fn from(parts: Vec<String>) -> Self {
        let ends_with = if parts.is_empty() {
            "".to_string()
        } else {
            format!(".{}", parts.join("."))
        };
        Suffix { ends_with }
    }
}

impl Suffix {
    pub fn contains(&self, name: &str) -> bool {
        name.ends_with(&self.ends_with)
    }
}

#[cfg(test)]
mod network_tests {
    use super::Network;
    use ipnet::{IpNet, Ipv4Net, Ipv6Net};
    use quickcheck::{quickcheck, TestResult};
    use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};

    quickcheck!
    {
        fn contains_v4(addr: Ipv4Addr, exclude: Option<Ipv4Addr>) -> TestResult {
            let net = Network {
                net: Ipv4Net::default().into(),
                except: exclude.into_iter().map(|a| IpNet::from(IpAddr::V4(a))).collect(),
            };

            if let Some(e) = exclude {
                if net.contains(&e.into()) {
                    return TestResult::failed();
                }
                if addr == e {
                    return TestResult::passed();
                }
            }
            TestResult::from_bool(net.contains(&addr.into()))
        }

        fn contains_v6(addr: Ipv6Addr, exclude: Option<Ipv6Addr>) -> TestResult {
            let net = Network {
                net: Ipv6Net::default().into(),
                except: exclude.into_iter().map(|a| IpNet::from(IpAddr::V6(a))).collect(),
            };

            if let Some(e) = exclude {
                if net.contains(&e.into()) {
                    return TestResult::failed();
                }
                if addr == e {
                    return TestResult::passed();
                }
            }
            TestResult::from_bool(net.contains(&addr.into()))
        }
    }
}
--------------------------------------------------------------------------------
/client/src/main.rs:
--------------------------------------------------------------------------------
#![deny(warnings, rust_2018_idioms)]
#![forbid(unsafe_code)]

use anyhow::{bail, Result};
use futures::prelude::*;
use std::{collections::HashMap, net::SocketAddr, sync::Arc};
use structopt::StructOpt;
use tracing::info;

#[derive(Debug, StructOpt)]
#[structopt(name = "polixy", about = "A policy resource prototype")]
struct Args {
    #[structopt(long, env, default_value = "http://127.0.0.1:8090")]
    grpc_addr: String,

    #[structopt(subcommand)]
    command: Command,
}

#[derive(Debug, StructOpt)]
enum Command {
    Watch {
        #[structopt(short, long, default_value = "default")]
        namespace: String,
        pod: String,
        port: u16,
    },
    Get {
        #[structopt(short, long, default_value = "default")]
        namespace: String,
        pod: String,
        port: u16,
    },
    HttpApi {
        #[structopt(long, env, default_value = "127.0.0.1:0")]
        listen_addr: SocketAddr,

        #[structopt(env, short, long, default_value = "default")]
        namespace: String,

        #[structopt(env = "POD")]
        pod: String,

        #[structopt(env = "PORTS")]
        ports: Vec<u16>,
    },
}

#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();

    let Args { grpc_addr, command } = Args::from_args();

    let mut client = polixy_client::Client::connect(grpc_addr).await?;

    match command {
        Command::Watch {
            namespace,
            pod,
            port,
        } => {
            let workload = format!("{}:{}", namespace, pod);
            let mut updates = client.watch_port(workload, port).await?;
            while let Some(res) = updates.next().await {
                match res {
                    Ok(config) => println!("{:#?}", config),
                    Err(error) => eprintln!("Update failed: {}", error),
                }
            }
            eprintln!("Stream closed");
            Ok(())
        }

        Command::Get {
            namespace,
            pod,
            port,
        } => {
            let workload = format!("{}:{}", namespace, pod);
            let server = client.get_port(workload, port).await?;
            println!("{:#?}", server);
            Ok(())
        }

        Command::HttpApi {
            listen_addr,
            namespace,
            pod,
            ports,
        } => {
            if ports.is_empty() {
                bail!("no ports specified with ns={} and pod={}", namespace, pod);
            }

            let workload =
format!("{}:{}", namespace, pod); 97 | 98 | let watches = polixy_client::watch_ports(client, workload, ports) 99 | .await 100 | .expect("Failed to watch ports"); 101 | 102 | let ports = Arc::new( 103 | watches 104 | .iter() 105 | .map(|(p, w)| (*p, w.rx.clone())) 106 | .collect::>(), 107 | ); 108 | 109 | let server = hyper::server::Server::bind(&listen_addr).serve( 110 | hyper::service::make_service_fn(move |_conn| { 111 | let ports = ports.clone(); 112 | future::ok::<_, hyper::Error>(hyper::service::service_fn( 113 | move |req: hyper::Request| { 114 | let ports = ports.clone(); 115 | async move { polixy_client::http_api::serve(ports.as_ref(), req).await } 116 | }, 117 | )) 118 | }), 119 | ); 120 | let addr = server.local_addr(); 121 | info!(%addr, "Listening"); 122 | tokio::select! { 123 | _ = tokio::signal::ctrl_c() => {} 124 | _ = server => {} 125 | } 126 | Ok(()) 127 | } 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /client/src/watch_ports.rs: -------------------------------------------------------------------------------- 1 | use crate::{Client, Inbound}; 2 | use anyhow::{Error, Result}; 3 | use futures::{future, prelude::*}; 4 | use std::collections::HashMap; 5 | use tokio::{sync::watch, time}; 6 | 7 | #[derive(Debug)] 8 | pub struct PortWatch { 9 | pub rx: watch::Receiver, 10 | pub task: tokio::task::JoinHandle>, 11 | } 12 | 13 | pub async fn watch_ports( 14 | client: Client, 15 | workload: String, 16 | ports: Vec, 17 | ) -> Result> { 18 | let futures = ports.into_iter().map(move |port| { 19 | watch_port(client.clone(), workload.clone(), port).map_ok(move |pw| (port, pw)) 20 | }); 21 | let watches = future::try_join_all(futures).await?; 22 | Ok(watches.into_iter().collect::>()) 23 | } 24 | 25 | async fn watch_port(mut client: Client, workload: String, port: u16) -> Result { 26 | let (inbound, mut updates) = start_watch(&mut client, workload.clone(), port).await?; 27 | let (tx, rx) = watch::channel(inbound); 28 | 29 | let task = tokio::spawn(async move { 30 | loop { 31 | let res = tokio::select! { 32 | _ = tx.closed() => { 33 | return Ok(()); 34 | } 35 | res = updates.try_next() => res, 36 | }; 37 | 38 | match res { 39 | Ok(Some(inbound)) => { 40 | let _ = tx.send(inbound); 41 | } 42 | 43 | Ok(None) => { 44 | let (inbound, stream) = tokio::select! { 45 | res = start_watch(&mut client, workload.clone(), port) => res?, 46 | _ = tx.closed() => { 47 | return Ok(()); 48 | } 49 | }; 50 | 51 | let _ = tx.send(inbound); 52 | updates = stream; 53 | } 54 | 55 | Err(error) => { 56 | tracing::debug!(%error); 57 | let (inbound, stream) = tokio::select! { 58 | res = start_watch(&mut client, workload.clone(), port) => res?, 59 | _ = tx.closed() => { 60 | return Ok(()); 61 | } 62 | }; 63 | 64 | let _ = tx.send(inbound); 65 | updates = stream; 66 | } 67 | } 68 | } 69 | }); 70 | 71 | Ok(PortWatch { rx, task }) 72 | } 73 | 74 | async fn start_watch( 75 | client: &mut Client, 76 | workload: String, 77 | port: u16, 78 | ) -> Result<(Inbound, impl Stream>)> { 79 | loop { 80 | match client.watch_port(workload.clone(), port).await { 81 | Ok(mut updates) => match updates.try_next().await { 82 | Ok(Some(inbound)) => return Ok((inbound, updates)), 83 | Ok(None) => {} 84 | Err(error) => recover(error).await?, 85 | }, 86 | Err(error) => recover(error).await?, 87 | } 88 | } 89 | } 90 | 91 | async fn recover(error: Error) -> Result<()> { 92 | // Check unrecoverable errors. 
For now, we assume that InvalidArgument means we're querying
93 |     // about a workload or port that doesn't exist.
94 |     if let Some(status) = error.downcast_ref::<tonic::Status>() {
95 |         if let tonic::Code::InvalidArgument = status.code() {
96 |             return Err(error);
97 |         }
98 |     }
99 | 
100 |     // TODO exponential back-off
101 |     tracing::debug!(%error, "Recovering");
102 |     time::sleep(time::Duration::from_secs(1)).await;
103 |     Ok(())
104 | }
105 | 
--------------------------------------------------------------------------------
/controller/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "polixy-controller"
3 | version = "0.1.0"
4 | authors = ["Oliver Gould "]
5 | edition = "2018"
6 | license = "Apache-2.0"
7 | publish = false
8 | 
9 | [dependencies]
10 | anyhow = "1"
11 | drain = "0.1"
12 | futures = "0.3"
13 | hyper = { version = "0.14", features = ["http1", "http2", "runtime", "server"] }
14 | kube = { version = "0.58.1", default-features = false, features = ["client", "derive", "native-tls"] }
15 | polixy-controller-core = { path = "./core" }
16 | polixy-controller-grpc = { path = "./grpc" }
17 | polixy-controller-k8s-index = { path = "./k8s/index" }
18 | structopt = "0.3"
19 | tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros", "parking_lot", "signal", "sync"] }
20 | tracing = "0.1"
21 | tracing-subscriber = "0.2"
22 | 
--------------------------------------------------------------------------------
/controller/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM rust:1.52.1 as build
2 | WORKDIR /polixy
3 | ADD . /polixy
4 | RUN cargo build -p polixy-controller --release
5 | 
6 | FROM gcr.io/distroless/cc:nonroot
7 | COPY --from=build /polixy/target/release/polixy-controller /
8 | ENTRYPOINT ["/polixy-controller"]
9 | 
--------------------------------------------------------------------------------
/controller/core/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "polixy-controller-core"
3 | version = "0.1.0"
4 | edition = "2018"
5 | license = "Apache-2.0"
6 | publish = false
7 | 
8 | [dependencies]
9 | anyhow = "1"
10 | async-trait = "0.1"
11 | futures = { version = "0.3", default-features = false, features = ["std"] }
12 | ipnet = "2"
13 | 
--------------------------------------------------------------------------------
/controller/core/src/identity_match.rs:
--------------------------------------------------------------------------------
1 | use std::fmt;
2 | 
3 | /// Matches a client's mesh identity.
4 | #[derive(Clone, Debug, PartialEq, Eq, Hash)]
5 | pub enum IdentityMatch {
6 |     /// An exact match.
7 |     Name(String),
8 | 
9 |     /// A suffix match.
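    ///
    /// (Illustrative: per the `Display` impl below, `Suffix(vec!["cluster".into(), "local".into()])`
    /// renders as `*.cluster.local` and matches any identity under that domain.)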
10 |     Suffix(Vec<String>),
11 | }
12 | 
13 | // === impl IdentityMatch ===
14 | 
15 | impl fmt::Display for IdentityMatch {
16 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
17 |         match self {
18 |             Self::Name(name) => name.fmt(f),
19 |             Self::Suffix(suffix) => {
20 |                 write!(f, "*")?;
21 |                 for part in suffix.iter() {
22 |                     write!(f, ".{}", part)?;
23 |                 }
24 |                 Ok(())
25 |             }
26 |         }
27 |     }
28 | }
29 | 
--------------------------------------------------------------------------------
/controller/core/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![deny(warnings, rust_2018_idioms)]
2 | #![forbid(unsafe_code)]
3 | 
4 | mod identity_match;
5 | mod network_match;
6 | 
7 | pub use self::{identity_match::IdentityMatch, network_match::NetworkMatch};
8 | use anyhow::Result;
9 | use futures::prelude::*;
10 | pub use ipnet::{IpNet, Ipv4Net, Ipv6Net};
11 | use std::{collections::BTreeMap, pin::Pin, time::Duration};
12 | 
13 | /// Models inbound server configuration discovery.
14 | #[async_trait::async_trait]
15 | pub trait DiscoverInboundServer<T> {
16 |     async fn get_inbound_server(&self, target: T) -> Result<Option<InboundServer>>;
17 | 
18 |     async fn watch_inbound_server(&self, target: T) -> Result<Option<InboundServerStream>>;
19 | }
20 | 
21 | pub type InboundServerStream = Pin<Box<dyn Stream<Item = InboundServer> + Send + Sync + 'static>>;
22 | 
23 | /// Inbound server configuration.
24 | #[derive(Clone, Debug, PartialEq, Eq)]
25 | pub struct InboundServer {
26 |     pub protocol: ProxyProtocol,
27 |     pub authorizations: BTreeMap<String, ClientAuthorization>,
28 | }
29 | 
30 | /// Describes how a proxy should handle inbound connections.
31 | #[derive(Clone, Debug, PartialEq, Eq)]
32 | pub enum ProxyProtocol {
33 |     /// Indicates that the protocol should be discovered dynamically.
34 |     Detect {
35 |         timeout: Duration,
36 |     },
37 | 
38 |     Http1,
39 |     Http2,
40 |     Grpc,
41 | 
42 |     /// Indicates that connections should be handled opaquely.
43 |     Opaque,
44 | 
45 |     /// Indicates that connections should be handled as application-terminated TLS.
46 |     Tls,
47 | }
48 | 
49 | /// Describes a class of authorized clients.
50 | #[derive(Clone, Debug, PartialEq, Eq)]
51 | pub struct ClientAuthorization {
52 |     /// Limits which source networks this authorization applies to.
53 |     pub networks: Vec<NetworkMatch>,
54 | 
55 |     /// Describes the client's authentication requirements.
56 |     pub authentication: ClientAuthentication,
57 | }
58 | 
59 | #[derive(Clone, Debug, PartialEq, Eq)]
60 | pub enum ClientAuthentication {
61 |     /// Indicates that clients need not be authenticated.
62 |     Unauthenticated,
63 | 
64 |     /// Indicates that clients must use TLS but need not provide a client identity.
65 |     TlsUnauthenticated,
66 | 
67 |     /// Indicates that clients must use mutually-authenticated TLS.
68 |     TlsAuthenticated(Vec<IdentityMatch>),
69 | }
70 | 
--------------------------------------------------------------------------------
/controller/core/src/network_match.rs:
--------------------------------------------------------------------------------
1 | use ipnet::{IpNet, Ipv4Net, Ipv6Net};
2 | use std::net::IpAddr;
3 | 
4 | #[derive(Clone, Debug, PartialEq, Eq)]
5 | pub struct NetworkMatch {
6 |     /// A network to match against.
7 |     pub net: IpNet,
8 | 
9 |     /// Networks to exclude from the match.
10 |     pub except: Vec<IpNet>,
11 | }
12 | 
13 | // === impl NetworkMatch ===
14 | 
15 | impl From<IpAddr> for NetworkMatch {
16 |     fn from(net: IpAddr) -> Self {
17 |         IpNet::from(net).into()
18 |     }
19 | }
20 | 
21 | impl From<IpNet> for NetworkMatch {
22 |     fn from(net: IpNet) -> Self {
23 |         Self {
24 |             net,
25 |             except: vec![],
26 |         }
27 |     }
28 | }
29 | 
30 | impl From<Ipv4Net> for NetworkMatch {
31 |     fn from(net: Ipv4Net) -> Self {
32 |         IpNet::from(net).into()
33 |     }
34 | }
35 | 
36 | impl From<Ipv6Net> for NetworkMatch {
37 |     fn from(net: Ipv6Net) -> Self {
38 |         IpNet::from(net).into()
39 |     }
40 | }
41 | 
--------------------------------------------------------------------------------
/controller/grpc/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "polixy-controller-grpc"
3 | version = "0.1.0"
4 | edition = "2018"
5 | license = "Apache-2.0"
6 | publish = false
7 | 
8 | [features]
9 | rustfmt = ["linkerd2-proxy-api/rustfmt"]
10 | 
11 | [dependencies]
12 | async-stream = "0.3"
13 | async-trait = "0.1"
14 | drain = "0.1"
15 | futures = "0.3"
16 | linkerd2-proxy-api = { git = "https://github.com/linkerd/linkerd2-proxy-api", branch = "ver/inbound", features = ["inbound", "server"] }
17 | polixy-controller-core = { path = "../core" }
18 | tokio = { version = "1", features = ["macros"] }
19 | tonic = { version = "0.5", default-features = false, features = ["transport"] }
20 | tracing = "0.1"
21 | 
--------------------------------------------------------------------------------
/controller/grpc/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![deny(warnings, rust_2018_idioms)]
2 | #![forbid(unsafe_code)]
3 | 
4 | use futures::prelude::*;
5 | use linkerd2_proxy_api::inbound::{
6 |     self as proto,
7 |     inbound_server_discovery_server::{InboundServerDiscovery, InboundServerDiscoveryServer},
8 | };
9 | use polixy_controller_core::{
10 |     ClientAuthentication, ClientAuthorization, DiscoverInboundServer, IdentityMatch, InboundServer,
11 |     InboundServerStream, IpNet, NetworkMatch, ProxyProtocol,
12 | };
13 | use tracing::trace;
14 | 
15 | #[derive(Clone, Debug)]
16 | pub struct Server<T> {
17 |     discover: T,
18 |     drain: drain::Watch,
19 | }
20 | 
21 | impl<T> Server<T>
22 | where
23 |     T: DiscoverInboundServer<(String, String, u16)> + Send + Sync + 'static,
24 | {
25 |     pub fn new(discover: T, drain: drain::Watch) -> Self {
26 |         Self { discover, drain }
27 |     }
28 | 
29 |     pub async fn serve(
30 |         self,
31 |         addr: std::net::SocketAddr,
32 |         shutdown: impl std::future::Future<Output = ()>,
33 |     ) -> Result<(), tonic::transport::Error> {
34 |         tonic::transport::Server::builder()
35 |             .add_service(InboundServerDiscoveryServer::new(self))
36 |             .serve_with_shutdown(addr, shutdown)
37 |             .await
38 |     }
39 | 
40 |     fn check_target(
41 |         &self,
42 |         proto::PortSpec { workload, port }: proto::PortSpec,
43 |     ) -> Result<(String, String, u16), tonic::Status> {
44 |         // Parse a workload name in the form namespace:name.
45 |         let (ns, name) = match workload.split_once(':') {
46 |             None => {
47 |                 return Err(tonic::Status::invalid_argument(format!(
48 |                     "Invalid workload: {}",
49 |                     workload
50 |                 )));
51 |             }
52 |             Some((ns, pod)) if ns.is_empty() || pod.is_empty() => {
53 |                 return Err(tonic::Status::invalid_argument(format!(
54 |                     "Invalid workload: {}",
55 |                     workload
56 |                 )));
57 |             }
58 |             Some((ns, pod)) => (ns, pod),
59 |         };
60 | 
61 |         // Ensure that the port is in the valid range.
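        // (The protobuf `PortSpec.port` field is a u32 on the wire, so zero and
        // anything above `u16::MAX` must be rejected before narrowing to u16.)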
62 | let port = { 63 | if port == 0 || port > std::u16::MAX as u32 { 64 | return Err(tonic::Status::invalid_argument(format!( 65 | "Invalid port: {}", 66 | port 67 | ))); 68 | } 69 | port as u16 70 | }; 71 | 72 | Ok((ns.to_string(), name.to_string(), port)) 73 | } 74 | } 75 | 76 | #[async_trait::async_trait] 77 | impl InboundServerDiscovery for Server 78 | where 79 | T: DiscoverInboundServer<(String, String, u16)> + Send + Sync + 'static, 80 | { 81 | async fn get_port( 82 | &self, 83 | req: tonic::Request, 84 | ) -> Result, tonic::Status> { 85 | let target = self.check_target(req.into_inner())?; 86 | 87 | // Lookup the configuration for an inbound port. If the pod hasn't (yet) 88 | // been indexed, return a Not Found error. 89 | let s = self 90 | .discover 91 | .get_inbound_server(target) 92 | .await 93 | .map_err(|e| tonic::Status::internal(format!("lookup failed: {}", e)))? 94 | .ok_or_else(|| tonic::Status::not_found("unknown server"))?; 95 | 96 | Ok(tonic::Response::new(to_server(&s))) 97 | } 98 | 99 | type WatchPortStream = BoxWatchStream; 100 | 101 | async fn watch_port( 102 | &self, 103 | req: tonic::Request, 104 | ) -> Result, tonic::Status> { 105 | let target = self.check_target(req.into_inner())?; 106 | let drain = self.drain.clone(); 107 | let rx = self 108 | .discover 109 | .watch_inbound_server(target) 110 | .await 111 | .map_err(|e| tonic::Status::internal(format!("lookup failed: {}", e)))? 112 | .ok_or_else(|| tonic::Status::not_found("unknown server"))?; 113 | Ok(tonic::Response::new(response_stream(drain, rx))) 114 | } 115 | } 116 | 117 | type BoxWatchStream = 118 | std::pin::Pin> + Send + Sync>>; 119 | 120 | fn response_stream(drain: drain::Watch, mut rx: InboundServerStream) -> BoxWatchStream { 121 | Box::pin(async_stream::try_stream! { 122 | tokio::pin! { 123 | let shutdown = drain.signaled(); 124 | } 125 | 126 | loop { 127 | tokio::select! { 128 | // When the port is updated with a new server, update the server watch. 129 | res = rx.next() => match res { 130 | Some(s) => { 131 | yield to_server(&s); 132 | } 133 | None => return, 134 | }, 135 | 136 | // If the server starts shutting down, close the stream so that it doesn't hold the 137 | // server open. 138 | _ = (&mut shutdown) => { 139 | return; 140 | } 141 | } 142 | } 143 | }) 144 | } 145 | 146 | fn to_server(srv: &InboundServer) -> proto::Server { 147 | // Convert the protocol object into a protobuf response. 
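    // (Each core `ProxyProtocol` variant maps one-to-one onto a
    // `proto::proxy_protocol::Kind`; only `Detect` carries data, namely its
    // protocol-detection timeout.)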
148 | let protocol = proto::ProxyProtocol { 149 | kind: match srv.protocol { 150 | ProxyProtocol::Detect { timeout } => Some(proto::proxy_protocol::Kind::Detect( 151 | proto::proxy_protocol::Detect { 152 | timeout: Some(timeout.into()), 153 | }, 154 | )), 155 | ProxyProtocol::Http1 => Some(proto::proxy_protocol::Kind::Http1( 156 | proto::proxy_protocol::Http1::default(), 157 | )), 158 | ProxyProtocol::Http2 => Some(proto::proxy_protocol::Kind::Http2( 159 | proto::proxy_protocol::Http2::default(), 160 | )), 161 | ProxyProtocol::Grpc => Some(proto::proxy_protocol::Kind::Grpc( 162 | proto::proxy_protocol::Grpc::default(), 163 | )), 164 | ProxyProtocol::Opaque => Some(proto::proxy_protocol::Kind::Opaque( 165 | proto::proxy_protocol::Opaque {}, 166 | )), 167 | ProxyProtocol::Tls => Some(proto::proxy_protocol::Kind::Tls( 168 | proto::proxy_protocol::Tls {}, 169 | )), 170 | }, 171 | }; 172 | trace!(?protocol); 173 | 174 | let authorizations = srv 175 | .authorizations 176 | .iter() 177 | .map(|(n, c)| to_authz(n, c)) 178 | .collect(); 179 | trace!(?authorizations); 180 | 181 | proto::Server { 182 | protocol: Some(protocol), 183 | authorizations, 184 | ..Default::default() 185 | } 186 | } 187 | 188 | fn to_authz( 189 | name: impl ToString, 190 | ClientAuthorization { 191 | networks, 192 | authentication, 193 | }: &ClientAuthorization, 194 | ) -> proto::Authz { 195 | let networks = if networks.is_empty() { 196 | // TODO use cluster networks (from config). 197 | vec![ 198 | proto::Network { 199 | net: Some(IpNet::V4(Default::default()).into()), 200 | except: vec![], 201 | }, 202 | proto::Network { 203 | net: Some(IpNet::V6(Default::default()).into()), 204 | except: vec![], 205 | }, 206 | ] 207 | } else { 208 | networks 209 | .iter() 210 | .map(|NetworkMatch { net, except }| proto::Network { 211 | net: Some((*net).into()), 212 | except: except.iter().cloned().map(Into::into).collect(), 213 | }) 214 | .collect() 215 | }; 216 | 217 | match authentication { 218 | ClientAuthentication::Unauthenticated => { 219 | let labels = Some(("authn".to_string(), "false".to_string())) 220 | .into_iter() 221 | .chain(Some(("tls".to_string(), "false".to_string()))) 222 | .chain(Some(("name".to_string(), name.to_string()))) 223 | .collect(); 224 | 225 | proto::Authz { 226 | labels, 227 | networks, 228 | authentication: Some(proto::Authn { 229 | permit: Some(proto::authn::Permit::Unauthenticated( 230 | proto::authn::PermitUnauthenticated {}, 231 | )), 232 | }), 233 | } 234 | } 235 | 236 | ClientAuthentication::TlsUnauthenticated => { 237 | let labels = Some(("authn".to_string(), "false".to_string())) 238 | .into_iter() 239 | .chain(Some(("tls".to_string(), "true".to_string()))) 240 | .chain(Some(("name".to_string(), name.to_string()))) 241 | .collect(); 242 | 243 | // todo 244 | proto::Authz { 245 | labels, 246 | networks, 247 | authentication: Some(proto::Authn { 248 | permit: Some(proto::authn::Permit::MeshTls(proto::authn::PermitMeshTls { 249 | clients: Some(proto::authn::permit_mesh_tls::Clients::Unauthenticated( 250 | proto::authn::PermitUnauthenticated {}, 251 | )), 252 | })), 253 | }), 254 | } 255 | } 256 | 257 | // Authenticated connections must have TLS and apply to all 258 | // networks. 
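        // The identity matches are partitioned by kind: exact names become
        // `proto::Identity` values, while suffix matches become
        // `proto::IdentitySuffix` values, since the API models them separately.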
259 | ClientAuthentication::TlsAuthenticated(identities) => { 260 | let labels = Some(("authn".to_string(), "true".to_string())) 261 | .into_iter() 262 | .chain(Some(("tls".to_string(), "true".to_string()))) 263 | .chain(Some(("name".to_string(), name.to_string()))) 264 | .collect(); 265 | 266 | let authn = { 267 | let suffixes = identities 268 | .iter() 269 | .filter_map(|i| match i { 270 | IdentityMatch::Suffix(s) => { 271 | Some(proto::IdentitySuffix { parts: s.to_vec() }) 272 | } 273 | _ => None, 274 | }) 275 | .collect(); 276 | 277 | let identities = identities 278 | .iter() 279 | .filter_map(|i| match i { 280 | IdentityMatch::Name(n) => Some(proto::Identity { 281 | name: n.to_string(), 282 | }), 283 | _ => None, 284 | }) 285 | .collect(); 286 | 287 | proto::Authn { 288 | permit: Some(proto::authn::Permit::MeshTls(proto::authn::PermitMeshTls { 289 | clients: Some(proto::authn::permit_mesh_tls::Clients::Identities( 290 | proto::authn::permit_mesh_tls::PermitClientIdentities { 291 | identities, 292 | suffixes, 293 | }, 294 | )), 295 | })), 296 | } 297 | }; 298 | 299 | proto::Authz { 300 | labels, 301 | networks, 302 | authentication: Some(authn), 303 | } 304 | } 305 | } 306 | } 307 | -------------------------------------------------------------------------------- /controller/k8s/api/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "polixy-controller-k8s-api" 3 | version = "0.1.0" 4 | edition = "2018" 5 | license = "Apache-2.0" 6 | publish = false 7 | 8 | [dependencies] 9 | futures = { version = "0.3", default-features = false } 10 | k8s-openapi = { version = "0.12.0", default-features = false, features = ["v1_20"] } 11 | kube = { version = "0.58.1", default-features = false, features = ["client", "derive", "native-tls"] } 12 | kube-runtime = { version = "0.58.1", default-features = false } 13 | schemars = "0.8" 14 | serde = { version = "1", features = ["derive"] } 15 | serde_json = "1" 16 | serde_yaml = "0.8" 17 | tokio = { version = "1", features = ["time"] } 18 | tracing = "0.1" 19 | -------------------------------------------------------------------------------- /controller/k8s/api/src/labels.rs: -------------------------------------------------------------------------------- 1 | use schemars::JsonSchema; 2 | use serde::{Deserialize, Serialize}; 3 | use std::{ 4 | collections::{BTreeMap, BTreeSet}, 5 | sync::Arc, 6 | }; 7 | 8 | #[derive(Clone, Debug, Eq, Default)] 9 | pub struct Labels(Arc); 10 | 11 | pub type Map = BTreeMap; 12 | 13 | pub type Expressions = Vec; 14 | 15 | #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] 16 | pub struct Expression { 17 | key: String, 18 | operator: Operator, 19 | values: BTreeSet, 20 | } 21 | 22 | #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] 23 | pub enum Operator { 24 | In, 25 | NotIn, 26 | } 27 | 28 | /// Selects a set of pods that expose a server. 
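///
/// (This mirrors the shape of Kubernetes' `LabelSelector`: every `matchLabels`
/// entry and every `matchExpressions` entry must hold for a match, and an empty
/// selector matches everything; see `Selector::matches` below.)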
29 | #[derive(Clone, Debug, Eq, PartialEq, Default, Deserialize, Serialize, JsonSchema)]
30 | #[serde(rename_all = "camelCase")]
31 | pub struct Selector {
32 |     match_labels: Option<Map>,
33 |     match_expressions: Option<Expressions>,
34 | }
35 | 
36 | // === Selector ===
37 | 
38 | impl Selector {
39 |     pub fn from_expressions(exprs: Expressions) -> Self {
40 |         Self {
41 |             match_labels: None,
42 |             match_expressions: Some(exprs),
43 |         }
44 |     }
45 | 
46 |     pub fn from_map(map: Map) -> Self {
47 |         Self {
48 |             match_labels: Some(map),
49 |             match_expressions: None,
50 |         }
51 |     }
52 | 
53 |     pub fn matches(&self, labels: &Labels) -> bool {
54 |         for expr in self.match_expressions.iter().flatten() {
55 |             if !expr.matches(labels.as_ref()) {
56 |                 return false;
57 |             }
58 |         }
59 | 
60 |         if let Some(match_labels) = self.match_labels.as_ref() {
61 |             for (k, v) in match_labels.iter() {
62 |                 if labels.0.get(k) != Some(v) {
63 |                     return false;
64 |                 }
65 |             }
66 |         }
67 | 
68 |         true
69 |     }
70 | }
71 | 
72 | impl std::iter::FromIterator<(String, String)> for Selector {
73 |     fn from_iter<T: IntoIterator<Item = (String, String)>>(iter: T) -> Self {
74 |         Self::from_map(iter.into_iter().collect())
75 |     }
76 | }
77 | 
78 | impl std::iter::FromIterator<(&'static str, &'static str)> for Selector {
79 |     fn from_iter<T: IntoIterator<Item = (&'static str, &'static str)>>(iter: T) -> Self {
80 |         Self::from_map(
81 |             iter.into_iter()
82 |                 .map(|(k, v)| (k.to_string(), v.to_string()))
83 |                 .collect(),
84 |         )
85 |     }
86 | }
87 | 
88 | impl std::iter::FromIterator<Expression> for Selector {
89 |     fn from_iter<T: IntoIterator<Item = Expression>>(iter: T) -> Self {
90 |         Self::from_expressions(iter.into_iter().collect())
91 |     }
92 | }
93 | 
94 | // === Labels ===
95 | 
96 | impl From<Map> for Labels {
97 |     #[inline]
98 |     fn from(labels: Map) -> Self {
99 |         Self(Arc::new(labels))
100 |     }
101 | }
102 | 
103 | impl AsRef<Map> for Labels {
104 |     #[inline]
105 |     fn as_ref(&self) -> &Map {
106 |         self.0.as_ref()
107 |     }
108 | }
109 | 
110 | impl<T: AsRef<Map>> std::cmp::PartialEq<T> for Labels {
111 |     #[inline]
112 |     fn eq(&self, t: &T) -> bool {
113 |         self.0.as_ref().eq(t.as_ref())
114 |     }
115 | }
116 | 
117 | impl std::iter::FromIterator<(String, String)> for Labels {
118 |     fn from_iter<T: IntoIterator<Item = (String, String)>>(iter: T) -> Self {
119 |         Self(Arc::new(iter.into_iter().collect()))
120 |     }
121 | }
122 | 
123 | impl std::iter::FromIterator<(&'static str, &'static str)> for Labels {
124 |     fn from_iter<T: IntoIterator<Item = (&'static str, &'static str)>>(iter: T) -> Self {
125 |         iter.into_iter()
126 |             .map(|(k, v)| (k.to_string(), v.to_string()))
127 |             .collect()
128 |     }
129 | }
130 | 
131 | // === Expression ===
132 | 
133 | impl Expression {
134 |     fn matches(&self, labels: &Map) -> bool {
135 |         match self.operator {
136 |             Operator::In => {
137 |                 if let Some(v) = labels.get(&self.key) {
138 |                     return self.values.contains(v);
139 |                 }
140 |             }
141 |             Operator::NotIn => {
142 |                 return match labels.get(&self.key) {
143 |                     Some(v) => !self.values.contains(v),
144 |                     None => true,
145 |                 }
146 |             }
147 |         }
148 | 
149 |         false
150 |     }
151 | }
152 | 
153 | #[cfg(test)]
154 | mod tests {
155 |     use super::*;
156 |     use std::iter::FromIterator;
157 | 
158 |     #[test]
159 |     fn test_matches() {
160 |         for (selector, labels, matches, msg) in &[
161 |             (Selector::default(), Labels::default(), true, "empty match"),
162 |             (
163 |                 Selector::from_iter(Some(("foo", "bar"))),
164 |                 Labels::from_iter(Some(("foo", "bar"))),
165 |                 true,
166 |                 "exact label match",
167 |             ),
168 |             (
169 |                 Selector::from_iter(Some(("foo", "bar"))),
170 |                 Labels::from_iter(vec![("foo", "bar"), ("bah", "baz")]),
171 |                 true,
172 |                 "sufficient label match",
173 |             ),
174 |             (
175 |                 Selector::from_iter(Some(Expression {
176 |                     key: "foo".into(),
177 |                     operator: Operator::In,
178 |
values: Some("bar".to_string()).into_iter().collect(), 179 | })), 180 | Labels::from_iter(vec![("foo", "bar"), ("bah", "baz")]), 181 | true, 182 | "expression match", 183 | ), 184 | ] { 185 | assert_eq!(selector.matches(labels), *matches, "{}", msg); 186 | } 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /controller/k8s/api/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![deny(warnings, rust_2018_idioms)] 2 | #![forbid(unsafe_code)] 3 | 4 | pub mod labels; 5 | pub mod polixy; 6 | mod watch; 7 | 8 | pub use self::{ 9 | labels::Labels, 10 | watch::{Event, Watch}, 11 | }; 12 | pub use k8s_openapi::api::{ 13 | self, 14 | core::v1::{Namespace, Node, NodeSpec, Pod, PodSpec, PodStatus}, 15 | }; 16 | use kube::api::{Api, ListParams}; 17 | pub use kube::api::{ObjectMeta, ResourceExt}; 18 | use kube_runtime::watcher; 19 | 20 | /// Resource watches. 21 | pub struct ResourceWatches { 22 | pub nodes_rx: Watch, 23 | pub pods_rx: Watch, 24 | pub servers_rx: Watch, 25 | pub authorizations_rx: Watch, 26 | } 27 | 28 | // === impl ResourceWatches === 29 | 30 | impl ResourceWatches { 31 | const DEFAULT_TIMEOUT_SECS: u32 = 5 * 60; 32 | } 33 | 34 | impl From for ResourceWatches { 35 | fn from(client: kube::Client) -> Self { 36 | let params = ListParams::default().timeout(Self::DEFAULT_TIMEOUT_SECS); 37 | Self { 38 | nodes_rx: watcher(Api::all(client.clone()), params.clone()).into(), 39 | pods_rx: watcher( 40 | Api::all(client.clone()), 41 | params.clone().labels("linkerd.io/control-plane-ns"), 42 | ) 43 | .into(), 44 | servers_rx: watcher(Api::all(client.clone()), params.clone()).into(), 45 | authorizations_rx: watcher(Api::all(client), params).into(), 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /controller/k8s/api/src/polixy/authz.rs: -------------------------------------------------------------------------------- 1 | use super::super::labels; 2 | use kube::CustomResource; 3 | use schemars::JsonSchema; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | /// Authorizes clients to connect to a Server. 7 | #[derive(CustomResource, Default, Deserialize, Serialize, Clone, Debug, JsonSchema)] 8 | #[kube( 9 | group = "polixy.linkerd.io", 10 | version = "v1alpha1", 11 | kind = "ServerAuthorization", 12 | namespaced 13 | )] 14 | #[serde(rename_all = "camelCase")] 15 | pub struct ServerAuthorizationSpec { 16 | pub server: Server, 17 | pub client: Client, 18 | } 19 | 20 | #[derive(Default, Deserialize, Serialize, Clone, Debug, JsonSchema)] 21 | pub struct Server { 22 | pub name: Option, 23 | pub selector: Option, 24 | } 25 | 26 | /// Describes an authenticated client. 27 | /// 28 | /// Exactly one of `identities` and `service_accounts` should be set. 29 | #[derive(Default, Deserialize, Serialize, Clone, Debug, JsonSchema)] 30 | #[serde(rename_all = "camelCase")] 31 | pub struct Client { 32 | pub networks: Option>, 33 | 34 | #[serde(default)] 35 | pub unauthenticated: bool, 36 | 37 | #[serde(rename = "meshTLS")] 38 | pub mesh_tls: Option, 39 | } 40 | 41 | /// Describes an authenticated client. 42 | /// 43 | /// Exactly one of `identities` and `service_accounts` should be set. 
44 | #[derive(Default, Deserialize, Serialize, Clone, Debug, JsonSchema)] 45 | #[serde(rename_all = "camelCase")] 46 | pub struct MeshTls { 47 | #[serde(rename = "unauthenticatedTLS", default)] 48 | pub unauthenticated_tls: bool, 49 | 50 | /// Indicates a Linkerd identity that is authorized to access a server. 51 | pub identities: Vec, 52 | 53 | /// Identifies a `ServiceAccount` authorized to access a server. 54 | pub service_accounts: Vec, 55 | } 56 | 57 | #[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] 58 | pub struct Network { 59 | pub cidr: String, 60 | pub except: Vec, 61 | } 62 | 63 | /// References a Kubernetes `ServiceAccount` instance. 64 | /// 65 | /// If no namespace is specified, the `Authorization`'s namespace is used. 66 | #[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)] 67 | pub struct ServiceAccountRef { 68 | pub namespace: Option, 69 | pub name: String, 70 | // TODO pub selector: labels::Selector, 71 | } 72 | -------------------------------------------------------------------------------- /controller/k8s/api/src/polixy/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod authz; 2 | pub mod server; 3 | 4 | pub use self::authz::{ServerAuthorization, ServerAuthorizationSpec}; 5 | pub use self::server::{Server, ServerSpec}; 6 | -------------------------------------------------------------------------------- /controller/k8s/api/src/polixy/server.rs: -------------------------------------------------------------------------------- 1 | use super::super::labels; 2 | use kube::CustomResource; 3 | use schemars::JsonSchema; 4 | use serde::{Deserialize, Serialize}; 5 | 6 | /// Describes a server interface exposed by a set of pods. 7 | #[derive(Clone, Debug, CustomResource, Deserialize, Serialize, JsonSchema)] 8 | #[kube( 9 | group = "polixy.linkerd.io", 10 | version = "v1alpha1", 11 | kind = "Server", 12 | namespaced 13 | )] 14 | #[serde(rename_all = "camelCase")] 15 | pub struct ServerSpec { 16 | pub pod_selector: labels::Selector, 17 | pub port: Port, 18 | pub proxy_protocol: Option, 19 | } 20 | 21 | /// References a pod spec's port by name or number. 22 | #[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)] 23 | #[serde(untagged)] 24 | pub enum Port { 25 | Number(u16), 26 | Name(String), 27 | } 28 | 29 | #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] 30 | pub enum ProxyProtocol { 31 | #[serde(rename = "unknown")] 32 | Unknown, 33 | #[serde(rename = "HTTP/1")] 34 | Http1, 35 | #[serde(rename = "HTTP/2")] 36 | Http2, 37 | #[serde(rename = "gRPC")] 38 | Grpc, 39 | #[serde(rename = "opaque")] 40 | Opaque, 41 | #[serde(rename = "TLS")] 42 | Tls, 43 | } 44 | -------------------------------------------------------------------------------- /controller/k8s/api/src/watch.rs: -------------------------------------------------------------------------------- 1 | use futures::prelude::*; 2 | use std::pin::Pin; 3 | use tokio::time; 4 | use tracing::info; 5 | 6 | pub use kube_runtime::watcher::{Event, Result}; 7 | 8 | /// Wraps an event stream that never terminates. 
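///
/// (Errors from the underlying watcher are logged and retried inside `recv`,
/// so callers only ever observe `Event`s; `ready()` reports whether the most
/// recent poll succeeded.)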
9 | pub struct Watch { 10 | ready: bool, 11 | rx: Pin>> + Send + 'static>>, 12 | } 13 | 14 | // === impl Watch === 15 | 16 | impl From for Watch 17 | where 18 | W: Stream>> + Send + 'static, 19 | { 20 | fn from(watch: W) -> Self { 21 | Watch { 22 | ready: false, 23 | rx: watch.boxed(), 24 | } 25 | } 26 | } 27 | 28 | impl Watch { 29 | pub fn ready(&self) -> bool { 30 | self.ready 31 | } 32 | 33 | /// Receive the next event in the stream. 34 | /// 35 | /// If the stream fails, log the error and sleep for 1s before polling for a reset event. 36 | pub async fn recv(&mut self) -> Event { 37 | loop { 38 | match self 39 | .rx 40 | .next() 41 | .await 42 | .expect("watch stream must not terminate") 43 | { 44 | Ok(ev) => { 45 | self.ready = true; 46 | return ev; 47 | } 48 | Err(error) => { 49 | self.ready = false; 50 | info!(%error, "Disconnected"); 51 | time::sleep(time::Duration::from_secs(1)).await; 52 | } 53 | } 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /controller/k8s/index/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "polixy-controller-k8s-index" 3 | version = "0.1.0" 4 | edition = "2018" 5 | license = "Apache-2.0" 6 | publish = false 7 | 8 | [dependencies] 9 | anyhow = "1" 10 | async-stream = "0.3" 11 | async-trait = "0.1" 12 | dashmap = "4" 13 | futures = "0.3" 14 | polixy-controller-core = { path = "../../core" } 15 | polixy-controller-k8s-api = { path = "../api" } 16 | tokio = { version = "1", features = ["macros", "rt", "sync"] } 17 | tracing = "0.1" 18 | -------------------------------------------------------------------------------- /controller/k8s/index/src/authz.rs: -------------------------------------------------------------------------------- 1 | use crate::{Index, ServerSelector, SrvIndex}; 2 | use anyhow::{anyhow, bail, Result}; 3 | use polixy_controller_core::{ 4 | ClientAuthentication, ClientAuthorization, IdentityMatch, IpNet, NetworkMatch, 5 | }; 6 | use polixy_controller_k8s_api::{ 7 | self as k8s, 8 | polixy::{self, authz::MeshTls}, 9 | ResourceExt, 10 | }; 11 | use std::collections::{hash_map::Entry as HashEntry, HashMap, HashSet}; 12 | use tracing::{debug, instrument, trace}; 13 | 14 | #[derive(Debug, Default)] 15 | pub(crate) struct AuthzIndex { 16 | index: HashMap, 17 | } 18 | 19 | #[derive(Clone, Debug, PartialEq, Eq)] 20 | struct Authz { 21 | servers: ServerSelector, 22 | clients: ClientAuthorization, 23 | } 24 | 25 | // === impl AuthzIndex === 26 | 27 | impl AuthzIndex { 28 | /// Updates the authorization and server indexes with a new or updated authorization instance. 29 | fn apply( 30 | &mut self, 31 | authz: polixy::ServerAuthorization, 32 | servers: &mut SrvIndex, 33 | domain: &str, 34 | ) -> Result<()> { 35 | let name = authz.name(); 36 | let authz = mk_authz(authz, domain)?; 37 | 38 | match self.index.entry(name) { 39 | HashEntry::Vacant(entry) => { 40 | servers.add_authz(entry.key(), &authz.servers, authz.clients.clone()); 41 | entry.insert(authz); 42 | } 43 | 44 | HashEntry::Occupied(mut entry) => { 45 | // If the authorization changed materially, then update it in all servers. 
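                    // (`Authz` derives `PartialEq`, so an unchanged authorization is
                    // detected here and servers are not re-notified needlessly.)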
46 | if entry.get() != &authz { 47 | servers.add_authz(entry.key(), &authz.servers, authz.clients.clone()); 48 | entry.insert(authz); 49 | } 50 | } 51 | } 52 | 53 | Ok(()) 54 | } 55 | 56 | fn delete(&mut self, name: &str) { 57 | self.index.remove(name); 58 | debug!("Removed authz"); 59 | } 60 | 61 | pub fn filter_selected( 62 | &self, 63 | name: impl Into, 64 | labels: k8s::Labels, 65 | ) -> impl Iterator { 66 | let name = name.into(); 67 | self.index.iter().filter_map(move |(authz_name, a)| { 68 | let matches = match a.servers { 69 | ServerSelector::Name(ref n) => { 70 | trace!(r#ref = %n, %name); 71 | n == &name 72 | } 73 | ServerSelector::Selector(ref s) => { 74 | trace!(selector = ?s, ?labels); 75 | s.matches(&labels) 76 | } 77 | }; 78 | debug!(authz = %authz_name, %matches); 79 | if matches { 80 | Some((authz_name.clone(), &a.clients)) 81 | } else { 82 | None 83 | } 84 | }) 85 | } 86 | } 87 | 88 | // === impl Index === 89 | 90 | impl Index { 91 | /// Constructs an `Authz` and adds it to `Servers` it selects. 92 | #[instrument( 93 | skip(self, authz), 94 | fields( 95 | ns = ?authz.metadata.namespace, 96 | name = ?authz.metadata.name, 97 | ) 98 | )] 99 | pub(crate) fn apply_authz(&mut self, authz: polixy::ServerAuthorization) -> Result<()> { 100 | let ns = self 101 | .namespaces 102 | .get_or_default(authz.namespace().expect("namespace required")); 103 | 104 | ns.authzs 105 | .apply(authz, &mut ns.servers, &*self.identity_domain) 106 | } 107 | 108 | #[instrument( 109 | skip(self, authz), 110 | fields( 111 | ns = ?authz.metadata.namespace, 112 | name = ?authz.metadata.name, 113 | ) 114 | )] 115 | pub(crate) fn delete_authz(&mut self, authz: polixy::ServerAuthorization) { 116 | if let Some(ns) = self 117 | .namespaces 118 | .index 119 | .get_mut(authz.namespace().unwrap().as_str()) 120 | { 121 | let name = authz.name(); 122 | ns.servers.remove_authz(name.as_str()); 123 | ns.authzs.delete(name.as_str()); 124 | } 125 | } 126 | 127 | #[instrument(skip(self, authzs))] 128 | pub(crate) fn reset_authzs(&mut self, authzs: Vec) -> Result<()> { 129 | let mut prior = self 130 | .namespaces 131 | .index 132 | .iter() 133 | .map(|(n, ns)| { 134 | let authzs = ns.authzs.index.keys().cloned().collect::>(); 135 | (n.clone(), authzs) 136 | }) 137 | .collect::>(); 138 | 139 | let mut result = Ok(()); 140 | for authz in authzs.into_iter() { 141 | if let Some(ns) = prior.get_mut(authz.namespace().unwrap().as_str()) { 142 | ns.remove(authz.name().as_str()); 143 | } 144 | 145 | if let Err(e) = self.apply_authz(authz) { 146 | result = Err(e); 147 | } 148 | } 149 | 150 | for (ns_name, authzs) in prior { 151 | if let Some(ns) = self.namespaces.index.get_mut(&ns_name) { 152 | for name in authzs.into_iter() { 153 | ns.servers.remove_authz(&name); 154 | ns.authzs.delete(&name); 155 | } 156 | } 157 | } 158 | 159 | result 160 | } 161 | } 162 | 163 | fn mk_authz(srv: polixy::authz::ServerAuthorization, domain: &str) -> Result { 164 | let polixy::authz::ServerAuthorization { metadata, spec, .. 
} = srv; 165 | 166 | let servers = { 167 | let polixy::authz::Server { name, selector } = spec.server; 168 | match (name, selector) { 169 | (Some(n), None) => ServerSelector::Name(n), 170 | (None, Some(sel)) => ServerSelector::Selector(sel.into()), 171 | (Some(_), Some(_)) => bail!("authorization selection is ambiguous"), 172 | (None, None) => bail!("authorization selects no servers"), 173 | } 174 | }; 175 | 176 | let networks = if let Some(nets) = spec.client.networks { 177 | nets.into_iter() 178 | .map(|polixy::authz::Network { cidr, except }| { 179 | let net = cidr.parse::()?; 180 | debug!(%net, "Unauthenticated"); 181 | let except = except 182 | .into_iter() 183 | .map(|cidr| cidr.parse().map_err(Into::into)) 184 | .collect::>>()?; 185 | Ok(NetworkMatch { net, except }) 186 | }) 187 | .collect::>>()? 188 | } else { 189 | // TODO this should only be cluster-local IPs. 190 | vec![ 191 | IpNet::V4(Default::default()).into(), 192 | IpNet::V6(Default::default()).into(), 193 | ] 194 | }; 195 | 196 | let authentication = if spec.client.unauthenticated { 197 | ClientAuthentication::Unauthenticated 198 | } else { 199 | let mtls = spec 200 | .client 201 | .mesh_tls 202 | .ok_or_else(|| anyhow!("client mtls missing"))?; 203 | mk_mtls_authn(&metadata, mtls, domain)? 204 | }; 205 | 206 | Ok(Authz { 207 | servers, 208 | clients: ClientAuthorization { 209 | networks, 210 | authentication, 211 | }, 212 | }) 213 | } 214 | 215 | fn mk_mtls_authn( 216 | metadata: &k8s::ObjectMeta, 217 | mtls: MeshTls, 218 | domain: &str, 219 | ) -> Result { 220 | if mtls.unauthenticated_tls { 221 | return Ok(ClientAuthentication::TlsUnauthenticated); 222 | } 223 | 224 | let mut identities = Vec::new(); 225 | 226 | for id in mtls.identities.into_iter() { 227 | if id == "*" { 228 | debug!(suffix = %id, "Authenticated"); 229 | identities.push(IdentityMatch::Suffix(vec![])); 230 | } else if id.starts_with("*.") { 231 | debug!(suffix = %id, "Authenticated"); 232 | let mut parts = id.split('.'); 233 | let star = parts.next(); 234 | debug_assert_eq!(star, Some("*")); 235 | identities.push(IdentityMatch::Suffix( 236 | parts.map(|p| p.to_string()).collect::>(), 237 | )); 238 | } else { 239 | debug!(%id, "Authenticated"); 240 | identities.push(IdentityMatch::Name(id)); 241 | } 242 | } 243 | 244 | for sa in mtls.service_accounts.into_iter() { 245 | let name = sa.name; 246 | let ns = sa 247 | .namespace 248 | .unwrap_or_else(|| metadata.namespace.clone().unwrap()); 249 | debug!(ns = %ns, serviceaccount = %name, "Authenticated"); 250 | let n = format!("{}.{}.serviceaccount.identity.linkerd.{}", name, ns, domain); 251 | identities.push(IdentityMatch::Name(n)); 252 | } 253 | 254 | if identities.is_empty() { 255 | bail!("authorization authorizes no clients"); 256 | } 257 | 258 | Ok(ClientAuthentication::TlsAuthenticated(identities)) 259 | } 260 | -------------------------------------------------------------------------------- /controller/k8s/index/src/default_allow.rs: -------------------------------------------------------------------------------- 1 | use crate::ServerRx; 2 | use anyhow::{anyhow, Error, Result}; 3 | use polixy_controller_core::{ 4 | ClientAuthentication, ClientAuthorization, IdentityMatch, InboundServer, IpNet, NetworkMatch, 5 | ProxyProtocol, 6 | }; 7 | use polixy_controller_k8s_api as k8s; 8 | use tokio::{sync::watch, time}; 9 | 10 | #[derive(Copy, Clone, Debug, PartialEq, Eq)] 11 | pub enum DefaultAllow { 12 | AllAuthenticated, 13 | AllUnauthenticated, 14 | ClusterAuthenticated, 15 | ClusterUnauthenticated, 16 | 
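    // (Each variant corresponds to one value of the
    // `polixy.linkerd.io/default-allow` annotation; see the `FromStr` and
    // `Display` impls below.)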
Deny, 17 | } 18 | 19 | /// Default server configs to use when no server matches. 20 | #[derive(Clone, Debug)] 21 | pub(crate) struct DefaultAllows { 22 | all_authed_rx: ServerRx, 23 | all_unauthed_rx: ServerRx, 24 | cluster_authed_rx: ServerRx, 25 | cluster_unauthed_rx: ServerRx, 26 | deny_rx: ServerRx, 27 | } 28 | 29 | // === impl DefaultAllow === 30 | 31 | impl DefaultAllow { 32 | pub const ANNOTATION: &'static str = "polixy.linkerd.io/default-allow"; 33 | 34 | pub fn from_annotation(meta: &k8s::ObjectMeta) -> Result> { 35 | if let Some(v) = meta.annotations.get(Self::ANNOTATION) { 36 | let mode = v.parse()?; 37 | Ok(Some(mode)) 38 | } else { 39 | Ok(None) 40 | } 41 | } 42 | } 43 | 44 | impl std::str::FromStr for DefaultAllow { 45 | type Err = Error; 46 | 47 | fn from_str(s: &str) -> Result { 48 | match s { 49 | "all-authenticated" => Ok(Self::AllAuthenticated), 50 | "all-unauthenticated" => Ok(Self::AllUnauthenticated), 51 | "cluster-authenticated" => Ok(Self::ClusterAuthenticated), 52 | "cluster-unauthenticated" => Ok(Self::ClusterUnauthenticated), 53 | "deny" => Ok(Self::Deny), 54 | s => Err(anyhow!("invalid mode: {}", s)), 55 | } 56 | } 57 | } 58 | 59 | impl std::fmt::Display for DefaultAllow { 60 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 61 | match self { 62 | Self::AllAuthenticated => "all-authenticated".fmt(f), 63 | Self::AllUnauthenticated => "all-unauthenticated".fmt(f), 64 | Self::ClusterAuthenticated => "cluster-authenticated".fmt(f), 65 | Self::ClusterUnauthenticated => "cluster-unauthenticated".fmt(f), 66 | Self::Deny => "deny".fmt(f), 67 | } 68 | } 69 | } 70 | 71 | // === impl DefaultAllows === 72 | 73 | impl DefaultAllows { 74 | /// Create default allow policy receivers. 75 | /// 76 | /// These receivers are never updated. The senders are spawned onto a background task so that 77 | /// the receivers continue to be live. The background task completes once all receivers are 78 | /// dropped. 79 | pub fn spawn(cluster_nets: Vec, detect_timeout: time::Duration) -> Self { 80 | let any_authenticated = 81 | ClientAuthentication::TlsAuthenticated(vec![IdentityMatch::Suffix(vec![])]); 82 | 83 | let all_nets = [IpNet::V4(Default::default()), IpNet::V6(Default::default())]; 84 | 85 | let (all_authed_tx, all_authed_rx) = watch::channel(mk_detect_config( 86 | "_all_authed", 87 | detect_timeout, 88 | all_nets.iter().cloned(), 89 | any_authenticated.clone(), 90 | )); 91 | 92 | let (all_unauthed_tx, all_unauthed_rx) = watch::channel(mk_detect_config( 93 | "_all_unauthed", 94 | detect_timeout, 95 | all_nets.iter().cloned(), 96 | ClientAuthentication::Unauthenticated, 97 | )); 98 | 99 | let (cluster_authed_tx, cluster_authed_rx) = watch::channel(mk_detect_config( 100 | "_cluster_authed", 101 | detect_timeout, 102 | cluster_nets.iter().cloned(), 103 | any_authenticated, 104 | )); 105 | 106 | let (cluster_unauthed_tx, cluster_unauthed_rx) = watch::channel(mk_detect_config( 107 | "_cluster_unauthed", 108 | detect_timeout, 109 | cluster_nets.into_iter(), 110 | ClientAuthentication::Unauthenticated, 111 | )); 112 | 113 | let (deny_tx, deny_rx) = watch::channel(InboundServer { 114 | protocol: ProxyProtocol::Detect { 115 | timeout: detect_timeout, 116 | }, 117 | authorizations: Default::default(), 118 | }); 119 | 120 | // Ensure the senders are not dropped until all receivers are dropped. 
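        // (`watch::Sender::closed()` resolves only once every corresponding
        // receiver has been dropped, so joining them keeps all of the default
        // policy channels live for as long as anything subscribes to them.)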
121 |         tokio::spawn(async move {
122 |             tokio::join!(
123 |                 all_authed_tx.closed(),
124 |                 all_unauthed_tx.closed(),
125 |                 cluster_authed_tx.closed(),
126 |                 cluster_unauthed_tx.closed(),
127 |                 deny_tx.closed(),
128 |             );
129 |         });
130 | 
131 |         Self {
132 |             all_authed_rx,
133 |             all_unauthed_rx,
134 |             cluster_authed_rx,
135 |             cluster_unauthed_rx,
136 |             deny_rx,
137 |         }
138 |     }
139 | 
140 |     pub fn get(&self, mode: DefaultAllow) -> ServerRx {
141 |         match mode {
142 |             DefaultAllow::AllAuthenticated => self.all_authed_rx.clone(),
143 |             DefaultAllow::AllUnauthenticated => self.all_unauthed_rx.clone(),
144 |             DefaultAllow::ClusterAuthenticated => self.cluster_authed_rx.clone(),
145 |             DefaultAllow::ClusterUnauthenticated => self.cluster_unauthed_rx.clone(),
146 |             DefaultAllow::Deny => self.deny_rx.clone(),
147 |         }
148 |     }
149 | }
150 | 
151 | fn mk_detect_config(
152 |     name: &'static str,
153 |     timeout: time::Duration,
154 |     nets: impl IntoIterator<Item = IpNet>,
155 |     authentication: ClientAuthentication,
156 | ) -> InboundServer {
157 |     let networks = nets
158 |         .into_iter()
159 |         .map(|net| NetworkMatch {
160 |             net,
161 |             except: vec![],
162 |         })
163 |         .collect::<Vec<_>>();
164 |     let authz = ClientAuthorization {
165 |         networks,
166 |         authentication,
167 |     };
168 | 
169 |     InboundServer {
170 |         protocol: ProxyProtocol::Detect { timeout },
171 |         authorizations: Some((name.to_string(), authz)).into_iter().collect(),
172 |     }
173 | }
174 | 
175 | #[cfg(test)]
176 | mod test {
177 |     use super::*;
178 | 
179 |     #[test]
180 |     fn test_parse_displayed() {
181 |         for default in &[
182 |             DefaultAllow::Deny,
183 |             DefaultAllow::AllAuthenticated,
184 |             DefaultAllow::AllUnauthenticated,
185 |             DefaultAllow::ClusterAuthenticated,
186 |             DefaultAllow::ClusterUnauthenticated,
187 |         ] {
188 |             assert_eq!(
189 |                 default.to_string().parse::<DefaultAllow>().unwrap(),
190 |                 *default,
191 |                 "failed to parse displayed {:?}",
192 |                 *default
193 |             );
194 |         }
195 |     }
196 | }
197 | 
--------------------------------------------------------------------------------
/controller/k8s/index/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! Linkerd Policy Controller
2 | //!
3 | //! The policy controller serves discovery requests from inbound proxies, indicating how the proxy
4 | //! should admit connections into a Pod. It watches cluster resources (Namespaces, Nodes, Pods,
5 | //! Servers, and ServerAuthorizations).
6 | 
7 | #![deny(warnings, rust_2018_idioms)]
8 | #![forbid(unsafe_code)]
9 | 
10 | mod authz;
11 | mod default_allow;
12 | mod lookup;
13 | mod namespace;
14 | mod node;
15 | mod pod;
16 | mod server;
17 | #[cfg(test)]
18 | mod tests;
19 | 
20 | pub use self::{default_allow::DefaultAllow, lookup::Reader};
21 | use self::{
22 |     default_allow::DefaultAllows,
23 |     namespace::{Namespace, NamespaceIndex},
24 |     node::NodeIndex,
25 |     server::SrvIndex,
26 | };
27 | use anyhow::{Context, Error};
28 | use polixy_controller_core::{InboundServer, IpNet};
29 | use polixy_controller_k8s_api::{self as k8s, ResourceExt};
30 | use std::sync::Arc;
31 | use tokio::{sync::watch, time};
32 | use tracing::{debug, instrument, warn};
33 | 
34 | /// Watches a server's configuration for server/authorization changes.
35 | type ServerRx = watch::Receiver<InboundServer>;
36 | 
37 | /// Publishes updates for a server's configuration for server/authorization changes.
38 | type ServerTx = watch::Sender<InboundServer>;
39 | 
40 | type ServerRxRx = watch::Receiver<ServerRx>;
41 | 
42 | /// Watches a pod port for a new `ServerRx`.
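///
/// (Together with `ServerRxRx`, this forms a watch of watches: rebinding a pod
/// port to a different `Server` swaps the inner `ServerRx` without re-registering
/// subscribers; see `lookup::Rx::into_stream`.)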
43 | type ServerRxTx = watch::Sender; 44 | 45 | pub fn index( 46 | watches: impl Into, 47 | ready: watch::Sender, 48 | cluster_networks: Vec, 49 | identity_domain: String, 50 | default_mode: DefaultAllow, 51 | detect_timeout: time::Duration, 52 | ) -> ( 53 | lookup::Reader, 54 | impl std::future::Future, 55 | ) { 56 | let (writer, reader) = lookup::pair(); 57 | 58 | // Watches Nodes, Pods, Servers, and Authorizations to update the lookup map 59 | // with an entry for each linkerd-injected pod. 60 | let idx = Index::new( 61 | writer, 62 | cluster_networks, 63 | identity_domain, 64 | default_mode, 65 | detect_timeout, 66 | ); 67 | let task = idx.index(watches.into(), ready); 68 | 69 | (reader, task) 70 | } 71 | 72 | struct Index { 73 | /// Holds per-namespace pod/server/authorization indexes. 74 | namespaces: NamespaceIndex, 75 | 76 | /// Cached Node IPs. 77 | nodes: NodeIndex, 78 | 79 | identity_domain: String, 80 | 81 | default_allows: DefaultAllows, 82 | 83 | lookups: lookup::Writer, 84 | } 85 | 86 | /// Selects servers for an authorization. 87 | #[derive(Clone, Debug, PartialEq, Eq)] 88 | enum ServerSelector { 89 | Name(String), 90 | Selector(Arc), 91 | } 92 | 93 | // === impl Index === 94 | 95 | impl Index { 96 | pub(crate) fn new( 97 | lookups: lookup::Writer, 98 | cluster_nets: Vec, 99 | identity_domain: String, 100 | default_allow: DefaultAllow, 101 | detect_timeout: time::Duration, 102 | ) -> Self { 103 | // Create a common set of receivers for all supported default policies. 104 | // 105 | // XXX We shouldn't spawn in the constructor if we can avoid it. Instead, it seems best if 106 | // we can avoid having to wire this into the pods at all and lazily bind the default policy 107 | // at discovery time? 108 | let default_allows = DefaultAllows::spawn(cluster_nets, detect_timeout); 109 | 110 | // Provide the cluster-wide default-allow policy to the namespace index so that it may be 111 | // used when a workload-level annotation is not set. 112 | let namespaces = NamespaceIndex::new(default_allow); 113 | 114 | Self { 115 | lookups, 116 | namespaces, 117 | identity_domain, 118 | default_allows, 119 | nodes: NodeIndex::default(), 120 | } 121 | } 122 | 123 | /// Drives indexing for all resource types. 124 | /// 125 | /// This is all driven on a single task, so it's not necessary for any of the indexing logic to 126 | /// worry about concurrent access for the internal indexing structures. 127 | /// 128 | /// All updates are atomically published to the shared `lookups` map after indexing occurs; but 129 | /// the indexing task is solely responsible for mutating it. 130 | #[instrument(skip(self, resources, ready_tx), fields(result))] 131 | pub(crate) async fn index( 132 | mut self, 133 | resources: k8s::ResourceWatches, 134 | ready_tx: watch::Sender, 135 | ) -> Error { 136 | let k8s::ResourceWatches { 137 | mut nodes_rx, 138 | mut pods_rx, 139 | mut servers_rx, 140 | mut authorizations_rx, 141 | } = resources; 142 | 143 | let mut ready = false; 144 | loop { 145 | let res = tokio::select! { 146 | // Track the kubelet IPs for all nodes. 
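            // (Each arm maps the `kube_runtime::watcher` events onto index
            // operations: `Applied` upserts, `Deleted` removes, and `Restarted`
            // resets state after a watch stream reconnects.)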
147 | up = nodes_rx.recv() => match up { 148 | k8s::Event::Applied(node) => self.apply_node(node).context("applying a node"), 149 | k8s::Event::Deleted(node) => self.delete_node(&node.name()).context("deleting a node"), 150 | k8s::Event::Restarted(nodes) => self.reset_nodes(nodes).context("resetting nodes"), 151 | }, 152 | 153 | up = pods_rx.recv() => match up { 154 | k8s::Event::Applied(pod) => self.apply_pod(pod).context("applying a pod"), 155 | k8s::Event::Deleted(pod) => self.delete_pod(pod).context("deleting a pod"), 156 | k8s::Event::Restarted(pods) => self.reset_pods(pods).context("resetting pods"), 157 | }, 158 | 159 | up = servers_rx.recv() => match up { 160 | k8s::Event::Applied(srv) => { 161 | self.apply_server(srv); 162 | Ok(()) 163 | } 164 | k8s::Event::Deleted(srv) => self.delete_server(srv).context("deleting a server"), 165 | k8s::Event::Restarted(srvs) => self.reset_servers(srvs).context("resetting servers"), 166 | }, 167 | 168 | up = authorizations_rx.recv() => match up { 169 | k8s::Event::Applied(authz) => self.apply_authz(authz).context("applying an authorization"), 170 | k8s::Event::Deleted(authz) => { 171 | self.delete_authz(authz); 172 | Ok(()) 173 | } 174 | k8s::Event::Restarted(authzs) => self.reset_authzs(authzs).context("resetting authorizations"), 175 | }, 176 | }; 177 | 178 | if let Err(error) = res { 179 | warn!(?error); 180 | } 181 | 182 | // Notify the readiness watch if readiness changes. 183 | let ready_now = nodes_rx.ready() 184 | && pods_rx.ready() 185 | && servers_rx.ready() 186 | && authorizations_rx.ready(); 187 | if ready != ready_now { 188 | let _ = ready_tx.send(ready_now); 189 | ready = ready_now; 190 | debug!(%ready); 191 | } 192 | } 193 | } 194 | } 195 | -------------------------------------------------------------------------------- /controller/k8s/index/src/lookup.rs: -------------------------------------------------------------------------------- 1 | use crate::{node::KubeletIps, ServerRxRx}; 2 | use anyhow::{anyhow, Result}; 3 | use dashmap::{mapref::entry::Entry, DashMap}; 4 | use polixy_controller_core::{ 5 | ClientAuthentication, ClientAuthorization, DiscoverInboundServer, InboundServer, 6 | InboundServerStream, NetworkMatch, 7 | }; 8 | use std::{collections::HashMap, net::IpAddr, sync::Arc}; 9 | 10 | #[derive(Debug, Default)] 11 | pub(crate) struct Writer(ByNs); 12 | 13 | #[derive(Clone, Debug)] 14 | pub struct Reader(ByNs); 15 | 16 | type ByNs = Arc>; 17 | type ByPod = DashMap; 18 | 19 | // Boxed to enforce immutability. 
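// (A pod's port map is built once in `Writer::set` and never mutated afterwards;
// lookups clone individual `Rx` handles out of it.)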
20 | type ByPort = Box>; 21 | 22 | pub(crate) fn pair() -> (Writer, Reader) { 23 | let by_ns = ByNs::default(); 24 | let w = Writer(by_ns.clone()); 25 | let r = Reader(by_ns); 26 | (w, r) 27 | } 28 | 29 | #[derive(Clone, Debug)] 30 | pub struct Rx { 31 | kubelet: KubeletIps, 32 | rx: ServerRxRx, 33 | } 34 | 35 | // === impl Writer === 36 | 37 | impl Writer { 38 | pub(crate) fn contains(&self, ns: impl AsRef, pod: impl AsRef) -> bool { 39 | self.0 40 | .get(ns.as_ref()) 41 | .map(|ns| ns.contains_key(pod.as_ref())) 42 | .unwrap_or(false) 43 | } 44 | 45 | pub(crate) fn set( 46 | &mut self, 47 | ns: impl ToString, 48 | pod: impl ToString, 49 | ports: impl IntoIterator, 50 | ) -> Result<()> { 51 | match self 52 | .0 53 | .entry(ns.to_string()) 54 | .or_default() 55 | .entry(pod.to_string()) 56 | { 57 | Entry::Vacant(entry) => { 58 | entry.insert(ports.into_iter().collect::>().into()); 59 | Ok(()) 60 | } 61 | Entry::Occupied(_) => Err(anyhow!( 62 | "pod {} already exists in namespace {}", 63 | pod.to_string(), 64 | ns.to_string() 65 | )), 66 | } 67 | } 68 | 69 | pub(crate) fn unset(&mut self, ns: impl AsRef, pod: impl AsRef) -> Result { 70 | let pods = self 71 | .0 72 | .get_mut(ns.as_ref()) 73 | .ok_or_else(|| anyhow!("missing namespace {}", ns.as_ref()))?; 74 | 75 | let (_, ports) = pods 76 | .remove(pod.as_ref()) 77 | .ok_or_else(|| anyhow!("missing pod {} in namespace {}", pod.as_ref(), ns.as_ref()))?; 78 | 79 | if (*pods).is_empty() { 80 | drop(pods); 81 | self.0.remove(ns.as_ref()).expect("namespace must exist"); 82 | } 83 | 84 | Ok(ports) 85 | } 86 | } 87 | 88 | // === impl Reader === 89 | 90 | impl Reader { 91 | #[inline] 92 | pub(crate) fn lookup(&self, ns: &str, pod: &str, port: u16) -> Option { 93 | self.0.get(ns)?.get(pod)?.get(&port).cloned() 94 | } 95 | } 96 | 97 | #[async_trait::async_trait] 98 | impl DiscoverInboundServer<(String, String, u16)> for Reader { 99 | async fn get_inbound_server( 100 | &self, 101 | (ns, pod, port): (String, String, u16), 102 | ) -> Result> { 103 | Ok(self.lookup(&*ns, &*pod, port).map(|rx| rx.get())) 104 | } 105 | 106 | async fn watch_inbound_server( 107 | &self, 108 | (ns, pod, port): (String, String, u16), 109 | ) -> Result> { 110 | Ok(self.lookup(&*ns, &*pod, port).map(|rx| rx.into_stream())) 111 | } 112 | } 113 | 114 | // === impl Rx === 115 | 116 | impl Rx { 117 | pub(crate) fn new(kubelet: KubeletIps, rx: ServerRxRx) -> Self { 118 | Self { kubelet, rx } 119 | } 120 | 121 | #[inline] 122 | fn mk_server(kubelet: &[IpAddr], mut inner: InboundServer) -> InboundServer { 123 | let networks = kubelet.iter().copied().map(NetworkMatch::from).collect(); 124 | let authz = ClientAuthorization { 125 | networks, 126 | authentication: ClientAuthentication::Unauthenticated, 127 | }; 128 | 129 | inner.authorizations.insert("_health_check".into(), authz); 130 | inner 131 | } 132 | 133 | pub(crate) fn get(&self) -> InboundServer { 134 | Self::mk_server(&*self.kubelet, (*(*self.rx.borrow()).borrow()).clone()) 135 | } 136 | 137 | pub(crate) fn into_stream(self) -> InboundServerStream { 138 | let kubelet = self.kubelet; 139 | let mut outer = self.rx; 140 | let mut inner = (*outer.borrow_and_update()).clone(); 141 | Box::pin(async_stream::stream! { 142 | let mut server = (*inner.borrow_and_update()).clone(); 143 | yield Self::mk_server(&*kubelet, server.clone()); 144 | 145 | loop { 146 | tokio::select! 
{ 147 | res = inner.changed() => match res { 148 | Ok(()) => { 149 | let s = (*inner.borrow()).clone(); 150 | if s != server { 151 | yield Self::mk_server(&*kubelet, s.clone()); 152 | server = s; 153 | } 154 | } 155 | Err(_) => {}, 156 | }, 157 | 158 | res = outer.changed() => match res { 159 | Ok(()) => { 160 | inner = (*outer.borrow()).clone(); 161 | let s = (*inner.borrow_and_update()).clone(); 162 | if s != server { 163 | yield Self::mk_server(&*kubelet, s.clone()); 164 | server = s; 165 | } 166 | } 167 | Err(_) => return, 168 | }, 169 | } 170 | } 171 | }) 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /controller/k8s/index/src/namespace.rs: -------------------------------------------------------------------------------- 1 | use crate::{authz::AuthzIndex, pod::PodIndex, server::SrvIndex, DefaultAllow}; 2 | use std::collections::HashMap; 3 | 4 | #[derive(Debug)] 5 | pub(crate) struct NamespaceIndex { 6 | pub index: HashMap<String, Namespace>, 7 | 8 | // The global default-allow policy. 9 | default_allow: DefaultAllow, 10 | } 11 | 12 | #[derive(Debug)] 13 | pub(crate) struct Namespace { 14 | /// Holds the global default-allow policy, which may be overridden per-workload. 15 | pub default_allow: DefaultAllow, 16 | 17 | pub pods: PodIndex, 18 | pub servers: SrvIndex, 19 | pub authzs: AuthzIndex, 20 | } 21 | 22 | // === impl NamespaceIndex === 23 | 24 | impl NamespaceIndex { 25 | pub fn new(default_allow: DefaultAllow) -> Self { 26 | Self { 27 | default_allow, 28 | index: HashMap::default(), 29 | } 30 | } 31 | 32 | pub fn get_or_default(&mut self, name: impl Into<String>) -> &mut Namespace { 33 | let default_allow = self.default_allow; 34 | self.index.entry(name.into()).or_insert_with(|| Namespace { 35 | default_allow, 36 | pods: PodIndex::default(), 37 | servers: SrvIndex::default(), 38 | authzs: AuthzIndex::default(), 39 | }) 40 | } 41 | 42 | pub fn iter(&self) -> impl Iterator<Item = (&String, &Namespace)> { 43 | self.index.iter() 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /controller/k8s/index/src/node.rs: -------------------------------------------------------------------------------- 1 | //! Node->Kubelet IP 2 | 3 | use crate::Index; 4 | use anyhow::{anyhow, Context, Result}; 5 | use polixy_controller_core::IpNet; 6 | use polixy_controller_k8s_api::{self as k8s, ResourceExt}; 7 | use std::{ 8 | collections::{hash_map::Entry as HashEntry, HashMap, HashSet}, 9 | net::IpAddr, 10 | sync::Arc, 11 | }; 12 | use tracing::{debug, instrument, trace, warn}; 13 | 14 | #[derive(Debug, Default)] 15 | pub(crate) struct NodeIndex { 16 | index: HashMap<String, State>, 17 | } 18 | 19 | #[derive(Debug)] 20 | enum State { 21 | Pending(HashMap<String, HashMap<String, k8s::Pod>>), 22 | Known(KubeletIps), 23 | } 24 | 25 | #[derive(Clone, Debug, Hash, PartialEq, Eq)] 26 | pub(crate) struct KubeletIps(Arc<[IpAddr]>); 27 | 28 | // === impl NodeIndex === 29 | 30 | impl NodeIndex { 31 | pub fn get_or_push_pending(&mut self, pod: k8s::Pod) -> Option<(k8s::Pod, KubeletIps)> { 32 | let node_name = pod.spec.as_ref()?.node_name.clone()?; 33 | match self.index.entry(node_name) { 34 | HashEntry::Occupied(mut entry) => match entry.get_mut() { 35 | State::Known(ips) => Some((pod, ips.clone())), 36 | State::Pending(pods) => { 37 | pods.entry(pod.namespace()?)
38 | .or_default() 39 | .insert(pod.name(), pod); 40 | None 41 | } 42 | }, 43 | HashEntry::Vacant(entry) => { 44 | let ns = pod.namespace()?; 45 | let name = pod.name(); 46 | entry.insert(State::Pending( 47 | Some((ns, Some((name, pod)).into_iter().collect())) 48 | .into_iter() 49 | .collect(), 50 | )); 51 | None 52 | } 53 | } 54 | } 55 | 56 | pub fn clear_pending_pod(&mut self, ns: &str, pod: &str) -> bool { 57 | for state in self.index.values_mut() { 58 | if let State::Pending(by_ns) = state { 59 | if let Some(pods) = by_ns.get_mut(ns) { 60 | if pods.remove(pod).is_some() { 61 | return true; 62 | } 63 | } 64 | } 65 | } 66 | 67 | false 68 | } 69 | 70 | pub fn clear_pending_pods(&mut self) { 71 | let pending_nodes = self 72 | .index 73 | .iter() 74 | .filter_map(|(node, state)| match state { 75 | State::Known(_) => None, 76 | State::Pending(_) => Some(node.clone()), 77 | }) 78 | .collect::<Vec<_>>(); 79 | 80 | for node in pending_nodes { 81 | self.index.remove(&node); 82 | } 83 | } 84 | } 85 | 86 | // === impl Index === 87 | 88 | impl Index { 89 | /// Tracks the kubelet IP for each node. 90 | /// 91 | /// As pods are created, we refer to the node->kubelet index to automatically allow traffic 92 | /// from the kubelet. 93 | #[instrument( 94 | skip(self, node), 95 | fields(name = ?node.metadata.name) 96 | )] 97 | pub fn apply_node(&mut self, node: k8s::Node) -> Result<()> { 98 | match self.nodes.index.entry(node.name()) { 99 | HashEntry::Vacant(entry) => { 100 | let ips = KubeletIps::try_from_node(node) 101 | .with_context(|| format!("failed to load kubelet IPs for {}", entry.key()))?; 102 | debug!(?ips, "Adding"); 103 | entry.insert(State::Known(ips)); 104 | Ok(()) 105 | } 106 | 107 | HashEntry::Occupied(mut entry) => { 108 | // If the node is already configured, ignore the update. 109 | if let State::Known(_) = entry.get() { 110 | trace!("Already existed"); 111 | return Ok(()); 112 | } 113 | 114 | // Otherwise, the update is replacing a set of pending pods. Update the state to the 115 | // known set of IPs and then apply all of the pending pods. 116 | let ips = KubeletIps::try_from_node(node) 117 | .with_context(|| format!("failed to load kubelet IPs for {}", entry.key()))?; 118 | debug!(?ips, "Adding"); 119 | let pods = match std::mem::replace(entry.get_mut(), State::Known(ips)) { 120 | State::Pending(pods) => pods, 121 | State::Known(_) => unreachable!("the node state must have been pending"), 122 | }; 123 | 124 | let mut result = Ok(()); 125 | for (_, by_ns) in pods.into_iter() { 126 | for (_, pod) in by_ns.into_iter() { 127 | if let Err(e) = self.apply_pod(pod) { 128 | result = Err(e); 129 | } 130 | } 131 | } 132 | result 133 | } 134 | } 135 | } 136 | 137 | #[instrument(skip(self))] 138 | pub fn delete_node(&mut self, name: &str) -> Result<()> { 139 | self.nodes 140 | .index 141 | .remove(name) 142 | .ok_or_else(|| anyhow!("node {} does not exist", name))?; 143 | debug!("Deleted"); 144 | Ok(()) 145 | } 146 | 147 | #[instrument(skip(self, nodes))] 148 | pub fn reset_nodes(&mut self, nodes: Vec<k8s::Node>) -> Result<()> { 149 | // Avoid rebuilding data for nodes that have not changed.
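// This works as a mark-and-sweep pass: every known node name is collected up
// front, names that reappear in the restart snapshot are struck from the set,
// and whatever remains afterwards is removed as defunct below.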
150 | let mut prior = self 151 | .nodes 152 | .index 153 | .iter() 154 | .filter_map(|(name, state)| match state { 155 | State::Known(_) => Some(name.clone()), 156 | State::Pending(_) => None, 157 | }) 158 | .collect::<HashSet<_>>(); 159 | 160 | let mut result = Ok(()); 161 | for node in nodes.into_iter() { 162 | let name = node.name(); 163 | if prior.remove(&name) { 164 | trace!(%name, "Already existed"); 165 | } else if let Err(error) = self.apply_node(node) { 166 | warn!(%name, %error, "Failed to apply node"); 167 | result = Err(error); 168 | } 169 | } 170 | 171 | for name in prior.into_iter() { 172 | debug!(?name, "Removing defunct node"); 173 | let removed = self.nodes.index.remove(&name).is_some(); 174 | debug_assert!(removed, "node must be removable"); 175 | if !removed { 176 | result = Err(anyhow!("node {} already removed", name)); 177 | } 178 | } 179 | 180 | result 181 | } 182 | } 183 | 184 | // === impl KubeletIps === 185 | 186 | impl std::ops::Deref for KubeletIps { 187 | type Target = [IpAddr]; 188 | 189 | fn deref(&self) -> &[IpAddr] { 190 | &*self.0 191 | } 192 | } 193 | 194 | impl KubeletIps { 195 | fn try_from_cidr(cidr: String) -> Result<IpAddr> { 196 | cidr.parse::<IpNet>() 197 | .with_context(|| format!("invalid CIDR {}", cidr))? 198 | .hosts() 199 | .next() 200 | .ok_or_else(|| anyhow!("pod CIDR network is empty")) 201 | } 202 | 203 | fn try_from_node(node: k8s::Node) -> Result<Self> { 204 | let spec = node.spec.ok_or_else(|| anyhow!("node missing spec"))?; 205 | 206 | let addrs = if spec.pod_cidrs.is_empty() { 207 | let cidr = spec 208 | .pod_cidr 209 | .ok_or_else(|| anyhow!("node missing pod_cidr"))?; 210 | let ip = Self::try_from_cidr(cidr)?; 211 | vec![ip] 212 | } else { 213 | spec.pod_cidrs 214 | .into_iter() 215 | .map(Self::try_from_cidr) 216 | .collect::<Result<Vec<_>>>()? 217 | }; 218 | 219 | Ok(Self(addrs.into())) 220 | } 221 | } 222 | -------------------------------------------------------------------------------- /controller/k8s/index/src/pod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | lookup, node::KubeletIps, DefaultAllow, Index, Namespace, NodeIndex, ServerRx, ServerRxTx, 3 | SrvIndex, 4 | }; 5 | use anyhow::{anyhow, Result}; 6 | use polixy_controller_k8s_api::{self as k8s, polixy, ResourceExt}; 7 | use std::collections::{hash_map::Entry as HashEntry, HashMap, HashSet}; 8 | use tokio::sync::watch; 9 | use tracing::{debug, instrument, trace, warn}; 10 | 11 | #[derive(Debug, Default)] 12 | pub(crate) struct PodIndex { 13 | index: HashMap<String, Pod>, 14 | } 15 | 16 | #[derive(Debug)] 17 | struct Pod { 18 | ports: PodPorts, 19 | labels: k8s::Labels, 20 | default_allow_rx: ServerRx, 21 | } 22 | 23 | #[derive(Debug, Default)] 24 | struct PodPorts { 25 | by_port: HashMap<u16, Port>, 26 | by_name: HashMap<String, Vec<u16>>, 27 | } 28 | 29 | #[derive(Debug)] 30 | struct Port { 31 | server_name: Option<String>, 32 | server_tx: ServerRxTx, 33 | } 34 | 35 | // === impl Index === 36 | 37 | impl Index { 38 | /// Builds a `Pod`, linking it with servers and nodes. 39 | #[instrument( 40 | skip(self, pod), 41 | fields( 42 | ns = ?pod.metadata.namespace, 43 | name = ?pod.metadata.name, 44 | ) 45 | )] 46 | pub(crate) fn apply_pod(&mut self, pod: k8s::Pod) -> Result<()> { 47 | let Namespace { 48 | default_allow, 49 | ref mut pods, 50 | ref mut servers, 51 | ..
} = self 53 | .namespaces 54 | .get_or_default(pod.namespace().expect("namespace must be set")); 55 | 56 | let default_allow = *default_allow; 57 | let allows = self.default_allows.clone(); 58 | let mk_default_allow = 59 | move |da: Option<DefaultAllow>| allows.get(da.unwrap_or(default_allow)); 60 | 61 | pods.apply( 62 | pod, 63 | &mut self.nodes, 64 | servers, 65 | &mut self.lookups, 66 | mk_default_allow, 67 | ) 68 | } 69 | 70 | #[instrument( 71 | skip(self, pod), 72 | fields( 73 | ns = ?pod.metadata.namespace, 74 | name = ?pod.metadata.name, 75 | ) 76 | )] 77 | pub(crate) fn delete_pod(&mut self, pod: k8s::Pod) -> Result<()> { 78 | let ns_name = pod.namespace().expect("namespace must be set"); 79 | let pod_name = pod.name(); 80 | self.rm_pod(ns_name.as_str(), pod_name.as_str()) 81 | } 82 | 83 | fn rm_pod(&mut self, ns: &str, pod: &str) -> Result<()> { 84 | if self.nodes.clear_pending_pod(ns, pod) { 85 | // If the pod was pending, it can't be in the main index. 86 | debug!("Cleared pending pod"); 87 | return Ok(()); 88 | } 89 | 90 | self.namespaces 91 | .index 92 | .get_mut(ns) 93 | .ok_or_else(|| anyhow!("namespace {} doesn't exist", ns))? 94 | .pods 95 | .index 96 | .remove(pod) 97 | .ok_or_else(|| anyhow!("pod {} doesn't exist", pod))?; 98 | 99 | self.lookups.unset(&ns, &pod)?; 100 | 101 | debug!("Removed pod"); 102 | 103 | Ok(()) 104 | } 105 | 106 | #[instrument(skip(self, pods))] 107 | pub(crate) fn reset_pods(&mut self, pods: Vec<k8s::Pod>) -> Result<()> { 108 | self.nodes.clear_pending_pods(); 109 | 110 | let mut prior_pods = self 111 | .namespaces 112 | .iter() 113 | .map(|(name, ns)| { 114 | let pods = ns.pods.index.keys().cloned().collect::<HashSet<_>>(); 115 | (name.clone(), pods) 116 | }) 117 | .collect::<HashMap<_, _>>(); 118 | 119 | let mut result = Ok(()); 120 | for pod in pods.into_iter() { 121 | let ns_name = pod.namespace().unwrap(); 122 | if let Some(ns) = prior_pods.get_mut(ns_name.as_str()) { 123 | ns.remove(pod.name().as_str()); 124 | } 125 | 126 | if let Err(error) = self.apply_pod(pod) { 127 | result = Err(error); 128 | } 129 | } 130 | 131 | for (ns, pods) in prior_pods.into_iter() { 132 | for pod in pods.into_iter() { 133 | if let Err(error) = self.rm_pod(ns.as_str(), pod.as_str()) { 134 | result = Err(error); 135 | } 136 | } 137 | } 138 | 139 | result 140 | } 141 | } 142 | 143 | // === impl PodIndex === 144 | 145 | impl PodIndex { 146 | fn apply( 147 | &mut self, 148 | pod: k8s::Pod, 149 | nodes: &mut NodeIndex, 150 | servers: &SrvIndex, 151 | lookups: &mut lookup::Writer, 152 | get_default_allow_rx: impl Fn(Option<DefaultAllow>) -> ServerRx, 153 | ) -> Result<()> { 154 | let ns_name = pod.namespace().expect("pod must have a namespace"); 155 | let pod_name = pod.name(); 156 | match self.index.entry(pod_name) { 157 | HashEntry::Vacant(pod_entry) => { 158 | // Lookup the pod's node's kubelet IP or stop processing the update. If the pod does 159 | // not yet have a node, it will be ignored. If the node isn't yet in the index, the 160 | // pod is saved to be processed later. 161 | let (pod, kubelet) = match nodes.get_or_push_pending(pod) { 162 | Some((pod, ips)) => (pod, ips), 163 | None => { 164 | debug!("Pod cannot yet be assigned to a Node"); 165 | return Ok(()); 166 | } 167 | }; 168 | 169 | let spec = pod.spec.ok_or_else(|| anyhow!("pod missing spec"))?; 170 | 171 | // Check the pod for a default-allow annotation. If it's set, use it; otherwise use 172 | // the default policy from the namespace or cluster.
We retain this value (and not 173 | // only the policy) so that we can more conveniently de-duplicate changes. 174 | let default_allow_rx = match DefaultAllow::from_annotation(&pod.metadata) { 175 | Ok(allow) => get_default_allow_rx(allow), 176 | Err(error) => { 177 | warn!(%error, "Ignoring invalid default-allow annotation"); 178 | get_default_allow_rx(None) 179 | } 180 | }; 181 | 182 | // Read the pod's ports and extract: 183 | // - `ServerTx`s to be linked against the server index; and 184 | // - lookup receivers to be returned to API clients. 185 | let (ports, pod_lookups) = 186 | Self::extract_ports(spec, default_allow_rx.clone(), kubelet); 187 | 188 | // Start tracking the pod's metadata so it can be linked against servers as they are 189 | // created. Immediately link the pod against the server index. 190 | let mut pod = Pod { 191 | default_allow_rx, 192 | labels: pod.metadata.labels.into(), 193 | ports, 194 | }; 195 | pod.link_servers(&servers); 196 | 197 | // The pod has been linked against servers and is registered for subsequent updates, 198 | // so make it discoverable to API clients. 199 | lookups 200 | .set(ns_name, pod_entry.key(), pod_lookups) 201 | .expect("pod must not already exist"); 202 | 203 | pod_entry.insert(pod); 204 | 205 | Ok(()) 206 | } 207 | 208 | HashEntry::Occupied(mut entry) => { 209 | debug_assert!( 210 | lookups.contains(&ns_name, entry.key()), 211 | "pod must exist in lookups" 212 | ); 213 | 214 | // Labels can be updated at runtime (even though that's kind of weird). If the 215 | // labels have changed, then we relink servers to pods in case label selections have 216 | // changed. 217 | let p = entry.get_mut(); 218 | if p.labels.as_ref() != &pod.metadata.labels { 219 | p.labels = pod.metadata.labels.into(); 220 | p.link_servers(&servers); 221 | } 222 | 223 | // Note that the default-allow annotation may not be changed at runtime. 224 | Ok(()) 225 | } 226 | } 227 | } 228 | 229 | /// Extracts port information from a pod spec.
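///
/// Only TCP ports are indexed (a missing protocol defaults to TCP), and a
/// duplicated `containerPort` is skipped with a debug log rather than
/// overwriting the first entry.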
230 | fn extract_ports( 231 | spec: k8s::PodSpec, 232 | server_rx: ServerRx, 233 | kubelet: KubeletIps, 234 | ) -> (PodPorts, HashMap<u16, lookup::Rx>) { 235 | let mut ports = PodPorts::default(); 236 | let mut lookups = HashMap::new(); 237 | 238 | for container in spec.containers.into_iter() { 239 | for p in container.ports.into_iter() { 240 | if p.protocol.map(|p| p == "TCP").unwrap_or(true) { 241 | let port = p.container_port as u16; 242 | if ports.by_port.contains_key(&port) { 243 | debug!(port, "Port duplicated"); 244 | continue; 245 | } 246 | 247 | let (server_tx, rx) = watch::channel(server_rx.clone()); 248 | let pod_port = Port { 249 | server_name: None, 250 | server_tx, 251 | }; 252 | 253 | trace!(%port, name = ?p.name, "Adding port"); 254 | if let Some(name) = p.name { 255 | ports.by_name.entry(name).or_default().push(port); 256 | } 257 | 258 | ports.by_port.insert(port, pod_port); 259 | lookups.insert(port, lookup::Rx::new(kubelet.clone(), rx)); 260 | } 261 | } 262 | } 263 | 264 | (ports, lookups) 265 | } 266 | 267 | pub(crate) fn link_servers(&mut self, servers: &SrvIndex) { 268 | for pod in self.index.values_mut() { 269 | pod.link_servers(&servers) 270 | } 271 | } 272 | 273 | pub(crate) fn reset_server(&mut self, name: &str) { 274 | for (pod_name, pod) in self.index.iter_mut() { 275 | let rx = pod.default_allow_rx.clone(); 276 | for (p, port) in pod.ports.by_port.iter_mut() { 277 | if port 278 | .server_name 279 | .as_ref() 280 | .map(|n| n == name) 281 | .unwrap_or(false) 282 | { 283 | debug!(pod = %pod_name, port = %p, "Removing server from pod"); 284 | port.server_name = None; 285 | port.server_tx 286 | .send(rx.clone()) 287 | .expect("pod config receiver must still be held"); 288 | } else { 289 | trace!(pod = %pod_name, port = %p, server = ?port.server_name, "Server does not match"); 290 | } 291 | } 292 | } 293 | } 294 | } 295 | 296 | // === impl Pod === 297 | 298 | impl Pod { 299 | /// Links this pod to servers (by label selector). 300 | // 301 | // XXX This doesn't properly reset a policy when a server is removed or de-selects a pod. 302 | fn link_servers(&mut self, servers: &SrvIndex) { 303 | let mut remaining_ports = self.ports.by_port.keys().copied().collect::<HashSet<_>>(); 304 | 305 | // Get all servers that match this pod. 306 | let matching = servers.iter_matching(self.labels.clone()); 307 | for (name, port_match, rx) in matching { 308 | // Get all pod ports that match this server. 309 | for p in self.ports.collect_port(&port_match).into_iter().flatten() { 310 | self.link_server_port(p, name, rx); 311 | remaining_ports.remove(&p); 312 | } 313 | } 314 | 315 | // Iterate through the ports that have not been matched to clear them. 316 | for p in remaining_ports.into_iter() { 317 | let port = self.ports.by_port.get_mut(&p).unwrap(); 318 | port.server_name = None; 319 | port.server_tx 320 | .send(self.default_allow_rx.clone()) 321 | .expect("pod config receiver must still be held"); 322 | } 323 | } 324 | 325 | fn link_server_port(&mut self, port: u16, name: &str, rx: &ServerRx) { 326 | let port = match self.ports.by_port.get_mut(&port) { 327 | Some(p) => p, 328 | None => return, 329 | }; 330 | 331 | // Either this port is using a default allow policy, and the server name is unset, 332 | // or multiple servers select this pod. If there's a conflict, we panic if the controller 333 | // is running in debug mode. In release mode, we log a warning and ignore the 334 | // conflicting server.
335 | if let Some(sn) = port.server_name.as_ref() { 336 | if sn != name { 337 | debug_assert!(false, "Pod port must not match multiple servers"); 338 | tracing::warn!("Pod port matches multiple servers: {} and {}", sn, name); 339 | } 340 | // If the name matched, there's no use in proceeding with a redundant update. 341 | return; 342 | } 343 | port.server_name = Some(name.to_string()); 344 | 345 | port.server_tx 346 | .send(rx.clone()) 347 | .expect("pod config receiver must be set"); 348 | debug!(server = %name, "Pod server updated"); 349 | } 350 | } 351 | 352 | // === impl PodPorts === 353 | 354 | impl PodPorts { 355 | /// Finds all ports on this pod that match a server's port reference. 356 | /// 357 | /// Numeric port matches will only return a single server, generally, while named port 358 | /// references may select an arbitrary number of server ports. 359 | fn collect_port(&self, port_match: &polixy::server::Port) -> Option<Vec<u16>> { 360 | match port_match { 361 | polixy::server::Port::Number(ref port) => Some(vec![*port]), 362 | polixy::server::Port::Name(ref name) => self.by_name.get(name).cloned(), 363 | } 364 | } 365 | } 366 | -------------------------------------------------------------------------------- /controller/k8s/index/src/server.rs: -------------------------------------------------------------------------------- 1 | use crate::{authz::AuthzIndex, Index, Namespace, ServerRx, ServerSelector, ServerTx}; 2 | use anyhow::{anyhow, bail, Result}; 3 | use polixy_controller_core::{ClientAuthorization, InboundServer, ProxyProtocol}; 4 | use polixy_controller_k8s_api::{self as k8s, polixy, ResourceExt}; 5 | use std::{ 6 | collections::{hash_map::Entry as HashEntry, BTreeMap, HashMap, HashSet}, 7 | sync::Arc, 8 | }; 9 | use tokio::{sync::watch, time}; 10 | use tracing::{debug, instrument, trace}; 11 | 12 | #[derive(Debug, Default)] 13 | pub(crate) struct SrvIndex { 14 | index: HashMap<String, Server>, 15 | } 16 | 17 | #[derive(Debug)] 18 | struct Server { 19 | meta: ServerMeta, 20 | authorizations: BTreeMap<String, ClientAuthorization>, 21 | rx: ServerRx, 22 | tx: ServerTx, 23 | } 24 | 25 | #[derive(Clone, Debug, PartialEq, Eq)] 26 | struct ServerMeta { 27 | labels: k8s::Labels, 28 | port: polixy::server::Port, 29 | pod_selector: Arc<k8s::labels::Selector>, 30 | protocol: ProxyProtocol, 31 | } 32 | 33 | // === impl SrvIndex === 34 | 35 | impl SrvIndex { 36 | pub fn add_authz(&mut self, name: &str, selector: &ServerSelector, authz: ClientAuthorization) { 37 | for (srv_name, srv) in self.index.iter_mut() { 38 | let matches = match selector { 39 | ServerSelector::Name(ref n) => n == srv_name, 40 | ServerSelector::Selector(ref s) => s.matches(&srv.meta.labels), 41 | }; 42 | if matches { 43 | debug!(server = %srv_name, authz = %name, "Adding authz to server"); 44 | srv.add_authz(name.to_string(), authz.clone()); 45 | } else { 46 | debug!(server = %srv_name, authz = %name, "Removing authz from server"); 47 | srv.remove_authz(name); 48 | } 49 | } 50 | } 51 | 52 | pub fn remove_authz(&mut self, name: &str) { 53 | for srv in self.index.values_mut() { 54 | srv.remove_authz(name); 55 | } 56 | } 57 | 58 | pub fn iter_matching( 59 | &self, 60 | labels: k8s::Labels, 61 | ) -> impl Iterator<Item = (&str, &polixy::server::Port, &ServerRx)> { 62 | self.index.iter().filter_map(move |(srv_name, server)| { 63 | let matches = server.meta.pod_selector.matches(&labels); 64 | trace!(server = %srv_name, %matches); 65 | if matches { 66 | Some((srv_name.as_str(), &server.meta.port, &server.rx)) 67 | } else { 68 | None 69 | } 70 | }) 71 | } 72 | 73 | /// Update the index with a server instance.
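///
/// A new server gets a fresh watch channel seeded with the authorizations
/// that already select it; an existing server only publishes a config update
/// when its labels or protocol actually changed.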
74 | fn apply(&mut self, srv: polixy::Server, ns_authzs: &AuthzIndex) { 75 | let srv_name = srv.name(); 76 | let port = srv.spec.port; 77 | let protocol = mk_protocol(srv.spec.proxy_protocol.as_ref()); 78 | 79 | match self.index.entry(srv_name) { 80 | HashEntry::Vacant(entry) => { 81 | let labels = k8s::Labels::from(srv.metadata.labels); 82 | let authzs = ns_authzs 83 | .filter_selected(entry.key(), labels.clone()) 84 | .map(|(n, a)| (n, a.clone())) 85 | .collect::<BTreeMap<_, _>>(); 86 | let meta = ServerMeta { 87 | labels, 88 | port, 89 | pod_selector: srv.spec.pod_selector.into(), 90 | protocol: protocol.clone(), 91 | }; 92 | debug!(authzs = ?authzs.keys()); 93 | let (tx, rx) = watch::channel(InboundServer { 94 | protocol, 95 | authorizations: authzs.clone(), 96 | }); 97 | entry.insert(Server { 98 | meta, 99 | rx, 100 | tx, 101 | authorizations: authzs, 102 | }); 103 | } 104 | 105 | HashEntry::Occupied(mut entry) => { 106 | // If something about the server changed, we need to update the config to reflect 107 | // the change. 108 | let new_labels = if entry.get().meta.labels.as_ref() != &srv.metadata.labels { 109 | Some(k8s::Labels::from(srv.metadata.labels)) 110 | } else { 111 | None 112 | }; 113 | 114 | let new_protocol = if entry.get().meta.protocol != protocol { 115 | Some(protocol) 116 | } else { 117 | None 118 | }; 119 | 120 | trace!(?new_labels, ?new_protocol); 121 | if new_labels.is_some() || new_protocol.is_some() { 122 | // NB: Only a single task applies server updates, so it's 123 | // okay to borrow a version, modify, and send it. We don't 124 | // need a lock because serialization is guaranteed. 125 | let mut config = entry.get().rx.borrow().clone(); 126 | 127 | if let Some(labels) = new_labels { 128 | let authzs = ns_authzs 129 | .filter_selected(entry.key(), labels.clone()) 130 | .map(|(n, a)| (n, a.clone())) 131 | .collect::<BTreeMap<_, _>>(); 132 | debug!(authzs = ?authzs.keys()); 133 | config.authorizations = authzs.clone(); 134 | entry.get_mut().meta.labels = labels; 135 | entry.get_mut().authorizations = authzs; 136 | } 137 | 138 | if let Some(protocol) = new_protocol { 139 | config.protocol = protocol.clone(); 140 | entry.get_mut().meta.protocol = protocol; 141 | } 142 | entry 143 | .get() 144 | .tx 145 | .send(config) 146 | .expect("server update must succeed"); 147 | } 148 | 149 | // If the pod/port selector didn't change, we don't need to 150 | // refresh the index.
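// (`Index::apply_server` still re-links pods unconditionally after this
// returns, so the early return below only avoids rewriting the stored
// selector and port.)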
151 | if *entry.get().meta.pod_selector == srv.spec.pod_selector 152 | && entry.get().meta.port == port 153 | { 154 | return; 155 | } 156 | 157 | entry.get_mut().meta.pod_selector = srv.spec.pod_selector.into(); 158 | entry.get_mut().meta.port = port; 159 | } 160 | } 161 | } 162 | } 163 | 164 | // === impl Server === 165 | 166 | impl Server { 167 | fn add_authz(&mut self, name: impl Into<String>, authz: ClientAuthorization) { 168 | debug!("Adding authorization to server"); 169 | self.authorizations.insert(name.into(), authz); 170 | let mut config = self.rx.borrow().clone(); 171 | config.authorizations = self.authorizations.clone(); 172 | self.tx.send(config).expect("config must send") 173 | } 174 | 175 | fn remove_authz(&mut self, name: &str) { 176 | if self.authorizations.remove(name).is_some() { 177 | debug!("Removing authorization from server"); 178 | let mut config = self.rx.borrow().clone(); 179 | config.authorizations = self.authorizations.clone(); 180 | self.tx.send(config).expect("config must send") 181 | } 182 | } 183 | } 184 | 185 | // === impl Index === 186 | 187 | impl Index { 188 | /// Builds a `Server`, linking it against authorizations and pod ports. 189 | #[instrument( 190 | skip(self, srv), 191 | fields( 192 | ns = ?srv.metadata.namespace, 193 | name = ?srv.metadata.name, 194 | ) 195 | )] 196 | pub(crate) fn apply_server(&mut self, srv: polixy::Server) { 197 | let ns_name = srv.namespace().expect("namespace must be set"); 198 | let Namespace { 199 | ref mut pods, 200 | ref mut authzs, 201 | ref mut servers, 202 | default_allow: _, 203 | } = self.namespaces.get_or_default(ns_name); 204 | 205 | servers.apply(srv, authzs); 206 | 207 | // If we've updated the server->pod selection, then we need to re-index 208 | // all pods and servers. 209 | pods.link_servers(servers); 210 | } 211 | 212 | #[instrument( 213 | skip(self, srv), 214 | fields( 215 | ns = ?srv.metadata.namespace, 216 | name = ?srv.metadata.name, 217 | ) 218 | )] 219 | pub(crate) fn delete_server(&mut self, srv: polixy::Server) -> Result<()> { 220 | let ns_name = srv.namespace().expect("servers must be namespaced"); 221 | self.rm_server(ns_name.as_str(), srv.name().as_str()) 222 | } 223 | 224 | fn rm_server(&mut self, ns_name: &str, srv_name: &str) -> Result<()> { 225 | let ns = 226 | self.namespaces.index.get_mut(ns_name).ok_or_else(|| { 227 | anyhow!("removing server from non-existent namespace {}", ns_name) 228 | })?; 229 | 230 | if ns.servers.index.remove(srv_name).is_none() { 231 | bail!("removing non-existent server {}", srv_name); 232 | } 233 | 234 | // Reset the server config for all pods that were using this server.
235 | ns.pods.reset_server(srv_name); 236 | 237 | debug!("Removed server"); 238 | Ok(()) 239 | } 240 | 241 | #[instrument(skip(self, srvs))] 242 | pub(crate) fn reset_servers(&mut self, srvs: Vec<polixy::Server>) -> Result<()> { 243 | let mut prior_servers = self 244 | .namespaces 245 | .index 246 | .iter() 247 | .map(|(n, ns)| { 248 | let servers = ns.servers.index.keys().cloned().collect::<HashSet<_>>(); 249 | (n.clone(), servers) 250 | }) 251 | .collect::<HashMap<_, _>>(); 252 | 253 | let mut result = Ok(()); 254 | for srv in srvs.into_iter() { 255 | let ns_name = srv.namespace().expect("namespace must be set"); 256 | if let Some(ns) = prior_servers.get_mut(&ns_name) { 257 | ns.remove(srv.name().as_str()); 258 | } 259 | 260 | self.apply_server(srv); 261 | } 262 | 263 | for (ns_name, ns_servers) in prior_servers.into_iter() { 264 | for srv_name in ns_servers.into_iter() { 265 | if let Err(e) = self.rm_server(ns_name.as_str(), &srv_name) { 266 | result = Err(e); 267 | } 268 | } 269 | } 270 | 271 | result 272 | } 273 | } 274 | 275 | fn mk_protocol(p: Option<&polixy::server::ProxyProtocol>) -> ProxyProtocol { 276 | match p { 277 | Some(polixy::server::ProxyProtocol::Unknown) | None => ProxyProtocol::Detect { 278 | timeout: time::Duration::from_secs(10), 279 | }, 280 | Some(polixy::server::ProxyProtocol::Http1) => ProxyProtocol::Http1, 281 | Some(polixy::server::ProxyProtocol::Http2) => ProxyProtocol::Http2, 282 | Some(polixy::server::ProxyProtocol::Grpc) => ProxyProtocol::Grpc, 283 | Some(polixy::server::ProxyProtocol::Opaque) => ProxyProtocol::Opaque, 284 | Some(polixy::server::ProxyProtocol::Tls) => ProxyProtocol::Tls, 285 | } 286 | } 287 | -------------------------------------------------------------------------------- /controller/src/admin.rs: -------------------------------------------------------------------------------- 1 | use futures::future; 2 | use hyper::{Body, Request, Response}; 3 | use std::net::SocketAddr; 4 | use tokio::sync::watch; 5 | use tracing::{info, instrument}; 6 | 7 | #[instrument(skip(ready))] 8 | pub async fn serve(addr: SocketAddr, ready: watch::Receiver<bool>) -> Result<(), hyper::Error> { 9 | let server = 10 | hyper::server::Server::bind(&addr).serve(hyper::service::make_service_fn(move |_conn| { 11 | let ready = ready.clone(); 12 | future::ok::<_, hyper::Error>(hyper::service::service_fn( 13 | move |req: hyper::Request<Body>| match req.uri().path() { 14 | "/ready" => future::ok(handle_ready(&ready, req)), 15 | _ => future::ok::<_, hyper::Error>( 16 | hyper::Response::builder() 17 | .status(hyper::StatusCode::NOT_FOUND) 18 | .body(hyper::Body::default()) 19 | .unwrap(), 20 | ), 21 | }, 22 | )) 23 | })); 24 | let addr = server.local_addr(); 25 | info!(%addr, "HTTP admin server listening"); 26 | server.await 27 | } 28 | 29 | fn handle_ready(ready: &watch::Receiver<bool>, req: Request<Body>) -> Response<Body> { 30 | match *req.method() { 31 | hyper::Method::GET | hyper::Method::HEAD => { 32 | if *ready.borrow() { 33 | Response::builder() 34 | .status(hyper::StatusCode::OK) 35 | .header(hyper::header::CONTENT_TYPE, "text/plain") 36 | .body("ready\n".into()) 37 | .unwrap() 38 | } else { 39 | Response::builder() 40 | .status(hyper::StatusCode::INTERNAL_SERVER_ERROR) 41 | .header(hyper::header::CONTENT_TYPE, "text/plain") 42 | .body("not ready\n".into()) 43 | .unwrap() 44 | } 45 | } 46 | _ => Response::builder() 47 | .status(hyper::StatusCode::METHOD_NOT_ALLOWED) 48 | .body(Body::default()) 49 | .unwrap(), 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /controller/src/lib.rs:
-------------------------------------------------------------------------------- 1 | #![deny(warnings, rust_2018_idioms)] 2 | #![forbid(unsafe_code)] 3 | 4 | pub mod admin; 5 | 6 | pub use polixy_controller_grpc as grpc; 7 | pub use polixy_controller_k8s_index as k8s; 8 | -------------------------------------------------------------------------------- /controller/src/main.rs: -------------------------------------------------------------------------------- 1 | #![deny(warnings, rust_2018_idioms)] 2 | #![forbid(unsafe_code)] 3 | 4 | use anyhow::{Context, Result}; 5 | use futures::{future, prelude::*}; 6 | use polixy_controller::k8s::DefaultAllow; 7 | use polixy_controller_core::IpNet; 8 | use std::net::SocketAddr; 9 | use structopt::StructOpt; 10 | use tokio::{sync::watch, time}; 11 | use tracing::{debug, info, instrument}; 12 | 13 | #[derive(Debug, StructOpt)] 14 | #[structopt(name = "polixy", about = "A policy resource prototype")] 15 | struct Args { 16 | #[structopt(short, long, default_value = "0.0.0.0:8080")] 17 | admin_addr: SocketAddr, 18 | 19 | #[structopt(short, long, default_value = "0.0.0.0:8090")] 20 | grpc_addr: SocketAddr, 21 | 22 | #[structopt(long, default_value = "cluster.local")] 23 | identity_domain: String, 24 | 25 | /// Network CIDRs of pod IPs. 26 | /// 27 | /// The default reflects k3d's default node network. 28 | #[structopt(long, default_value = "10.42.0.0/16")] 29 | cluster_networks: Vec<IpNet>, 30 | 31 | #[structopt(long, default_value = "all-unauthenticated")] 32 | default_allow: DefaultAllow, 33 | } 34 | 35 | #[tokio::main] 36 | async fn main() -> Result<()> { 37 | tracing_subscriber::fmt::init(); 38 | 39 | let Args { 40 | admin_addr, 41 | grpc_addr, 42 | identity_domain, 43 | cluster_networks, 44 | default_allow, 45 | } = Args::from_args(); 46 | 47 | let (drain_tx, drain_rx) = drain::channel(); 48 | 49 | let client = kube::Client::try_default() 50 | .await 51 | .context("failed to initialize kubernetes client")?; 52 | 53 | let (ready_tx, ready_rx) = watch::channel(false); 54 | let admin = tokio::spawn(polixy_controller::admin::serve(admin_addr, ready_rx)); 55 | 56 | const DETECT_TIMEOUT: time::Duration = time::Duration::from_secs(10); 57 | let (handle, index_task) = polixy_controller::k8s::index( 58 | client, 59 | ready_tx, 60 | cluster_networks, 61 | identity_domain, 62 | default_allow, 63 | DETECT_TIMEOUT, 64 | ); 65 | let index_task = tokio::spawn(index_task); 66 | 67 | let grpc = tokio::spawn(grpc(grpc_addr, handle, drain_rx)); 68 | 69 | tokio::select! { 70 | _ = shutdown(drain_tx) => Ok(()), 71 | res = grpc => match res { 72 | Ok(res) => res.context("grpc server failed"), 73 | Err(e) if e.is_cancelled() => Ok(()), 74 | Err(e) => Err(e).context("grpc server panicked"), 75 | }, 76 | res = index_task => match res { 77 | Ok(e) => Err(e).context("indexer failed"), 78 | Err(e) if e.is_cancelled() => Ok(()), 79 | Err(e) => Err(e).context("indexer panicked"), 80 | }, 81 | res = admin => match res { 82 | Ok(res) => res.context("admin server failed"), 83 | Err(e) if e.is_cancelled() => Ok(()), 84 | Err(e) => Err(e).context("admin server panicked"), 85 | }, 86 | } 87 | } 88 | 89 | #[instrument(skip(handle, drain))] 90 | async fn grpc( 91 | addr: SocketAddr, 92 | handle: polixy_controller_k8s_index::Reader, 93 | drain: drain::Watch, 94 | ) -> Result<()> { 95 | let server = polixy_controller_grpc::Server::new(handle, drain.clone()); 96 | let (close_tx, close_rx) = tokio::sync::oneshot::channel(); 97 | tokio::pin!
{ 98 | let srv = server.serve(addr, close_rx.map(|_| {})); 99 | } 100 | info!(%addr, "gRPC server listening"); 101 | tokio::select! { 102 | res = (&mut srv) => res?, 103 | handle = drain.signaled() => { 104 | let _ = close_tx.send(()); 105 | handle.release_after(srv).await? 106 | } 107 | } 108 | Ok(()) 109 | } 110 | 111 | async fn shutdown(drain: drain::Signal) { 112 | tokio::select! { 113 | _ = tokio::signal::ctrl_c() => { 114 | debug!("Received ctrl-c"); 115 | }, 116 | _ = sigterm() => { 117 | debug!("Received SIGTERM"); 118 | } 119 | } 120 | info!("Shutting down"); 121 | drain.drain().await; 122 | } 123 | 124 | async fn sigterm() { 125 | use tokio::signal::unix::{signal, SignalKind}; 126 | match signal(SignalKind::terminate()) { 127 | Ok(mut term) => term.recv().await, 128 | _ => future::pending().await, 129 | }; 130 | } 131 | -------------------------------------------------------------------------------- /default.nix: -------------------------------------------------------------------------------- 1 | { pkgs ? import <nixpkgs> { } }: 2 | with pkgs; 3 | 4 | buildEnv { 5 | name = "polixy-env"; 6 | paths = [ 7 | binutils 8 | cacert 9 | cargo-deny 10 | cargo-fuzz 11 | cargo-udeps 12 | cargo-watch 13 | clang 14 | clang-tools 15 | cmake 16 | cosign 17 | curl 18 | docker 19 | jq 20 | kubectl 21 | kube3d 22 | loc 23 | git 24 | (glibcLocales.override { locales = [ "en_US.UTF-8" ]; }) 25 | gnupg 26 | openssl 27 | pkg-config 28 | protobuf 29 | rustup 30 | shellcheck 31 | stdenv 32 | ]; 33 | 34 | passthru = with pkgs; { 35 | CARGO_TERM_COLOR = "always"; 36 | CURL_CA_BUNDLE = "${cacert}/etc/ssl/certs/ca-bundle.crt"; 37 | GIT_SSL_CAINFO = "${cacert}/etc/ssl/certs/ca-bundle.crt"; 38 | LOCALE_ARCHIVE = "${glibcLocales}/lib/locale/locale-archive"; 39 | LC_ALL = "en_US.UTF-8"; 40 | OPENSSL_DIR = "${openssl.dev}"; 41 | OPENSSL_LIB_DIR = "${openssl.out}/lib"; 42 | PROTOC = "${protobuf}/bin/protoc"; 43 | PROTOC_INCLUDE = "${protobuf}/include"; 44 | RUST_BACKTRACE = "full"; 45 | SSL_CERT_FILE = "${cacert}/etc/ssl/certs/ca-bundle.crt"; 46 | }; 47 | } 48 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | targets = [ 2 | { triple = "x86_64-unknown-linux-gnu" }, 3 | { triple = "aarch64-unknown-linux-gnu" }, 4 | { triple = "armv7-unknown-linux-gnu" }, 5 | ] 6 | 7 | [advisories] 8 | db-path = "~/.cargo/advisory-db" 9 | db-urls = ["https://github.com/rustsec/advisory-db"] 10 | vulnerability = "deny" 11 | unmaintained = "warn" 12 | yanked = "deny" 13 | notice = "warn" 14 | ignore = [] 15 | 16 | [licenses] 17 | unlicensed = "deny" 18 | allow = ["Apache-2.0", "BSD-3-Clause", "MIT"] 19 | deny = [] 20 | copyleft = "deny" 21 | allow-osi-fsf-free = "neither" 22 | default = "deny" 23 | confidence-threshold = 0.8 24 | exceptions = [] 25 | 26 | [bans] 27 | multiple-versions = "deny" 28 | # Wildcard dependencies are used for all workspace-local crates. 29 | wildcards = "allow" 30 | highlight = "all" 31 | deny = [] 32 | skip-tree = [] 33 | # Kube seems to pull in a bunch of old dependencies transitively.
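# Each entry below exempts that crate from the `multiple-versions = "deny"`
# check until its duplicate versions converge upstream.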
34 | skip = [ 35 | { name = "ansi_term" }, 36 | { name = "pin-project" }, 37 | { name = "pin-project-internal" }, 38 | { name = "strsim" }, 39 | ] 40 | 41 | [sources] 42 | unknown-registry = "deny" 43 | unknown-git = "deny" 44 | allow-registry = ["https://github.com/rust-lang/crates.io-index"] 45 | allow-git = [] 46 | 47 | [sources.allow-org] 48 | github = ["linkerd"] 49 | -------------------------------------------------------------------------------- /img/resources.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linkerd/polixy/25af9b5e1667b7ee7c503b55ea244b72b83eb8e4/img/resources.png -------------------------------------------------------------------------------- /k8s/client-access.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: polixy.linkerd.io/v1alpha1 3 | kind: Server 4 | metadata: 5 | namespace: polixy 6 | name: client-api 7 | labels: 8 | app.kubernetes.io/part-of: polixy 9 | app.kubernetes.io/name: client-api 10 | app.kubernetes.io/version: v0 11 | spec: 12 | podSelector: 13 | matchLabels: 14 | app.kubernetes.io/part-of: polixy 15 | app.kubernetes.io/name: client 16 | port: http-api 17 | proxyProtocol: HTTP/1 18 | --- 19 | apiVersion: polixy.linkerd.io/v1alpha1 20 | kind: ServerAuthorization 21 | metadata: 22 | namespace: polixy 23 | name: client-api 24 | labels: 25 | app.kubernetes.io/part-of: polixy 26 | app.kubernetes.io/name: client-api 27 | app.kubernetes.io/version: v0 28 | spec: 29 | server: 30 | name: client-api 31 | client: 32 | meshTLS: 33 | serviceAccounts: 34 | - name: default 35 | namespace: default 36 | -------------------------------------------------------------------------------- /k8s/client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: client 6 | namespace: polixy 7 | labels: 8 | app.kubernetes.io/name: client 9 | app.kubernetes.io/part-of: polixy 10 | spec: 11 | type: LoadBalancer 12 | ports: 13 | - name: http 14 | port: 8080 15 | targetPort: 8080 16 | selector: 17 | app.kubernetes.io/name: client 18 | app.kubernetes.io/part-of: polixy 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | labels: 24 | app.kubernetes.io/name: client 25 | app.kubernetes.io/part-of: polixy 26 | app.kubernetes.io/version: v0 27 | name: client 28 | namespace: polixy 29 | spec: 30 | replicas: 1 31 | selector: 32 | matchLabels: 33 | app.kubernetes.io/name: client 34 | app.kubernetes.io/part-of: polixy 35 | template: 36 | metadata: 37 | annotations: 38 | linkerd.io/inject: enabled 39 | labels: 40 | app.kubernetes.io/name: client 41 | app.kubernetes.io/part-of: polixy 42 | app.kubernetes.io/version: v0 43 | spec: 44 | containers: 45 | - name: main 46 | image: ghcr.io/olix0r/polixy-client:v0 47 | args: ["http-api"] 48 | env: 49 | - name: RUST_LOG 50 | value: polixy=trace,hyper=info,h2=info,tokio=info,debug 51 | - name: LISTEN_ADDR 52 | value: 0.0.0.0:8080 53 | - name: GRPC_ADDR 54 | value: http://controller.polixy:8090 55 | - name: NAMESPACE 56 | valueFrom: 57 | fieldRef: 58 | fieldPath: metadata.namespace 59 | - name: POD 60 | valueFrom: 61 | fieldRef: 62 | fieldPath: metadata.name 63 | - name: PORTS 64 | value: "8080" 65 | ports: 66 | - containerPort: 8080 67 | name: http-api 68 | resources: 69 | requests: 70 | cpu: 10m 71 | memory: 10Mi 72 | limits: 73 | cpu: 1000m 74 | memory: 100Mi 75 | 
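# A quick way to exercise the client API once this service is up; the URL
# path here is illustrative, not part of this manifest:
#
#   kubectl -n polixy port-forward svc/client 8080:8080 &
#   curl http://localhost:8080/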
-------------------------------------------------------------------------------- /k8s/controller/deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: controller 6 | namespace: polixy 7 | labels: 8 | app.kubernetes.io/name: controller 9 | app.kubernetes.io/part-of: polixy 10 | spec: 11 | ports: 12 | - name: grpc 13 | port: 8090 14 | targetPort: 8090 15 | selector: 16 | app.kubernetes.io/name: controller 17 | app.kubernetes.io/part-of: polixy 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | labels: 23 | app.kubernetes.io/name: controller 24 | app.kubernetes.io/part-of: polixy 25 | app.kubernetes.io/version: v0 26 | name: controller 27 | namespace: polixy 28 | spec: 29 | replicas: 1 30 | selector: 31 | matchLabels: 32 | app.kubernetes.io/name: controller 33 | app.kubernetes.io/part-of: polixy 34 | template: 35 | metadata: 36 | labels: 37 | app.kubernetes.io/name: controller 38 | app.kubernetes.io/part-of: polixy 39 | app.kubernetes.io/version: v0 40 | spec: 41 | serviceAccountName: controller 42 | containers: 43 | - name: main 44 | image: ghcr.io/olix0r/polixy-controller:v1 45 | env: 46 | - name: RUST_LOG 47 | value: polixy=trace,info 48 | args: 49 | - --admin-addr=0.0.0.0:8080 50 | - --grpc-addr=0.0.0.0:8090 51 | - --identity-domain=cluster.local 52 | - --cluster-networks=10.42.0.0/16 53 | - --default-allow=all-authenticated 54 | ports: 55 | - containerPort: 8080 56 | name: admin-http 57 | - containerPort: 8090 58 | name: grpc 59 | readinessProbe: 60 | httpGet: 61 | port: admin-http 62 | path: /ready 63 | livenessProbe: 64 | # Allow the controller to remain disconnected from the k8s API for 2 minutes before 65 | # failing the container. 66 | periodSeconds: 10 67 | failureThreshold: 12 68 | httpGet: 69 | port: admin-http 70 | path: /ready 71 | resources: 72 | requests: 73 | cpu: 10m 74 | memory: 10Mi 75 | limits: 76 | cpu: 1000m 77 | memory: 100Mi 78 | -------------------------------------------------------------------------------- /k8s/controller/kubeconfig.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Modified from https://gist.githubusercontent.com/innovia/fbba8259042f71db98ea8d4ad19bd708/raw/fd1c267488c2cf9aaecb541548e814cb41b23e03/kubernetes_add_service_account_kubeconfig.sh 4 | 5 | set -eu 6 | 7 | ns="polixy" 8 | sa="controller" 9 | 10 | # Extract metadata for the current context. 11 | context=$(kubectl config current-context) 12 | cluster=$(kubectl config view -o jsonpath="{.contexts[?(@.name ==\"${context}\")].context.cluster}") 13 | server=$(kubectl config view -o jsonpath="{.clusters[?(@.name == \"${cluster}\")].cluster.server}") 14 | 15 | user="${context}-${ns}-${sa}" 16 | 17 | dir="${1:-./target/${user}}" 18 | 19 | # If the kubeconfig already exists, don't regenerate it. 20 | if [ -f "$dir/config" ]; then 21 | echo "$dir/config" 22 | exit 23 | fi 24 | mkdir -p "$dir" 25 | 26 | # A helper to act on the new kubeconfig 27 | kconfig() { 28 | kubectl config --kubeconfig="${dir}/config" "$@" 29 | } 30 | 31 | ## Generate a Kubeconfig: 32 | ( 33 | secret=$(kubectl -n "${ns}" get sa "${sa}" -o json | jq -r '.secrets[].name') 34 | 35 | # Extract the cluster's CA certificate from the ServiceAccount secret and ensure 36 | # it's embedded in the kubeconfig's context.
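# (`@base64d` requires jq 1.6 or newer; on older jq, pipe through
# `base64 -d` instead.)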
37 | kubectl -n "${ns}" get secret "${secret}" -o json \ 38 | | jq -r '.data["ca.crt"] | @base64d' \ 39 | > "${dir}/ca.crt" 40 | kconfig set-cluster "${cluster}" \ 41 | --server="${server}" \ 42 | --certificate-authority="${dir}/ca.crt" \ 43 | --embed-certs=true \ 44 | 45 | # Embed the service account token credentials 46 | token=$(kubectl -n "${ns}" get secret "${secret}" -o json |jq -r '.data["token"] | @base64d') 47 | kconfig set-credentials "${user}" --token="${token}" 48 | 49 | # Create & activate a new context. 50 | kconfig set-context "${user}" \ 51 | --cluster="${cluster}" \ 52 | --user="${user}" \ 53 | --namespace="${ns}" 54 | kconfig use-context "${user}" 55 | ) >/dev/null 56 | 57 | echo "${dir}/config" 58 | -------------------------------------------------------------------------------- /k8s/controller/sa.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: polixy 6 | labels: 7 | app.kubernetes.io/part-of: polixy 8 | --- 9 | apiVersion: v1 10 | kind: ServiceAccount 11 | metadata: 12 | namespace: polixy 13 | name: controller 14 | labels: 15 | app.kubernetes.io/part-of: polixy 16 | app.kubernetes.io/name: controller 17 | --- 18 | apiVersion: rbac.authorization.k8s.io/v1 19 | kind: ClusterRole 20 | metadata: 21 | name: polixy-controller 22 | labels: 23 | app.kubernetes.io/part-of: polixy 24 | app.kubernetes.io/name: controller 25 | rules: 26 | - apiGroups: 27 | - "" 28 | resources: 29 | - nodes 30 | - pods 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - apiGroups: 36 | - polixy.linkerd.io 37 | resources: 38 | - servers 39 | - serverauthorizations 40 | verbs: 41 | - get 42 | - list 43 | - watch 44 | --- 45 | apiVersion: rbac.authorization.k8s.io/v1 46 | kind: ClusterRoleBinding 47 | metadata: 48 | name: polixy-controller 49 | labels: 50 | app.kubernetes.io/part-of: polixy 51 | app.kubernetes.io/name: controller 52 | roleRef: 53 | apiGroup: rbac.authorization.k8s.io 54 | kind: ClusterRole 55 | name: polixy-controller 56 | subjects: 57 | - kind: ServiceAccount 58 | name: controller 59 | namespace: polixy 60 | -------------------------------------------------------------------------------- /k8s/crds/authz.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: serverauthorizations.polixy.linkerd.io 6 | labels: 7 | polixy.linkerd.io/crd: authz 8 | spec: 9 | group: polixy.linkerd.io 10 | scope: Namespaced 11 | 12 | names: 13 | kind: ServerAuthorization 14 | plural: serverauthorizations 15 | singular: serverauthorization 16 | shortNames: [srvauthz] 17 | 18 | versions: 19 | - name: v1alpha1 20 | served: true 21 | storage: true 22 | 23 | additionalPrinterColumns: 24 | - jsonPath: .spec.server.name 25 | name: server 26 | type: string 27 | - jsonPath: .spec.client.networks[*] 28 | name: networks 29 | type: string 30 | - jsonPath: .spec.client.identities[*] 31 | name: identities 32 | type: string 33 | - jsonPath: .spec.client.serviceAccounts[*] 34 | name: service accounts 35 | type: string 36 | 37 | schema: 38 | openAPIV3Schema: 39 | type: object 40 | required: [spec] 41 | properties: 42 | spec: 43 | description: >- 44 | Authorizes clients to communicate with Linkerd-proxied servers. 
45 | 46 | type: object 47 | required: [server, client] 48 | properties: 49 | 50 | server: 51 | description: >- 52 | Identifies servers in the same namespace for which this 53 | authorization applies. 54 | 55 | Only one of `name` or `selector` may be specified. 56 | 57 | type: object 58 | oneOf: 59 | - required: [name] 60 | - required: [selector] 61 | properties: 62 | name: 63 | description: References a `Server` instance by name 64 | type: string 65 | pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' 66 | 67 | selector: 68 | description: >- 69 | A label query over servers on which this authorization applies. 70 | 71 | type: object 72 | oneOf: 73 | - required: [matchLabels] 74 | - required: [matchExpressions] 75 | properties: 76 | matchLabels: 77 | type: object 78 | x-kubernetes-preserve-unknown-fields: true 79 | matchExpressions: 80 | type: array 81 | items: 82 | type: object 83 | required: [key, operator, values] 84 | properties: 85 | key: 86 | type: string 87 | operator: 88 | type: string 89 | enum: [In, NotIn] 90 | values: 91 | type: array 92 | items: 93 | type: string 94 | 95 | client: 96 | description: Describes clients authorized to access a server. 97 | type: object 98 | oneOf: 99 | - required: [meshTLS] 100 | - required: [unauthenticated] 101 | properties: 102 | 103 | networks: 104 | description: >- 105 | Limits the client IP addresses to which this 106 | authorization applies. If unset, the server chooses a 107 | default (typically, all IPs or the cluster's pod 108 | network). 109 | type: array 110 | items: 111 | type: object 112 | required: [cidr] 113 | properties: 114 | cidr: 115 | type: string 116 | except: 117 | type: array 118 | items: 119 | type: string 120 | 121 | unauthenticated: 122 | description: >- 123 | Authorizes unauthenticated clients to access a server. 124 | type: boolean 125 | 126 | meshTLS: 127 | type: object 128 | oneOf: 129 | - required: [unauthenticatedTLS] 130 | - required: [identities] 131 | - required: [serviceAccounts] 132 | 133 | properties: 134 | unauthenticatedTLS: 135 | type: boolean 136 | description: >- 137 | Indicates that no client identity is required for 138 | communication. 139 | 140 | This is mostly important for the identity 141 | controller, which must terminate TLS connections 142 | from clients that do not yet have a certificate. 143 | 144 | identities: 145 | description: >- 146 | Authorizes clients with the provided proxy identity 147 | strings (as provided via MTLS) 148 | 149 | The `*` prefix can be used to match all identities in 150 | a domain. An identity string of `*` indicates that 151 | all authenticated clients are authorized. 152 | 153 | type: array 154 | items: 155 | type: string 156 | pattern: '^(\*|[a-z0-9]([-a-z0-9]*[a-z0-9])?)(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$' 157 | 158 | serviceAccounts: 159 | description: >- 160 | Authorizes clients with the provided proxy identity 161 | service accounts (as provided via MTLS) 162 | 163 | type: array 164 | items: 165 | type: object 166 | required: [name] 167 | properties: 168 | name: 169 | description: The ServiceAccount's name. 170 | type: string 171 | pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' 172 | 173 | namespace: 174 | description: >- 175 | The ServiceAccount's namespace. If unset, the 176 | authorization's namespace is used.
177 | type: string 178 | pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' 179 | 180 | #selector: 181 | # type: object 182 | # oneOf: 183 | # - required: [matchLabels] 184 | # - required: [matchExpressions] 185 | # properties: 186 | # matchLabels: 187 | # type: object 188 | # x-kubernetes-preserve-unknown-fields: true 189 | # matchExpressions: 190 | # type: array 191 | # items: 192 | # type: object 193 | # required: [key, operator, values] 194 | # properties: 195 | # key: 196 | # type: string 197 | # operator: 198 | # type: string 199 | # enum: [In, NotIn] 200 | # values: 201 | # type: array 202 | # items: 203 | # type: string 204 | -------------------------------------------------------------------------------- /k8s/crds/server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: servers.polixy.linkerd.io 6 | labels: 7 | polixy.linkerd.io/crd: srv 8 | spec: 9 | group: polixy.linkerd.io 10 | names: 11 | kind: Server 12 | plural: servers 13 | singular: server 14 | shortNames: [srv] 15 | scope: Namespaced 16 | versions: 17 | - name: v1alpha1 18 | served: true 19 | storage: true 20 | 21 | additionalPrinterColumns: 22 | - jsonPath: .spec.port 23 | name: port 24 | type: string 25 | - jsonPath: .spec.podSelector 26 | name: podSelector 27 | type: string 28 | 29 | schema: 30 | openAPIV3Schema: 31 | type: object 32 | required: [spec] 33 | properties: 34 | spec: 35 | type: object 36 | required: 37 | - podSelector 38 | - port 39 | properties: 40 | 41 | podSelector: 42 | type: object 43 | description: >- 44 | Selects pods in the same namespace. 45 | properties: 46 | matchLabels: 47 | type: object 48 | x-kubernetes-preserve-unknown-fields: true 49 | matchExpressions: 50 | type: array 51 | items: 52 | type: object 53 | required: [key, operator, values] 54 | properties: 55 | key: 56 | type: string 57 | operator: 58 | type: string 59 | enum: [In, NotIn] 60 | values: 61 | type: array 62 | items: 63 | type: string 64 | 65 | port: 66 | description: >- 67 | A port name or number. Must exist in a pod spec. 68 | x-kubernetes-int-or-string: true 69 | 70 | proxyProtocol: 71 | description: >- 72 | Configures protocol discovery for inbound connections. 73 | 74 | Supersedes the `config.linkerd.io/opaque-ports` annotation. 75 | type: string 76 | default: unknown 77 | enum: 78 | - unknown 79 | - HTTP/1 80 | - HTTP/2 81 | - gRPC 82 | - opaque 83 | - TLS 84 | -------------------------------------------------------------------------------- /k8s/emojivoto/emoji-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: polixy.linkerd.io/v1alpha1 3 | kind: Server 4 | metadata: 5 | namespace: emojivoto 6 | name: emoji-grpc 7 | labels: 8 | app.kubernetes.io/part-of: emojivoto 9 | app.kubernetes.io/name: emoji 10 | app.kubernetes.io/version: v11 11 | spec: 12 | podSelector: 13 | matchLabels: 14 | app.kubernetes.io/part-of: emojivoto 15 | app.kubernetes.io/name: emoji 16 | port: grpc 17 | proxyProtocol: gRPC 18 | --- 19 | apiVersion: polixy.linkerd.io/v1alpha1 20 | kind: ServerAuthorization 21 | metadata: 22 | namespace: emojivoto 23 | name: emoji-grpc 24 | labels: 25 | app.kubernetes.io/part-of: emojivoto 26 | app.kubernetes.io/name: emoji 27 | app.kubernetes.io/version: v11 28 | spec: 29 | # Allow all authenticated clients to access the (read-only) emoji service.
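  # (Identity strings support a leading `*` to match every identity under a
  # domain suffix, per the ServerAuthorization CRD above.)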
30 | server: 31 | name: emoji-grpc 32 | client: 33 | meshTLS: 34 | identities: 35 | - "*.emoji.serviceaccount.identity.linkerd.cluster.local" 36 | -------------------------------------------------------------------------------- /k8s/emojivoto/emoji.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: emoji 6 | namespace: emojivoto 7 | labels: 8 | app.kubernetes.io/part-of: emojivoto 9 | app.kubernetes.io/name: emoji 10 | --- 11 | apiVersion: v1 12 | kind: Service 13 | metadata: 14 | name: emoji 15 | namespace: emojivoto 16 | labels: 17 | app.kubernetes.io/part-of: emojivoto 18 | app.kubernetes.io/name: emoji 19 | app.kubernetes.io/version: v11 20 | spec: 21 | ports: 22 | - name: grpc 23 | port: 8080 24 | targetPort: 8080 25 | - name: prom 26 | port: 8801 27 | targetPort: 8801 28 | selector: 29 | app.kubernetes.io/part-of: emojivoto 30 | app.kubernetes.io/name: emoji 31 | --- 32 | apiVersion: apps/v1 33 | kind: Deployment 34 | metadata: 35 | labels: 36 | app.kubernetes.io/part-of: emojivoto 37 | app.kubernetes.io/name: emoji 38 | app.kubernetes.io/version: v11 39 | name: emoji 40 | namespace: emojivoto 41 | spec: 42 | replicas: 1 43 | selector: 44 | matchLabels: 45 | app.kubernetes.io/part-of: emojivoto 46 | app.kubernetes.io/name: emoji 47 | template: 48 | metadata: 49 | labels: 50 | app.kubernetes.io/part-of: emojivoto 51 | app.kubernetes.io/name: emoji 52 | app.kubernetes.io/version: v11 53 | spec: 54 | serviceAccountName: emoji 55 | containers: 56 | - name: main 57 | image: docker.l5d.io/buoyantio/emojivoto-emoji-svc:v11 58 | env: 59 | - name: GRPC_PORT 60 | value: "8080" 61 | - name: PROM_PORT 62 | value: "8801" 63 | ports: 64 | - containerPort: 8080 65 | name: grpc 66 | - containerPort: 8801 67 | name: prom 68 | resources: 69 | requests: 70 | cpu: 10m 71 | memory: 10Mi 72 | limits: 73 | cpu: 1000m 74 | memory: 100Mi 75 | -------------------------------------------------------------------------------- /k8s/emojivoto/ns.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: emojivoto 6 | labels: 7 | app.kubernetes.io/part-of: emojivoto 8 | app.kubernetes.io/version: v11 9 | annotations: 10 | linkerd.io/inject: enabled 11 | -------------------------------------------------------------------------------- /k8s/emojivoto/prom-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: polixy.linkerd.io/v1alpha1 3 | kind: Server 4 | metadata: 5 | namespace: emojivoto 6 | name: prom 7 | labels: 8 | app.kubernetes.io/part-of: emojivoto 9 | app.kubernetes.io/version: v11 10 | spec: 11 | port: prom 12 | podSelector: 13 | matchLabels: 14 | app.kubernetes.io/part-of: emojivoto 15 | proxyProtocol: HTTP/1 16 | --- 17 | apiVersion: polixy.linkerd.io/v1alpha1 18 | kind: ServerAuthorization 19 | metadata: 20 | namespace: emojivoto 21 | name: prom-prometheus 22 | labels: 23 | app.kubernetes.io/part-of: emojivoto 24 | app.kubernetes.io/version: v11 25 | spec: 26 | server: 27 | name: prom 28 | client: 29 | meshTLS: 30 | serviceAccounts: 31 | - namespace: linkerd-viz 32 | name: prometheus 33 | -------------------------------------------------------------------------------- /k8s/emojivoto/vote-bot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | 
metadata: 5 | name: vote-bot 6 | namespace: emojivoto 7 | labels: 8 | app.kubernetes.io/name: vote-bot 9 | app.kubernetes.io/part-of: emojivoto 10 | --- 11 | apiVersion: apps/v1 12 | kind: Deployment 13 | metadata: 14 | name: vote-bot 15 | namespace: emojivoto 16 | labels: 17 | app.kubernetes.io/name: vote-bot 18 | app.kubernetes.io/part-of: emojivoto 19 | app.kubernetes.io/version: v11 20 | spec: 21 | replicas: 1 22 | selector: 23 | matchLabels: 24 | app.kubernetes.io/name: vote-bot 25 | app.kubernetes.io/part-of: emojivoto 26 | template: 27 | metadata: 28 | labels: 29 | app.kubernetes.io/name: vote-bot 30 | app.kubernetes.io/part-of: emojivoto 31 | app.kubernetes.io/version: v11 32 | spec: 33 | serviceAccountName: vote-bot 34 | containers: 35 | - name: main 36 | image: docker.l5d.io/buoyantio/emojivoto-web:v11 37 | command: [emojivoto-vote-bot] 38 | env: 39 | - name: WEB_HOST 40 | value: web:80 41 | resources: 42 | requests: 43 | cpu: 10m 44 | memory: 10Mi 45 | limits: 46 | cpu: 1000m 47 | memory: 100Mi 48 | 49 | -------------------------------------------------------------------------------- /k8s/emojivoto/voting-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: polixy.linkerd.io/v1alpha1 3 | kind: Server 4 | metadata: 5 | namespace: emojivoto 6 | name: voting-grpc 7 | labels: 8 | app.kubernetes.io/part-of: emojivoto 9 | app.kubernetes.io/name: voting 10 | app.kubernetes.io/version: v11 11 | spec: 12 | podSelector: 13 | matchLabels: 14 | app.kubernetes.io/part-of: emojivoto 15 | app.kubernetes.io/name: voting 16 | port: grpc 17 | proxyProtocol: gRPC 18 | --- 19 | apiVersion: polixy.linkerd.io/v1alpha1 20 | kind: ServerAuthorization 21 | metadata: 22 | namespace: emojivoto 23 | name: voting-grpc 24 | labels: 25 | app.kubernetes.io/part-of: emojivoto 26 | app.kubernetes.io/name: voting 27 | app.kubernetes.io/version: v11 28 | spec: 29 | server: 30 | name: voting-grpc 31 | # The voting service only allows requests from the web service. 
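# A serviceAccounts entry that omits `namespace` refers to this resource's own # namespace (emojivoto), i.e. the `web` service account defined in web.yml.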
32 | client: 33 | meshTLS: 34 | serviceAccounts: 35 | - name: web 36 | -------------------------------------------------------------------------------- /k8s/emojivoto/voting.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: voting 6 | namespace: emojivoto 7 | labels: 8 | app.kubernetes.io/name: voting 9 | app.kubernetes.io/part-of: emojivoto 10 | --- 11 | apiVersion: v1 12 | kind: Service 13 | metadata: 14 | name: voting 15 | namespace: emojivoto 16 | labels: 17 | app.kubernetes.io/name: voting 18 | app.kubernetes.io/part-of: emojivoto 19 | spec: 20 | ports: 21 | - name: grpc 22 | port: 8080 23 | targetPort: 8080 24 | - name: prom 25 | port: 8801 26 | targetPort: 8801 27 | selector: 28 | app.kubernetes.io/name: voting 29 | app.kubernetes.io/part-of: emojivoto 30 | --- 31 | apiVersion: apps/v1 32 | kind: Deployment 33 | metadata: 34 | labels: 35 | app.kubernetes.io/name: voting 36 | app.kubernetes.io/part-of: emojivoto 37 | app.kubernetes.io/version: v11 38 | name: voting 39 | namespace: emojivoto 40 | spec: 41 | replicas: 1 42 | selector: 43 | matchLabels: 44 | app.kubernetes.io/name: voting 45 | app.kubernetes.io/part-of: emojivoto 46 | template: 47 | metadata: 48 | labels: 49 | app.kubernetes.io/name: voting 50 | app.kubernetes.io/part-of: emojivoto 51 | app.kubernetes.io/version: v11 52 | spec: 53 | serviceAccountName: voting 54 | containers: 55 | - name: main 56 | image: docker.l5d.io/buoyantio/emojivoto-voting-svc:v11 57 | env: 58 | - name: GRPC_PORT 59 | value: "8080" 60 | - name: PROM_PORT 61 | value: "8801" 62 | ports: 63 | - containerPort: 8080 64 | name: grpc 65 | - containerPort: 8801 66 | name: prom 67 | resources: 68 | requests: 69 | cpu: 10m 70 | memory: 10Mi 71 | limits: 72 | cpu: 1000m 73 | memory: 100Mi 74 | -------------------------------------------------------------------------------- /k8s/emojivoto/web-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: polixy.linkerd.io/v1alpha1 3 | kind: Server 4 | metadata: 5 | namespace: emojivoto 6 | name: web-http 7 | labels: 8 | app.kubernetes.io/part-of: emojivoto 9 | app.kubernetes.io/name: web 10 | app.kubernetes.io/version: v11 11 | spec: 12 | podSelector: 13 | matchLabels: 14 | app.kubernetes.io/part-of: emojivoto 15 | app.kubernetes.io/name: web 16 | port: http 17 | proxyProtocol: HTTP/1 18 | --- 19 | apiVersion: polixy.linkerd.io/v1alpha1 20 | kind: ServerAuthorization 21 | metadata: 22 | namespace: emojivoto 23 | name: web-public 24 | labels: 25 | app.kubernetes.io/part-of: emojivoto 26 | app.kubernetes.io/name: web 27 | app.kubernetes.io/version: v11 28 | spec: 29 | server: 30 | name: web-http 31 | # Allow all clients to access the web HTTP port without regard for 32 | # authentication. If unauthenticated connections are permitted, there is no 33 | # need to describe authenticated clients. 
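# The CIDRs below (0.0.0.0/0 and ::/0) match every IPv4 and IPv6 source address, # so the web frontend is reachable from any client, meshed or not.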
34 | client: 35 | unauthenticated: true 36 | networks: 37 | - cidr: 0.0.0.0/0 38 | - cidr: ::/0 39 | -------------------------------------------------------------------------------- /k8s/emojivoto/web.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: web 6 | namespace: emojivoto 7 | labels: 8 | app.kubernetes.io/name: web 9 | app.kubernetes.io/part-of: emojivoto 10 | --- 11 | apiVersion: v1 12 | kind: Service 13 | metadata: 14 | name: web 15 | namespace: emojivoto 16 | labels: 17 | app.kubernetes.io/name: web 18 | app.kubernetes.io/part-of: emojivoto 19 | spec: 20 | type: ClusterIP 21 | ports: 22 | - name: http 23 | port: 80 24 | targetPort: 8080 25 | selector: 26 | app.kubernetes.io/name: web 27 | app.kubernetes.io/part-of: emojivoto 28 | --- 29 | apiVersion: apps/v1 30 | kind: Deployment 31 | metadata: 32 | labels: 33 | app.kubernetes.io/name: web 34 | app.kubernetes.io/part-of: emojivoto 35 | app.kubernetes.io/version: v11 36 | name: web 37 | namespace: emojivoto 38 | spec: 39 | replicas: 1 40 | selector: 41 | matchLabels: 42 | app.kubernetes.io/name: web 43 | app.kubernetes.io/part-of: emojivoto 44 | template: 45 | metadata: 46 | labels: 47 | app.kubernetes.io/name: web 48 | app.kubernetes.io/part-of: emojivoto 49 | app.kubernetes.io/version: v11 50 | spec: 51 | serviceAccountName: web 52 | containers: 53 | - name: main 54 | image: docker.l5d.io/buoyantio/emojivoto-web:v11 55 | env: 56 | - name: WEB_PORT 57 | value: "8080" 58 | - name: EMOJISVC_HOST 59 | value: emoji:8080 60 | - name: VOTINGSVC_HOST 61 | value: voting:8080 62 | - name: INDEX_BUNDLE 63 | value: dist/index_bundle.js 64 | ports: 65 | - containerPort: 8080 66 | name: http 67 | resources: 68 | requests: 69 | cpu: 10m 70 | memory: 10Mi 71 | limits: 72 | cpu: 1000m 73 | memory: 100Mi 74 | -------------------------------------------------------------------------------- /k8s/linkerd/destination.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: polixy.linkerd.io/v1alpha1 3 | kind: Server 4 | metadata: 5 | namespace: linkerd 6 | name: linkerd-destination-grpc 7 | spec: 8 | port: 8086 9 | proxyProtocol: gRPC 10 | podSelector: 11 | matchLabels: 12 | linkerd.io/control-plane-ns: linkerd 13 | linkerd.io/control-plane-component: destination 14 | --- 15 | apiVersion: polixy.linkerd.io/v1alpha1 16 | kind: ServerAuthorization 17 | metadata: 18 | namespace: linkerd 19 | name: linkerd-destination-grpc 20 | spec: 21 | server: 22 | name: linkerd-destination-grpc 23 | client: 24 | meshTLS: 25 | identities: ["*"] 26 | -------------------------------------------------------------------------------- /k8s/linkerd/identity.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: polixy.linkerd.io/v1alpha1 3 | kind: Server 4 | metadata: 5 | namespace: linkerd 6 | name: linkerd-identity-grpc 7 | spec: 8 | port: 8080 9 | proxyProtocol: gRPC 10 | podSelector: 11 | matchLabels: 12 | linkerd.io/control-plane-ns: linkerd 13 | linkerd.io/control-plane-component: identity 14 | --- 15 | apiVersion: polixy.linkerd.io/v1alpha1 16 | kind: ServerAuthorization 17 | metadata: 18 | namespace: linkerd 19 | name: linkerd-identity-grpc 20 | spec: 21 | server: 22 | name: linkerd-identity-grpc 23 | client: 24 | meshTLS: 25 | unauthenticatedTLS: true 26 | -------------------------------------------------------------------------------- 
/k8s/linkerd/proxy-admin.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: polixy.linkerd.io/v1alpha1 3 | kind: Server 4 | metadata: 5 | namespace: linkerd 6 | name: linkerd-admin 7 | labels: 8 | viz.linkerd.io/prometheus: allow 9 | spec: 10 | port: linkerd-admin 11 | proxyProtocol: HTTP/1 12 | podSelector: 13 | matchLabels: 14 | linkerd.io/control-plane-ns: linkerd 15 | --- 16 | apiVersion: polixy.linkerd.io/v1alpha1 17 | kind: Server 18 | metadata: 19 | namespace: linkerd 20 | name: admin-http 21 | labels: 22 | viz.linkerd.io/prometheus: allow 23 | spec: 24 | port: admin-http 25 | proxyProtocol: HTTP/1 26 | podSelector: 27 | matchExpressions: 28 | - key: linkerd.io/control-plane-component 29 | operator: NotIn 30 | values: [] 31 | --- 32 | apiVersion: polixy.linkerd.io/v1alpha1 33 | kind: ServerAuthorization 34 | metadata: 35 | namespace: linkerd 36 | name: admin-prometheus 37 | spec: 38 | server: 39 | selector: 40 | matchLabels: 41 | viz.linkerd.io/prometheus: allow 42 | client: 43 | meshTLS: 44 | serviceAccounts: 45 | - namespace: linkerd-viz 46 | name: prometheus 47 | -------------------------------------------------------------------------------- /k8s/linkerd/proxy-injector.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: polixy.linkerd.io/v1alpha1 3 | kind: Server 4 | metadata: 5 | namespace: linkerd 6 | name: linkerd-proxy-injector-https 7 | spec: 8 | port: 8443 9 | proxyProtocol: TLS 10 | podSelector: 11 | matchLabels: 12 | linkerd.io/control-plane-ns: linkerd 13 | linkerd.io/control-plane-component: proxy-injector 14 | --- 15 | apiVersion: polixy.linkerd.io/v1alpha1 16 | kind: ServerAuthorization 17 | metadata: 18 | namespace: linkerd 19 | name: linkerd-proxy-injector-https 20 | spec: 21 | server: 22 | name: linkerd-proxy-injector-https 23 | client: 24 | unauthenticated: true 25 | -------------------------------------------------------------------------------- /k8s/linkerd/sp-validator.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: polixy.linkerd.io/v1alpha1 3 | kind: Server 4 | metadata: 5 | namespace: linkerd 6 | name: linkerd-sp-validator-https 7 | spec: 8 | port: 8443 9 | proxyProtocol: TLS 10 | podSelector: 11 | matchLabels: 12 | linkerd.io/control-plane-ns: linkerd 13 | linkerd.io/control-plane-component: destination 14 | --- 15 | apiVersion: polixy.linkerd.io/v1alpha1 16 | kind: ServerAuthorization 17 | metadata: 18 | namespace: linkerd 19 | name: linkerd-sp-validator-https 20 | spec: 21 | server: 22 | name: linkerd-sp-validator-https 23 | client: 24 | unauthenticated: true 25 | # TODO limit to pod networks. 26 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | 1.53.0 2 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | scope@{ pkgs ? 
import <nixpkgs> { } }: 2 | with pkgs; 3 | 4 | let env = (import ./default.nix scope); 5 | 6 | in mkShell { 7 | CARGO_TERM_COLOR = "always"; 8 | CURL_CA_BUNDLE = "${cacert}/etc/ssl/certs/ca-bundle.crt"; 9 | GIT_SSL_CAINFO = "${cacert}/etc/ssl/certs/ca-bundle.crt"; 10 | LC_ALL = "en_US.UTF-8"; 11 | LOCALE_ARCHIVE = "${glibcLocales}/lib/locale/locale-archive"; 12 | OPENSSL_DIR = "${openssl.dev}"; 13 | OPENSSL_LIB_DIR = "${openssl.out}/lib"; 14 | PROTOC = "${protobuf}/bin/protoc"; 15 | PROTOC_INCLUDE = "${protobuf}/include"; 16 | RUST_BACKTRACE = "1"; 17 | SSL_CERT_FILE = "${cacert}/etc/ssl/certs/ca-bundle.crt"; 18 | 19 | buildInputs = [ (import ./default.nix { inherit pkgs; }) ]; 20 | } 21 | --------------------------------------------------------------------------------