├── .github ├── semantic.yml └── workflows │ ├── e2e-tests.yml │ ├── lint-checker.yml │ ├── spell-checker.yml │ └── unit-tests.yml ├── .gitignore ├── .markdownlint.yml ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── api ├── base.proto ├── route.proto └── upstream.proto ├── cmd ├── cmd.go ├── iptables │ ├── cleanup-iptables_test.go │ ├── clenaup-iptables.go │ ├── iptables.go │ └── iptables_test.go ├── precheck │ ├── precheck.go │ └── precheck_test.go ├── sidecar │ └── sidecar.go └── version │ └── version.go ├── docs ├── design.md ├── development.md ├── etcdv3-api-mimicking.md ├── examples │ └── tiny-service-mesh-scnario.md ├── how-it-works.md ├── images │ ├── apisix-mesh-overview.png │ ├── data-plane-overview.png │ └── the-internal-of-apisix-mesh-agent.png ├── istio-mesh.md ├── the-internal-of-apisix-mesh-agent.md └── traffic-interception.md ├── e2e ├── README.md ├── charts │ └── istio │ │ ├── base │ │ ├── Chart.yaml │ │ ├── NOTES.txt │ │ ├── crds │ │ │ ├── crd-all.gen.yaml │ │ │ └── crd-operator.yaml │ │ ├── files │ │ │ └── gen-istio-cluster.yaml │ │ ├── kustomization.yaml │ │ ├── templates │ │ │ ├── clusterrole.yaml │ │ │ ├── clusterrolebinding.yaml │ │ │ ├── crds.yaml │ │ │ ├── endpoints.yaml │ │ │ ├── role.yaml │ │ │ ├── rolebinding.yaml │ │ │ ├── serviceaccount.yaml │ │ │ ├── services.yaml │ │ │ └── validatingwebhookconfiguration.yaml │ │ └── values.yaml │ │ └── istio-discovery │ │ ├── Chart.yaml │ │ ├── NOTES.txt │ │ ├── files │ │ ├── gen-istio.yaml │ │ └── injection-template.yaml │ │ ├── kustomization.yaml │ │ ├── templates │ │ ├── autoscale.yaml │ │ ├── configmap-jwks.yaml │ │ ├── configmap.yaml │ │ ├── deployment.yaml │ │ ├── istiod-injector-configmap.yaml │ │ ├── mutatingwebhook.yaml │ │ ├── poddisruptionbudget.yaml │ │ ├── service.yaml │ │ ├── telemetryv2_1.8.yaml │ │ └── telemetryv2_1.9.yaml │ │ └── values.yaml ├── e2e_test.go ├── framework │ ├── controlplane │ │ ├── controlplane.go │ │ └── istio.go │ ├── framework.go │ ├── httpbin.go │ ├── k8s.go │ ├── nginx.go │ └── springboard.go ├── go.mod ├── go.sum └── suites │ └── proxy.go ├── go.mod ├── go.sum ├── main.go ├── manifests └── istio │ └── injection-template.yaml ├── nginx └── patches │ └── nginx-1.19.3-connection-original-dst.patch ├── pkg ├── adaptor │ └── xds │ │ └── v3 │ │ ├── cluster.go │ │ ├── cluster_test.go │ │ ├── listener.go │ │ ├── listener_test.go │ │ ├── route.go │ │ ├── route_test.go │ │ └── types.go ├── apisix │ ├── doc.go │ ├── route.go │ ├── route_test.go │ ├── upstream.go │ └── upstream_test.go ├── cache │ ├── doc.go │ ├── route.go │ ├── route_test.go │ ├── types.go │ ├── types_test.go │ ├── upstream.go │ └── upstream_test.go ├── config │ ├── types.go │ └── types_test.go ├── etcdv3 │ ├── conformance.go │ ├── conformance_test.go │ ├── etcd.go │ ├── etcd_test.go │ ├── kv.go │ ├── kv_test.go │ ├── watch.go │ └── watch_test.go ├── id │ ├── idgen.go │ └── idgen_test.go ├── log │ ├── default_logger.go │ ├── default_logger_test.go │ ├── logger.go │ ├── logger_test.go │ └── options.go ├── provisioner │ ├── types.go │ ├── util │ │ ├── manifest.go │ │ ├── manifest_test.go │ │ ├── util.go │ │ └── util_test.go │ └── xds │ │ └── v3 │ │ ├── file │ │ ├── delivery.go │ │ ├── delivery_test.go │ │ ├── testdata │ │ │ ├── cluster.json │ │ │ └── route.json │ │ ├── types.go │ │ └── types_test.go │ │ └── grpc │ │ ├── delivery.go │ │ ├── delivery_test.go │ │ ├── types.go │ │ └── types_test.go ├── set │ ├── string.go │ └── string_test.go ├── sidecar │ ├── apisix.go │ ├── apisix │ │ └── config.yaml │ ├── 
apisix_test.go │ ├── cachereflection.go │ ├── cachereflection_test.go │ ├── testdata │ │ └── cluster.json │ ├── types.go │ └── types_test.go ├── types │ ├── apisix │ │ ├── base.pb.go │ │ ├── base.pb.validate.go │ │ ├── route.pb.go │ │ ├── route.pb.validate.go │ │ ├── upstream.pb.go │ │ ├── upstream.pb.validate.go │ │ └── workaround.go │ ├── event.go │ ├── iptables.go │ └── typeurl.go └── version │ ├── version.go │ └── version_test.go └── scripts └── kind-with-registry.sh /.github/semantic.yml: -------------------------------------------------------------------------------- 1 | titleOnly: true 2 | allowRevertCommits: true 3 | types: 4 | - feat 5 | - fix 6 | - docs 7 | - style 8 | - refactor 9 | - perf 10 | - test 11 | - build 12 | - ci 13 | - chore 14 | - revert 15 | - change 16 | -------------------------------------------------------------------------------- /.github/workflows/e2e-tests.yml: -------------------------------------------------------------------------------- 1 | name: e2e-test-ci 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | jobs: 11 | e2e-test: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | - name: Install kind 16 | run: | 17 | curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.10.0/kind-linux-amd64 18 | chmod +x ./kind 19 | sudo mv kind /usr/local/bin 20 | - name: Setup Go Environment 21 | uses: actions/setup-go@v1 22 | with: 23 | go-version: '1.16.5' 24 | - name: Install ginkgo 25 | run: | 26 | go get -u github.com/onsi/ginkgo/ginkgo 27 | sudo cp ~/go/bin/ginkgo /usr/local/bin 28 | - name: Run e2e test cases 29 | working-directory: ./ 30 | run: | 31 | cd e2e && go mod download && cd .. 32 | make e2e-test E2E_CONCURRENCY=1 33 | - name: upload coverage profile 34 | working-directory: ./e2e 35 | run: | 36 | bash <(curl -s https://codecov.io/bash) 37 | -------------------------------------------------------------------------------- /.github/workflows/lint-checker.yml: -------------------------------------------------------------------------------- 1 | name: Lint Checkers 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | jobs: 11 | gofmt: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | - name: Setup Go Environment 16 | uses: actions/setup-go@v1 17 | with: 18 | go-version: '1.16' 19 | - name: Run gofmt Check 20 | working-directory: ./ 21 | run: | 22 | diffs=`gofmt -l .` 23 | if [[ -n $diffs ]]; then 24 | echo "Files are not formatted by gofmt:" 25 | echo $diffs 26 | exit 1 27 | fi 28 | golint: 29 | runs-on: ubuntu-latest 30 | steps: 31 | - uses: actions/checkout@v2 32 | - name: Download golangci-lint 33 | uses: golangci/golangci-lint-action@v2.5.1 34 | with: 35 | version: v1.39.0 36 | - name: Run Golang Linters 37 | working-directory: ./ 38 | run: | 39 | make lint 40 | markdownlint: 41 | name: 🍇 Markdown 42 | runs-on: ubuntu-latest 43 | steps: 44 | - uses: actions/checkout@v2 45 | - name: 🚀 Use Node.js 46 | uses: actions/setup-node@v1 47 | with: 48 | node-version: '12.x' 49 | - run: npm install -g markdownlint-cli@0.25.0 50 | - run: markdownlint '**/*.md' --ignore node_modules 51 | -------------------------------------------------------------------------------- /.github/workflows/spell-checker.yml: -------------------------------------------------------------------------------- 1 | name: Spell Checker 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | branches: 8 | - main 9 | jobs: 10 | misspell: 11 | name: Runner / 
Misspell 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Check out code. 15 | uses: actions/checkout@v1 16 | - name: Install Misspell Tool 17 | run: | 18 | wget -O - -q https://git.io/misspell | sh -s -- -b . 19 | - name: Run Misspell Tool 20 | run: | 21 | find . -name "*.go" -type f | xargs ./misspell -error 22 | find docs -type f | xargs ./misspell -error 23 | -------------------------------------------------------------------------------- /.github/workflows/unit-tests.yml: -------------------------------------------------------------------------------- 1 | name: Unit Test Suites 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | branches: 9 | - main 10 | jobs: 11 | run-test: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | - name: Setup Go Environment 16 | uses: actions/setup-go@v1 17 | with: 18 | go-version: '1.16' 19 | - name: Run Unit Test Suites 20 | working-directory: ./ 21 | run: | 22 | make unit-test 23 | - name: Upload Coverage Profile 24 | working-directory: ./ 25 | run: | 26 | bash <(curl -s https://codecov.io/bash) -t 615ec963-1638-4ca8-956c-7e33be81fd44 27 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | .idea 17 | coverage.txt 18 | apisix-mesh-agent 19 | .actions/openwhisk-utilities 20 | -------------------------------------------------------------------------------- /.markdownlint.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | # MD004/ul-style Unordered list style 19 | MD004: false 20 | 21 | # MD013 Line length 22 | MD013: false 23 | 24 | #MD029/ol-prefix Ordered list item prefix 25 | MD029: false 26 | 27 | # MD033 Inline HTML 28 | MD033: false 29 | 30 | # MD036/no-emphasis-as-heading/no-emphasis-as-header Emphasis used instead of a heading 31 | MD036: false 32 | MD010: false 33 | MD003: false 34 | 35 | # MD024 36 | no-duplicate-heading: 37 | siblings_only: true 38 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## Table of Contents 4 | 5 | - [0.6](#06) 6 | 7 | ## 0.6 8 | 9 | This is the first public release. 
10 | 11 | ### Core 12 | 13 | * Support basic xDS protocol; 14 | * Support iptables rules set up / clean up; 15 | * Support partial ETCD v3 APIs; 16 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | default: help 2 | .PHONY: default 3 | 4 | ### help: Show Makefile rules 5 | .PHONY: help 6 | help: 7 | @echo Makefile rules: 8 | @echo 9 | @grep -E '^### [-A-Za-z0-9_]+:' Makefile | sed 's/###/ /' 10 | 11 | VERSION ?= 0.0.1 12 | 13 | GITSHA ?= $(shell git rev-parse --short=7 HEAD) 14 | PWD ?= $(shell pwd) 15 | DATE ?= $(shell date +%s) 16 | DOCKER_IMAGE_TAG ?= dev 17 | ISTIO_IMAGE ?= istio/pilot:1.9.1 18 | NGINX_IMAGE ?= nginx:1.19.3 19 | HTTPBIN_IMAGE ?= kennethreitz/httpbin 20 | E2E_IMAGE_REGISTRY ?= localhost:5000 21 | E2E_CONCURRENCY ?= 1 22 | 23 | VERSYM=github.com/api7/apisix-mesh-agent/pkg/version._version 24 | GITSHASYM=github.com/api7/apisix-mesh-agent/pkg/version._gitRevision 25 | TIMESTAMPSYM=github.com/api7/apisix-mesh-agent/pkg/version._timestamp 26 | GO_LDFLAGS ?= "-X=$(VERSYM)=$(VERSION) -X=$(GITSHASYM)=$(GITSHA) -X=$(TIMESTAMPSYM)=$(DATE)" 27 | 28 | ### build: Build apisix-mesh-agent 29 | .PHONY: build 30 | build: 31 | go build \ 32 | -o apisix-mesh-agent \ 33 | -ldflags $(GO_LDFLAGS) \ 34 | main.go 35 | 36 | ### lint: Do static lint check 37 | .PHONY: lint 38 | lint: 39 | golangci-lint run 40 | 41 | ### gofmt: Format all go codes 42 | .PHONY: gofmt 43 | gofmt: 44 | find . -type f -name "*.go" | xargs gofmt -w -s 45 | 46 | ### build-image: Build image 47 | .PHONY: build-image 48 | build-image: 49 | docker build \ 50 | -t api7/apisix-mesh-agent:$(DOCKER_IMAGE_TAG) \ 51 | --build-arg ENABLE_PROXY=true \ 52 | --build-arg LUAROCKS_SERVER=https://luarocks.cn . 53 | 54 | ### unit-test: Run unit test cases 55 | .PHONY: unit-test 56 | unit-test: 57 | go test -cover -coverprofile=coverage.txt ./... 
58 | 59 | ### kind-reset: Delete the kind k8s cluster 60 | .PHONY: kind-reset 61 | kind-reset: 62 | kind delete cluster --name apisix 63 | 64 | ### kind-up: Create a k8s cluster by kind 65 | .PHONY: kind-up 66 | kind-up: 67 | ./scripts/kind-with-registry.sh 68 | 69 | ### e2e-test: Run e2e test suites 70 | .PHONY: e2e-test 71 | e2e-test: kind-up prepare-e2e-env 72 | APISIX_MESH_AGENT_E2E_HOME=$(shell pwd)/e2e \ 73 | cd e2e && \ 74 | go env -w GOFLAGS="-mod=mod" && \ 75 | ginkgo -cover -coverprofile=coverage.txt -r --randomizeSuites --randomizeAllSpecs --trace -p --nodes=$(E2E_CONCURRENCY) 76 | 77 | ### prepare-e2e-env: Prepare the environment for running e2e test suites 78 | .PHONY: prepare-e2e-env 79 | prepare-e2e-env: cleanup-e2e-legacies build-image 80 | docker pull $(ISTIO_IMAGE) 81 | docker tag $(ISTIO_IMAGE) $(E2E_IMAGE_REGISTRY)/$(ISTIO_IMAGE) 82 | docker push $(E2E_IMAGE_REGISTRY)/$(ISTIO_IMAGE) 83 | 84 | docker pull $(NGINX_IMAGE) 85 | docker tag $(NGINX_IMAGE) $(E2E_IMAGE_REGISTRY)/$(NGINX_IMAGE) 86 | docker push $(E2E_IMAGE_REGISTRY)/$(NGINX_IMAGE) 87 | 88 | docker pull $(HTTPBIN_IMAGE) 89 | docker tag $(HTTPBIN_IMAGE) $(E2E_IMAGE_REGISTRY)/$(HTTPBIN_IMAGE) 90 | docker push $(E2E_IMAGE_REGISTRY)/$(HTTPBIN_IMAGE) 91 | 92 | docker tag api7/apisix-mesh-agent:$(DOCKER_IMAGE_TAG) $(E2E_IMAGE_REGISTRY)/api7/apisix-mesh-agent:$(DOCKER_IMAGE_TAG) 93 | docker push $(E2E_IMAGE_REGISTRY)/api7/apisix-mesh-agent:$(DOCKER_IMAGE_TAG) 94 | 95 | ### cleanup-e2e-legacies: Cleanup the e2e suites running legacies 96 | .PHONY: cleanup-e2e-legacies 97 | cleanup-e2e-legacies: 98 | kubectl get validatingwebhookconfigurations.admissionregistration.k8s.io | grep istio | awk '{print $$1}' | xargs kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io || true 99 | kubectl get mutatingwebhookconfigurations.admissionregistration.k8s.io | grep istio | awk '{print $$1}' | xargs kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io || true 100 | kubectl get ns | grep apisix-mesh-agent | awk '{print $$1}' | xargs kubectl delete ns || true 101 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | apisix-mesh-agent 2 | ================= 3 | 4 | Agent of [Apache APISIX](http://apisix.apache.org/) to extend it as a [Service 5 | Mesh](https://www.redhat.com/en/topics/microservices/what-is-a-service-mesh) Sidecar. 6 | 7 | ![apisix-mesh-overview](./docs/images/apisix-mesh-overview.png) 8 | 9 | Status 10 | ------ 11 | 12 | This project is currently considered as experimental. 13 | 14 | Why apisix-mesh-agent 15 | --------------------- 16 | 17 | APISIX provides rich traffic management features such as load balancing, dynamic upstream, canary release, circuit breaking, authentication, observability, and more. 18 | 19 | It's an excellent API Gateway but is not sufficient for Service Mesh, with the help of apisix-mesh-agent, it handles the East-West traffic well. 20 | 21 | Quick Start 22 | ----------- 23 | 24 | You can quickly use this project with Istio according to the [Getting Started Guide](./docs/istio-mesh.md). 25 | 26 | The Design of APISIX Mesh 27 | ------------------------- 28 | 29 | See the [Design](./docs/design.md) for the details. 30 | 31 | How it Works 32 | ------------- 33 | 34 | See [How it Works](./docs/how-it-works.md) to learn how apisix-mesh-agent extends Apache APISIX as a Service Mesh sidecar. 
35 | 36 | The Internal of apisix-mesh-agent 37 | --------------------------------- 38 | 39 | If you're interested in the internal of apisix-mesh-agent, we recommand you 40 | to read the [the-internal-of-apisix-mesh-agent](./docs/the-internal-of-apisix-mesh-agent.md), it explains each 41 | module's function and responsibility. 42 | 43 | Get Involved to Development 44 | --------------------------- 45 | 46 | Welcome to make contributions, but before you start, please check out 47 | [development.md](./docs/development.md) to learn how to run and debug apisix-mesh-agent 48 | in your own environment. 49 | 50 | License 51 | ------- 52 | 53 | [Apache 2.0 LICENSE](./LICENSE) 54 | -------------------------------------------------------------------------------- /api/base.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = ".;apisix"; 4 | 5 | import "validate/validate.proto"; 6 | 7 | // Var represents the expression like: 8 | // ["arg_id", "equal", "543"]. 9 | message Var { 10 | // vars in Route is an two-dimensional array which cannot be represented 11 | // directly in protobuf, here we use https://github.com/favadi/protoc-go-inject-tag 12 | // to hack the ultimate pb.go. 13 | repeated string vars = 1 [(validate.rules).repeated = {min_items: 2, max_items: 4}]; 14 | } 15 | -------------------------------------------------------------------------------- /api/route.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | option go_package = ".;apisix"; 4 | 5 | import "base.proto"; 6 | import "validate/validate.proto"; 7 | import "google/protobuf/any.proto"; 8 | 9 | // [#protodoc-title: The Apache APISIX Route configuration] 10 | // A Route contains multiple parts but basically can be grouped 11 | // into three: 12 | // 1). Route match, fields like uris, hosts, remote_addrs are the 13 | // predicates to indicate whether a request can hit the route. 14 | // 2). Route action, upstream_id specifies the backend upstream 15 | // object, which guides Apache APISIX how to route request. 16 | // 3). Plugins, plugins will run before/after the route action, 17 | // some plugins are "terminated" so may be request will be returned 18 | // on the APISIX side (like authentication failures). 19 | // It's not totally equivalent to the jsonschema definition 20 | // in Apache APISIX's code base since there are some historical 21 | // considerations there which are not used here anymore. 22 | message Route { 23 | // URI array used to do the route match. 24 | // At least one item should be configured and each of them cannot be 25 | // duplicated. 26 | repeated string uris = 1 [(validate.rules).repeated = {min_items: 1, unique: true}]; 27 | // The route name, it's useful for the logging but it's not required. 28 | string name = 2 [(validate.rules).string = {min_len: 1, max_len: 100}]; 29 | // The route id, can be an int64 value or a string matching the specific pattern. 30 | string id = 3; 31 | // Textual descriptions used to describe the route use. 32 | string desc = 4 [(validate.rules).string.max_len = 256]; 33 | // Priority of this route, used to decide which route should be used when 34 | // multiple routes contains same URI. 35 | // Larger value means higher priority. The default value is 0. 36 | int32 priority = 5; 37 | // HTTP Methods used to do the route match. 
38 | repeated string methods = 6 [ 39 | (validate.rules).repeated.unique = true, 40 | (validate.rules).repeated.items.string = {in: [ 41 | "GET", "POST", "PUT", "DELETE", "PATCH", 42 | "HEAD", "OPTIONS", "CONNECT", "TRACE" 43 | ]} 44 | ]; 45 | // Host array used to do the route match. 46 | repeated string hosts = 7 [ 47 | (validate.rules).repeated = { 48 | unique: true, 49 | ignore_empty: true, 50 | min_items: 1, 51 | }, 52 | (validate.rules).repeated.items.string.pattern = "^\\*?[0-9a-zA-Z-._]+$" 53 | ]; 54 | // Remote address array used to do the route match. 55 | repeated string remote_addrs = 8 [(validate.rules).repeated = { 56 | unique: true, 57 | ignore_empty: true, 58 | min_items: 1 59 | // TODO: IPv4 or IPv6 format pattern. 60 | }]; 61 | // Nginx vars used to do the route match. 62 | repeated Var vars = 9; 63 | // Embedded plugins. 64 | google.protobuf.Any plugins = 10; 65 | // The referred service id. 66 | string service_id = 11; 67 | // The referred upstream id. 68 | string upstream_id = 12; 69 | 70 | // RouteStatus Enumerations. 71 | enum RouteStatus { 72 | Disable = 0; 73 | Enable = 1; 74 | }; 75 | // The route status. 76 | RouteStatus status = 13; 77 | } 78 | -------------------------------------------------------------------------------- /cmd/cmd.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | 6 | "github.com/api7/apisix-mesh-agent/cmd/iptables" 7 | "github.com/api7/apisix-mesh-agent/cmd/precheck" 8 | "github.com/api7/apisix-mesh-agent/cmd/sidecar" 9 | "github.com/api7/apisix-mesh-agent/cmd/version" 10 | ) 11 | 12 | // NewMeshAgentCommand creates the root command for apisix-mesh-agent. 13 | func NewMeshAgentCommand() *cobra.Command { 14 | cmd := &cobra.Command{ 15 | Use: "apisix-mesh-agent [command] [flags]", 16 | Short: "Agent of Apache APISIX to extend it as a Service Mesh Sidecar.", 17 | } 18 | cmd.AddCommand( 19 | sidecar.NewCommand(), 20 | version.NewCommand(), 21 | precheck.NewCommand(), 22 | iptables.NewSetupCommand(), 23 | iptables.NewCleanupIptablesCommand(), 24 | ) 25 | return cmd 26 | } 27 | -------------------------------------------------------------------------------- /cmd/iptables/cleanup-iptables_test.go: -------------------------------------------------------------------------------- 1 | package iptables 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestCleanupIptables(t *testing.T) { 12 | f, err := ioutil.TempFile("./", "iptables-cleanup.*") 13 | assert.Nil(t, err) 14 | defer func() { 15 | assert.Nil(t, f.Close()) 16 | assert.Nil(t, os.Remove(f.Name())) 17 | }() 18 | rawStdout := os.Stdout 19 | os.Stdout = f 20 | cleanup(true) 21 | os.Stdout = rawStdout 22 | 23 | data, err := ioutil.ReadFile(f.Name()) 24 | assert.Nil(t, err) 25 | 26 | expect := `iptables -t nat -D PREROUTING -p tcp -j APISIX_INBOUND 27 | iptables -t nat -D OUTPUT -p tcp -j OUTPUT 28 | iptables -t nat -F APISIX_INBOUND 29 | iptables -t nat -X APISIX_INBOUND 30 | iptables -t nat -F OUTPUT 31 | iptables -t nat -X OUTPUT 32 | iptables -t nat -F APISIX_REDIRECT 33 | iptables -t nat -X APISIX_REDIRECT 34 | iptables -t nat -F APISIX_INBOUND_REDIRECT 35 | iptables -t nat -X APISIX_INBOUND_REDIRECT 36 | ` 37 | assert.Equal(t, expect, string(data)) 38 | } 39 | -------------------------------------------------------------------------------- /cmd/iptables/clenaup-iptables.go: 
-------------------------------------------------------------------------------- 1 | package iptables 2 | 3 | import ( 4 | "github.com/api7/apisix-mesh-agent/pkg/types" 5 | "github.com/spf13/cobra" 6 | "istio.io/istio/tools/istio-iptables/pkg/dependencies" 7 | ) 8 | 9 | // NewCleanupIptablesCommand creates the cleanup-iptables sub-command object. 10 | func NewCleanupIptablesCommand() *cobra.Command { 11 | var dryRun bool 12 | cmd := &cobra.Command{ 13 | Use: "cleanup-iptables [flags]", 14 | Short: "Cleanup iptables rules for the port forwarding", 15 | Run: func(cmd *cobra.Command, args []string) { 16 | cleanup(dryRun) 17 | }, 18 | } 19 | cmd.PersistentFlags().BoolVar(&dryRun, "dry-run", false, "dry run mode") 20 | return cmd 21 | } 22 | 23 | func cleanup(dryRun bool) { 24 | var ext dependencies.Dependencies 25 | if dryRun { 26 | ext = &dependencies.StdoutStubDependencies{} 27 | } else { 28 | ext = &dependencies.RealDependencies{} 29 | } 30 | removeOldChains(ext, "iptables") 31 | } 32 | 33 | func removeOldChains(ext dependencies.Dependencies, cmd string) { 34 | ext.RunQuietlyAndIgnore(cmd, "-t", "nat", "-D", types.PreRoutingChain, "-p", "tcp", "-j", types.InboundChain) 35 | ext.RunQuietlyAndIgnore(cmd, "-t", "nat", "-D", types.OutputChain, "-p", "tcp", "-j", types.OutputChain) 36 | flushAndDeleteChains(ext, cmd, "nat", []string{types.InboundChain, types.OutputChain, types.RedirectChain, types.InboundRedirectChain}) 37 | } 38 | 39 | func flushAndDeleteChains(ext dependencies.Dependencies, cmd string, table string, chains []string) { 40 | for _, chain := range chains { 41 | ext.RunQuietlyAndIgnore(cmd, "-t", table, "-F", chain) 42 | ext.RunQuietlyAndIgnore(cmd, "-t", table, "-X", chain) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /cmd/precheck/precheck.go: -------------------------------------------------------------------------------- 1 | package precheck 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/exec" 7 | "strings" 8 | 9 | "github.com/api7/apisix-mesh-agent/pkg/types" 10 | 11 | "github.com/spf13/cobra" 12 | ) 13 | 14 | var ( 15 | // Use variable so unit test cases can change it, as there is no 16 | // iptables utility on macos. 17 | _iptablesCmd = "iptables" 18 | ) 19 | 20 | // NewCommand creates the precheck subcommand object. 21 | func NewCommand() *cobra.Command { 22 | var ( 23 | apisixBinPath string 24 | apisixHomePath string 25 | ) 26 | 27 | cmd := &cobra.Command{ 28 | Use: "precheck [flags]", 29 | Short: "Check the running environment for Apache APISIX as the sidecar", 30 | Long: `Check the running environment for Apache APISIX as the sidecar. 
31 | 32 | if you just run apisix-mesh-agent in standalone mode, then don't run this precheck as it reports 33 | false positive errors.`, 34 | Run: func(cmd *cobra.Command, args []string) { 35 | var code int 36 | if !check(apisixBinPath, apisixHomePath) { 37 | code = 1 38 | } 39 | os.Exit(code) 40 | }, 41 | } 42 | 43 | cmd.PersistentFlags().StringVar(&apisixBinPath, "apisix-bin-path", "/usr/local/bin/apisix", "the executable binary file path of Apache APISIX") 44 | cmd.PersistentFlags().StringVar(&apisixHomePath, "apisix-home-path", "/usr/local/apisix", "the home path of Apache APISIX") 45 | return cmd 46 | } 47 | 48 | func check(bin, home string) bool { 49 | var buffer strings.Builder 50 | defer func() { 51 | fmt.Fprint(os.Stderr, buffer.String()) 52 | }() 53 | 54 | if !checkBin(&buffer, bin) { 55 | return false 56 | } 57 | if !checkHome(&buffer, home) { 58 | return false 59 | } 60 | if !checkIptables(&buffer) { 61 | return false 62 | } 63 | return true 64 | } 65 | 66 | func checkBin(buffer *strings.Builder, path string) bool { 67 | defer func() { 68 | buffer.WriteByte('\n') 69 | }() 70 | buffer.WriteString("checking apisix binary path ") 71 | buffer.WriteString(path) 72 | buffer.WriteString(" ... ") 73 | _, err := os.Stat(path) 74 | if err != nil { 75 | buffer.WriteString(err.Error()) 76 | return false 77 | } 78 | buffer.WriteString("found") 79 | return true 80 | } 81 | 82 | func checkHome(buffer *strings.Builder, path string) bool { 83 | defer func() { 84 | buffer.WriteByte('\n') 85 | }() 86 | buffer.WriteString("checking apisix home path ") 87 | buffer.WriteString(path) 88 | buffer.WriteString(" ... ") 89 | s, err := os.Stat(path) 90 | if err != nil { 91 | buffer.WriteString(err.Error()) 92 | return false 93 | } 94 | if !s.IsDir() { 95 | buffer.WriteString("not a directory") 96 | return false 97 | } 98 | buffer.WriteString("found") 99 | return true 100 | } 101 | 102 | func checkIptables(buffer *strings.Builder) bool { 103 | checkChain := func(table, chain string) error { 104 | cmd := exec.Command(_iptablesCmd, "-t", table, "-L", chain) 105 | return cmd.Run() 106 | } 107 | for _, chain := range []string{types.InboundChain, types.OutputChain, types.RedirectChain, types.PreRoutingChain} { 108 | buffer.WriteString("checking iptables, table: nat, chain: ") 109 | buffer.WriteString(chain) 110 | buffer.WriteString(" ... ") 111 | err := checkChain("nat", chain) 112 | if err != nil { 113 | buffer.WriteString(err.Error()) 114 | buffer.WriteByte('\n') 115 | return false 116 | } else { 117 | buffer.WriteString("found\n") 118 | } 119 | } 120 | return true 121 | } 122 | -------------------------------------------------------------------------------- /cmd/precheck/precheck_test.go: -------------------------------------------------------------------------------- 1 | package precheck 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestCheckBin(t *testing.T) { 13 | var buffer strings.Builder 14 | 15 | assert.Equal(t, checkBin(&buffer, "./not_a_file"), false) 16 | expect := "checking apisix binary path ./not_a_file ... stat ./not_a_file: no such file or directory\n" 17 | assert.Equal(t, expect, buffer.String()) 18 | 19 | buffer.Reset() 20 | assert.Equal(t, checkBin(&buffer, "./precheck.go"), true) 21 | expect = "checking apisix binary path ./precheck.go ... 
found\n" 22 | assert.Equal(t, expect, buffer.String()) 23 | } 24 | 25 | func TestCheckHome(t *testing.T) { 26 | var buffer strings.Builder 27 | 28 | assert.Equal(t, checkHome(&buffer, "./not_a_file"), false) 29 | expect := "checking apisix home path ./not_a_file ... stat ./not_a_file: no such file or directory\n" 30 | assert.Equal(t, expect, buffer.String()) 31 | 32 | buffer.Reset() 33 | assert.Equal(t, checkHome(&buffer, "./precheck.go"), false) 34 | expect = "checking apisix home path ./precheck.go ... not a directory\n" 35 | assert.Equal(t, expect, buffer.String()) 36 | 37 | buffer.Reset() 38 | assert.Equal(t, checkHome(&buffer, "../"), true) 39 | expect = "checking apisix home path ../ ... found\n" 40 | assert.Equal(t, expect, buffer.String()) 41 | } 42 | 43 | func TestCheckIptables(t *testing.T) { 44 | var buffer strings.Builder 45 | 46 | _iptablesCmd = "not_a_command" 47 | 48 | assert.Equal(t, checkIptables(&buffer), false) 49 | expect := "checking iptables, table: nat, chain: APISIX_INBOUND ... exec: \"not_a_command\": executable file not found in $PATH\n" 50 | assert.Equal(t, expect, buffer.String()) 51 | 52 | _iptablesCmd = "true" 53 | buffer.Reset() 54 | assert.Equal(t, checkIptables(&buffer), true) 55 | expect = `checking iptables, table: nat, chain: APISIX_INBOUND ... found 56 | checking iptables, table: nat, chain: OUTPUT ... found 57 | checking iptables, table: nat, chain: APISIX_REDIRECT ... found 58 | checking iptables, table: nat, chain: PREROUTING ... found 59 | ` 60 | assert.Equal(t, expect, buffer.String()) 61 | } 62 | 63 | func TestCheck(t *testing.T) { 64 | _iptablesCmd = "true" 65 | 66 | f, err := ioutil.TempFile("./", "stdout.*") 67 | assert.Nil(t, err) 68 | defer func() { 69 | assert.Nil(t, f.Close()) 70 | assert.Nil(t, os.Remove(f.Name())) 71 | }() 72 | raw := os.Stderr 73 | os.Stderr = f 74 | 75 | ok := check("./precheck.go", "../") 76 | os.Stderr = raw 77 | assert.Equal(t, ok, true) 78 | 79 | data, err := ioutil.ReadFile(f.Name()) 80 | assert.Nil(t, err) 81 | expect := `checking apisix binary path ./precheck.go ... found 82 | checking apisix home path ../ ... found 83 | checking iptables, table: nat, chain: APISIX_INBOUND ... found 84 | checking iptables, table: nat, chain: OUTPUT ... found 85 | checking iptables, table: nat, chain: APISIX_REDIRECT ... found 86 | checking iptables, table: nat, chain: PREROUTING ... found 87 | ` 88 | assert.Equal(t, expect, string(data)) 89 | } 90 | -------------------------------------------------------------------------------- /cmd/sidecar/sidecar.go: -------------------------------------------------------------------------------- 1 | package sidecar 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | "os/signal" 8 | "strings" 9 | "syscall" 10 | 11 | "github.com/spf13/cobra" 12 | 13 | "github.com/api7/apisix-mesh-agent/pkg/config" 14 | "github.com/api7/apisix-mesh-agent/pkg/log" 15 | "github.com/api7/apisix-mesh-agent/pkg/sidecar" 16 | "github.com/api7/apisix-mesh-agent/pkg/version" 17 | ) 18 | 19 | func dief(template string, args ...interface{}) { 20 | if !strings.HasSuffix(template, "\n") { 21 | template += "\n" 22 | } 23 | _, _ = fmt.Fprintf(os.Stderr, template, args...) 
24 | os.Exit(1) 25 | } 26 | 27 | func initializeDefaultLogger(cfg *config.Config) { 28 | logger, err := log.NewLogger( 29 | log.WithLogLevel(cfg.LogLevel), 30 | log.WithOutputFile(cfg.LogOutput), 31 | ) 32 | if err != nil { 33 | dief("failed to initialize logging: %s", err) 34 | } 35 | log.DefaultLogger = logger 36 | } 37 | 38 | func waitForSignal(stopCh chan struct{}) { 39 | sigCh := make(chan os.Signal, 1) 40 | signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) 41 | 42 | sig := <-sigCh 43 | log.Infof("signal %d (%s) received", sig, sig.String()) 44 | close(stopCh) 45 | } 46 | 47 | // NewCommand creates the sidecar command for apisix-mesh-agent. 48 | func NewCommand() *cobra.Command { 49 | cfg := config.NewDefaultConfig() 50 | cmd := &cobra.Command{ 51 | Use: "sidecar [flags]", 52 | Short: "Launch apisix-mesh-agent as a sidecar process", 53 | Run: func(cmd *cobra.Command, args []string) { 54 | initializeDefaultLogger(cfg) 55 | if err := cfg.Validate(); err != nil { 56 | dief("configuration validation failure: %s", err) 57 | } 58 | log.Infow("apisix-mesh-agent started") 59 | defer log.Info("apisix-mesh-agent exited") 60 | log.Info("version:\n", version.String()) 61 | data, err := json.MarshalIndent(cfg, "", " ") 62 | if err != nil { 63 | panic(err) 64 | } 65 | log.Info("use configuration:\n", string(data)) 66 | 67 | sc, err := sidecar.NewSidecar(cfg) 68 | if err != nil { 69 | dief("initialization failure: %s", err) 70 | } 71 | 72 | stop := make(chan struct{}) 73 | go waitForSignal(stop) 74 | if err := sc.Run(stop); err != nil { 75 | dief(err.Error()) 76 | } 77 | }, 78 | } 79 | 80 | cmd.PersistentFlags().StringVar(&cfg.LogOutput, "log-output", "stderr", "the output file path of error log") 81 | cmd.PersistentFlags().StringVar(&cfg.LogLevel, "log-level", "info", "the error log level") 82 | cmd.PersistentFlags().StringVar(&cfg.Provisioner, "provisioner", config.XDSV3FileProvisioner, "the provisioner to use, option can be \"xds-v3-file\", \"xds-v3-grpc\"") 83 | cmd.PersistentFlags().StringSliceVar(&cfg.XDSWatchFiles, "xds-watch-files", nil, "file paths watched by xds-v3-file provisioner") 84 | cmd.PersistentFlags().StringVar(&cfg.GRPCListen, "grpc-listen", config.DefaultGRPCListen, "grpc server listen address") 85 | cmd.PersistentFlags().StringVar(&cfg.EtcdKeyPrefix, "etcd-key-prefix", config.DefaultEtcdKeyPrefix, "the key prefix in the mimicking etcd v3 server") 86 | cmd.PersistentFlags().StringVar(&cfg.XDSConfigSource, "xds-config-source", "", "the xds config source address, required if provisioner is \"xds-v3-grpc\"") 87 | cmd.PersistentFlags().StringVar(&cfg.RunMode, "run-mode", config.StandaloneMode, "run mode for apisix-mesh-agent, can be \"standalone\" or \"bundle\"") 88 | cmd.PersistentFlags().StringVar(&cfg.APISIXBinPath, "apisix-bin-path", config.DefaultAPISIXBinPath, "executable binary file path for Apache APISIX, it's not concerned if run mode is \"standalone\"") 89 | cmd.PersistentFlags().StringVar(&cfg.APISIXHomePath, "apisix-home-path", config.DefaultAPISIXHomePath, "home path for Apache APISIX, it's not concerned if run mode is \"standalone\"") 90 | return cmd 91 | } 92 | -------------------------------------------------------------------------------- /cmd/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/spf13/cobra" 7 | 8 | "github.com/api7/apisix-mesh-agent/pkg/version" 9 | ) 10 | 11 | // NewCommand creates the version command for apisix-mesh-agent. 
12 | func NewCommand() *cobra.Command { 13 | cmd := &cobra.Command{ 14 | Use: "version", 15 | Short: "Version for apisix-mesh-agent", 16 | Run: func(cmd *cobra.Command, args []string) { 17 | fmt.Println(version.String()) 18 | }, 19 | } 20 | return cmd 21 | } 22 | -------------------------------------------------------------------------------- /docs/design.md: -------------------------------------------------------------------------------- 1 | # The Design of APISIX Mesh 2 | 3 | This article explains how to implement yet another [Service Mesh](https://www.redhat.com/en/topics/microservices/what-is-a-service-mesh#:~:text=A%20service%20mesh%2C%20like%20the,share%20data%20with%20one%20another.&text=Each%20part%20of%20an%20app,give%20users%20what%20they%20want.) solution by extending [Apache APISIX](https://apisix.apache.org/). 4 | 5 | ## Status 6 | 7 | - Written for the first time on 2021/02/09 8 | 9 | ## Table of Contents 10 | 11 | - [Service Mesh](#service-mesh) 12 | - [Apache APISIX](#apache-apisix) 13 | - [Sidecar in APISIX Mesh](#sidecar-in-apisix-mesh) 14 | - [Communication Bus Protocol](#communication-bus-protocol) 15 | - [The Selection of Control Plane](#the-selection-of-control-plane) 16 | 17 | ## Service Mesh 18 | 19 | A Service Mesh is a technology that controls how individual applications talk to each other. Applications are relieved of these concerns since all essential functions like 20 | routing, service discovery, authentication, authorization and so on are implemented by the Service Mesh solution, 21 | just like what [Istio](https://istio.io/) and [Linkerd](https://linkerd.io/) do. 22 | 23 | Typically, there are two components in a Service Mesh solution: the control plane and the data plane. 24 | The former, as the brain of the Service Mesh, discovers services from the service registry ([Kubernetes](https://kubernetes.io), 25 | [Consul](https://www.consul.io/) and others), accepts configuration change requests from admins or CI/CD robots and 26 | delivers all configurations to the data plane; the data plane, usually composed of a sidecar process and an application process, 27 | is deployed in a distributed fashion. 28 | 29 | ## Apache APISIX 30 | 31 | Just like the introduction on the [website](https://apisix.apache.org/) of Apache APISIX: 32 | 33 | > Apache APISIX is a dynamic, real-time, high-performance Cloud-Native API gateway, based on the Nginx library and etcd. 34 | > 35 | > Apache APISIX software provides rich traffic management features such as load balancing, dynamic upstream, canary release, circuit breaking, authentication, observability, and more. 36 | 37 | Apache APISIX is an excellent API Gateway solution, but it is not limited to that role: it has all the characteristics that a Service Mesh sidecar needs. 38 | It has excellent resource utilization, flexible routing capabilities, rich plugins and can be extended easily. 39 | 40 | ## Sidecar in APISIX Mesh 41 | 42 | In the APISIX Mesh solution, Apache APISIX is designed as the proxy in the data plane rather than the whole sidecar component, since Apache APISIX is tightly coupled with the [etcd v3 API](https://etcd.io/docs/v3.3.12/rfc/). This protocol is not so common, 43 | and for the sake of adapting existing control plane solutions easily, it's not wise to use it as the communication bus protocol between the control plane and the data plane.
44 | 45 | That's why another program (named apisix-mesh-agent) comes in: it uses a well-designed protocol to talk to the control plane, receives configurations from it, and mimics the etcd v3 API for the concomitant Apache APISIX. 46 | 47 | ![Data Plane Overview](./images/data-plane-overview.png) 48 | 49 | Thanks to this design, the only thing Apache APISIX needs to change is the value of `etcd.host`, pointing it to the etcd v3 API address of the concomitant apisix-mesh-agent (the blue arrow in the above diagram). 50 | 51 | More importantly, apisix-mesh-agent sets up dozens of iptables rules to intercept the inbound (pink arrow) and outbound (brown arrow) traffic of the application. 52 | 53 | Of course, these are not all the functions that apisix-mesh-agent provides; it also has other auxiliary features such as delivering TLS/SSL certificates and uploading logs, tracing data and metrics for better observability. But in the first stage, only the core functionalities (routing, inbound and outbound traffic interception) will be focused on; other features will be added gradually. 54 | 55 | ## Communication Bus Protocol 56 | 57 | As mentioned above, the etcd v3 API protocol is not a good choice as the communication protocol between the data plane and the control plane. Instead, a well-designed, Service Mesh dedicated protocol is required, and the [Envoy xDS protocol](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol) is the best one (at least for now), not only because of its rich data structures and underlying transport protocol, but also because of its spread and adoption. With the help of the xDS protocol, the selection of the control plane is not forced as long as it also supports the xDS protocol. 58 | 59 | Therefore, apisix-mesh-agent should implement the client side of the xDS protocol. Fortunately, not much effort needs to be taken, as there is an existing SDK, [go-control-plane](https://github.com/envoyproxy/go-control-plane). 60 | 61 | ## The Selection of Control Plane 62 | 63 | As the xDS protocol is used, the control plane selection is clear: any product that supports the xDS protocol can be used as the control plane of 64 | the APISIX Mesh, like Istio or Kuma. Using existing control plane products reduces the migration overhead since control planes are always incompatible with each other, and it's difficult to ask users to migrate from one control plane to another. The target of the first stage is the adoption of Apache APISIX as the data plane. 65 | 66 | The architecture is shown below. In the future, a custom control plane will be supported; it will be designed to be flexible, easy to use/deploy and highly available. 67 | 68 | ![APISIX Mesh Overview](./images/apisix-mesh-overview.png) 69 | -------------------------------------------------------------------------------- /docs/development.md: -------------------------------------------------------------------------------- 1 | Development 2 | =========== 3 | 4 | This document explains how to get started with developing apisix-mesh-agent. 5 | 6 | Prerequisites 7 | ------------- 8 | 9 | * Your Go version should be at least `1.14`. 10 | * Clone the [apisix-mesh-agent](https://github.com/api7/apisix-mesh-agent) project.
11 | 12 | Build 13 | ----- 14 | 15 | ```shell 16 | cd /path/to/apisix-mesh-agent 17 | make build 18 | ``` 19 | 20 | Test 21 | ---- 22 | 23 | ### Run Unit Test Suites 24 | 25 | ```shell 26 | cd /path/to/apisix-mesh-agent 27 | make unit-test 28 | ``` 29 | 30 | ### Mimic practical environment 31 | 32 | If you want to mimic the practical environment, iptables rules should be set up in your development 33 | environment, see [traffic-interception](./traffic-interception.md) for the details of creating 34 | the iptables rules. 35 | -------------------------------------------------------------------------------- /docs/how-it-works.md: -------------------------------------------------------------------------------- 1 | # How It Works 2 | 3 | This article explains how apisix-mesh-agent extends [Apache APISIX](https://apisix.apache.org) as the Service Mesh sidecar. 4 | 5 | ## Run Mode 6 | 7 | apisix-mesh-agent can be run alone or bundled with Apache APISIX. 8 | It depends on how you pass the running options to it. 9 | 10 | If you want to run it alone, for instance, you want to run the apisix-mesh-agent and APISIX in different 11 | Pods/VMs so that the apisix-mesh-agent can be shared by multiple APISIX instances, then just pass `--run-mode standalone` 12 | for it. In such a case, the `etcd.host` configuration in APISIX should be configured to the gRPC listen address 13 | of apisix-mesh-agent. 14 | 15 | ```shell 16 | /path/to/apisix-mesh-agent sidecar --provisioner xds-v3-file --xds-watch-files /path/to/xds-assets --run-mode standalone 17 | ``` 18 | 19 | As a common pattern, sidecar and apps are always deployed together, if you run apisix-mesh-agent under the "bundle" mode, it 20 | will launch the Apache APISIX and close it when you shut apisix-mesh-agent down. 21 | 22 | ```shell 23 | /path/to/apisix-mesh-agent sidecar --apisix-bin-path /path/to/bin/apisix --apisix-home-path /path/to/apisix/ --provisioner xds-v3-file --xds-watch-files /path/to/xds-assets --run-mode bundle 24 | ``` 25 | 26 | You should pass the correct Apache APISIX binary path and home path, apisix-mesh-agent render [a configuration file](../pkg/sidecar/apisix/config.yaml) for it, each time you start apisix-mesh-agent, 27 | configuration file will be written to `/path/to/apisix/conf/config-default.yaml`. 28 | 29 | ## Traffic Interception 30 | 31 | Before run apisix-mesh-agent and Apache APISIX, the iptables rules 32 | should be set up in advance, please see [traffic-interception](./traffic-interception.md) for 33 | details. 34 | 35 | ## Configuration Provisioning 36 | 37 | Currently, apisix-mesh-agent supports to fetch configurations from [xDS](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol) management servers, it converts the data structures from xDS to the [Routes](http://apisix.apache.org/docs/apisix/architecture-design/route), [Upstreams](http://apisix.apache.org/docs/apisix/architecture-design/upstream) and others in [Apache APISIX](https://apisix.apache.org). Now only the [SToW](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#four-variants) part was supported, apisix-mesh-agent compares the last two states and get the differences from them, then generating ADD, DELETE and UPDATE events so data in memory can be changed incrementally. 
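To make the State-of-the-World diffing concrete, here is a minimal, self-contained Go sketch (not the project's actual code) that compares two snapshots keyed by resource id and emits add/update/delete events; the `Route`, `Event` and `diffRoutes` names below are illustrative stand-ins for the real types in the provisioner and `pkg/types`.

```go
package main

import "fmt"

// Route is a simplified stand-in for apisix.Route; only the id and a
// revision marker are needed to decide whether an object changed.
type Route struct {
	ID  string
	Rev string
}

// EventType mirrors the add/update/delete event kinds described above.
type EventType string

const (
	EventAdd    EventType = "add"
	EventUpdate EventType = "update"
	EventDelete EventType = "delete"
)

// Event pairs an event type with the affected object.
type Event struct {
	Type   EventType
	Object Route
}

// diffRoutes compares the previous and current snapshots (both keyed by
// route id) and returns the events needed to change the in-memory data
// incrementally.
func diffRoutes(prev, curr map[string]Route) []Event {
	var events []Event
	for id, r := range curr {
		old, ok := prev[id]
		switch {
		case !ok:
			events = append(events, Event{Type: EventAdd, Object: r})
		case old.Rev != r.Rev:
			events = append(events, Event{Type: EventUpdate, Object: r})
		}
	}
	for id, r := range prev {
		if _, ok := curr[id]; !ok {
			events = append(events, Event{Type: EventDelete, Object: r})
		}
	}
	return events
}

func main() {
	prev := map[string]Route{"1": {ID: "1", Rev: "a"}, "2": {ID: "2", Rev: "a"}}
	curr := map[string]Route{"2": {ID: "2", Rev: "b"}, "3": {ID: "3", Rev: "a"}}
	for _, ev := range diffRoutes(prev, curr) {
		fmt.Println(ev.Type, ev.Object.ID) // delete 1, update 2, add 3 (in map iteration order)
	}
}
```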
38 | 39 | ## ETCD V3 APIs 40 | 41 | In order to let APISIX fetches configuration from apisix-mesh-agent, the apisix-mesh-agent implments the [ETCD V3 APIs](https://etcd.io/docs/v3.3/rfc/), not all APIs were supported but at least the part that used by Apache APISIX was covered. 42 | 43 | From the perspective of Apache APISIX, apisix-mesh-agent is an ETCD cluster. 44 | -------------------------------------------------------------------------------- /docs/images/apisix-mesh-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/api7/apisix-mesh-agent/39db037a866c182ec15c7de90c2a7022bfd0caa6/docs/images/apisix-mesh-overview.png -------------------------------------------------------------------------------- /docs/images/data-plane-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/api7/apisix-mesh-agent/39db037a866c182ec15c7de90c2a7022bfd0caa6/docs/images/data-plane-overview.png -------------------------------------------------------------------------------- /docs/images/the-internal-of-apisix-mesh-agent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/api7/apisix-mesh-agent/39db037a866c182ec15c7de90c2a7022bfd0caa6/docs/images/the-internal-of-apisix-mesh-agent.png -------------------------------------------------------------------------------- /docs/istio-mesh.md: -------------------------------------------------------------------------------- 1 | Istio Mesh 2 | ========== 3 | 4 | This post gives the guide how to integrate apisix-mesh-agent into Istio mesh. 5 | 6 | Prerequisites 7 | ------------- 8 | 9 | ### Prepare the Kubernetes cluster 10 | 11 | Just use any [Kubernetes](https://kubernetes.io/) cluster that you want, if you don't have an existing one in your hand, we recommend you to use [Kind](https://kind.sigs.k8s.io/) to build a Kubernetes cluster for development quickly, you can run the following commands to set up and clean a Kubernetes cluster with a [Docker Registry](https://docs.docker.com/registry/#:~:text=The%20Registry%20is%20a%20stateless,under%20the%20permissive%20Apache%20license.). 12 | 13 | ```shell 14 | cd /path/to/apisix-mesh-agent 15 | make kind-up 16 | make kind-reset 17 | ``` 18 | 19 | ### Install Helm 20 | 21 | In this post, we use [Helm 3](https://helm.io) to install [Istio](https://istio.io). You should download the desired Istio release version to your local environment. In this document, we use [istio/1.9.1](https://github.com/istio/istio/releases/tag/1.9.1). 22 | 23 | ### Create Istio Root Namespace 24 | 25 | In this post, we use the typical `istio-system` as the istio root namespace. 26 | 27 | ```shell 28 | kubectl create namespace istio-system 29 | ``` 30 | 31 | Build and Push Image 32 | -------------------- 33 | 34 | ```shell 35 | export DOCKER_IMAGE_TAG=dev 36 | export DOCKER_IMAGE_REGISTRY=localhost:5000 37 | cd /path/to/apisix-mesh-agent 38 | make build-image 39 | docker tag api7/apisix-mesh-agent:$DOCKER_IMAGE_TAG $DOCKER_IMAGE_REGISTRY/api7/apisix-mesh-agent:$DOCKER_IMAGE_TAG 40 | docker push $DOCKER_IMAGE_REGISTRY/api7/apisix-mesh-agent:$DOCKER_IMAGE_TAG 41 | ``` 42 | 43 | The above commands build the image firstly and push the image to the target image registry (change the `DOCKER_IMAGE_REGISTRY` to your desired one). You should have [docker](https://www.docker.com/) installed in the running environment. 
44 | 45 | > Note: 46 | > 47 | > 1. You should change the value of DOCKER_IMAGE_REGISTRY to the actual image registry address that you're using. 48 | > 49 | > 2. Your image registry should be accessible from the Kubernetes cluster. 50 | 51 | Install Istio-base Chart 52 | ------------------------- 53 | 54 | ```shell 55 | cd /path/to/istio/manifests 56 | helm install istio-base \ 57 | --namespace istio-system \ 58 | ./charts/base 59 | ``` 60 | 61 | istio-base chart contains several resources which are required for running `istiod`. 62 | 63 | > Before you execute the above commands, be sure you've cloned [istio](https://istio.io/) to your local. 64 | 65 | Install istio-control Chart 66 | ---------------------------- 67 | 68 | ```shell 69 | export ISTIO_RELEASE=1.9.1 70 | cd /path/to/istio/manifests 71 | cp /path/to/apisix-mesh-agent/manifests/istio/injection-template.yaml charts/istio-control/istio-discovery/files/ 72 | helm install istio-discovery \ 73 | --namespace istio-system \ 74 | --set pilot.image=istio/pilot:$ISTIO_RELEASE \ 75 | --set global.proxy.privileged=true \ 76 | --set global.proxy_init.hub=$DOCKER_IMAGE_REGISTRY \ 77 | --set global.proxy_init.image=api7/apisix-mesh-agent \ 78 | --set global.proxy_init.tag=dev \ 79 | --set global.proxy.hub=$DOCKER_IMAGE_REGISTRY \ 80 | --set global.proxy.image=api7/apisix-mesh-agent \ 81 | --set global.proxy.tag=dev \ 82 | ./charts/istio-control/istio-discovery 83 | ``` 84 | 85 | We changed the injection template to [injection-template.yaml](../manifests/istio/injection-template.yaml) as we want to change the sidecar from [Envoy](https://www.envoyproxy.io/) to apisix-mesh-agent. 86 | 87 | > Please make sure memory is enough as by default Istios requests `2G` memory, if that's expensive in your Kubernetes cluster, changing the resources configuration by specifying: `--set pilot.resources.requests.memory=`. 88 | 89 | Test 90 | ---- 91 | 92 | ```shell 93 | kubectl create namespace test 94 | kubectl run nginx --image=nginx -n test --port 80 95 | ``` 96 | 97 | Wait for a while and check out the pod status, the sidecar container should be injected into the nginx pod. 98 | 99 | ```shell 100 | kubectl get pods -n test 101 | NAME READY STATUS RESTARTS AGE 102 | nginx 2/2 Running 0 53s 103 | ``` 104 | 105 | For further learning, please read [tiny-service-mesh-scenario](./examples/tiny-service-mesh-scnario.md), so you can know how to verify this mesh by sending requests. 106 | 107 | Uninstall 108 | --------- 109 | 110 | ```shell 111 | helm uninstall istio-discovery --namespace istio-system 112 | helm uninstall istio-base --namespace istio-system 113 | kubectl delete namespace istio-system 114 | ``` 115 | -------------------------------------------------------------------------------- /docs/the-internal-of-apisix-mesh-agent.md: -------------------------------------------------------------------------------- 1 | # The Internal of apisix mesh agent 2 | 3 | This article explains the internal design of apisix-mesh-agent. 4 | 5 | ## Table of Contents 6 | 7 | - [Overview](#overview) 8 | - [Provisioner](#provisioner) 9 | - [Adaptor](#adaptor) 10 | - [Cache](#cache) 11 | - [Etcd V3](#etcd-v3) 12 | 13 | ## Overview 14 | 15 | The apisix-mesh-agent is modular, each modular exposes interfaces to let others invoke it. 16 | Now it has four modules: [Provisioner](#provisioner), [Adaptor](#adaptor), [Cache](#cache), [Etcd V3](#etcd-v3). The dependency relationships between them are depicted by the following illustration. 
17 | 18 | ![the-internal-of-mesh-agent](./images/the-internal-of-apisix-mesh-agent.png) 19 | 20 | ## Provisioner 21 | 22 | The Provisioner provides configurations and delivers them as events. A Provisioner is not limited to any particular source type; 23 | it depends on the implementation. For instance, there is an [xDS](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol) provisioner, 24 | which connects to a Service Mesh control plane like [Istio](https://istio.io/). 25 | 26 | The Provisioner is defined as a concise Go interface. 27 | 28 | ```go 29 | // Provisioner provisions config event. 30 | // The source type can be xDS or UDPA or whatever anything else. 31 | type Provisioner interface { 32 | // Channel returns a readonly channel where caller can get events. 33 | Channel() <-chan []types.Event 34 | // Run launches the provisioner. 35 | Run(chan struct{}) error 36 | } 37 | ``` 38 | 39 | The `Channel` method gives a read-only channel that lets callers watch configuration changes continuously. 40 | An event represents a configuration change, which contains the event type, 41 | the event entity and the tombstone state of a deleted entity. 42 | 43 | ```go 44 | var ( 45 | // EventAdd represents the add event. 46 | EventAdd = EventType("add") 47 | // EventUpdate represents the update event. 48 | EventUpdate = EventType("update") 49 | // EventDelete represents the delete event. 50 | EventDelete = EventType("delete") 51 | ) 52 | 53 | // Event describes a specific event generated from the provisioner. 54 | type Event struct { 55 | Type EventType 56 | Object interface{} 57 | // Tombstone is only valid for delete event, 58 | // in such a case it stands for the final state 59 | // of the object. 60 | Tombstone interface{} 61 | } 62 | ``` 63 | 64 | Note that basically the `Object` field will be assigned APISIX resource data structures 65 | like `Route`, `Upstream` and so on. The Provisioner is responsible for translating the source-dependent data types to 66 | the APISIX resources with the help of the [Adaptor](#adaptor). Therefore, the caller of a Provisioner 67 | doesn't care about the original data types; it just knows the APISIX types. 68 | 69 | The design of the Provisioner also allows people to use other data stores to keep 70 | configurations and deliver them to APISIX even if they don't use a mesh. 71 | 72 | ## Adaptor 73 | 74 | The Adaptor is the abstraction of a data translator. It converts data from one type to another. There is no 75 | superclass definition for it because it depends on the original type. It's used by the Provisioner, so 76 | each Provisioner should have a matching Adaptor. Take xDS for example: there is an 77 | `XDSAdaptor` for xDS version 3. 78 | 79 | ```go 80 | // Adaptor translates xDS resources like Route, Cluster 81 | // to the equivalent configs in Apache APISIX. 82 | type Adaptor interface { 83 | // TranslateRouteConfiguration translate a RouteConfiguration to a series APISIX 84 | // Routes. 85 | TranslateRouteConfiguration(*routev3.RouteConfiguration) ([]*apisix.Route, error) 86 | ...... 87 | } 88 | ``` 89 | 90 | ## Cache 91 | 92 | Events generated by the Provisioner are reflected into the Cache. Data in the Cache are native APISIX resources. 93 | 94 | ```go 95 | // Cache defines what capabilities a cache solution should provide. 96 | type Cache interface { 97 | // Route returns the route exclusive cache object. 98 | Route() Route 99 | // Upstream returns the upstream exclusive cache object.
100 | Upstream() Upstream 101 | } 102 | 103 | // Route defines the exclusive behaviors for apisix.Route. 104 | type Route interface { 105 | // Get the apisix.Route by its id. In case of the object not found, 106 | // ErrObjectNotFound is given. 107 | Get(string) (*apisix.Route, error) 108 | // List lists all apisix.Route. 109 | List() ([]*apisix.Route, error) 110 | // Insert inserts or updates an apisix.Route object, indexed by its id. 111 | Insert(*apisix.Route) error 112 | // Delete deletes the apisix.Route object by the id. In case of object not 113 | // exist, ErrObjectNotFound is given. 114 | Delete(string) error 115 | } 116 | 117 | // Upstream defines the exclusive behaviors for apisix.Upstream. 118 | type Upstream interface { 119 | // Get the apisix.Upstream by its id. In case of the object not found, 120 | // ErrObjectNotFound is given. 121 | Get(string) (*apisix.Upstream, error) 122 | // List lists all apisix.Upstream. 123 | List() ([]*apisix.Upstream, error) 124 | // Insert creates or updates an apisix.Upstream object, indexed by its id. 125 | Insert(*apisix.Upstream) error 126 | // Delete deletes the apisix.Upstream object by the id. In case of object not 127 | // exist, ErrObjectNotFound is given. 128 | Delete(string) error 129 | } 130 | ``` 131 | 132 | Data in the Cache are used by the Etcd module. 133 | 134 | ## Etcd V3 135 | 136 | The Etcd V3 module implements part of the [Etcd V3 API](https://etcd.io/docs/current/learning/api/); it gives Apache APISIX the illusion that apisix-mesh-agent is just an etcd cluster. Not all features are supported, see [etcdv3-api-mimicking](./etcdv3-api-mimicking.md) for details. 137 | -------------------------------------------------------------------------------- /e2e/README.md: -------------------------------------------------------------------------------- 1 | E2E Test Suites 2 | =============== 3 | 4 | All E2E test suites run in your local environment, but all related components run in a Kubernetes cluster. We recommend you use [Kind](https://kind.sigs.k8s.io/), and we provide some simple commands 5 | to create the Kubernetes cluster with kind quickly. 6 | 7 | Workflow 8 | --------- 9 | 10 | The e2e framework sets up hooks when running each [ginkgo.Describe](https://pkg.go.dev/github.com/onsi/ginkgo#Describe) block; the `BeforeEach` hook does the following things before the test case can be run: 11 | 12 | 1. Create two namespaces, one for the service mesh control plane (like [Istio](https://istio.io)), the other for apps. 13 | 2. Deploy the control plane; Istio is in use for now, and it uses [modified charts](./charts) to replace [Envoy](https://www.envoyproxy.io/) with apisix-mesh-agent. 14 | 3. Label the app namespace so Pods inside it can be injected by the control plane. 15 | 4. Deploy the httpbin pod. 16 | 17 | Extra components might be deployed inside the test case, such as deploying a Pod as a springboard to send requests. 18 | 19 | How to run all the e2e test suites 20 | ----------------------------------- 21 | 22 | ```shell 23 | make e2e-test 24 | ``` 25 | 26 | You can pass the variable `E2E_CONCURRENCY` to control the concurrency. 27 | 28 | How can I focus on one test case 29 | --------------------------------- 30 | 31 | Edit the target test case, changing `ginkgo.It` to `ginkgo.FIt` or 32 | `ginkgo.Describe` to `ginkgo.FDescribe`, then execute `make e2e-test`.
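For example, a focused spec might look like the following sketch; the suite and case names here are made up purely for illustration, only the `F` prefix matters.

```go
package suites

import (
	"github.com/onsi/ginkgo"
)

// A hypothetical focused spec: prefixing Describe/It with "F" tells ginkgo
// to run only these blocks and skip every other spec in the suite.
var _ = ginkgo.FDescribe("proxy (focused)", func() {
	ginkgo.FIt("should route traffic through the sidecar", func() {
		// ... the real assertions for the scenario under debugging go here ...
	})
})
```

Remember to remove the `F` prefix before committing, otherwise CI will only execute the focused cases.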
33 | 34 | What if legacy resources remain after aborted debugging 35 | ------------------------------------------------------------ 36 | 37 | Just run the following command: 38 | 39 | ```shell 40 | make cleanup-e2e-legacies 41 | ``` 42 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: base 3 | version: 1.1.0 4 | tillerVersion: ">=2.7.2" 5 | description: Helm chart for deploying Istio cluster resources and CRDs 6 | keywords: 7 | - istio 8 | sources: 9 | - http://github.com/istio/istio 10 | engine: gotpl 11 | icon: https://istio.io/latest/favicons/android-192x192.png 12 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/NOTES.txt: -------------------------------------------------------------------------------- 1 | Installs Istio cluster resources: CRDs, cluster bindings and associated service accounts. 2 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/crds/crd-operator.yaml: -------------------------------------------------------------------------------- 1 | # SYNC WITH manifests/charts/istio-operator/templates 2 | apiVersion: apiextensions.k8s.io/v1beta1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: istiooperators.install.istio.io 6 | labels: 7 | release: istio 8 | spec: 9 | additionalPrinterColumns: 10 | - JSONPath: .spec.revision 11 | description: Istio control plane revision 12 | name: Revision 13 | type: string 14 | - JSONPath: .status.status 15 | description: IOP current state 16 | type: string 17 | name: Status 18 | - JSONPath: .metadata.creationTimestamp 19 | description: 'CreationTimestamp is a timestamp representing the server time when 20 | this object was created. It is not guaranteed to be set in happens-before order 21 | across separate operations. Clients may not set this value. It is represented 22 | in RFC3339 form and is in UTC. Populated by the system. Read-only. Null for 23 | lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata' 24 | name: Age 25 | type: date 26 | group: install.istio.io 27 | names: 28 | kind: IstioOperator 29 | plural: istiooperators 30 | singular: istiooperator 31 | shortNames: 32 | - iop 33 | - io 34 | scope: Namespaced 35 | subresources: 36 | status: {} 37 | validation: 38 | openAPIV3Schema: 39 | properties: 40 | apiVersion: 41 | description: 'APIVersion defines the versioned schema of this representation 42 | of an object. Servers should convert recognized schemas to the latest 43 | internal value, and may reject unrecognized values. 44 | More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#resources' 45 | type: string 46 | kind: 47 | description: 'Kind is a string value representing the REST resource this 48 | object represents. Servers may infer this from the endpoint the client 49 | submits requests to. Cannot be updated. In CamelCase. 50 | More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 51 | type: string 52 | spec: 53 | description: 'Specification of the desired state of the istio control plane resource.
54 | More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' 55 | type: object 56 | status: 57 | description: 'Status describes each of istio control plane component status at the current time. 58 | 0 means NONE, 1 means UPDATING, 2 means HEALTHY, 3 means ERROR, 4 means RECONCILING. 59 | More info: https://github.com/istio/api/blob/master/operator/v1alpha1/istio.operator.v1alpha1.pb.html & 60 | https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' 61 | type: object 62 | versions: 63 | - name: v1alpha1 64 | served: true 65 | storage: true 66 | --- 67 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - files/gen-istio-cluster.yaml 6 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: istio-reader-{{ .Values.global.istioNamespace }} 5 | labels: 6 | app: istio-reader 7 | release: {{ .Release.Name }} 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: istio-reader-{{ .Values.global.istioNamespace }} 12 | subjects: 13 | - kind: ServiceAccount 14 | name: istio-reader-service-account 15 | namespace: {{ .Values.global.istioNamespace }} 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRoleBinding 19 | metadata: 20 | name: istiod-{{ .Values.global.istioNamespace }} 21 | labels: 22 | app: istiod 23 | release: {{ .Release.Name }} 24 | roleRef: 25 | apiGroup: rbac.authorization.k8s.io 26 | kind: ClusterRole 27 | name: istiod-{{ .Values.global.istioNamespace }} 28 | subjects: 29 | - kind: ServiceAccount 30 | name: istiod-service-account 31 | namespace: {{ .Values.global.istioNamespace }} 32 | --- 33 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/templates/crds.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.base.enableCRDTemplates }} 2 | {{ .Files.Get "crds/crd-all.gen.yaml" }} 3 | {{ .Files.Get "crds/crd-operator.yaml" }} 4 | {{- end }} 5 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/templates/endpoints.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.global.remotePilotAddress }} 2 | {{- if .Values.pilot.enabled }} 3 | apiVersion: v1 4 | kind: Endpoints 5 | metadata: 6 | name: istiod-remote 7 | namespace: {{ .Release.Namespace }} 8 | subsets: 9 | - addresses: 10 | - ip: {{ .Values.global.remotePilotAddress }} 11 | ports: 12 | - port: 15012 13 | name: tcp-istiod 14 | protocol: TCP 15 | {{- else if regexMatch "^([0-9]*\\.){3}[0-9]*$" .Values.global.remotePilotAddress }} 16 | apiVersion: v1 17 | kind: Endpoints 18 | metadata: 19 | name: istiod 20 | namespace: {{ .Release.Namespace }} 21 | subsets: 22 | - addresses: 23 | - ip: {{ .Values.global.remotePilotAddress }} 24 | ports: 25 | - port: 15012 26 | name: tcp-istiod 27 | protocol: TCP 28 | {{- end }} 29 | --- 30 
| {{- end }} 31 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/templates/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: istiod-{{ .Values.global.istioNamespace }} 5 | namespace: {{ .Values.global.istioNamespace }} 6 | labels: 7 | app: istiod 8 | release: {{ .Release.Name }} 9 | rules: 10 | # permissions to verify the webhook is ready and rejecting 11 | # invalid config. We use --server-dry-run so no config is persisted. 12 | - apiGroups: ["networking.istio.io"] 13 | verbs: ["create"] 14 | resources: ["gateways"] 15 | 16 | # For storing CA secret 17 | - apiGroups: [""] 18 | resources: ["secrets"] 19 | # TODO lock this down to istio-ca-cert if not using the DNS cert mesh config 20 | verbs: ["create", "get", "watch", "list", "update", "delete"] 21 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: istiod-{{ .Values.global.istioNamespace }} 5 | namespace: {{ .Values.global.istioNamespace }} 6 | labels: 7 | app: istiod 8 | release: {{ .Release.Name }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: Role 12 | name: istiod-{{ .Values.global.istioNamespace }} 13 | subjects: 14 | - kind: ServiceAccount 15 | name: istiod-service-account 16 | namespace: {{ .Values.global.istioNamespace }} 17 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | {{- if .Values.global.imagePullSecrets }} 4 | imagePullSecrets: 5 | {{- range .Values.global.imagePullSecrets }} 6 | - name: {{ . }} 7 | {{- end }} 8 | {{- end }} 9 | metadata: 10 | name: istio-reader-service-account 11 | namespace: {{ .Values.global.istioNamespace }} 12 | labels: 13 | app: istio-reader 14 | release: {{ .Release.Name }} 15 | --- 16 | apiVersion: v1 17 | kind: ServiceAccount 18 | {{- if .Values.global.imagePullSecrets }} 19 | imagePullSecrets: 20 | {{- range .Values.global.imagePullSecrets }} 21 | - name: {{ . 
}} 22 | {{- end }} 23 | {{- end }} 24 | metadata: 25 | name: istiod-service-account 26 | namespace: {{ .Values.global.istioNamespace }} 27 | labels: 28 | app: istiod 29 | release: {{ .Release.Name }} 30 | --- 31 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/templates/services.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.global.remotePilotAddress }} 2 | {{- if .Values.pilot.enabled }} 3 | # when istiod is enabled in remote cluster, we can't use istiod service name 4 | apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: istiod-remote 8 | namespace: {{ .Release.Namespace }} 9 | spec: 10 | ports: 11 | - port: 15012 12 | name: tcp-istiod 13 | protocol: TCP 14 | clusterIP: None 15 | {{- else }} 16 | # when istiod isn't enabled in remote cluster, we can use istiod service name 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: istiod 21 | namespace: {{ .Release.Namespace }} 22 | spec: 23 | ports: 24 | - port: 15012 25 | name: tcp-istiod 26 | protocol: TCP 27 | # if the remotePilotAddress is IP addr, we use clusterIP: None. 28 | # else, we use externalName 29 | {{- if regexMatch "^([0-9]*\\.){3}[0-9]*$" .Values.global.remotePilotAddress }} 30 | clusterIP: None 31 | {{- else }} 32 | type: ExternalName 33 | externalName: {{ .Values.global.remotePilotAddress }} 34 | {{- end }} 35 | {{- end }} 36 | --- 37 | {{- end }} 38 | -------------------------------------------------------------------------------- /e2e/charts/istio/base/templates/validatingwebhookconfiguration.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.global.configValidation }} 2 | apiVersion: admissionregistration.k8s.io/v1beta1 3 | kind: ValidatingWebhookConfiguration 4 | metadata: 5 | name: istiod-{{ .Values.global.istioNamespace }} 6 | labels: 7 | app: istiod 8 | release: {{ .Release.Name }} 9 | istio: istiod 10 | webhooks: 11 | - name: validation.istio.io 12 | clientConfig: 13 | {{- if .Values.base.validationURL }} 14 | url: {{ .Values.base.validationURL }} 15 | {{- else }} 16 | service: 17 | name: istiod 18 | namespace: {{ .Values.global.istioNamespace }} 19 | path: "/validate" 20 | {{- end }} 21 | caBundle: "" # patched at runtime when the webhook is ready. 22 | rules: 23 | - operations: 24 | - CREATE 25 | - UPDATE 26 | apiGroups: 27 | - security.istio.io 28 | - networking.istio.io 29 | apiVersions: 30 | - "*" 31 | resources: 32 | - "*" 33 | # Fail open until the validation webhook is ready. The webhook controller 34 | # will update this to `Fail` and patch in the `caBundle` when the webhook 35 | # endpoint is ready. 36 | failurePolicy: Ignore 37 | sideEffects: None 38 | admissionReviewVersions: ["v1beta1", "v1"] 39 | --- 40 | {{- end }} -------------------------------------------------------------------------------- /e2e/charts/istio/base/values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | 3 | # ImagePullSecrets for control plane ServiceAccount, list of secrets in the same namespace 4 | # to use for pulling any images in pods that reference this ServiceAccount. 5 | # Must be set for any cluster configured with private docker registry. 6 | imagePullSecrets: [] 7 | 8 | # Used to locate istiod. 
9 | istioNamespace: istio-system 10 | 11 | istiod: 12 | enableAnalysis: false 13 | 14 | configValidation: true 15 | externalIstiod: false 16 | remotePilotAddress: "" 17 | 18 | base: 19 | # Used for helm2 to add the CRDs to templates. 20 | enableCRDTemplates: false 21 | 22 | # Validation webhook configuration url 23 | # For example: https://$remotePilotAddress:15017/validate 24 | validationURL: "" 25 | 26 | # For istioctl usage to disable istio config crds in base 27 | enableIstioConfigCRDs: true 28 | -------------------------------------------------------------------------------- /e2e/charts/istio/istio-discovery/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: istio-discovery 3 | version: 1.2.0 4 | tillerVersion: ">=2.7.2" 5 | description: Helm chart for istio control plane 6 | keywords: 7 | - istio 8 | - istiod 9 | - istio-discovery 10 | sources: 11 | - http://github.com/istio/istio 12 | engine: gotpl 13 | icon: https://istio.io/latest/favicons/android-192x192.png 14 | -------------------------------------------------------------------------------- /e2e/charts/istio/istio-discovery/NOTES.txt: -------------------------------------------------------------------------------- 1 | Minimal control plane for Istio. Pilot and mesh config are included. 2 | 3 | MCP and injector should optionally be installed in the same namespace. Alternatively remote 4 | address of an MCP server can be set. 5 | 6 | -------------------------------------------------------------------------------- /e2e/charts/istio/istio-discovery/files/injection-template.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | labels: 3 | service.istio.io/canonical-name: {{ index .ObjectMeta.Labels `service.istio.io/canonical-name` | default (index .ObjectMeta.Labels `app.kubernetes.io/name`) | default (index .ObjectMeta.Labels `app`) | default .DeploymentMeta.Name | quote }} 4 | service.istio.io/canonical-revision: {{ index .ObjectMeta.Labels `service.istio.io/canonical-revision` | default (index .ObjectMeta.Labels `app.kubernetes.io/version`) | default (index .ObjectMeta.Labels `version`) | default "latest" | quote }} 5 | istio.io/rev: {{ .Revision | default "default" | quote }} 6 | spec: 7 | initContainers: 8 | - name: istio-init 9 | image: "{{ .Values.global.hub }}/{{ .Values.global.proxy_init.image }}:{{ .Values.global.tag }}" 10 | args: 11 | - iptables 12 | - --apisix-user 13 | - nobody 14 | - --apisix-inbound-capture-port 15 | - "9081" 16 | - --apisix-port 17 | - "9080" 18 | - --inbound-ports 19 | - "*" 20 | - --inbound-exclude-ports 21 | - "17739" 22 | - --outbound-ports 23 | - "*" 24 | - --outbound-exclude-ports 25 | - "17739,15010" 26 | imagePullPolicy: "{{ valueOrDefault .Values.global.imagePullPolicy `Always` }}" 27 | securityContext: 28 | allowPrivilegeEscalation: {{ .Values.global.proxy.privileged }} 29 | privileged: {{ .Values.global.proxy.privileged }} 30 | capabilities: 31 | add: 32 | - NET_ADMIN 33 | - NET_RAW 34 | drop: 35 | - ALL 36 | readOnlyRootFilesystem: false 37 | runAsGroup: 0 38 | runAsNonRoot: false 39 | runAsUser: 0 40 | restartPolicy: Always 41 | containers: 42 | - name: istio-proxy 43 | image: "{{ .Values.global.hub }}/{{ .Values.global.proxy.image }}:{{ .Values.global.tag }}" 44 | ports: 45 | - containerPort: 9080 46 | protocol: TCP 47 | name: http-outbound 48 | - containerPort: 9081 49 | protocol: TCP 50 | name: http-inbound 51 | - containerPort: 17739 52 | protocol: TCP 53 | 
name: agent 54 | args: 55 | - sidecar 56 | - --run-mode 57 | - bundle 58 | - --provisioner 59 | - xds-v3-grpc 60 | - --log-level 61 | - debug 62 | - --xds-config-source 63 | - "grpc://istiod.{{ .Values.global.istioNamespace }}.svc.{{ .Values.global.proxy.clusterDomain }}:15010" 64 | - --apisix-bin-path 65 | - /usr/bin/apisix 66 | - --grpc-listen 67 | - 0.0.0.0:17739 68 | env: 69 | - name: POD_NAME 70 | valueFrom: 71 | fieldRef: 72 | fieldPath: metadata.name 73 | - name: POD_NAMESPACE 74 | valueFrom: 75 | fieldRef: 76 | fieldPath: metadata.namespace 77 | imagePullPolicy: "{{ valueOrDefault .Values.global.imagePullPolicy `Always` }}" 78 | -------------------------------------------------------------------------------- /e2e/charts/istio/istio-discovery/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - files/gen-istio.yaml 6 | -------------------------------------------------------------------------------- /e2e/charts/istio/istio-discovery/templates/autoscale.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.pilot.autoscaleEnabled .Values.pilot.autoscaleMin .Values.pilot.autoscaleMax }} 2 | apiVersion: autoscaling/v2beta1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: istiod{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: istiod 9 | release: {{ .Release.Name }} 10 | istio.io/rev: {{ .Values.revision | default "default" }} 11 | install.operator.istio.io/owning-resource: {{ .Values.ownerName | default "unknown" }} 12 | operator.istio.io/component: "Pilot" 13 | spec: 14 | maxReplicas: {{ .Values.pilot.autoscaleMax }} 15 | minReplicas: {{ .Values.pilot.autoscaleMin }} 16 | scaleTargetRef: 17 | apiVersion: apps/v1 18 | kind: Deployment 19 | name: istiod{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} 20 | metrics: 21 | - type: Resource 22 | resource: 23 | name: cpu 24 | targetAverageUtilization: {{ .Values.pilot.cpu.targetAverageUtilization }} 25 | --- 26 | {{- end }} 27 | -------------------------------------------------------------------------------- /e2e/charts/istio/istio-discovery/templates/configmap-jwks.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.pilot.jwksResolverExtraRootCA }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: pilot-jwks-extra-cacerts{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | release: {{ .Release.Name }} 9 | istio.io/rev: {{ .Values.revision | default "default" }} 10 | install.operator.istio.io/owning-resource: {{ .Values.ownerName | default "unknown" }} 11 | operator.istio.io/component: "Pilot" 12 | data: 13 | extra.pem: {{ .Values.pilot.jwksResolverExtraRootCA | quote }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /e2e/charts/istio/istio-discovery/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | 2 | {{- define "mesh" }} 3 | # The trust domain corresponds to the trust root of a system. 
4 | # Refer to https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain 5 | trustDomain: {{ .Values.global.trustDomain | default "cluster.local" | quote }} 6 | 7 | # The namespace to treat as the administrative root namespace for Istio configuration. 8 | # When processing a leaf namespace Istio will search for declarations in that namespace first 9 | # and if none are found it will search in the root namespace. Any matching declaration found in the root namespace 10 | # is processed as if it were declared in the leaf namespace. 11 | rootNamespace: {{ .Values.meshConfig.rootNamespace | default .Values.global.istioNamespace }} 12 | 13 | defaultConfig: 14 | {{- if .Values.global.meshID }} 15 | meshId: {{ .Values.global.meshID }} 16 | {{- else if .Values.global.trustDomain }} 17 | meshId: {{ .Values.global.trustDomain }} 18 | {{- end }} 19 | tracing: 20 | {{- if eq .Values.global.proxy.tracer "lightstep" }} 21 | lightstep: 22 | # Address of the LightStep Satellite pool 23 | address: {{ .Values.global.tracer.lightstep.address }} 24 | # Access Token used to communicate with the Satellite pool 25 | accessToken: {{ .Values.global.tracer.lightstep.accessToken }} 26 | {{- else if eq .Values.global.proxy.tracer "zipkin" }} 27 | zipkin: 28 | # Address of the Zipkin collector 29 | address: {{ .Values.global.tracer.zipkin.address | default (print "zipkin." .Values.global.istioNamespace ":9411") }} 30 | {{- else if eq .Values.global.proxy.tracer "datadog" }} 31 | datadog: 32 | # Address of the Datadog Agent 33 | address: {{ .Values.global.tracer.datadog.address | default "$(HOST_IP):8126" }} 34 | {{- else if eq .Values.global.proxy.tracer "stackdriver" }} 35 | stackdriver: 36 | # enables trace output to stdout. 37 | {{- if $.Values.global.tracer.stackdriver.debug }} 38 | debug: {{ $.Values.global.tracer.stackdriver.debug }} 39 | {{- end }} 40 | {{- if $.Values.global.tracer.stackdriver.maxNumberOfAttributes }} 41 | # The global default max number of attributes per span. 42 | maxNumberOfAttributes: {{ $.Values.global.tracer.stackdriver.maxNumberOfAttributes | default "200" }} 43 | {{- end }} 44 | {{- if $.Values.global.tracer.stackdriver.maxNumberOfAnnotations }} 45 | # The global default max number of annotation events per span. 46 | maxNumberOfAnnotations: {{ $.Values.global.tracer.stackdriver.maxNumberOfAnnotations | default "200" }} 47 | {{- end }} 48 | {{- if $.Values.global.tracer.stackdriver.maxNumberOfMessageEvents }} 49 | # The global default max number of message events per span. 
50 | maxNumberOfMessageEvents: {{ $.Values.global.tracer.stackdriver.maxNumberOfMessageEvents | default "200" }} 51 | {{- end }} 52 | {{- else if eq .Values.global.proxy.tracer "openCensusAgent" }} 53 | {{- /* Fill in openCensusAgent configuration from meshConfig so it isn't overwritten below */ -}} 54 | {{ toYaml $.Values.meshConfig.defaultConfig.tracing }} 55 | {{- end }} 56 | {{- if .Values.global.remotePilotAddress }} 57 | {{- if .Values.pilot.enabled }} 58 | discoveryAddress: {{ printf "istiod-remote.%s.svc" .Release.Namespace }}:15012 59 | {{- else }} 60 | discoveryAddress: {{ printf "istiod.%s.svc" .Release.Namespace }}:15012 61 | {{- end }} 62 | {{- else }} 63 | discoveryAddress: istiod{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }}.{{.Release.Namespace}}.svc:15012 64 | {{- end }} 65 | {{- end }} 66 | 67 | {{/* We take the mesh config above, defined with individual values.yaml, and merge with .Values.meshConfig */}} 68 | {{/* The intent here is that meshConfig.foo becomes the API, rather than re-inventing the API in values.yaml */}} 69 | {{- $originalMesh := include "mesh" . | fromYaml }} 70 | {{- $mesh := mergeOverwrite $originalMesh .Values.meshConfig }} 71 | 72 | {{- if .Values.pilot.configMap }} 73 | apiVersion: v1 74 | kind: ConfigMap 75 | metadata: 76 | name: istio{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} 77 | namespace: {{ .Release.Namespace }} 78 | labels: 79 | istio.io/rev: {{ .Values.revision | default "default" }} 80 | install.operator.istio.io/owning-resource: {{ .Values.ownerName | default "unknown" }} 81 | operator.istio.io/component: "Pilot" 82 | release: {{ .Release.Name }} 83 | data: 84 | 85 | # Configuration file for the mesh networks to be used by the Split Horizon EDS. 86 | meshNetworks: |- 87 | {{- if .Values.global.meshNetworks }} 88 | networks: 89 | {{ toYaml .Values.global.meshNetworks | trim | indent 6 }} 90 | {{- else }} 91 | networks: {} 92 | {{- end }} 93 | 94 | mesh: |- 95 | {{- if .Values.meshConfig }} 96 | {{ $mesh | toYaml | indent 4 }} 97 | {{- else }} 98 | {{- include "mesh" . }} 99 | {{- end }} 100 | --- 101 | {{- end }} 102 | -------------------------------------------------------------------------------- /e2e/charts/istio/istio-discovery/templates/istiod-injector-configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.global.omitSidecarInjectorConfigMap }} 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: istio-sidecar-injector{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | istio.io/rev: {{ .Values.revision | default "default" }} 9 | install.operator.istio.io/owning-resource: {{ .Values.ownerName | default "unknown" }} 10 | operator.istio.io/component: "Pilot" 11 | release: {{ .Release.Name }} 12 | data: 13 | {{/* Scope the values to just top level fields used in the template, to reduce the size. */}} 14 | values: |- 15 | {{ pick .Values "global" "istio_cni" "sidecarInjectorWebhook" "revision" | toPrettyJson | indent 4 }} 16 | 17 | # To disable injection: use omitSidecarInjectorConfigMap, which disables the webhook patching 18 | # and istiod webhook functionality. 19 | # 20 | # New fields should not use Values - it is a 'primary' config object, users should be able 21 | # to fine tune it or use it with kube-inject. 
22 | config: |- 23 | # defaultTemplates defines the default template to use for pods that do not explicitly specify a template 24 | {{- if .Values.sidecarInjectorWebhook.defaultTemplates }} 25 | defaultTemplates: 26 | {{- range .Values.sidecarInjectorWebhook.defaultTemplates}} 27 | - {{ . }} 28 | {{- end }} 29 | {{- else }} 30 | defaultTemplates: [sidecar] 31 | {{- end }} 32 | policy: {{ .Values.global.proxy.autoInject }} 33 | alwaysInjectSelector: 34 | {{ toYaml .Values.sidecarInjectorWebhook.alwaysInjectSelector | trim | indent 6 }} 35 | neverInjectSelector: 36 | {{ toYaml .Values.sidecarInjectorWebhook.neverInjectSelector | trim | indent 6 }} 37 | injectedAnnotations: 38 | {{- range $key, $val := .Values.sidecarInjectorWebhook.injectedAnnotations }} 39 | "{{ $key }}": "{{ $val }}" 40 | {{- end }} 41 | {{- /* If someone ends up with this new template, but an older Istiod image, they will attempt to render this template 42 | which will fail with "Pod injection failed: template: inject:1: function "Istio_1_9_Required_Template_And_Version_Mismatched" not defined". 43 | This should make it obvious that their installation is broken. 44 | */}} 45 | template: {{ `{{ Template_Version_And_Istio_Version_Mismatched_Check_Installation }}` | quote }} 46 | templates: 47 | {{- if not (hasKey .Values.sidecarInjectorWebhook.templates "sidecar") }} 48 | sidecar: | 49 | {{ .Files.Get "files/injection-template.yaml" | trim | indent 8 }} 50 | {{- end }} 51 | {{- with .Values.sidecarInjectorWebhook.templates }} 52 | {{ toYaml . | trim | indent 6 }} 53 | {{- end }} 54 | 55 | {{- end }} 56 | -------------------------------------------------------------------------------- /e2e/charts/istio/istio-discovery/templates/poddisruptionbudget.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.global.defaultPodDisruptionBudget.enabled }} 2 | apiVersion: policy/v1beta1 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: istiod{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | app: istiod 9 | istio.io/rev: {{ .Values.revision | default "default" }} 10 | install.operator.istio.io/owning-resource: {{ .Values.ownerName | default "unknown" }} 11 | operator.istio.io/component: "Pilot" 12 | release: {{ .Release.Name }} 13 | istio: pilot 14 | spec: 15 | minAvailable: 1 16 | selector: 17 | matchLabels: 18 | app: istiod 19 | {{- if ne .Values.revision ""}} 20 | istio.io/rev: {{ .Values.revision }} 21 | {{- else }} 22 | istio: pilot 23 | {{- end }} 24 | --- 25 | {{- end }} 26 | -------------------------------------------------------------------------------- /e2e/charts/istio/istio-discovery/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: istiod{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | istio.io/rev: {{ .Values.revision | default "default" }} 8 | install.operator.istio.io/owning-resource: {{ .Values.ownerName | default "unknown" }} 9 | operator.istio.io/component: "Pilot" 10 | app: istiod 11 | istio: pilot 12 | release: {{ .Release.Name }} 13 | spec: 14 | ports: 15 | - port: 15010 16 | name: grpc-xds # plaintext 17 | protocol: TCP 18 | - port: 15012 19 | name: https-dns # mTLS with k8s-signed cert 20 | protocol: TCP 21 | - port: 443 22 | name: https-webhook # validation and injection 23 | targetPort: 
15017 24 | protocol: TCP 25 | - port: 15014 26 | name: http-monitoring # prometheus stats 27 | protocol: TCP 28 | selector: 29 | app: istiod 30 | {{- if ne .Values.revision ""}} 31 | istio.io/rev: {{ .Values.revision }} 32 | {{- else }} 33 | # Label used by the 'default' service. For versioned deployments we match with app and version. 34 | # This avoids default deployment picking the canary 35 | istio: pilot 36 | {{- end }} 37 | --- 38 | -------------------------------------------------------------------------------- /e2e/e2e_test.go: -------------------------------------------------------------------------------- 1 | package e2e 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/onsi/ginkgo" 8 | 9 | _ "github.com/api7/apisix-mesh-agent/e2e/suites" 10 | ) 11 | 12 | // TestE2ESuites is the entry of apisix-mesh-agent e2e suites. 13 | func TestE2ESuites(t *testing.T) { 14 | pwd, err := os.Getwd() 15 | if err != nil { 16 | panic(err) 17 | } 18 | if err := os.Setenv("APISIX_MESH_AGENT_E2E_HOME", pwd); err != nil { 19 | panic(err) 20 | } 21 | 22 | ginkgo.RunSpecs(t, "apisix-mesh-agent e2e test cases") 23 | } 24 | -------------------------------------------------------------------------------- /e2e/framework/controlplane/controlplane.go: -------------------------------------------------------------------------------- 1 | package controlplane 2 | 3 | // ControlPlane represents the control plane in e2e test cases. 4 | type ControlPlane interface { 5 | // Type returns the control plane type. 6 | Type() string 7 | // Namespace fetches the deployed namespace of control plane components. 8 | Namespace() string 9 | // InjectNamespace marks the target namespace as injectable. Pod in this 10 | // namespace will be injected by control plane. 11 | InjectNamespace(string) error 12 | // Deploy deploys the control plane. 13 | Deploy() error 14 | // Uninstall uninstalls the control plane. 15 | Uninstall() error 16 | // Addr returns the address to communicate with the control plane for fetching 17 | // configuration changes. 
18 | Addr() string 19 | } 20 | -------------------------------------------------------------------------------- /e2e/framework/httpbin.go: -------------------------------------------------------------------------------- 1 | package framework 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/gruntwork-io/terratest/modules/k8s" 8 | "github.com/onsi/ginkgo" 9 | corev1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | ) 12 | 13 | const ( 14 | _httpbinManifest = ` 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | metadata: 18 | name: httpbin 19 | labels: 20 | app: httpbin 21 | spec: 22 | replicas: {{ .HttpBinReplicas }} 23 | selector: 24 | matchLabels: 25 | app: httpbin 26 | template: 27 | metadata: 28 | labels: 29 | app: httpbin 30 | spec: 31 | containers: 32 | - name: httpbin 33 | image: {{ .LocalRegistry }}/kennethreitz/httpbin 34 | imagePullPolicy: IfNotPresent 35 | ports: 36 | - containerPort: 80 37 | protocol: TCP 38 | name: http 39 | --- 40 | apiVersion: v1 41 | kind: Service 42 | metadata: 43 | name: httpbin 44 | spec: 45 | selector: 46 | app: httpbin 47 | ports: 48 | - name: http 49 | targetPort: 80 50 | port: 80 51 | protocol: TCP 52 | ` 53 | ) 54 | 55 | func (f *Framework) newHttpBin() error { 56 | artifact, err := f.renderManifest(_httpbinManifest) 57 | if err != nil { 58 | return err 59 | } 60 | if err := k8s.KubectlApplyFromStringE(ginkgo.GinkgoT(), f.kubectlOpts, artifact); err != nil { 61 | return err 62 | } 63 | return f.waitUntilAllHttpBinPodsReady() 64 | } 65 | 66 | func (f *Framework) waitUntilAllHttpBinPodsReady() error { 67 | opts := metav1.ListOptions{ 68 | LabelSelector: "app=httpbin", 69 | } 70 | condFunc := func() (bool, error) { 71 | items, err := k8s.ListPodsE(ginkgo.GinkgoT(), f.kubectlOpts, opts) 72 | if err != nil { 73 | return false, err 74 | } 75 | if len(items) == 0 { 76 | ginkgo.GinkgoT().Log("no httpbin pods created") 77 | clientset, err := k8s.GetKubernetesClientFromOptionsE(ginkgo.GinkgoT(), f.kubectlOpts) 78 | if err != nil { 79 | return false, err 80 | } 81 | 82 | deployments, err := clientset.AppsV1().Deployments(f.kubectlOpts.Namespace).List(context.Background(), opts) 83 | if err != nil { 84 | return false, err 85 | } 86 | if len(deployments.Items) == 0 { 87 | ginkgo.GinkgoT().Log("no httpbin deployment created") 88 | return false, nil 89 | } 90 | for _, deployment := range deployments.Items { 91 | for _, cond := range deployment.Status.Conditions { 92 | ginkgo.GinkgoT().Logf("deployment %v: %v", deployment.Name, cond.String()) 93 | } 94 | } 95 | return false, nil 96 | } 97 | for _, pod := range items { 98 | found := false 99 | for _, cond := range pod.Status.Conditions { 100 | if cond.Type != corev1.PodReady { 101 | continue 102 | } 103 | found = true 104 | if cond.Status != corev1.ConditionTrue { 105 | return false, nil 106 | } 107 | } 108 | if !found { 109 | return false, nil 110 | } 111 | } 112 | return true, nil 113 | } 114 | return waitExponentialBackoff(condFunc) 115 | } 116 | 117 | // GetHttpBinServiceFQDN returns the FQDN description for HttpBin service. 
118 | func (f *Framework) GetHttpBinServiceFQDN() string { 119 | return fmt.Sprintf("httpbin.%s.svc.cluster.local", f.namespace) 120 | } 121 | -------------------------------------------------------------------------------- /e2e/framework/k8s.go: -------------------------------------------------------------------------------- 1 | package framework 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/gruntwork-io/terratest/modules/k8s" 7 | "github.com/onsi/ginkgo" 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | ) 11 | 12 | // CreateConfigMap create a ConfigMap object which filled by the key/value 13 | // specified by the caller. 14 | func (f *Framework) CreateConfigMap(name, key, value string) error { 15 | cm := &corev1.ConfigMap{ 16 | ObjectMeta: metav1.ObjectMeta{ 17 | Name: name, 18 | }, 19 | Data: map[string]string{ 20 | key: value, 21 | }, 22 | } 23 | client, err := k8s.GetKubernetesClientFromOptionsE(ginkgo.GinkgoT(), f.kubectlOpts) 24 | if err != nil { 25 | return err 26 | } 27 | if _, err := client.CoreV1().ConfigMaps(f.namespace).Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil { 28 | return err 29 | } 30 | return nil 31 | } 32 | 33 | // CreateResourceFromString creates a Kubernetes resource from the given manifest. 34 | func (f *Framework) CreateResourceFromString(res string) error { 35 | return k8s.KubectlApplyFromStringE(ginkgo.GinkgoT(), f.kubectlOpts, res) 36 | } 37 | -------------------------------------------------------------------------------- /e2e/framework/nginx.go: -------------------------------------------------------------------------------- 1 | package framework 2 | 3 | import ( 4 | "net/http" 5 | "net/url" 6 | 7 | "github.com/gavv/httpexpect/v2" 8 | "github.com/gruntwork-io/terratest/modules/k8s" 9 | "github.com/onsi/ginkgo" 10 | corev1 "k8s.io/api/core/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | ) 13 | 14 | const ( 15 | _nginxManifest = ` 16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | name: nginx 20 | spec: 21 | replicas: {{ .NginxReplicas }} 22 | selector: 23 | matchLabels: 24 | app: nginx 25 | template: 26 | metadata: 27 | name: nginx 28 | labels: 29 | app: nginx 30 | spec: 31 | {{ if .NginxVolumeConfigMap }} 32 | volumes: 33 | - name: conf 34 | configMap: 35 | name: {{ .NginxVolumeConfigMap }} 36 | {{ end }} 37 | containers: 38 | - name: nginx 39 | image: {{ .LocalRegistry }}/nginx:1.19.3 40 | imagePullPolicy: IfNotPresent 41 | ports: 42 | - containerPort: 80 43 | protocol: TCP 44 | name: http 45 | {{ if .NginxVolumeConfigMap }} 46 | volumeMounts: 47 | - name: conf 48 | mountPath: /etc/nginx/conf.d 49 | {{ end }} 50 | --- 51 | apiVersion: v1 52 | kind: Service 53 | metadata: 54 | name: nginx 55 | spec: 56 | selector: 57 | app: nginx 58 | ports: 59 | - name: http 60 | targetPort: 80 61 | port: 80 62 | protocol: TCP 63 | ` 64 | ) 65 | 66 | // DeployNginxWithConfigMapVolume deploys Nginx with an extra volume (ConfigMap type). 67 | func (f *Framework) DeployNginxWithConfigMapVolume(cm string) error { 68 | f.NginxVolumeConfigMap = cm 69 | defer func() { f.NginxVolumeConfigMap = "" }() 70 | if err := f.newNginx(); err != nil { 71 | return err 72 | } 73 | if err := f.waitUntilAppNginxPodsReady(); err != nil { 74 | return err 75 | } 76 | return nil 77 | } 78 | 79 | // NewHTTPClientToSpringboard creates a http client which sends requests to 80 | // springboard. 
81 | func (f *Framework) NewHTTPClientToSpringboard() (*httpexpect.Expect, error) { 82 | endpoint, err := f.buildTunnelToSpringboardService() 83 | if err != nil { 84 | return nil, err 85 | } 86 | u := url.URL{ 87 | Scheme: "http", 88 | Host: endpoint, 89 | } 90 | return httpexpect.WithConfig(httpexpect.Config{ 91 | BaseURL: u.String(), 92 | Client: &http.Client{ 93 | Transport: http.DefaultTransport, 94 | CheckRedirect: func(req *http.Request, via []*http.Request) error { 95 | return http.ErrUseLastResponse 96 | }, 97 | }, 98 | Reporter: httpexpect.NewAssertReporter(httpexpect.NewAssertReporter(ginkgo.GinkgoT())), 99 | }), nil 100 | } 101 | 102 | func (f *Framework) buildTunnelToSpringboardService() (string, error) { 103 | tunnel := k8s.NewTunnel(f.kubectlOpts, k8s.ResourceTypeService, "springboard", 12384, 80) 104 | if err := tunnel.ForwardPortE(ginkgo.GinkgoT()); err != nil { 105 | return "", err 106 | } 107 | f.tunnels = append(f.tunnels, tunnel) 108 | return tunnel.Endpoint(), nil 109 | } 110 | 111 | func (f *Framework) newNginx() error { 112 | artifact, err := f.renderManifest(_nginxManifest) 113 | if err != nil { 114 | return err 115 | } 116 | if err := k8s.KubectlApplyFromStringE(ginkgo.GinkgoT(), f.kubectlOpts, artifact); err != nil { 117 | return err 118 | } 119 | 120 | return nil 121 | } 122 | 123 | func (f *Framework) waitUntilAppNginxPodsReady() error { 124 | opts := metav1.ListOptions{ 125 | LabelSelector: "app=nginx", 126 | } 127 | condFunc := func() (bool, error) { 128 | items, err := k8s.ListPodsE(ginkgo.GinkgoT(), f.kubectlOpts, opts) 129 | if err != nil { 130 | return false, err 131 | } 132 | if len(items) == 0 { 133 | ginkgo.GinkgoT().Log("no nginx pods created") 134 | return false, nil 135 | } 136 | for _, pod := range items { 137 | found := false 138 | for _, cond := range pod.Status.Conditions { 139 | if cond.Type != corev1.PodReady { 140 | continue 141 | } 142 | found = true 143 | if cond.Status != corev1.ConditionTrue { 144 | return false, nil 145 | } 146 | } 147 | if !found { 148 | return false, nil 149 | } 150 | } 151 | return true, nil 152 | } 153 | return waitExponentialBackoff(condFunc) 154 | } 155 | -------------------------------------------------------------------------------- /e2e/framework/springboard.go: -------------------------------------------------------------------------------- 1 | package framework 2 | 3 | import ( 4 | "github.com/gruntwork-io/terratest/modules/k8s" 5 | "github.com/onsi/ginkgo" 6 | corev1 "k8s.io/api/core/v1" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | const ( 11 | _springboardManifest = ` 12 | apiVersion: v1 13 | kind: ConfigMap 14 | metadata: 15 | name: springboard 16 | data: 17 | proxy.conf: | 18 | server { 19 | listen 80 reuseport; 20 | location / { 21 | proxy_http_version 1.1; 22 | proxy_set_header Connection ""; 23 | proxy_pass http://{{ .SpringboardTarget }}; 24 | proxy_set_header Host $http_host; 25 | } 26 | } 27 | --- 28 | apiVersion: apps/v1 29 | kind: Deployment 30 | metadata: 31 | name: springboard 32 | spec: 33 | replicas: 1 34 | selector: 35 | matchLabels: 36 | app: springboard 37 | template: 38 | metadata: 39 | name: springboard 40 | labels: 41 | app: springboard 42 | annotations: 43 | sidecar.istio.io/inject: "false" 44 | spec: 45 | volumes: 46 | - name: conf 47 | configMap: 48 | name: springboard 49 | containers: 50 | - name: springboard 51 | image: {{ .LocalRegistry }}/nginx:1.19.3 52 | imagePullPolicy: IfNotPresent 53 | ports: 54 | - containerPort: 80 55 | protocol: TCP 56 | name: http 57 | 
volumeMounts: 58 | - name: conf 59 | mountPath: /etc/nginx/conf.d 60 | --- 61 | apiVersion: v1 62 | kind: Service 63 | metadata: 64 | name: springboard 65 | spec: 66 | selector: 67 | app: springboard 68 | ports: 69 | - name: http 70 | targetPort: 80 71 | port: 80 72 | protocol: TCP 73 | ` 74 | ) 75 | 76 | // DeploySpringboardWithSpecificProxyTarget deploys 77 | func (f *Framework) DeploySpringboardWithSpecificProxyTarget(target string) error { 78 | f.SpringboardTarget = target 79 | defer func() { 80 | f.SpringboardTarget = "" 81 | }() 82 | artifact, err := f.renderManifest(_springboardManifest) 83 | if err != nil { 84 | return err 85 | } 86 | if err := k8s.KubectlApplyFromStringE(ginkgo.GinkgoT(), f.kubectlOpts, artifact); err != nil { 87 | return err 88 | } 89 | if err := f.waitUntilAllSpringboardPodsReady(); err != nil { 90 | return err 91 | } 92 | 93 | return nil 94 | } 95 | 96 | func (f *Framework) waitUntilAllSpringboardPodsReady() error { 97 | opts := metav1.ListOptions{ 98 | LabelSelector: "app=springboard", 99 | } 100 | condFunc := func() (bool, error) { 101 | items, err := k8s.ListPodsE(ginkgo.GinkgoT(), f.kubectlOpts, opts) 102 | if err != nil { 103 | return false, err 104 | } 105 | if len(items) == 0 { 106 | ginkgo.GinkgoT().Log("no springboard pods created") 107 | return false, nil 108 | } 109 | for _, pod := range items { 110 | found := false 111 | for _, cond := range pod.Status.Conditions { 112 | if cond.Type != corev1.PodReady { 113 | continue 114 | } 115 | found = true 116 | if cond.Status != corev1.ConditionTrue { 117 | return false, nil 118 | } 119 | } 120 | if !found { 121 | return false, nil 122 | } 123 | } 124 | return true, nil 125 | } 126 | return waitExponentialBackoff(condFunc) 127 | } 128 | -------------------------------------------------------------------------------- /e2e/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/api7/apisix-mesh-agent/e2e 2 | 3 | go 1.16 4 | 5 | replace github.com/api7/apisix-mesh-agent => ../ 6 | 7 | require ( 8 | github.com/api7/apisix-mesh-agent v0.0.0-00010101000000-000000000000 9 | github.com/gavv/httpexpect/v2 v2.2.0 10 | github.com/gruntwork-io/terratest v0.32.15 11 | github.com/onsi/ginkgo v1.16.4 12 | github.com/onsi/gomega v1.10.5 13 | go.uber.org/zap v1.16.0 14 | golang.org/x/sys v0.0.0-20210608053332-aa57babbf139 // indirect 15 | k8s.io/api v0.20.4 16 | k8s.io/apimachinery v0.20.4 17 | ) 18 | -------------------------------------------------------------------------------- /e2e/suites/proxy.go: -------------------------------------------------------------------------------- 1 | package suites 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "time" 7 | 8 | "github.com/onsi/ginkgo" 9 | "github.com/onsi/gomega" 10 | 11 | "github.com/api7/apisix-mesh-agent/e2e/framework" 12 | ) 13 | 14 | var _ = ginkgo.Describe("[basic proxy functions]", func() { 15 | g := gomega.NewWithT(ginkgo.GinkgoT()) 16 | f, err := framework.NewDefaultFramework() 17 | g.Expect(err).ShouldNot(gomega.HaveOccurred()) 18 | 19 | ginkgo.It("nginx -> httpbin", func() { 20 | template := ` 21 | server { 22 | listen 80; 23 | server_name httpbin.org; 24 | location / { 25 | proxy_pass http://%s; 26 | proxy_set_header Host %s; 27 | proxy_http_version 1.1; 28 | proxy_set_header Connection ""; 29 | } 30 | } 31 | ` 32 | fqdn := f.GetHttpBinServiceFQDN() 33 | snippet := fmt.Sprintf(template, fqdn, fqdn) 34 | g.Expect(f.CreateConfigMap("nginx-httpbin", "httpbin.conf", snippet)).ShouldNot(gomega.HaveOccurred()) 35 | 
g.Expect(f.DeployNginxWithConfigMapVolume("nginx-httpbin")).ShouldNot(gomega.HaveOccurred()) 36 | g.Expect(f.DeploySpringboardWithSpecificProxyTarget("nginx")).ShouldNot(gomega.HaveOccurred()) 37 | 38 | expect, err := f.NewHTTPClientToSpringboard() 39 | g.Expect(err).ShouldNot(gomega.HaveOccurred()) 40 | 41 | time.Sleep(time.Second * 10) 42 | resp := expect.GET("/ip").WithHeader("Host", fqdn).Expect() 43 | if resp.Raw().StatusCode != http.StatusOK { 44 | ginkgo.GinkgoT().Log("status code is %v, please check logs", resp.Raw().StatusCode) 45 | time.Sleep(time.Hour * 1000) 46 | } 47 | // Hit the default route the cluster outbound|80||httpbin..svc.cluster.local 48 | resp.Status(http.StatusOK) 49 | // The first Via header was added by nginx's sidecar; 50 | // The second Via header was added by httpbin's sidecar; 51 | resp.Headers().Value("Via").Array().Equal([]string{"APISIX", "APISIX"}) 52 | resp.Body().Contains("origin") 53 | }) 54 | }) 55 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/api7/apisix-mesh-agent 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/envoyproxy/go-control-plane v0.9.9-0.20210115003313-31f9241a16e6 7 | github.com/envoyproxy/protoc-gen-validate v0.4.1 8 | github.com/fsnotify/fsnotify v1.4.9 9 | github.com/golang/protobuf v1.4.3 10 | github.com/google/uuid v1.2.0 11 | github.com/grpc-ecosystem/grpc-gateway v1.14.6 12 | github.com/soheilhy/cmux v0.1.4 13 | github.com/spf13/cobra v1.1.3 14 | github.com/stretchr/testify v1.7.0 15 | github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 16 | go.etcd.io/etcd/api/v3 v3.5.0-alpha.0 17 | go.uber.org/zap v1.16.0 18 | golang.org/x/net v0.0.0-20210525063256-abc453219eb5 19 | google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c 20 | google.golang.org/grpc v1.36.0 21 | google.golang.org/grpc/examples v0.0.0-20210304020650-930c79186c99 // indirect 22 | google.golang.org/protobuf v1.25.0 23 | gotest.tools v2.2.0+incompatible 24 | istio.io/istio v0.0.0-20210308180034-f6502508b04c 25 | ) 26 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/api7/apisix-mesh-agent/cmd" 8 | ) 9 | 10 | func main() { 11 | rootCmd := cmd.NewMeshAgentCommand() 12 | if err := rootCmd.Execute(); err != nil { 13 | _, _ = fmt.Fprintln(os.Stderr, err.Error()) 14 | os.Exit(1) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /manifests/istio/injection-template.yaml: -------------------------------------------------------------------------------- 1 | metadata: 2 | labels: 3 | service.istio.io/canonical-name: {{ index .ObjectMeta.Labels `service.istio.io/canonical-name` | default (index .ObjectMeta.Labels `app.kubernetes.io/name`) | default (index .ObjectMeta.Labels `app`) | default .DeploymentMeta.Name | quote }} 4 | service.istio.io/canonical-revision: {{ index .ObjectMeta.Labels `service.istio.io/canonical-revision` | default (index .ObjectMeta.Labels `app.kubernetes.io/version`) | default (index .ObjectMeta.Labels `version`) | default "latest" | quote }} 5 | istio.io/rev: {{ .Revision | default "default" | quote }} 6 | spec: 7 | initContainers: 8 | - name: istio-init 9 | image: "{{ .Values.global.proxy_init.hub }}/{{ .Values.global.proxy_init.image }}:{{ 
.Values.global.proxy_init.tag }}" 10 | args: 11 | - iptables 12 | - --apisix-user 13 | - nobody 14 | - --apisix-inbound-capture-port 15 | - "9081" 16 | - --apisix-port 17 | - "9080" 18 | - --inbound-ports 19 | - "*" 20 | - --inbound-exclude-ports 21 | - "17739" 22 | - --outbound-ports 23 | - "*" 24 | - --outbound-exclude-ports 25 | - "17739,15010" 26 | imagePullPolicy: "{{ valueOrDefault .Values.global.imagePullPolicy `Always` }}" 27 | securityContext: 28 | allowPrivilegeEscalation: {{ .Values.global.proxy.privileged }} 29 | privileged: {{ .Values.global.proxy.privileged }} 30 | capabilities: 31 | add: 32 | - NET_ADMIN 33 | - NET_RAW 34 | drop: 35 | - ALL 36 | readOnlyRootFilesystem: false 37 | runAsGroup: 0 38 | runAsNonRoot: false 39 | runAsUser: 0 40 | restartPolicy: Always 41 | containers: 42 | - name: istio-proxy 43 | image: "{{ .Values.global.proxy.hub }}/{{ .Values.global.proxy.image }}:{{ .Values.global.proxy.tag }}" 44 | ports: 45 | - containerPort: 9080 46 | protocol: TCP 47 | name: http-outbound 48 | - containerPort: 9081 49 | protocol: TCP 50 | name: http-inbound 51 | - containerPort: 17739 52 | protocol: TCP 53 | name: agent 54 | args: 55 | - sidecar 56 | - --run-mode 57 | - bundle 58 | - --provisioner 59 | - xds-v3-grpc 60 | - --log-level 61 | - debug 62 | - --xds-config-source 63 | - "grpc://istiod.{{ .Values.global.istioNamespace }}.svc.{{ .Values.global.proxy.clusterDomain }}:15010" 64 | - --apisix-bin-path 65 | - /usr/bin/apisix 66 | - --grpc-listen 67 | - 0.0.0.0:17739 68 | env: 69 | - name: POD_NAME 70 | valueFrom: 71 | fieldRef: 72 | fieldPath: metadata.name 73 | - name: POD_NAMESPACE 74 | valueFrom: 75 | fieldRef: 76 | fieldPath: metadata.namespace 77 | imagePullPolicy: "{{ valueOrDefault .Values.global.imagePullPolicy `Always` }}" 78 | -------------------------------------------------------------------------------- /nginx/patches/nginx-1.19.3-connection-original-dst.patch: -------------------------------------------------------------------------------- 1 | diff --git a/auto/os/linux b/auto/os/linux 2 | index 5e280eca..086e5372 100644 3 | --- a/auto/os/linux 4 | +++ b/auto/os/linux 5 | @@ -190,6 +190,21 @@ ngx_feature_test="struct __user_cap_data_struct data; 6 | (void) SYS_capset" 7 | . auto/feature 8 | 9 | +# netfilter_ipv4 10 | + 11 | +ngx_feature="netfilter_ipv4" 12 | +ngx_feature_name="NGX_HAVE_NETFILTER_IPV4" 13 | +ngx_feature_run=no 14 | +ngx_feature_incs="#include " 15 | +ngx_feature_path= 16 | +ngx_feature_libs= 17 | +ngx_feature_test="int so_original_dst; 18 | + 19 | + so_original_dst = SO_ORIGINAL_DST; 20 | + 21 | + (void) so_original_dst;" 22 | +. 
auto/feature 23 | + 24 | 25 | # crypt_r() 26 | 27 | diff --git a/src/http/ngx_http_variables.c b/src/http/ngx_http_variables.c 28 | index e067cf0c..33aa042e 100644 29 | --- a/src/http/ngx_http_variables.c 30 | +++ b/src/http/ngx_http_variables.c 31 | @@ -130,6 +130,11 @@ static ngx_int_t ngx_http_variable_connection(ngx_http_request_t *r, 32 | static ngx_int_t ngx_http_variable_connection_requests(ngx_http_request_t *r, 33 | ngx_http_variable_value_t *v, uintptr_t data); 34 | 35 | +#if (NGX_HAVE_NETFILTER_IPV4) 36 | +static ngx_int_t ngx_http_variable_connection_dst(ngx_http_request_t *r, 37 | + ngx_http_variable_value_t *v, uintptr_t data); 38 | +#endif 39 | + 40 | static ngx_int_t ngx_http_variable_nginx_version(ngx_http_request_t *r, 41 | ngx_http_variable_value_t *v, uintptr_t data); 42 | static ngx_int_t ngx_http_variable_hostname(ngx_http_request_t *r, 43 | @@ -342,6 +347,11 @@ static ngx_http_variable_t ngx_http_core_variables[] = { 44 | { ngx_string("connection_requests"), NULL, 45 | ngx_http_variable_connection_requests, 0, 0, 0 }, 46 | 47 | +#if (NGX_HAVE_NETFILTER_IPV4) 48 | + { ngx_string("connection_original_dst"), NULL, 49 | + ngx_http_variable_connection_dst, 0, 0, 0 }, 50 | +#endif 51 | + 52 | { ngx_string("nginx_version"), NULL, ngx_http_variable_nginx_version, 53 | 0, 0, 0 }, 54 | 55 | @@ -2252,6 +2262,43 @@ ngx_http_variable_connection_requests(ngx_http_request_t *r, 56 | } 57 | 58 | 59 | +#if (NGX_HAVE_NETFILTER_IPV4) 60 | +static ngx_int_t 61 | +ngx_http_variable_connection_dst(ngx_http_request_t *r, 62 | + ngx_http_variable_value_t *v, uintptr_t data) 63 | +{ 64 | + struct sockaddr_in dst; 65 | + socklen_t socklen; 66 | + int rn; 67 | + u_char *p; 68 | + 69 | + socklen = sizeof(struct sockaddr_in); 70 | + 71 | + rn = getsockopt(r->connection->fd, SOL_IP, SO_ORIGINAL_DST, (void *) &dst, 72 | + &socklen); 73 | + if (rn < 0) { 74 | + ngx_log_error(NGX_LOG_CRIT, r->connection->log, ngx_socket_errno, 75 | + "getsockopt(SO_ORIGINAL_DST) failed"); 76 | + return NGX_ERROR; 77 | + } 78 | + 79 | + p = ngx_pnalloc(r->pool, NGX_SOCKADDR_STRLEN); 80 | + if (p == NULL) { 81 | + return NGX_ERROR; 82 | + } 83 | + 84 | + v->len = ngx_sock_ntop((struct sockaddr *) &dst, socklen, p, 85 | + NGX_SOCKADDR_STRLEN, dst.sin_port); 86 | + v->valid = 1; 87 | + v->no_cacheable = 0; 88 | + v->not_found = 0; 89 | + v->data = p; 90 | + 91 | + return NGX_OK; 92 | +} 93 | +#endif 94 | + 95 | + 96 | static ngx_int_t 97 | ngx_http_variable_nginx_version(ngx_http_request_t *r, 98 | ngx_http_variable_value_t *v, uintptr_t data) 99 | diff --git a/src/os/unix/ngx_linux_config.h b/src/os/unix/ngx_linux_config.h 100 | index 3036caeb..2353e9b3 100644 101 | --- a/src/os/unix/ngx_linux_config.h 102 | +++ b/src/os/unix/ngx_linux_config.h 103 | @@ -103,6 +103,9 @@ typedef struct iocb ngx_aiocb_t; 104 | #include 105 | #endif 106 | 107 | +#if (NGX_HAVE_NETFILTER_IPV4) 108 | +#include 109 | +#endif 110 | 111 | #define NGX_LISTEN_BACKLOG 511 112 | 113 | -------------------------------------------------------------------------------- /pkg/adaptor/xds/v3/cluster.go: -------------------------------------------------------------------------------- 1 | package v3 2 | 3 | import ( 4 | clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" 5 | corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" 6 | endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" 7 | "go.uber.org/zap" 8 | 9 | "github.com/api7/apisix-mesh-agent/pkg/id" 10 | 
"github.com/api7/apisix-mesh-agent/pkg/types/apisix" 11 | ) 12 | 13 | func (adaptor *adaptor) TranslateCluster(c *clusterv3.Cluster) (*apisix.Upstream, error) { 14 | ups := &apisix.Upstream{ 15 | Name: c.Name, 16 | Id: id.GenID(c.Name), 17 | Nodes: []*apisix.Node{}, 18 | } 19 | if err := adaptor.translateClusterLbPolicy(c, ups); err != nil { 20 | return nil, err 21 | } 22 | if err := adaptor.translateClusterTimeoutSettings(c, ups); err != nil { 23 | return nil, err 24 | } 25 | if err := adaptor.translateClusterLoadAssignments(c, ups); err != nil { 26 | if err == ErrRequireFurtherEDS { 27 | return ups, err 28 | } 29 | return nil, err 30 | } 31 | 32 | adaptor.logger.Debugw("got upstream after parsing cluster", 33 | zap.Any("cluster", c), 34 | ) 35 | 36 | return ups, nil 37 | } 38 | 39 | func (adaptor *adaptor) translateClusterLbPolicy(c *clusterv3.Cluster, ups *apisix.Upstream) error { 40 | switch c.GetLbPolicy() { 41 | case clusterv3.Cluster_ROUND_ROBIN: 42 | ups.Type = "roundrobin" 43 | case clusterv3.Cluster_LEAST_REQUEST: 44 | // Apache APISIX's lease_conn policy is same to lease request. 45 | // But is doesn't expose configuration items. So LbConfig field 46 | // is ignored. 47 | ups.Type = "least_conn" 48 | default: 49 | // Apache APISIX doesn't support Random, Manglev. In addition, 50 | // also RinghHash (Consistent Hash) is available but the configurations 51 | // like key is in RouteConfiguration, so we cannot use it either. 52 | adaptor.logger.Warnw("ignore cluster with unsupported load balancer", 53 | zap.String("cluster_name", c.Name), 54 | zap.String("lb_policy", c.GetLbPolicy().String()), 55 | ) 56 | return ErrFeatureNotSupportedYet 57 | } 58 | return nil 59 | } 60 | 61 | func (adaptor *adaptor) translateClusterTimeoutSettings(c *clusterv3.Cluster, ups *apisix.Upstream) error { 62 | if c.GetConnectTimeout() != nil { 63 | ups.Timeout = &apisix.Upstream_Timeout{ 64 | Connect: float64((*c.GetConnectTimeout()).Seconds), 65 | Read: 60, 66 | Send: 60, 67 | } 68 | } 69 | return nil 70 | } 71 | 72 | func (adaptor *adaptor) translateClusterLoadAssignments(c *clusterv3.Cluster, ups *apisix.Upstream) error { 73 | if c.GetClusterType() != nil { 74 | return ErrFeatureNotSupportedYet 75 | } 76 | switch c.GetType() { 77 | case clusterv3.Cluster_EDS: 78 | return ErrRequireFurtherEDS 79 | default: 80 | nodes, err := adaptor.TranslateClusterLoadAssignment(c.GetLoadAssignment()) 81 | if err != nil { 82 | return err 83 | } 84 | ups.Nodes = nodes 85 | return nil 86 | } 87 | } 88 | 89 | func (adaptor *adaptor) TranslateClusterLoadAssignment(la *endpointv3.ClusterLoadAssignment) ([]*apisix.Node, error) { 90 | var nodes []*apisix.Node 91 | for _, eps := range la.GetEndpoints() { 92 | var weight int32 93 | if eps.GetLoadBalancingWeight() != nil { 94 | weight = int32(eps.GetLoadBalancingWeight().GetValue()) 95 | } else { 96 | weight = 100 97 | } 98 | for _, ep := range eps.LbEndpoints { 99 | node := &apisix.Node{ 100 | Weight: weight, 101 | } 102 | if ep.GetLoadBalancingWeight() != nil { 103 | node.Weight = int32(ep.GetLoadBalancingWeight().GetValue()) 104 | } 105 | switch identifier := ep.GetHostIdentifier().(type) { 106 | case *endpointv3.LbEndpoint_Endpoint: 107 | switch addr := identifier.Endpoint.Address.Address.(type) { 108 | case *corev3.Address_SocketAddress: 109 | if addr.SocketAddress.GetProtocol() != corev3.SocketAddress_TCP { 110 | adaptor.logger.Warnw("ignore endpoint with non-tcp protocol", 111 | zap.Any("endpoint", ep), 112 | ) 113 | continue 114 | } 115 | node.Host = 
addr.SocketAddress.GetAddress() 116 | switch port := addr.SocketAddress.GetPortSpecifier().(type) { 117 | case *corev3.SocketAddress_PortValue: 118 | node.Port = int32(port.PortValue) 119 | case *corev3.SocketAddress_NamedPort: 120 | adaptor.logger.Warnw("ignore endpoint with unsupported named port", 121 | zap.Any("endpoint", ep), 122 | ) 123 | continue 124 | } 125 | default: 126 | adaptor.logger.Warnw("ignore endpoint with unsupported address type", 127 | zap.Any("endpoint", ep), 128 | ) 129 | continue 130 | } 131 | default: 132 | adaptor.logger.Warnw("ignore endpoint with unknown endpoint type ", 133 | zap.Any("endpoint", ep), 134 | ) 135 | continue 136 | } 137 | adaptor.logger.Debugw("got node after parsing endpoint", 138 | zap.Any("node", node), 139 | zap.Any("endpoint", ep), 140 | ) 141 | // Currently Apache APISIX doesn't use the metadata field. 142 | // So we don't pass ep.Metadata. 143 | nodes = append(nodes, node) 144 | } 145 | } 146 | return nodes, nil 147 | } 148 | -------------------------------------------------------------------------------- /pkg/adaptor/xds/v3/cluster_test.go: -------------------------------------------------------------------------------- 1 | package v3 2 | 3 | import ( 4 | "testing" 5 | 6 | clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" 7 | corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" 8 | endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" 9 | "github.com/golang/protobuf/ptypes/duration" 10 | "github.com/golang/protobuf/ptypes/wrappers" 11 | "github.com/stretchr/testify/assert" 12 | 13 | "github.com/api7/apisix-mesh-agent/pkg/log" 14 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 15 | ) 16 | 17 | func TestTranslateClusterLbPolicy(t *testing.T) { 18 | a := &adaptor{logger: log.DefaultLogger} 19 | c := &clusterv3.Cluster{ 20 | Name: "test", 21 | LbPolicy: clusterv3.Cluster_ROUND_ROBIN, 22 | } 23 | var ups apisix.Upstream 24 | assert.Nil(t, a.translateClusterLbPolicy(c, &ups)) 25 | assert.Equal(t, ups.Type, "roundrobin") 26 | c.LbPolicy = clusterv3.Cluster_LEAST_REQUEST 27 | assert.Nil(t, a.translateClusterLbPolicy(c, &ups)) 28 | assert.Equal(t, ups.Type, "least_conn") 29 | 30 | c.LbPolicy = clusterv3.Cluster_RING_HASH 31 | assert.Equal(t, a.translateClusterLbPolicy(c, &ups), ErrFeatureNotSupportedYet) 32 | } 33 | 34 | func TestTranslateClusterTimeoutSettings(t *testing.T) { 35 | a := &adaptor{logger: log.DefaultLogger} 36 | c := &clusterv3.Cluster{ 37 | Name: "test", 38 | ConnectTimeout: &duration.Duration{ 39 | Seconds: 10, 40 | }, 41 | LbPolicy: clusterv3.Cluster_ROUND_ROBIN, 42 | } 43 | var ups apisix.Upstream 44 | assert.Nil(t, a.translateClusterTimeoutSettings(c, &ups)) 45 | assert.Equal(t, ups.Timeout.Connect, float64(10)) 46 | } 47 | 48 | func TestTranslateClusterLoadAssignment(t *testing.T) { 49 | a := &adaptor{logger: log.DefaultLogger} 50 | la := &endpointv3.ClusterLoadAssignment{ 51 | ClusterName: "test", 52 | Endpoints: []*endpointv3.LocalityLbEndpoints{ 53 | { 54 | LbEndpoints: []*endpointv3.LbEndpoint{ 55 | { 56 | HostIdentifier: &endpointv3.LbEndpoint_Endpoint{ 57 | Endpoint: &endpointv3.Endpoint{ 58 | Address: &corev3.Address{ 59 | Address: &corev3.Address_SocketAddress{ 60 | SocketAddress: &corev3.SocketAddress{ 61 | Protocol: corev3.SocketAddress_TCP, 62 | Address: "10.0.3.11", 63 | PortSpecifier: &corev3.SocketAddress_PortValue{ 64 | PortValue: 8000, 65 | }, 66 | }, 67 | }, 68 | }, 69 | }, 70 | }, 71 | LoadBalancingWeight: &wrappers.UInt32Value{ 72 | 
Value: 100, 73 | }, 74 | }, 75 | { 76 | // Will be ignored. 77 | HostIdentifier: &endpointv3.LbEndpoint_EndpointName{}, 78 | }, 79 | { 80 | // Will be ignored. 81 | HostIdentifier: &endpointv3.LbEndpoint_Endpoint{ 82 | Endpoint: &endpointv3.Endpoint{ 83 | Address: &corev3.Address{ 84 | Address: &corev3.Address_Pipe{}, 85 | }, 86 | }, 87 | }, 88 | }, 89 | { 90 | // Will be ignored. 91 | HostIdentifier: &endpointv3.LbEndpoint_Endpoint{ 92 | Endpoint: &endpointv3.Endpoint{ 93 | Address: &corev3.Address{ 94 | Address: &corev3.Address_SocketAddress{ 95 | SocketAddress: &corev3.SocketAddress{ 96 | Protocol: corev3.SocketAddress_UDP, 97 | Address: "10.0.3.11", 98 | PortSpecifier: &corev3.SocketAddress_PortValue{ 99 | PortValue: 8000, 100 | }, 101 | }, 102 | }, 103 | }, 104 | }, 105 | }, 106 | }, 107 | { 108 | // Will be ignored. 109 | HostIdentifier: &endpointv3.LbEndpoint_Endpoint{ 110 | Endpoint: &endpointv3.Endpoint{ 111 | Address: &corev3.Address{ 112 | Address: &corev3.Address_SocketAddress{ 113 | SocketAddress: &corev3.SocketAddress{ 114 | Protocol: corev3.SocketAddress_TCP, 115 | Address: "10.0.3.12", 116 | PortSpecifier: &corev3.SocketAddress_NamedPort{ 117 | NamedPort: "http", 118 | }, 119 | }, 120 | }, 121 | }, 122 | }, 123 | }, 124 | }, 125 | }, 126 | LoadBalancingWeight: &wrappers.UInt32Value{ 127 | Value: 50, 128 | }, 129 | }, 130 | }, 131 | } 132 | nodes, err := a.TranslateClusterLoadAssignment(la) 133 | assert.Nil(t, err) 134 | assert.Len(t, nodes, 1) 135 | assert.Equal(t, nodes[0].Port, int32(8000)) 136 | assert.Equal(t, nodes[0].Weight, int32(100)) 137 | assert.Equal(t, nodes[0].Host, "10.0.3.11") 138 | } 139 | -------------------------------------------------------------------------------- /pkg/adaptor/xds/v3/listener.go: -------------------------------------------------------------------------------- 1 | package v3 2 | 3 | import ( 4 | listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" 5 | routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" 6 | hcmv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" 7 | xdswellknown "github.com/envoyproxy/go-control-plane/pkg/wellknown" 8 | "go.uber.org/zap" 9 | "google.golang.org/protobuf/proto" 10 | "google.golang.org/protobuf/types/known/anypb" 11 | 12 | "github.com/api7/apisix-mesh-agent/pkg/log" 13 | ) 14 | 15 | var ( 16 | _hcmv3 = "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager" 17 | ) 18 | 19 | func (adaptor *adaptor) CollectRouteNamesAndConfigs(l *listenerv3.Listener) ([]string, []*routev3.RouteConfiguration, error) { 20 | var ( 21 | rdsNames []string 22 | staticConfigs []*routev3.RouteConfiguration 23 | ) 24 | 25 | for _, fc := range l.FilterChains { 26 | for _, f := range fc.Filters { 27 | if f.Name == xdswellknown.HTTPConnectionManager && f.GetTypedConfig().GetTypeUrl() == _hcmv3 { 28 | var hcm hcmv3.HttpConnectionManager 29 | if err := anypb.UnmarshalTo(f.GetTypedConfig(), &hcm, proto.UnmarshalOptions{}); err != nil { 30 | log.Errorw("failed to unmarshal HttpConnectionManager config", 31 | zap.Error(err), 32 | zap.Any("listener", l), 33 | ) 34 | return nil, nil, err 35 | } 36 | if hcm.GetRds() != nil { 37 | rdsNames = append(rdsNames, hcm.GetRds().GetRouteConfigName()) 38 | } else if hcm.GetRouteConfig() != nil { 39 | // TODO deep copy? 
40 | staticConfigs = append(staticConfigs, hcm.GetRouteConfig()) 41 | } 42 | } 43 | } 44 | } 45 | adaptor.logger.Debugw("got route names and config from listener", 46 | zap.Strings("route_names", rdsNames), 47 | zap.Any("route_configs", staticConfigs), 48 | zap.Any("listener", l), 49 | ) 50 | return rdsNames, staticConfigs, nil 51 | } 52 | -------------------------------------------------------------------------------- /pkg/adaptor/xds/v3/listener_test.go: -------------------------------------------------------------------------------- 1 | package v3 2 | 3 | import ( 4 | "testing" 5 | 6 | listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" 7 | routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" 8 | hcmv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3" 9 | xdswellknown "github.com/envoyproxy/go-control-plane/pkg/wellknown" 10 | "github.com/stretchr/testify/assert" 11 | "google.golang.org/protobuf/proto" 12 | "google.golang.org/protobuf/types/known/anypb" 13 | 14 | "github.com/api7/apisix-mesh-agent/pkg/log" 15 | ) 16 | 17 | func TestCollectRouteNamesAndConfigs(t *testing.T) { 18 | a := &adaptor{logger: log.DefaultLogger} 19 | 20 | var ( 21 | any1 anypb.Any 22 | any2 anypb.Any 23 | any3 anypb.Any 24 | ) 25 | 26 | f1 := &hcmv3.HttpConnectionManager{ 27 | RouteSpecifier: &hcmv3.HttpConnectionManager_Rds{ 28 | Rds: &hcmv3.Rds{ 29 | RouteConfigName: "route1", 30 | }, 31 | }, 32 | } 33 | f2 := &hcmv3.HttpConnectionManager{ 34 | RouteSpecifier: &hcmv3.HttpConnectionManager_Rds{ 35 | Rds: &hcmv3.Rds{ 36 | RouteConfigName: "route2", 37 | }, 38 | }, 39 | } 40 | f3 := &hcmv3.HttpConnectionManager{ 41 | RouteSpecifier: &hcmv3.HttpConnectionManager_RouteConfig{ 42 | RouteConfig: &routev3.RouteConfiguration{ 43 | Name: "route3", 44 | VirtualHosts: []*routev3.VirtualHost{ 45 | { 46 | Name: "v1", 47 | Routes: []*routev3.Route{ 48 | { 49 | Name: "route1", 50 | }, 51 | }, 52 | }, 53 | }, 54 | }, 55 | }, 56 | } 57 | 58 | assert.Nil(t, anypb.MarshalFrom(&any1, f1, proto.MarshalOptions{})) 59 | assert.Nil(t, anypb.MarshalFrom(&any2, f2, proto.MarshalOptions{})) 60 | assert.Nil(t, anypb.MarshalFrom(&any3, f3, proto.MarshalOptions{})) 61 | 62 | listener := &listenerv3.Listener{ 63 | Name: "listener1", 64 | FilterChains: []*listenerv3.FilterChain{ 65 | { 66 | Filters: []*listenerv3.Filter{ 67 | { 68 | Name: xdswellknown.HTTPConnectionManager, 69 | ConfigType: &listenerv3.Filter_TypedConfig{ 70 | TypedConfig: &any1, 71 | }, 72 | }, 73 | { 74 | Name: xdswellknown.HTTPConnectionManager, 75 | ConfigType: &listenerv3.Filter_TypedConfig{ 76 | TypedConfig: &any2, 77 | }, 78 | }, 79 | { 80 | Name: xdswellknown.HTTPConnectionManager, 81 | ConfigType: &listenerv3.Filter_TypedConfig{ 82 | TypedConfig: &any3, 83 | }, 84 | }, 85 | }, 86 | }, 87 | }, 88 | } 89 | rdsNames, staticConfigs, err := a.CollectRouteNamesAndConfigs(listener) 90 | assert.Nil(t, err) 91 | assert.Equal(t, rdsNames, []string{"route1", "route2"}) 92 | assert.Len(t, staticConfigs, 1) 93 | assert.Equal(t, staticConfigs[0].Name, "route3") 94 | assert.Len(t, staticConfigs[0].VirtualHosts, 1) 95 | assert.Equal(t, staticConfigs[0].VirtualHosts[0].Name, "v1") 96 | } 97 | -------------------------------------------------------------------------------- /pkg/adaptor/xds/v3/types.go: -------------------------------------------------------------------------------- 1 | package v3 2 | 3 | import ( 4 | "errors" 5 | 6 | clusterv3 
"github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" 7 | endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" 8 | listenerv3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3" 9 | routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" 10 | 11 | "github.com/api7/apisix-mesh-agent/pkg/config" 12 | "github.com/api7/apisix-mesh-agent/pkg/log" 13 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 14 | ) 15 | 16 | var ( 17 | // ErrRequireFurtherEDS means the translation of Cluster is not complete 18 | // since it depends on EDS to fetch the load assignment (endpoints). 19 | // Once this error is given, the Cluster should stay invisible until 20 | // the EDS config arrives. 21 | ErrRequireFurtherEDS = errors.New("required further EDS config") 22 | // ErrFeatureNotSupportedYet means an unsupported feature exists in the 23 | // xDS resource, so the Adaptor skips it and moves on. 24 | ErrFeatureNotSupportedYet = errors.New("feature not supported yet") 25 | ) 26 | 27 | // Adaptor translates xDS resources like Route, Cluster 28 | // to the equivalent configs in Apache APISIX. 29 | // WARNING: not all fields are translated, only the necessary parts are used, others 30 | // can be added in the future. 31 | type Adaptor interface { 32 | // TranslateRouteConfiguration translates a RouteConfiguration to a series of APISIX 33 | // Routes. 34 | TranslateRouteConfiguration(*routev3.RouteConfiguration, *TranslateOptions) ([]*apisix.Route, error) 35 | // TranslateCluster translates a Cluster to an APISIX Upstream. 36 | TranslateCluster(*clusterv3.Cluster) (*apisix.Upstream, error) 37 | // TranslateClusterLoadAssignment translates the ClusterLoadAssignment resource to APISIX 38 | // Upstream Nodes. 39 | TranslateClusterLoadAssignment(*endpointv3.ClusterLoadAssignment) ([]*apisix.Node, error) 40 | // CollectRouteNamesAndConfigs collects RDS route names and static route configurations 41 | // from the listener. 42 | CollectRouteNamesAndConfigs(*listenerv3.Listener) ([]string, []*routev3.RouteConfiguration, error) 43 | } 44 | 45 | // TranslateOptions contains some options to customize the translation process. 46 | type TranslateOptions struct { 47 | // RouteOriginalDestination is a map whose key is the name of a RouteConfiguration 48 | // and whose value is the original destination address that a connection should have 49 | // to match this route. The original destination just happens to be the address 50 | // of the listener. 51 | // This is to obey the xDS specification: route configs are configured on listeners 52 | // explicitly while there is no listener configuration on APISIX, so this is necessary 53 | // to avoid the cross-listener use of routes. 54 | // An extra `vars` expression will be added only if the listener address can be found here. 55 | RouteOriginalDestination map[string]string 56 | } 57 | 58 | type adaptor struct { 59 | logger *log.Logger 60 | } 61 | 62 | // NewAdaptor creates an xDS-based adaptor. 
63 | func NewAdaptor(cfg *config.Config) (Adaptor, error) { 64 | logger, err := log.NewLogger( 65 | log.WithOutputFile(cfg.LogOutput), 66 | log.WithLogLevel(cfg.LogLevel), 67 | log.WithContext("xds_v3_adaptor"), 68 | ) 69 | if err != nil { 70 | return nil, err 71 | } 72 | return &adaptor{ 73 | logger: logger, 74 | }, nil 75 | } 76 | -------------------------------------------------------------------------------- /pkg/apisix/doc.go: -------------------------------------------------------------------------------- 1 | // Package apisix provides some utility functions which 2 | // are related to the APISIX resources. 3 | package apisix 4 | -------------------------------------------------------------------------------- /pkg/apisix/route.go: -------------------------------------------------------------------------------- 1 | package apisix 2 | 3 | import ( 4 | "google.golang.org/protobuf/proto" 5 | 6 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 7 | ) 8 | 9 | // CompareRoutes diffs two apisix.Route array and finds the new adds, updates 10 | // and deleted ones. Note it stands on the first apisix.Route array's point 11 | // of view. 12 | func CompareRoutes(r1, r2 []*apisix.Route) (added, deleted, updated []*apisix.Route) { 13 | if r1 == nil { 14 | return r2, nil, nil 15 | } 16 | if r2 == nil { 17 | return nil, r1, nil 18 | } 19 | 20 | r1Map := make(map[string]*apisix.Route) 21 | r2Map := make(map[string]*apisix.Route) 22 | for _, r := range r1 { 23 | r1Map[r.Id] = r 24 | } 25 | for _, r := range r2 { 26 | r2Map[r.Id] = r 27 | } 28 | for _, r := range r2 { 29 | if _, ok := r1Map[r.Id]; !ok { 30 | added = append(added, r) 31 | } 32 | } 33 | for _, ro := range r1 { 34 | if rn, ok := r2Map[ro.Id]; !ok { 35 | deleted = append(deleted, ro) 36 | } else { 37 | if !proto.Equal(ro, rn) { 38 | updated = append(updated, rn) 39 | } 40 | } 41 | } 42 | return 43 | } 44 | -------------------------------------------------------------------------------- /pkg/apisix/route_test.go: -------------------------------------------------------------------------------- 1 | package apisix 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 9 | ) 10 | 11 | func TestCompareRoutes(t *testing.T) { 12 | r1 := []*apisix.Route{ 13 | { 14 | Id: "1", 15 | }, 16 | { 17 | Id: "2", 18 | }, 19 | { 20 | Id: "3", 21 | }, 22 | } 23 | 24 | added, deleted, updated := CompareRoutes(r1, nil) 25 | assert.Nil(t, added) 26 | assert.Nil(t, updated) 27 | assert.Equal(t, deleted, r1) 28 | 29 | added, deleted, updated = CompareRoutes(nil, r1) 30 | assert.Equal(t, added, r1) 31 | assert.Nil(t, updated) 32 | assert.Nil(t, deleted) 33 | 34 | r2 := []*apisix.Route{ 35 | { 36 | Id: "1", 37 | }, 38 | { 39 | Id: "4", 40 | }, 41 | { 42 | Id: "3", 43 | Uris: []string{"/foo*"}, 44 | }, 45 | } 46 | 47 | added, deleted, updated = CompareRoutes(r1, r2) 48 | assert.Equal(t, added, []*apisix.Route{ 49 | { 50 | Id: "4", 51 | }, 52 | }) 53 | assert.Equal(t, deleted, []*apisix.Route{ 54 | { 55 | Id: "2", 56 | }, 57 | }) 58 | assert.Equal(t, updated[0].Id, "3") 59 | assert.Equal(t, updated[0].Uris, []string{"/foo*"}) 60 | } 61 | -------------------------------------------------------------------------------- /pkg/apisix/upstream.go: -------------------------------------------------------------------------------- 1 | package apisix 2 | 3 | import ( 4 | "google.golang.org/protobuf/proto" 5 | 6 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 7 | ) 8 | 9 | // 
CompareUpstreams diffs two apisix.Upstream arrays and finds the new adds, updates 10 | // and deleted ones. Note it stands on the first apisix.Upstream array's point 11 | // of view. 12 | func CompareUpstreams(u1, u2 []*apisix.Upstream) (added, deleted, updated []*apisix.Upstream) { 13 | if u1 == nil { 14 | return u2, nil, nil 15 | } 16 | if u2 == nil { 17 | return nil, u1, nil 18 | } 19 | u1Map := make(map[string]*apisix.Upstream) 20 | u2Map := make(map[string]*apisix.Upstream) 21 | for _, u := range u1 { 22 | u1Map[u.GetId()] = u 23 | } 24 | for _, u := range u2 { 25 | u2Map[u.GetId()] = u 26 | } 27 | for _, u := range u2 { 28 | if _, ok := u1Map[u.GetId()]; !ok { 29 | added = append(added, u) 30 | } 31 | } 32 | for _, uo := range u1 { 33 | if un, ok := u2Map[uo.GetId()]; !ok { 34 | deleted = append(deleted, uo) 35 | } else { 36 | if !proto.Equal(uo, un) { 37 | updated = append(updated, un) 38 | } 39 | } 40 | } 41 | return 42 | } 43 | -------------------------------------------------------------------------------- /pkg/apisix/upstream_test.go: -------------------------------------------------------------------------------- 1 | package apisix 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 9 | ) 10 | 11 | func TestCompareUpstreams(t *testing.T) { 12 | u1 := []*apisix.Upstream{ 13 | { 14 | Id: "1", 15 | }, 16 | { 17 | Id: "2", 18 | }, 19 | { 20 | Id: "3", 21 | }, 22 | } 23 | 24 | added, deleted, updated := CompareUpstreams(u1, nil) 25 | assert.Nil(t, added) 26 | assert.Nil(t, updated) 27 | assert.Equal(t, deleted, u1) 28 | 29 | added, deleted, updated = CompareUpstreams(nil, u1) 30 | assert.Equal(t, added, u1) 31 | assert.Nil(t, updated) 32 | assert.Nil(t, deleted) 33 | 34 | u2 := []*apisix.Upstream{ 35 | { 36 | Id: "1", 37 | }, 38 | { 39 | Id: "4", 40 | }, 41 | { 42 | Id: "3", 43 | Retries: 3, 44 | }, 45 | } 46 | 47 | added, deleted, updated = CompareUpstreams(u1, u2) 48 | assert.Equal(t, added, []*apisix.Upstream{ 49 | { 50 | Id: "4", 51 | }, 52 | }) 53 | assert.Equal(t, deleted, []*apisix.Upstream{ 54 | { 55 | Id: "2", 56 | }, 57 | }) 58 | assert.Equal(t, updated[0].Id, "3") 59 | assert.Equal(t, updated[0].Retries, int32(3)) 60 | } 61 | -------------------------------------------------------------------------------- /pkg/cache/doc.go: -------------------------------------------------------------------------------- 1 | // Package cache provides cache solutions to store APISIX resources. 2 | // A cache solution should support insert, update, get, list and delete 3 | // operations for each resource. To reduce type assertion overheads, the cache 4 | // is designed to be typed. Also, the cache should be thread-safe. 5 | package cache 6 | -------------------------------------------------------------------------------- /pkg/cache/route.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "sync" 5 | 6 | "google.golang.org/protobuf/proto" 7 | 8 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 9 | ) 10 | 11 | type route struct { 12 | mu sync.RWMutex 13 | // TODO optimize the store if the performance of map 14 | // is unbearable. 
15 | store map[string]*apisix.Route 16 | } 17 | 18 | func newRoute() Route { 19 | return &route{ 20 | store: make(map[string]*apisix.Route), 21 | } 22 | } 23 | 24 | func (r *route) Get(id string) (*apisix.Route, error) { 25 | r.mu.RLock() 26 | defer r.mu.RUnlock() 27 | 28 | obj, ok := r.store[id] 29 | if !ok { 30 | return nil, ErrObjectNotFound 31 | } 32 | // Never return the original one to avoid race conditions. 33 | return proto.Clone(obj).(*apisix.Route), nil 34 | } 35 | 36 | func (r *route) List() ([]*apisix.Route, error) { 37 | var objs []*apisix.Route 38 | r.mu.RLock() 39 | defer r.mu.RUnlock() 40 | for _, obj := range r.store { 41 | objs = append(objs, proto.Clone(obj).(*apisix.Route)) 42 | } 43 | return objs, nil 44 | } 45 | 46 | func (r *route) Insert(obj *apisix.Route) error { 47 | obj = proto.Clone(obj).(*apisix.Route) 48 | r.mu.Lock() 49 | defer r.mu.Unlock() 50 | r.store[obj.Id] = obj 51 | return nil 52 | } 53 | 54 | func (r *route) Delete(id string) error { 55 | r.mu.Lock() 56 | defer r.mu.Unlock() 57 | 58 | _, ok := r.store[id] 59 | if !ok { 60 | return ErrObjectNotFound 61 | } 62 | delete(r.store, id) 63 | return nil 64 | } 65 | -------------------------------------------------------------------------------- /pkg/cache/route_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "sort" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | 9 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 10 | ) 11 | 12 | func TestRoute(t *testing.T) { 13 | r := newRoute() 14 | assert.NotNil(t, r) 15 | 16 | // Not found 17 | obj, err := r.Get("1") 18 | assert.Nil(t, obj) 19 | assert.Equal(t, err, ErrObjectNotFound) 20 | assert.Equal(t, r.Delete("1"), ErrObjectNotFound) 21 | 22 | route1 := &apisix.Route{ 23 | Id: "1", 24 | } 25 | assert.Nil(t, r.Insert(route1)) 26 | 27 | obj, err = r.Get("1") 28 | assert.Nil(t, err) 29 | assert.Equal(t, obj.Id, "1") 30 | 31 | // Update 32 | obj.Name = "Vivian" 33 | assert.Nil(t, r.Insert(obj)) 34 | obj, err = r.Get("1") 35 | assert.Nil(t, err) 36 | assert.Equal(t, obj.Id, "1") 37 | assert.Equal(t, obj.GetName(), "Vivian") 38 | 39 | // Delete 40 | assert.Nil(t, r.Delete("1")) 41 | assert.Equal(t, r.Delete("1"), ErrObjectNotFound) 42 | obj, err = r.Get("1") 43 | assert.Nil(t, obj) 44 | assert.Error(t, err, ErrObjectNotFound) 45 | } 46 | 47 | func TestRouteList(t *testing.T) { 48 | objs := []*apisix.Route{ 49 | { 50 | Id: "1", 51 | }, 52 | { 53 | Id: "2", 54 | }, 55 | { 56 | Id: "3", 57 | }, 58 | } 59 | r := newRoute() 60 | assert.NotNil(t, r) 61 | for _, obj := range objs { 62 | assert.Nil(t, r.Insert(obj)) 63 | } 64 | list, err := r.List() 65 | assert.Nil(t, err) 66 | assert.Len(t, list, 3) 67 | 68 | var ids []string 69 | for _, elem := range list { 70 | ids = append(ids, elem.GetId()) 71 | } 72 | sort.Strings(ids) 73 | assert.Equal(t, ids[0], "1") 74 | assert.Equal(t, ids[1], "2") 75 | assert.Equal(t, ids[2], "3") 76 | } 77 | 78 | func TestRouteObjectClone(t *testing.T) { 79 | route1 := &apisix.Route{ 80 | Id: "1", 81 | } 82 | r := newRoute() 83 | assert.NotNil(t, r) 84 | assert.Nil(t, r.Insert(route1)) 85 | 86 | obj, err := r.Get("1") 87 | assert.Nil(t, err) 88 | 89 | obj.Name = "alex" 90 | obj, err = r.Get("1") 91 | assert.Nil(t, err) 92 | assert.Equal(t, obj.Name, "") 93 | } 94 | -------------------------------------------------------------------------------- /pkg/cache/types.go: -------------------------------------------------------------------------------- 1 | 
package cache 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 7 | ) 8 | 9 | var ( 10 | // ErrObjectNotFound means the target object is not found 11 | // from the cache. 12 | ErrObjectNotFound = errors.New("object not found") 13 | ) 14 | 15 | // Cache defines what capabilities a cache solution should provide. 16 | type Cache interface { 17 | // Route returns the route exclusive cache object. 18 | Route() Route 19 | // Upstream returns the upstream exclusive cache object. 20 | Upstream() Upstream 21 | } 22 | 23 | // Route defines the exclusive behaviors for apisix.Route. 24 | type Route interface { 25 | // Get the apisix.Route by its id. In case of the object not found, 26 | // ErrObjectNotFound is given. 27 | Get(string) (*apisix.Route, error) 28 | // List lists all apisix.Route. 29 | List() ([]*apisix.Route, error) 30 | // Insert inserts or updates an apisix.Route object, indexed by its id. 31 | Insert(*apisix.Route) error 32 | // Delete deletes the apisix.Route object by the id. In case of object not 33 | // exist, ErrObjectNotFound is given. 34 | Delete(string) error 35 | } 36 | 37 | // Upstream defines the exclusive behaviors for apisix.Upstream. 38 | type Upstream interface { 39 | // Get the apisix.Upstream by its id. In case of the object not found, 40 | // ErrObjectNotFound is given. 41 | Get(string) (*apisix.Upstream, error) 42 | // List lists all apisix.Upstream. 43 | List() ([]*apisix.Upstream, error) 44 | // Insert creates or updates an apisix.Upstream object, indexed by its id. 45 | Insert(*apisix.Upstream) error 46 | // Delete deletes the apisix.Upstream object by the id. In case of object not 47 | // exist, ErrObjectNotFound is given. 48 | Delete(string) error 49 | } 50 | 51 | type cache struct { 52 | route Route 53 | upstream Upstream 54 | } 55 | 56 | // NewInMemoryCache creates a Cache object which stores all data in memory. 57 | func NewInMemoryCache() Cache { 58 | return &cache{ 59 | route: newRoute(), 60 | upstream: newUpstream(), 61 | } 62 | } 63 | 64 | func (c *cache) Route() Route { 65 | return c.route 66 | } 67 | 68 | func (c *cache) Upstream() Upstream { 69 | return c.upstream 70 | } 71 | -------------------------------------------------------------------------------- /pkg/cache/types_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 9 | ) 10 | 11 | func TestInMemoryCache(t *testing.T) { 12 | c := NewInMemoryCache() 13 | assert.NotNil(t, c) 14 | 15 | ups := &apisix.Upstream{ 16 | Id: "1", 17 | } 18 | r := &apisix.Route{ 19 | Id: "1", 20 | } 21 | 22 | assert.Nil(t, c.Route().Insert(r)) 23 | assert.Nil(t, c.Upstream().Insert(ups)) 24 | 25 | rr, err := c.Route().Get("1") 26 | assert.Nil(t, err) 27 | assert.Equal(t, rr.GetId(), "1") 28 | 29 | uu, err := c.Upstream().Get("1") 30 | assert.Nil(t, err) 31 | assert.Equal(t, uu.GetId(), "1") 32 | } 33 | -------------------------------------------------------------------------------- /pkg/cache/upstream.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "sync" 5 | 6 | "google.golang.org/protobuf/proto" 7 | 8 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 9 | ) 10 | 11 | type upstream struct { 12 | mu sync.RWMutex 13 | // TODO optimize the store if the performance of map 14 | // is unbearable. 
15 | store map[string]*apisix.Upstream 16 | } 17 | 18 | func newUpstream() Upstream { 19 | return &upstream{ 20 | store: make(map[string]*apisix.Upstream), 21 | } 22 | } 23 | 24 | func (r *upstream) Get(id string) (*apisix.Upstream, error) { 25 | r.mu.RLock() 26 | defer r.mu.RUnlock() 27 | 28 | obj, ok := r.store[id] 29 | if !ok { 30 | return nil, ErrObjectNotFound 31 | } 32 | // Never return the original one to avoid race conditions. 33 | obj = proto.Clone(obj).(*apisix.Upstream) 34 | // FIXME It seems that proto.Clone cannot copy the empty slice. 35 | if obj.Nodes == nil { 36 | obj.Nodes = []*apisix.Node{} 37 | } 38 | 39 | return obj, nil 40 | } 41 | 42 | func (r *upstream) List() ([]*apisix.Upstream, error) { 43 | var objs []*apisix.Upstream 44 | r.mu.RLock() 45 | defer r.mu.RUnlock() 46 | for _, obj := range r.store { 47 | obj = proto.Clone(obj).(*apisix.Upstream) 48 | // FIXME It seems that proto.Clone cannot copy the empty slice. 49 | if obj.Nodes == nil { 50 | obj.Nodes = []*apisix.Node{} 51 | } 52 | objs = append(objs, obj) 53 | } 54 | return objs, nil 55 | } 56 | 57 | func (r *upstream) Insert(obj *apisix.Upstream) error { 58 | obj = proto.Clone(obj).(*apisix.Upstream) 59 | r.mu.Lock() 60 | defer r.mu.Unlock() 61 | r.store[obj.Id] = obj 62 | return nil 63 | } 64 | 65 | func (r *upstream) Delete(id string) error { 66 | r.mu.Lock() 67 | defer r.mu.Unlock() 68 | 69 | _, ok := r.store[id] 70 | if !ok { 71 | return ErrObjectNotFound 72 | } 73 | delete(r.store, id) 74 | return nil 75 | } 76 | -------------------------------------------------------------------------------- /pkg/cache/upstream_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "sort" 5 | "testing" 6 | 7 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestUpstream(t *testing.T) { 13 | u := newUpstream() 14 | assert.NotNil(t, u) 15 | 16 | // Not found 17 | obj, err := u.Get("1") 18 | assert.Nil(t, obj) 19 | assert.Equal(t, err, ErrObjectNotFound) 20 | assert.Equal(t, u.Delete("1"), ErrObjectNotFound) 21 | 22 | ups1 := &apisix.Upstream{ 23 | Id: "1", 24 | } 25 | assert.Nil(t, u.Insert(ups1)) 26 | 27 | obj, err = u.Get("1") 28 | assert.Nil(t, err) 29 | assert.Equal(t, obj.Id, "1") 30 | 31 | // Update 32 | obj.Name = "Vivian" 33 | assert.Nil(t, u.Insert(obj)) 34 | obj, err = u.Get("1") 35 | assert.Nil(t, err) 36 | assert.Equal(t, obj.Id, "1") 37 | assert.Equal(t, obj.GetName(), "Vivian") 38 | 39 | // Delete 40 | assert.Nil(t, u.Delete("1")) 41 | assert.Equal(t, u.Delete("1"), ErrObjectNotFound) 42 | obj, err = u.Get("1") 43 | assert.Nil(t, obj) 44 | assert.Error(t, err, ErrObjectNotFound) 45 | } 46 | 47 | func TestUpstreamList(t *testing.T) { 48 | objs := []*apisix.Upstream{ 49 | { 50 | Id: "1", 51 | }, 52 | { 53 | Id: "2", 54 | }, 55 | { 56 | Id: "3", 57 | }, 58 | } 59 | u := newUpstream() 60 | assert.NotNil(t, u) 61 | for _, obj := range objs { 62 | assert.Nil(t, u.Insert(obj)) 63 | } 64 | list, err := u.List() 65 | assert.Nil(t, err) 66 | assert.Len(t, list, 3) 67 | 68 | var ids []string 69 | for _, elem := range list { 70 | ids = append(ids, elem.GetId()) 71 | } 72 | sort.Strings(ids) 73 | assert.Equal(t, ids[0], "1") 74 | assert.Equal(t, ids[1], "2") 75 | assert.Equal(t, ids[2], "3") 76 | } 77 | 78 | func TestUpstreamObjectClone(t *testing.T) { 79 | ups1 := &apisix.Upstream{ 80 | Id: "1", 81 | } 82 | u := newUpstream() 83 | assert.NotNil(t, u) 84 | assert.Nil(t, 
u.Insert(ups1)) 85 | 86 | obj, err := u.Get("1") 87 | assert.Nil(t, err) 88 | 89 | obj.Name = "alex" 90 | obj, err = u.Get("1") 91 | assert.Nil(t, err) 92 | assert.Equal(t, obj.Name, "") 93 | } 94 | -------------------------------------------------------------------------------- /pkg/config/types.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | "net" 6 | "os" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/google/uuid" 11 | ) 12 | 13 | const ( 14 | // XDSV3FileProvisioner means to use the xds v3 file provisioner. 15 | XDSV3FileProvisioner = "xds-v3-file" 16 | // XDSV3GRPCProvisioner means to use the xds v3 grpc provisioner. 17 | XDSV3GRPCProvisioner = "xds-v3-grpc" 18 | 19 | // StandaloneMode means run apisix-mesh-agent standalone. 20 | StandaloneMode = "standalone" 21 | // BundleMode means run apisix-mesh-agent and apisix. 22 | BundleMode = "bundle" 23 | // DefaultAPISIXHomePath is the default home path for Apache APISIX. 24 | DefaultAPISIXHomePath = "/usr/local/apisix" 25 | // DefaultAPISIXBinPath is the default binary path for Apache APISIX. 26 | DefaultAPISIXBinPath = "/usr/local/bin/apisix" 27 | ) 28 | 29 | var ( 30 | // ErrUnknownProvisioner means user specified an unknown provisioner. 31 | ErrUnknownProvisioner = errors.New("unknown provisioner") 32 | // ErrBadGRPCListen means the grpc listen address is invalid. 33 | ErrBadGRPCListen = errors.New("bad grpc listen address") 34 | // ErrEmptyXDSConfigSource means the XDS config source is empty. 35 | ErrEmptyXDSConfigSource = errors.New("empty xds config source, --xds-config-source option is required") 36 | 37 | // DefaultGRPCListen is the default gRPC server listen address. 38 | DefaultGRPCListen = "127.0.0.1:2379" 39 | // DefaultEtcdKeyPrefix is the default key prefix in the mimicking 40 | // etcd v3 server. 41 | DefaultEtcdKeyPrefix = "/apisix" 42 | ) 43 | 44 | // RunningContext contains data which can be decided only when running. 45 | type RunningContext struct { 46 | // PodNamespace is the namespace of the resident pod. 47 | PodNamespace string 48 | // The IP address of the resident pod. 49 | IPAddress string 50 | } 51 | 52 | // Config contains configurations required for running apisix-mesh-agent. 53 | type Config struct { 54 | // Running Id of this instance; it will be filled with 55 | // a random string when the instance starts. 56 | RunId string 57 | // The minimum log level that will be printed. 58 | LogLevel string `json:"log_level" yaml:"log_level"` 59 | // The destination of logs. 60 | LogOutput string `json:"log_output" yaml:"log_output"` 61 | // The Provisioner to use. 62 | // Value can be "xds-v3-file", "xds-v3-grpc". 63 | Provisioner string `json:"provisioner" yaml:"provisioner"` 64 | // The watched xds files, only valid if the Provisioner is "xds-v3-file". 65 | XDSWatchFiles []string `json:"xds_watch_files" yaml:"xds_watch_files"` 66 | XDSConfigSource string `json:"xds_config_source" yaml:"xds_config_source"` 67 | // The grpc listen address 68 | GRPCListen string `json:"grpc_listen" yaml:"grpc_listen"` 69 | // The key prefix in the mimicking etcd v3 server. 70 | EtcdKeyPrefix string `json:"etcd_key_prefix" yaml:"etcd_key_prefix"` 71 | // The running mode of apisix-mesh-agent, can be: 72 | // 1. standalone - only launch apisix-mesh-agent 73 | // 2. bundle - launch apisix-mesh-agent and apisix, in such case, 74 | // correct apisix home path and bin path should be configured. 
75 | // And when you shutdown apisix-mesh-agent, APISIX will also be closed. 76 | RunMode string `json:"run_mode" yaml:"run_mode"` 77 | // The home path of Apache APISIX. 78 | APISIXHomePath string `json:"apisix_home_path" yaml:"apisix_home_path"` 79 | // The executable binary path of Apache APISIX. 80 | APISIXBinPath string `json:"apisix_bin_path" yaml:"apisix_bin_path"` 81 | 82 | // RunningContext is the running context, it's self-contained. 83 | // TODO: Move it outside here since it doesn't belong to "configuration". 84 | RunningContext *RunningContext `json:"running_context" yaml:"running_context"` 85 | } 86 | 87 | // NewDefaultConfig returns a Config object with all items filled by 88 | // their default values. 89 | func NewDefaultConfig() *Config { 90 | return &Config{ 91 | RunId: uuid.NewString(), 92 | LogLevel: "info", 93 | LogOutput: "stderr", 94 | Provisioner: XDSV3FileProvisioner, 95 | GRPCListen: DefaultGRPCListen, 96 | EtcdKeyPrefix: DefaultEtcdKeyPrefix, 97 | APISIXHomePath: DefaultAPISIXHomePath, 98 | APISIXBinPath: DefaultAPISIXBinPath, 99 | RunMode: StandaloneMode, 100 | 101 | RunningContext: getRunningContext(), 102 | } 103 | } 104 | 105 | // Validate validates the config object. 106 | func (cfg *Config) Validate() error { 107 | if cfg.Provisioner == "" { 108 | return errors.New("unspecified provisioner") 109 | } 110 | if cfg.Provisioner != XDSV3FileProvisioner && cfg.Provisioner != XDSV3GRPCProvisioner { 111 | return ErrUnknownProvisioner 112 | } 113 | if cfg.Provisioner == XDSV3GRPCProvisioner && cfg.XDSConfigSource == "" { 114 | return ErrEmptyXDSConfigSource 115 | } 116 | ip, port, err := net.SplitHostPort(cfg.GRPCListen) 117 | if err != nil { 118 | return ErrBadGRPCListen 119 | } 120 | 121 | if net.ParseIP(ip) == nil { 122 | return ErrBadGRPCListen 123 | } 124 | pnum, err := strconv.Atoi(port) 125 | if err != nil || pnum < 1 || pnum > 65535 { 126 | return ErrBadGRPCListen 127 | } 128 | 129 | return nil 130 | } 131 | 132 | func getRunningContext() *RunningContext { 133 | namespace := "default" 134 | if value := os.Getenv("POD_NAMESPACE"); value != "" { 135 | namespace = value 136 | } 137 | 138 | var ( 139 | ipAddr string 140 | ) 141 | ifaces, err := net.Interfaces() 142 | if err != nil { 143 | panic(err) 144 | } 145 | for _, iface := range ifaces { 146 | if iface.Name != "lo" { 147 | addrs, err := iface.Addrs() 148 | if err != nil { 149 | panic(err) 150 | } 151 | if len(addrs) > 0 { 152 | ipAddr = strings.Split(addrs[0].String(), "/")[0] 153 | } 154 | } 155 | } 156 | if ipAddr == "" { 157 | ipAddr = "127.0.0.1" 158 | } 159 | return &RunningContext{ 160 | PodNamespace: namespace, 161 | IPAddress: ipAddr, 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /pkg/config/types_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | "net" 6 | "os" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestNewDefaultConfig(t *testing.T) { 13 | cfg := NewDefaultConfig() 14 | assert.Equal(t, cfg.LogLevel, "info") 15 | assert.Equal(t, cfg.LogOutput, "stderr") 16 | assert.Equal(t, cfg.Provisioner, XDSV3FileProvisioner) 17 | assert.Equal(t, cfg.GRPCListen, DefaultGRPCListen) 18 | assert.Equal(t, cfg.EtcdKeyPrefix, DefaultEtcdKeyPrefix) 19 | assert.Equal(t, cfg.APISIXHomePath, DefaultAPISIXHomePath) 20 | assert.Equal(t, cfg.APISIXBinPath, DefaultAPISIXBinPath) 21 | assert.Equal(t, cfg.RunMode, StandaloneMode) 22 | } 23 
| 24 | func TestConfigValidate(t *testing.T) { 25 | cfg := NewDefaultConfig() 26 | cfg.Provisioner = "redis" 27 | assert.Equal(t, cfg.Validate(), ErrUnknownProvisioner) 28 | 29 | cfg.Provisioner = "" 30 | assert.Equal(t, cfg.Validate(), errors.New("unspecified provisioner")) 31 | 32 | cfg = NewDefaultConfig() 33 | cfg.GRPCListen = "127:8080" 34 | assert.Equal(t, cfg.Validate(), ErrBadGRPCListen) 35 | cfg.GRPCListen = "127.0.0.1:aa" 36 | assert.Equal(t, cfg.Validate(), ErrBadGRPCListen) 37 | cfg.GRPCListen = "hello" 38 | assert.Equal(t, cfg.Validate(), ErrBadGRPCListen) 39 | 40 | cfg.Provisioner = "xds-v3-grpc" 41 | assert.Equal(t, cfg.Validate(), ErrEmptyXDSConfigSource) 42 | } 43 | 44 | func TestGetRunningContext(t *testing.T) { 45 | assert.Nil(t, os.Setenv("POD_NAMESPACE", "apisix")) 46 | rc := getRunningContext() 47 | assert.Equal(t, rc.PodNamespace, "apisix") 48 | assert.Nil(t, os.Setenv("POD_NAMESPACE", "")) 49 | rc = getRunningContext() 50 | assert.Equal(t, rc.PodNamespace, "default") 51 | assert.NotNil(t, net.ParseIP(rc.IPAddress)) 52 | } 53 | -------------------------------------------------------------------------------- /pkg/etcdv3/conformance.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "go.etcd.io/etcd/api/v3/etcdserverpb" 5 | "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" 6 | "go.uber.org/zap" 7 | 8 | "github.com/api7/apisix-mesh-agent/pkg/log" 9 | ) 10 | 11 | func (e *etcdV3) checkRangeRequestConformance(r *etcdserverpb.RangeRequest) error { 12 | if len(r.Key) == 0 { 13 | return rpctypes.ErrEmptyKey 14 | } 15 | key := string(r.Key) 16 | randEnd := string(r.RangeEnd) 17 | if !(r.RangeEnd == nil || 18 | (key == e.keyPrefix+"/routes" && randEnd == e.keyPrefix+"/routet") || 19 | (key == e.keyPrefix+"/upstreams" && randEnd == e.keyPrefix+"/upstreamt")) { 20 | 21 | log.Warnw("RangeRequest with unsupported key and range_end combination", 22 | zap.String("key", string(r.Key)), 23 | zap.String("range_end", string(r.RangeEnd)), 24 | ) 25 | return rpctypes.ErrKeyNotFound 26 | } 27 | if r.Limit != 0 { 28 | log.Warnw("RangeRequest with unsupported non-zero limit", 29 | zap.Int64("limit", r.Limit), 30 | ) 31 | return rpctypes.ErrNotCapable 32 | } 33 | if r.SortOrder != etcdserverpb.RangeRequest_NONE { 34 | log.Warnw("RangeRequest requires sorting is not supported yet", 35 | zap.String("sort_order", r.SortOrder.String()), 36 | ) 37 | return rpctypes.ErrNotCapable 38 | } 39 | if r.Revision > 0 || r.MinCreateRevision > 0 || r.MaxCreateRevision > 0 || r.MinModRevision > 0 || r.MaxModRevision > 0 { 40 | log.Warnw("RangeRequest with specific revisions is not supported yet", 41 | zap.Int64("revision", r.Revision), 42 | zap.Int64("min_create_revision", r.MinCreateRevision), 43 | zap.Int64("max_create_revision", r.MaxCreateRevision), 44 | zap.Int64("min_mod_revision", r.MinModRevision), 45 | zap.Int64("max_mod_revision", r.MaxModRevision), 46 | ) 47 | return rpctypes.ErrNotCapable 48 | } 49 | return nil 50 | } 51 | 52 | func (e *etcdV3) checkWatchRequestConformance(r *etcdserverpb.WatchRequest) error { 53 | switch wr := r.RequestUnion.(type) { 54 | case *etcdserverpb.WatchRequest_CancelRequest: 55 | return nil 56 | case *etcdserverpb.WatchRequest_CreateRequest: 57 | if wr.CreateRequest == nil { 58 | return nil 59 | } 60 | key := string(wr.CreateRequest.Key) 61 | rangeEnd := string(wr.CreateRequest.RangeEnd) 62 | if len(key) == 0 { 63 | return rpctypes.ErrEmptyKey 64 | } 65 | if !((key == e.keyPrefix+"/routes" && rangeEnd == 
e.keyPrefix+"/routet") || 66 | (key == e.keyPrefix+"/upstreams" && rangeEnd == e.keyPrefix+"/upstreamt")) { 67 | 68 | log.Warnw("WatchCreateRequest with unsupported key and range_end combination", 69 | zap.String("key", string(wr.CreateRequest.Key)), 70 | zap.String("range_end", string(wr.CreateRequest.RangeEnd)), 71 | zap.Any("watch_create_request", wr), 72 | ) 73 | return rpctypes.ErrKeyNotFound 74 | } 75 | if wr.CreateRequest.PrevKv { 76 | log.Warnw("WatchCreateRequest enables prev_kv, which is not supported yet", 77 | zap.Any("watch_create_request", wr), 78 | ) 79 | return rpctypes.ErrNotCapable 80 | } 81 | if wr.CreateRequest.ProgressNotify { 82 | log.Warnw("WatchCreateRequest enables progress notify, which is not supported yet", 83 | zap.Any("watch_create_request", wr), 84 | ) 85 | return rpctypes.ErrNotCapable 86 | } 87 | if wr.CreateRequest.Fragment { 88 | log.Warnw("WatchCreateRequest enables fragmented is not supported yet", 89 | zap.Any("watch_create_request", wr), 90 | ) 91 | return rpctypes.ErrNotCapable 92 | } 93 | case *etcdserverpb.WatchRequest_ProgressRequest: 94 | log.Warnw("WatchProgressRequest is not supported yet", 95 | zap.Any("watch_progress_request", wr), 96 | ) 97 | return rpctypes.ErrNotCapable 98 | } 99 | return nil 100 | } 101 | -------------------------------------------------------------------------------- /pkg/etcdv3/conformance_test.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "go.etcd.io/etcd/api/v3/etcdserverpb" 8 | "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" 9 | 10 | "github.com/api7/apisix-mesh-agent/pkg/log" 11 | ) 12 | 13 | func TestCheckRangeRequestConformance(t *testing.T) { 14 | e := &etcdV3{ 15 | logger: log.DefaultLogger, 16 | } 17 | r := &etcdserverpb.RangeRequest{} 18 | 19 | // Empty key 20 | assert.Equal(t, e.checkRangeRequestConformance(r), rpctypes.ErrEmptyKey) 21 | 22 | // Unsupported range query. 23 | r.Key = []byte("/apisix/aaaa") 24 | r.RangeEnd = []byte("/apisix/route/xxx") 25 | assert.Equal(t, e.checkRangeRequestConformance(r), rpctypes.ErrKeyNotFound) 26 | r.RangeEnd = nil 27 | 28 | // Limitations. 
29 | r.Limit = 11 30 | assert.Equal(t, e.checkRangeRequestConformance(r), rpctypes.ErrNotCapable) 31 | r.Limit = 0 32 | 33 | // Sort 34 | r.SortOrder = etcdserverpb.RangeRequest_ASCEND 35 | assert.Equal(t, e.checkRangeRequestConformance(r), rpctypes.ErrNotCapable) 36 | r.SortOrder = etcdserverpb.RangeRequest_NONE 37 | 38 | // Revision 39 | r.MaxCreateRevision = 1333 40 | assert.Equal(t, e.checkRangeRequestConformance(r), rpctypes.ErrNotCapable) 41 | 42 | r.MaxCreateRevision = 0 43 | assert.Nil(t, e.checkRangeRequestConformance(r)) 44 | } 45 | 46 | func TestCheckWatchRequestConformance(t *testing.T) { 47 | e := &etcdV3{ 48 | logger: log.DefaultLogger, 49 | keyPrefix: "/apisix", 50 | } 51 | r := &etcdserverpb.WatchRequest{ 52 | RequestUnion: &etcdserverpb.WatchRequest_CancelRequest{}, 53 | } 54 | // WatchCancelRequest 55 | assert.Nil(t, e.checkWatchRequestConformance(r)) 56 | // WatchProgressRequest 57 | r.RequestUnion = &etcdserverpb.WatchRequest_ProgressRequest{} 58 | assert.Equal(t, e.checkWatchRequestConformance(r), rpctypes.ErrNotCapable) 59 | // Empty CreateRequest 60 | r.RequestUnion = &etcdserverpb.WatchRequest_CreateRequest{} 61 | assert.Nil(t, e.checkWatchRequestConformance(r)) 62 | // Empty key 63 | r.RequestUnion = &etcdserverpb.WatchRequest_CreateRequest{ 64 | CreateRequest: &etcdserverpb.WatchCreateRequest{}, 65 | } 66 | assert.Equal(t, e.checkWatchRequestConformance(r), rpctypes.ErrEmptyKey) 67 | 68 | // Bad Key and RandEnd combination. 69 | r.RequestUnion = &etcdserverpb.WatchRequest_CreateRequest{ 70 | CreateRequest: &etcdserverpb.WatchCreateRequest{ 71 | Key: []byte("/apisix/unknowns"), 72 | RangeEnd: []byte("/apisix/unknownt"), 73 | }, 74 | } 75 | assert.Equal(t, e.checkWatchRequestConformance(r), rpctypes.ErrKeyNotFound) 76 | 77 | // PrevKv 78 | r.RequestUnion = &etcdserverpb.WatchRequest_CreateRequest{ 79 | CreateRequest: &etcdserverpb.WatchCreateRequest{ 80 | Key: []byte("/apisix/routes"), 81 | RangeEnd: []byte("/apisix/routet"), 82 | PrevKv: true, 83 | }, 84 | } 85 | assert.Equal(t, e.checkWatchRequestConformance(r), rpctypes.ErrNotCapable) 86 | 87 | // ProgressNotify 88 | r.RequestUnion = &etcdserverpb.WatchRequest_CreateRequest{ 89 | CreateRequest: &etcdserverpb.WatchCreateRequest{ 90 | Key: []byte("/apisix/routes"), 91 | RangeEnd: []byte("/apisix/routet"), 92 | ProgressNotify: true, 93 | }, 94 | } 95 | assert.Equal(t, e.checkWatchRequestConformance(r), rpctypes.ErrNotCapable) 96 | // Fragment 97 | r.RequestUnion = &etcdserverpb.WatchRequest_CreateRequest{ 98 | CreateRequest: &etcdserverpb.WatchCreateRequest{ 99 | Key: []byte("/apisix/routes"), 100 | RangeEnd: []byte("/apisix/routet"), 101 | Fragment: true, 102 | }, 103 | } 104 | assert.Equal(t, e.checkWatchRequestConformance(r), rpctypes.ErrNotCapable) 105 | } 106 | -------------------------------------------------------------------------------- /pkg/etcdv3/etcd_test.go: -------------------------------------------------------------------------------- 1 | package etcdv3 2 | 3 | import ( 4 | "context" 5 | "net/http/httptest" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "go.etcd.io/etcd/api/v3/etcdserverpb" 11 | "golang.org/x/net/nettest" 12 | "google.golang.org/grpc" 13 | 14 | "github.com/api7/apisix-mesh-agent/pkg/cache" 15 | "github.com/api7/apisix-mesh-agent/pkg/config" 16 | "github.com/api7/apisix-mesh-agent/pkg/log" 17 | "github.com/api7/apisix-mesh-agent/pkg/types" 18 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 19 | ) 20 | 21 | func TestNewEtcdV3Server(t *testing.T) { 22 
| cfg := config.NewDefaultConfig() 23 | // defined in kv_test.go 24 | fr := &fakeRevisioner{rev: 0} 25 | 26 | srv, err := NewEtcdV3Server(cfg, cache.NewInMemoryCache(), fr) 27 | assert.Nil(t, err) 28 | assert.NotNil(t, srv) 29 | } 30 | 31 | func TestEtcdV3ServerRun(t *testing.T) { 32 | cfg := config.NewDefaultConfig() 33 | // defined in kv_test.go 34 | fr := &fakeRevisioner{rev: 3} 35 | 36 | c := cache.NewInMemoryCache() 37 | srv, err := NewEtcdV3Server(cfg, c, fr) 38 | assert.Nil(t, err) 39 | assert.NotNil(t, srv) 40 | 41 | stopCh := make(chan struct{}) 42 | listener, err := nettest.NewLocalListener("tcp") 43 | assert.Nil(t, err) 44 | go func() { 45 | err := srv.Serve(listener) 46 | assert.Nil(t, err) 47 | close(stopCh) 48 | }() 49 | 50 | dialCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second) 51 | defer cancel() 52 | conn, err := grpc.DialContext(dialCtx, listener.Addr().String(), 53 | grpc.WithBlock(), 54 | grpc.WithInsecure(), 55 | ) 56 | assert.Nil(t, err) 57 | 58 | client := etcdserverpb.NewKVClient(conn) 59 | rr := &etcdserverpb.RangeRequest{ 60 | Key: []byte("/apisix/routes/1"), 61 | } 62 | resp, err := client.Range(context.Background(), rr) 63 | assert.Nil(t, err) 64 | assert.Len(t, resp.Kvs, 0) 65 | 66 | u := &apisix.Upstream{ 67 | Id: "1", 68 | } 69 | assert.Nil(t, c.Upstream().Insert(u)) 70 | 71 | rr.Key = []byte("/apisix/upstreams") 72 | rr.RangeEnd = []byte("/apisix/upstreamt") 73 | resp, err = client.Range(context.Background(), rr) 74 | assert.NotNil(t, resp) 75 | assert.Nil(t, err) 76 | assert.Len(t, resp.Kvs, 1) 77 | assert.Equal(t, resp.Kvs[0].Key, []byte("/apisix/upstreams/1")) 78 | 79 | assert.Nil(t, srv.Shutdown(context.Background())) 80 | select { 81 | case <-stopCh: 82 | break 83 | case <-time.After(2 * time.Second): 84 | assert.FailNow(t, "etcd v3 server didn't stop") 85 | } 86 | } 87 | 88 | func TestPushEvents(t *testing.T) { 89 | events := []types.Event{ 90 | { 91 | Type: types.EventAdd, 92 | Object: &apisix.Route{Id: "123"}, 93 | }, 94 | { 95 | Type: types.EventAdd, 96 | Object: &apisix.Route{Id: "124"}, 97 | }, 98 | { 99 | Type: types.EventAdd, 100 | Object: &apisix.Upstream{Id: "125"}, 101 | }, 102 | } 103 | f := &fakeRevisioner{rev: 1} 104 | cfg := &config.Config{ 105 | LogLevel: "debug", 106 | LogOutput: "stderr", 107 | EtcdKeyPrefix: "/apisix", 108 | } 109 | etcd, err := NewEtcdV3Server(cfg, cache.NewInMemoryCache(), f) 110 | assert.Nil(t, err) 111 | ws := &watchStream{ 112 | ctx: context.Background(), 113 | eventCh: make(chan *etcdserverpb.WatchResponse), 114 | etcd: etcd.(*etcdV3), 115 | route: make(map[int64]struct{}), 116 | upstream: make(map[int64]struct{}), 117 | } 118 | etcd.(*etcdV3).watchers[1] = ws 119 | ws.route[1] = struct{}{} 120 | ws.upstream[1] = struct{}{} 121 | 122 | etcd.PushEvents(events) 123 | 124 | for i := 0; i < 3; i++ { 125 | select { 126 | case <-time.After(2 * time.Second): 127 | assert.FailNow(t, "didn't receive event in time") 128 | case <-ws.eventCh: 129 | } 130 | } 131 | } 132 | 133 | func TestVersion(t *testing.T) { 134 | rw := httptest.NewRecorder() 135 | req := httptest.NewRequest("GET", "/version", nil) 136 | e := &etcdV3{ 137 | logger: log.DefaultLogger, 138 | } 139 | e.version(rw, req) 140 | assert.Equal(t, rw.Code, 200) 141 | assert.Equal(t, rw.Body.String(), `{"etcdserver":"3.5.0-pre","etcdcluster":"3.5.0"}`) 142 | } 143 | -------------------------------------------------------------------------------- /pkg/id/idgen.go: -------------------------------------------------------------------------------- 1 
| package id 2 | 3 | import ( 4 | "fmt" 5 | "hash/crc32" 6 | ) 7 | 8 | // GenID generates an ID according to the raw material. 9 | func GenID(raw string) string { 10 | if raw == "" { 11 | return "" 12 | } 13 | res := crc32.ChecksumIEEE([]byte(raw)) 14 | return fmt.Sprintf("%x", res) 15 | } 16 | -------------------------------------------------------------------------------- /pkg/id/idgen_test.go: -------------------------------------------------------------------------------- 1 | package id 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestGenID(t *testing.T) { 10 | hash := GenID("") 11 | assert.Len(t, hash, 0) 12 | 13 | assert.Equal(t, GenID("111"), GenID("111")) 14 | assert.NotEqual(t, GenID("112"), GenID("111")) 15 | } 16 | -------------------------------------------------------------------------------- /pkg/log/default_logger.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import "go.uber.org/zap/zapcore" 4 | 5 | var ( 6 | // DefaultLogger is the default logger, which logs message to stderr and with 7 | // the minimal level "warn". 8 | DefaultLogger *Logger 9 | ) 10 | 11 | func init() { 12 | l, err := NewLogger( 13 | WithOutputFile("stderr"), 14 | WithLogLevel("warn"), 15 | ) 16 | if err != nil { 17 | panic(err) 18 | } 19 | DefaultLogger = l 20 | } 21 | 22 | // Debug uses the fmt.Sprint to construct and log a message using the DefaultLogger. 23 | func Debug(args ...interface{}) { 24 | DefaultLogger.Debug(args...) 25 | } 26 | 27 | // Debugf uses the fmt.Sprintf to log a templated message using the DefaultLogger. 28 | func Debugf(template string, args ...interface{}) { 29 | DefaultLogger.Debugf(template, args...) 30 | } 31 | 32 | // Debugw logs a message with some additional context using the DefaultLogger. 33 | func Debugw(message string, fields ...zapcore.Field) { 34 | DefaultLogger.Debugw(message, fields...) 35 | } 36 | 37 | // Info uses the fmt.Sprint to construct and log a message using the DefaultLogger. 38 | func Info(args ...interface{}) { 39 | DefaultLogger.Info(args...) 40 | } 41 | 42 | // Infof uses the fmt.Sprintf to log a templated message using the DefaultLogger. 43 | func Infof(template string, args ...interface{}) { 44 | DefaultLogger.Infof(template, args...) 45 | } 46 | 47 | // Infow logs a message with some additional context using the DefaultLogger. 48 | func Infow(message string, fields ...zapcore.Field) { 49 | DefaultLogger.Infow(message, fields...) 50 | } 51 | 52 | // Warn uses the fmt.Sprint to construct and log a message using the DefaultLogger. 53 | func Warn(args ...interface{}) { 54 | DefaultLogger.Warn(args...) 55 | } 56 | 57 | // Warnf uses the fmt.Sprintf to log a templated message using the DefaultLogger. 58 | func Warnf(template string, args ...interface{}) { 59 | DefaultLogger.Warnf(template, args...) 60 | } 61 | 62 | // Warnw logs a message with some additional context using the DefaultLogger. 63 | func Warnw(message string, fields ...zapcore.Field) { 64 | DefaultLogger.Warnw(message, fields...) 65 | } 66 | 67 | // Error uses the fmt.Sprint to construct and log a message using the DefaultLogger. 68 | func Error(args ...interface{}) { 69 | DefaultLogger.Error(args...) 70 | } 71 | 72 | // Errorf uses the fmt.Sprintf to log a templated message using the DefaultLogger. 73 | func Errorf(template string, args ...interface{}) { 74 | DefaultLogger.Errorf(template, args...) 
75 | } 76 | 77 | // Errorw logs a message with some additional context using the DefaultLogger. 78 | func Errorw(message string, fields ...zapcore.Field) { 79 | DefaultLogger.Errorw(message, fields...) 80 | } 81 | 82 | // Panic uses the fmt.Sprint to construct and log a message using the DefaultLogger. 83 | func Panic(args ...interface{}) { 84 | DefaultLogger.Panic(args...) 85 | } 86 | 87 | // Panicf uses the fmt.Sprintf to log a templated message using the DefaultLogger. 88 | func Panicf(template string, args ...interface{}) { 89 | DefaultLogger.Panicf(template, args...) 90 | } 91 | 92 | // Panicw logs a message with some additional context using the DefaultLogger. 93 | func Panicw(message string, fields ...zapcore.Field) { 94 | DefaultLogger.Panicw(message, fields...) 95 | } 96 | 97 | // Fatal uses the fmt.Sprint to construct and log a message using the DefaultLogger. 98 | func Fatal(args ...interface{}) { 99 | DefaultLogger.Fatal(args...) 100 | } 101 | 102 | // Fatalf uses the fmt.Sprintf to log a templated message using the DefaultLogger. 103 | func Fatalf(template string, args ...interface{}) { 104 | DefaultLogger.Fatalf(template, args...) 105 | } 106 | 107 | // Fatalw logs a message with some additional context using the DefaultLogger. 108 | func Fatalw(message string, fields ...zapcore.Field) { 109 | DefaultLogger.Fatalw(message, fields...) 110 | } 111 | -------------------------------------------------------------------------------- /pkg/log/default_logger_test.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "go.uber.org/zap" 9 | "go.uber.org/zap/zapcore" 10 | ) 11 | 12 | var ( 13 | logHandler = map[string][]reflect.Value{ 14 | zapcore.DebugLevel.String(): { 15 | reflect.ValueOf(Debug), 16 | reflect.ValueOf(Debugf), 17 | reflect.ValueOf(Debugw), 18 | }, 19 | zapcore.InfoLevel.String(): { 20 | reflect.ValueOf(Info), 21 | reflect.ValueOf(Infof), 22 | reflect.ValueOf(Infow), 23 | }, 24 | zapcore.WarnLevel.String(): { 25 | reflect.ValueOf(Warn), 26 | reflect.ValueOf(Warnf), 27 | reflect.ValueOf(Warnw), 28 | }, 29 | zapcore.ErrorLevel.String(): { 30 | reflect.ValueOf(Error), 31 | reflect.ValueOf(Errorf), 32 | reflect.ValueOf(Errorw), 33 | }, 34 | zapcore.PanicLevel.String(): { 35 | reflect.ValueOf(Panic), 36 | reflect.ValueOf(Panicf), 37 | reflect.ValueOf(Panicw), 38 | }, 39 | zapcore.FatalLevel.String(): { 40 | reflect.ValueOf(Fatal), 41 | reflect.ValueOf(Fatalf), 42 | reflect.ValueOf(Fatalw), 43 | }, 44 | } 45 | ) 46 | 47 | func TestDefaultLogger(t *testing.T) { 48 | for level, handlers := range logHandler { 49 | t.Run("test log with level "+level, func(t *testing.T) { 50 | fws := &fakeWriteSyncer{} 51 | logger, err := NewLogger(WithLogLevel(level), WithWriteSyncer(fws)) 52 | assert.Nil(t, err, "failed to new logger: ", err) 53 | defer logger.Close() 54 | // Reset default logger 55 | DefaultLogger = logger 56 | 57 | handlers[0].Call([]reflect.Value{reflect.ValueOf("hello")}) 58 | assert.Nil(t, logger.Sync(), "failed to sync logger") 59 | 60 | fields := unmarshalLogMessage(t, fws.bytes()) 61 | assert.Equal(t, fields.Level, level, "bad log level ", fields.Level) 62 | assert.Equal(t, fields.Message, "hello", "bad log message ", fields.Message) 63 | 64 | handlers[1].Call([]reflect.Value{reflect.ValueOf("hello I am %s"), reflect.ValueOf("alex")}) 65 | assert.Nil(t, logger.Sync(), "failed to sync logger") 66 | 67 | fields = unmarshalLogMessage(t, 
fws.bytes()) 68 | assert.Equal(t, fields.Level, level, "bad log level ", fields.Level) 69 | assert.Equal(t, fields.Message, "hello I am alex", "bad log message ", fields.Message) 70 | 71 | handlers[2].Call([]reflect.Value{reflect.ValueOf("hello"), reflect.ValueOf(zap.String("name", "alex")), reflect.ValueOf(zap.Int("age", 3))}) 72 | 73 | assert.Nil(t, logger.Sync(), "failed to sync logger") 74 | 75 | fields = unmarshalLogMessage(t, fws.bytes()) 76 | assert.Equal(t, fields.Level, level, "bad log level ", fields.Level) 77 | assert.Equal(t, fields.Message, "hello", "bad log message ", fields.Message) 78 | assert.Equal(t, fields.Name, "alex", "bad name field ", fields.Name) 79 | assert.Equal(t, fields.Age, 3, "bad age field ", fields.Age) 80 | }) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /pkg/log/logger_test.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "net/http" 7 | "reflect" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | "go.uber.org/zap" 12 | ) 13 | 14 | type fakeWriteSyncer struct { 15 | buf bytes.Buffer 16 | } 17 | 18 | type fields struct { 19 | Level string 20 | Time string 21 | Message string 22 | Name string 23 | Context string 24 | Age int 25 | } 26 | 27 | func (fws *fakeWriteSyncer) Sync() error { 28 | return nil 29 | } 30 | 31 | func (fws *fakeWriteSyncer) Write(p []byte) (int, error) { 32 | return fws.buf.Write(p) 33 | } 34 | 35 | func (fws *fakeWriteSyncer) bytes() (p []byte) { 36 | s := fws.buf.Bytes() 37 | p = make([]byte, len(s)) 38 | copy(p, s) 39 | fws.buf.Reset() 40 | return 41 | } 42 | 43 | func unmarshalLogMessage(t *testing.T, data []byte) *fields { 44 | var f fields 45 | err := json.Unmarshal(data, &f) 46 | assert.Nil(t, err, "failed to unmarshal log message: ", err) 47 | return &f 48 | } 49 | 50 | func TestLogger(t *testing.T) { 51 | for level := range levelMap { 52 | t.Run("test log with level "+level, func(t *testing.T) { 53 | fws := &fakeWriteSyncer{} 54 | logger, err := NewLogger( 55 | WithLogLevel(level), 56 | WithWriteSyncer(fws), 57 | WithContext("test-logger"), 58 | ) 59 | assert.Nil(t, err, "failed to new logger: ", err) 60 | defer logger.Close() 61 | 62 | rv := reflect.ValueOf(logger) 63 | 64 | handler := rv.MethodByName(http.CanonicalHeaderKey(level)) 65 | handler.Call([]reflect.Value{reflect.ValueOf("hello")}) 66 | 67 | assert.Nil(t, logger.Sync(), "failed to sync logger") 68 | 69 | fields := unmarshalLogMessage(t, fws.bytes()) 70 | assert.Equal(t, fields.Level, level, "bad log level ", fields.Level) 71 | assert.Equal(t, fields.Message, "hello", "bad log message ", fields.Message) 72 | assert.Equal(t, fields.Context, "test-logger", "bad context") 73 | 74 | handler = rv.MethodByName(http.CanonicalHeaderKey(level) + "f") 75 | handler.Call([]reflect.Value{reflect.ValueOf("hello I am %s"), reflect.ValueOf("alex")}) 76 | 77 | assert.Nil(t, logger.Sync(), "failed to sync logger") 78 | 79 | fields = unmarshalLogMessage(t, fws.bytes()) 80 | assert.Equal(t, fields.Level, level, "bad log level ", fields.Level) 81 | assert.Equal(t, fields.Message, "hello I am alex", "bad log message ", fields.Message) 82 | assert.Equal(t, fields.Context, "test-logger", "bad context") 83 | 84 | handler = rv.MethodByName(http.CanonicalHeaderKey(level) + "w") 85 | handler.Call([]reflect.Value{reflect.ValueOf("hello"), reflect.ValueOf(zap.String("name", "alex")), reflect.ValueOf(zap.Int("age", 3))}) 86 | 87 
| assert.Nil(t, logger.Sync(), "failed to sync logger") 88 | 89 | fields = unmarshalLogMessage(t, fws.bytes()) 90 | assert.Equal(t, fields.Level, level, "bad log level ", fields.Level) 91 | assert.Equal(t, fields.Message, "hello", "bad log message ", fields.Message) 92 | assert.Equal(t, fields.Name, "alex", "bad name field ", fields.Name) 93 | assert.Equal(t, fields.Age, 3, "bad age field ", fields.Age) 94 | assert.Equal(t, fields.Context, "test-logger", "bad context") 95 | }) 96 | } 97 | } 98 | 99 | func TestLogLevel(t *testing.T) { 100 | fws := &fakeWriteSyncer{} 101 | logger, err := NewLogger(WithLogLevel("error"), WithWriteSyncer(fws)) 102 | assert.Nil(t, err, "failed to new logger: ", err) 103 | defer logger.Close() 104 | 105 | logger.Warn("this message should be dropped") 106 | assert.Nil(t, logger.Sync(), "failed to sync logger") 107 | 108 | p := fws.bytes() 109 | assert.Len(t, p, 0, "saw a message which should be dropped") 110 | } 111 | -------------------------------------------------------------------------------- /pkg/log/options.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | "go.uber.org/zap/zapcore" 5 | ) 6 | 7 | // Option configures how to set up logger. 8 | type Option interface { 9 | apply(*options) 10 | } 11 | 12 | type funcOption struct { 13 | do func(*options) 14 | } 15 | 16 | func (fo *funcOption) apply(o *options) { 17 | fo.do(o) 18 | } 19 | 20 | type options struct { 21 | writeSyncer zapcore.WriteSyncer 22 | outputFile string 23 | logLevel string 24 | context string 25 | } 26 | 27 | // WithLogLevel sets the log level. 28 | func WithLogLevel(level string) Option { 29 | return &funcOption{ 30 | do: func(o *options) { 31 | o.logLevel = level 32 | }, 33 | } 34 | } 35 | 36 | // WithOutputFile sets the output file path. 37 | func WithOutputFile(file string) Option { 38 | return &funcOption{ 39 | do: func(o *options) { 40 | o.outputFile = file 41 | }, 42 | } 43 | } 44 | 45 | // WithContext sets the context of the logger. 46 | func WithContext(ctx string) Option { 47 | return &funcOption{ 48 | do: func(o *options) { 49 | o.context = ctx 50 | }, 51 | } 52 | } 53 | 54 | // WithWriteSyncer is a low level API which sets the underlying 55 | // WriteSyncer by providing a zapcore.WriterSyncer, 56 | // which has high priority than WithOutputFile. 57 | func WithWriteSyncer(ws zapcore.WriteSyncer) Option { 58 | return &funcOption{ 59 | do: func(o *options) { 60 | o.writeSyncer = ws 61 | }, 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /pkg/provisioner/types.go: -------------------------------------------------------------------------------- 1 | package provisioner 2 | 3 | import ( 4 | "github.com/api7/apisix-mesh-agent/pkg/types" 5 | ) 6 | 7 | // Provisioner provisions config event. 8 | // The source type can be xDS or UDPA or whatever anything else. 9 | type Provisioner interface { 10 | // Channel returns a readonly channel where caller can get events. 11 | Channel() <-chan []types.Event 12 | // Run launches the provisioner. 
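// A typical caller (see Sidecar.Run in pkg/sidecar/types.go) launches it in a
// goroutine and then drains Channel() until it is closed. A minimal sketch,
// assuming the caller owns the stop channel:
//
//   go func() { _ = p.Run(stop) }()
//   for events := range p.Channel() {
//       // apply events to the local cache / etcd mirror
//   }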
13 | Run(chan struct{}) error 14 | } 15 | -------------------------------------------------------------------------------- /pkg/provisioner/util/manifest.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | apisixutil "github.com/api7/apisix-mesh-agent/pkg/apisix" 5 | "github.com/api7/apisix-mesh-agent/pkg/types" 6 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 7 | ) 8 | 9 | // Manifest collects a couples Routes, Upstreams. 10 | type Manifest struct { 11 | Routes []*apisix.Route 12 | Upstreams []*apisix.Upstream 13 | } 14 | 15 | // DiffFrom checks the difference between m and m2 from m's point of view. 16 | func (m *Manifest) DiffFrom(m2 *Manifest) (*Manifest, *Manifest, *Manifest) { 17 | var ( 18 | added Manifest 19 | updated Manifest 20 | deleted Manifest 21 | ) 22 | 23 | a, d, u := apisixutil.CompareRoutes(m.Routes, m2.Routes) 24 | added.Routes = append(added.Routes, a...) 25 | updated.Routes = append(updated.Routes, u...) 26 | deleted.Routes = append(deleted.Routes, d...) 27 | 28 | au, du, uu := apisixutil.CompareUpstreams(m.Upstreams, m2.Upstreams) 29 | added.Upstreams = append(added.Upstreams, au...) 30 | updated.Upstreams = append(updated.Upstreams, uu...) 31 | deleted.Upstreams = append(deleted.Upstreams, du...) 32 | 33 | return &added, &deleted, &updated 34 | } 35 | 36 | // Size calculates the number of resources in the manifest. 37 | func (m *Manifest) Size() int { 38 | return len(m.Upstreams) + len(m.Routes) 39 | } 40 | 41 | // Events generates events according to its collection. 42 | func (m *Manifest) Events(evType types.EventType) []types.Event { 43 | var events []types.Event 44 | for _, r := range m.Routes { 45 | if evType == types.EventDelete { 46 | events = append(events, types.Event{ 47 | Type: types.EventDelete, 48 | Tombstone: r, 49 | }) 50 | } else { 51 | events = append(events, types.Event{ 52 | Type: evType, 53 | Object: r, 54 | }) 55 | } 56 | } 57 | for _, u := range m.Upstreams { 58 | if evType == types.EventDelete { 59 | events = append(events, types.Event{ 60 | Type: types.EventDelete, 61 | Tombstone: u, 62 | }) 63 | } else { 64 | events = append(events, types.Event{ 65 | Type: evType, 66 | Object: u, 67 | }) 68 | } 69 | } 70 | return events 71 | } 72 | -------------------------------------------------------------------------------- /pkg/provisioner/util/manifest_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | "github.com/api7/apisix-mesh-agent/pkg/types" 9 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 10 | ) 11 | 12 | func TestManifestSize(t *testing.T) { 13 | m := &Manifest{ 14 | Routes: []*apisix.Route{ 15 | {}, {}, 16 | }, 17 | Upstreams: []*apisix.Upstream{ 18 | {}, {}, 19 | }, 20 | } 21 | assert.Equal(t, m.Size(), 4) 22 | } 23 | 24 | func TestManifestEvents(t *testing.T) { 25 | m := &Manifest{ 26 | Routes: []*apisix.Route{ 27 | {}, {}, 28 | }, 29 | Upstreams: []*apisix.Upstream{ 30 | {}, {}, 31 | }, 32 | } 33 | evs := m.Events(types.EventAdd) 34 | assert.Len(t, evs, 4) 35 | assert.NotNil(t, evs[0].Object) 36 | assert.Nil(t, evs[0].Tombstone) 37 | assert.Equal(t, evs[0].Type, types.EventAdd) 38 | 39 | evs = m.Events(types.EventUpdate) 40 | assert.Len(t, evs, 4) 41 | assert.NotNil(t, evs[0].Object) 42 | assert.Nil(t, evs[0].Tombstone) 43 | assert.Equal(t, evs[0].Type, types.EventUpdate) 44 | 45 | evs = m.Events(types.EventDelete) 46 
| assert.Len(t, evs, 4) 47 | assert.Nil(t, evs[0].Object) 48 | assert.NotNil(t, evs[0].Tombstone) 49 | assert.Equal(t, evs[0].Type, types.EventDelete) 50 | } 51 | 52 | func TestManifestDiffFrom(t *testing.T) { 53 | m := &Manifest{ 54 | Routes: []*apisix.Route{ 55 | { 56 | Id: "1", 57 | }, 58 | { 59 | Id: "2", 60 | }, 61 | }, 62 | Upstreams: []*apisix.Upstream{ 63 | { 64 | Id: "1", 65 | }, 66 | { 67 | Id: "2", 68 | }, 69 | }, 70 | } 71 | m2 := &Manifest{ 72 | Routes: []*apisix.Route{ 73 | { 74 | Id: "2", 75 | Uris: []string{"/foo"}, 76 | }, 77 | { 78 | Id: "3", 79 | }, 80 | }, 81 | Upstreams: []*apisix.Upstream{ 82 | { 83 | Id: "1", 84 | }, 85 | }, 86 | } 87 | a, d, u := m.DiffFrom(m2) 88 | assert.Equal(t, a.Size(), 1) 89 | assert.Equal(t, a.Routes[0].Id, "3") 90 | 91 | assert.Equal(t, d.Size(), 2) 92 | assert.Equal(t, d.Routes[0].Id, "1") 93 | assert.Equal(t, d.Upstreams[0].Id, "2") 94 | 95 | assert.Equal(t, u.Size(), 1) 96 | assert.Equal(t, u.Routes[0].Id, "2") 97 | assert.Equal(t, u.Routes[0].Uris, []string{"/foo"}) 98 | } 99 | -------------------------------------------------------------------------------- /pkg/provisioner/util/util.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | // GenNodeId generates an id used for xDS protocol. The format is like: 8 | // sidecar~172.10.0.2~12345asad034~default.svc.cluster.local 9 | func GenNodeId(runId, ipAddr, dnsDomain string) string { 10 | var buf strings.Builder 11 | buf.WriteString("sidecar~") 12 | buf.WriteString(ipAddr) 13 | buf.WriteString("~") 14 | buf.WriteString(runId) 15 | buf.WriteString("~") 16 | buf.WriteString(dnsDomain) 17 | return buf.String() 18 | } 19 | -------------------------------------------------------------------------------- /pkg/provisioner/util/util_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestGenNodeId(t *testing.T) { 10 | id := GenNodeId("12345", "10.0.5.3", "default.svc.cluster.local") 11 | assert.Equal(t, id, "sidecar~10.0.5.3~12345~default.svc.cluster.local") 12 | } 13 | -------------------------------------------------------------------------------- /pkg/provisioner/xds/v3/file/delivery.go: -------------------------------------------------------------------------------- 1 | package file 2 | 3 | import ( 4 | clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" 5 | endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" 6 | routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" 7 | "github.com/golang/protobuf/ptypes/any" 8 | "go.uber.org/zap" 9 | "google.golang.org/protobuf/proto" 10 | "google.golang.org/protobuf/types/known/anypb" 11 | 12 | xdsv3 "github.com/api7/apisix-mesh-agent/pkg/adaptor/xds/v3" 13 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 14 | ) 15 | 16 | func (p *xdsFileProvisioner) processRouteConfigurationV3(res *any.Any) []*apisix.Route { 17 | var route routev3.RouteConfiguration 18 | err := anypb.UnmarshalTo(res, &route, proto.UnmarshalOptions{ 19 | DiscardUnknown: true, 20 | }) 21 | if err != nil { 22 | p.logger.Errorw("found invalid RouteConfiguration resource", 23 | zap.Error(err), 24 | zap.Any("resource", res), 25 | ) 26 | return nil 27 | } 28 | 29 | routes, err := p.v3Adaptor.TranslateRouteConfiguration(&route, nil) 30 | if err != nil { 31 | p.logger.Errorw("failed to 
translate RouteConfiguration to APISIX routes", 32 | zap.Error(err), 33 | zap.Any("route", &route), 34 | ) 35 | } 36 | return routes 37 | } 38 | 39 | func (p *xdsFileProvisioner) processClusterV3(res *any.Any) []*apisix.Upstream { 40 | var cluster clusterv3.Cluster 41 | err := anypb.UnmarshalTo(res, &cluster, proto.UnmarshalOptions{ 42 | DiscardUnknown: true, 43 | }) 44 | if err != nil { 45 | p.logger.Errorw("found invalid Cluster resource", 46 | zap.Error(err), 47 | zap.Any("resource", res), 48 | ) 49 | return nil 50 | } 51 | ups, err := p.v3Adaptor.TranslateCluster(&cluster) 52 | if err != nil && err != xdsv3.ErrRequireFurtherEDS { 53 | p.logger.Errorw("failed to translate Cluster to APISIX routes", 54 | zap.Error(err), 55 | zap.Any("cluster", &cluster), 56 | ) 57 | return nil 58 | } 59 | if err == xdsv3.ErrRequireFurtherEDS { 60 | p.logger.Warnw("cluster depends on another EDS config, an upstream without nodes setting was generated", 61 | zap.Any("upstream", ups), 62 | ) 63 | } 64 | p.upstreamCache[ups.Name] = ups 65 | return []*apisix.Upstream{ups} 66 | } 67 | 68 | func (p *xdsFileProvisioner) processClusterLoadAssignmentV3(res *any.Any) []*apisix.Upstream { 69 | var cla endpointv3.ClusterLoadAssignment 70 | err := anypb.UnmarshalTo(res, &cla, proto.UnmarshalOptions{ 71 | DiscardUnknown: true, 72 | }) 73 | if err != nil { 74 | p.logger.Errorw("found invalid ClusterLoadAssignment resource", 75 | zap.Error(err), 76 | zap.Any("resource", res), 77 | ) 78 | return nil 79 | } 80 | 81 | ups, ok := p.upstreamCache[cla.ClusterName] 82 | if !ok { 83 | p.logger.Warnw("found invalid ClusterLoadAssignment resource", 84 | zap.String("reason", "cluster unknown"), 85 | zap.Any("resource", res), 86 | ) 87 | return nil 88 | } 89 | if len(ups.Nodes) > 0 { 90 | p.logger.Warnw("found redundant ClusterLoadAssignment resource", 91 | zap.String("reason", "Cluster already has load assignment"), 92 | zap.Any("resource", res), 93 | ) 94 | return nil 95 | } 96 | 97 | nodes, err := p.v3Adaptor.TranslateClusterLoadAssignment(&cla) 98 | if err != nil { 99 | p.logger.Errorw("failed to translate ClusterLoadAssignment", 100 | zap.Error(err), 101 | zap.Any("resource", res), 102 | ) 103 | return nil 104 | } 105 | 106 | // Do not set on the original ups to avoid race conditions. 
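// Cloning gives copy-on-write behaviour: any reader still holding the old
// *apisix.Upstream taken from upstreamCache never sees a partially filled
// Nodes slice; the cache entry is swapped to the fully built copy below.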
107 | newUps := proto.Clone(ups).(*apisix.Upstream) 108 | newUps.Nodes = nodes 109 | p.upstreamCache[cla.ClusterName] = newUps 110 | return []*apisix.Upstream{newUps} 111 | } 112 | -------------------------------------------------------------------------------- /pkg/provisioner/xds/v3/file/testdata/cluster.json: -------------------------------------------------------------------------------- 1 | { 2 | "versionInfo": "0", 3 | "resources": [ 4 | { 5 | "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", 6 | "name": "httpbin.default.svc.cluster.local", 7 | "type": "EDS" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /pkg/provisioner/xds/v3/file/testdata/route.json: -------------------------------------------------------------------------------- 1 | { 2 | "versionInfo": "0", 3 | "resources": [ 4 | { 5 | "@type": "type.googleapis.com/envoy.config.route.v3.RouteConfiguration", 6 | "name": "rc1", 7 | "virtualHosts": [ 8 | { 9 | "name": "vhost1", 10 | "domains": [ 11 | "*.apache.org", 12 | "apisix.apache.org" 13 | ], 14 | "routes": [ 15 | { 16 | "name": "route1", 17 | "match": { 18 | "path": "/foo", 19 | "caseSensitive": true 20 | }, 21 | "route": { 22 | "cluster": "kubernetes.default.svc.cluster.local" 23 | } 24 | } 25 | ] 26 | } 27 | ] 28 | } 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /pkg/provisioner/xds/v3/grpc/delivery.go: -------------------------------------------------------------------------------- 1 | package grpc 2 | 3 | import ( 4 | clusterv3 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" 5 | endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3" 6 | routev3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3" 7 | "github.com/golang/protobuf/ptypes/any" 8 | "go.uber.org/zap" 9 | "google.golang.org/protobuf/proto" 10 | "google.golang.org/protobuf/types/known/anypb" 11 | 12 | xdsv3 "github.com/api7/apisix-mesh-agent/pkg/adaptor/xds/v3" 13 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 14 | ) 15 | 16 | func (p *grpcProvisioner) processRouteConfigurationV3(res *any.Any) ([]*apisix.Route, error) { 17 | var route routev3.RouteConfiguration 18 | err := anypb.UnmarshalTo(res, &route, proto.UnmarshalOptions{ 19 | DiscardUnknown: true, 20 | }) 21 | if err != nil { 22 | p.logger.Errorw("found invalid RouteConfiguration resource", 23 | zap.Error(err), 24 | zap.Any("resource", res), 25 | ) 26 | return nil, err 27 | } 28 | 29 | opts := &xdsv3.TranslateOptions{ 30 | RouteOriginalDestination: p.routeOwnership, 31 | } 32 | routes, err := p.v3Adaptor.TranslateRouteConfiguration(&route, opts) 33 | if err != nil { 34 | p.logger.Errorw("failed to translate RouteConfiguration to APISIX routes", 35 | zap.Error(err), 36 | zap.Any("route", &route), 37 | ) 38 | return nil, err 39 | } 40 | return routes, nil 41 | } 42 | 43 | func (p *grpcProvisioner) processStaticRouteConfigurations(rcs []*routev3.RouteConfiguration) ([]*apisix.Route, error) { 44 | var ( 45 | routes []*apisix.Route 46 | ) 47 | opts := &xdsv3.TranslateOptions{ 48 | RouteOriginalDestination: p.routeOwnership, 49 | } 50 | for _, rc := range rcs { 51 | route, err := p.v3Adaptor.TranslateRouteConfiguration(rc, opts) 52 | if err != nil { 53 | p.logger.Errorw("failed to translate RouteConfiguration to APISIX routes", 54 | zap.Error(err), 55 | zap.Any("route", &route), 56 | ) 57 | return nil, err 58 | } 59 | } 60 | return routes, nil 61 | } 62 | 63 | func (p 
*grpcProvisioner) processClusterV3(res *any.Any) (*apisix.Upstream, error) { 64 | var cluster clusterv3.Cluster 65 | err := anypb.UnmarshalTo(res, &cluster, proto.UnmarshalOptions{ 66 | DiscardUnknown: true, 67 | }) 68 | if err != nil { 69 | p.logger.Errorw("found invalid Cluster resource", 70 | zap.Error(err), 71 | zap.Any("resource", res), 72 | ) 73 | return nil, err 74 | } 75 | ups, err := p.v3Adaptor.TranslateCluster(&cluster) 76 | if err != nil && err != xdsv3.ErrRequireFurtherEDS { 77 | return nil, err 78 | } 79 | if err == xdsv3.ErrRequireFurtherEDS { 80 | p.logger.Warnw("cluster depends on another EDS config, an upstream without nodes setting was generated", 81 | zap.Any("upstream", ups), 82 | ) 83 | p.edsRequiredClusters.Add(ups.Name) 84 | } 85 | return ups, nil 86 | } 87 | 88 | func (p *grpcProvisioner) processClusterLoadAssignmentV3(res *any.Any) (*apisix.Upstream, error) { 89 | var cla endpointv3.ClusterLoadAssignment 90 | err := anypb.UnmarshalTo(res, &cla, proto.UnmarshalOptions{ 91 | DiscardUnknown: true, 92 | }) 93 | if err != nil { 94 | p.logger.Errorw("found invalid ClusterLoadAssignment resource", 95 | zap.Error(err), 96 | zap.Any("resource", res), 97 | ) 98 | return nil, err 99 | } 100 | 101 | ups, ok := p.upstreams[cla.ClusterName] 102 | if !ok { 103 | p.logger.Warnw("found invalid ClusterLoadAssignment resource", 104 | zap.String("reason", "cluster unknown"), 105 | zap.Any("resource", res), 106 | ) 107 | return nil, _errUnknownClusterName 108 | } 109 | 110 | nodes, err := p.v3Adaptor.TranslateClusterLoadAssignment(&cla) 111 | if err != nil { 112 | p.logger.Errorw("failed to translate ClusterLoadAssignment", 113 | zap.Error(err), 114 | zap.Any("resource", res), 115 | ) 116 | return nil, err 117 | } 118 | 119 | // Do not set on the original ups to avoid race conditions. 120 | newUps := proto.Clone(ups).(*apisix.Upstream) 121 | newUps.Nodes = nodes 122 | p.upstreams[cla.ClusterName] = newUps 123 | return newUps, nil 124 | } 125 | -------------------------------------------------------------------------------- /pkg/set/string.go: -------------------------------------------------------------------------------- 1 | package set 2 | 3 | import "sort" 4 | 5 | // StringSet represents a set which elements are string. 6 | type StringSet map[string]struct{} 7 | 8 | // Add adds an element to set. 9 | func (set StringSet) Add(e string) { 10 | set[e] = struct{}{} 11 | } 12 | 13 | // Equal compares two string set and checks whether they are identical. 14 | func (set StringSet) Equal(set2 StringSet) bool { 15 | if len(set) != len(set2) { 16 | return false 17 | } 18 | for e := range set2 { 19 | if _, ok := set[e]; !ok { 20 | return false 21 | } 22 | } 23 | for e := range set { 24 | if _, ok := set2[e]; !ok { 25 | return false 26 | } 27 | } 28 | return true 29 | } 30 | 31 | // Strings converts the string set to a string slice. 32 | func (set StringSet) Strings() []string { 33 | s := make([]string, 0, len(set)) 34 | for e := range set { 35 | s = append(s, e) 36 | } 37 | return s 38 | } 39 | 40 | // OrderedStrings converts the string set to a sorted string slice. 
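// For example, a set built from "b", "c", "a" always yields ["a", "b", "c"],
// which keeps the output stable even though Go map iteration order is random.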
41 | func (set StringSet) OrderedStrings() []string { 42 | s := set.Strings() 43 | sort.Strings(s) 44 | return s 45 | } 46 | -------------------------------------------------------------------------------- /pkg/set/string_test.go: -------------------------------------------------------------------------------- 1 | package set 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestStringSet(t *testing.T) { 10 | s := StringSet{} 11 | s.Add("123") 12 | s.Add("456") 13 | s2 := StringSet{} 14 | s2.Add("123") 15 | s2.Add("456") 16 | 17 | assert.Equal(t, s.Equal(s2), true) 18 | s2.Add("111") 19 | assert.Equal(t, s.Equal(s2), false) 20 | } 21 | 22 | func TestStringSetToArray(t *testing.T) { 23 | s := StringSet{} 24 | s.Add("123") 25 | s.Add("456") 26 | s2 := StringSet{} 27 | s2.Add("456") 28 | s2.Add("123") 29 | 30 | assert.NotNil(t, s.OrderedStrings()) 31 | assert.NotNil(t, s2.OrderedStrings()) 32 | assert.Equal(t, s.OrderedStrings(), s2.OrderedStrings()) 33 | } 34 | -------------------------------------------------------------------------------- /pkg/sidecar/apisix.go: -------------------------------------------------------------------------------- 1 | package sidecar 2 | 3 | import ( 4 | "bytes" 5 | _ "embed" 6 | "io/ioutil" 7 | "os" 8 | "os/exec" 9 | "path/filepath" 10 | "sync" 11 | "syscall" 12 | "text/template" 13 | "time" 14 | 15 | "go.uber.org/zap" 16 | 17 | "github.com/api7/apisix-mesh-agent/pkg/log" 18 | ) 19 | 20 | var ( 21 | //go:embed apisix/config.yaml 22 | _configYaml string 23 | ) 24 | 25 | type apisixRunner struct { 26 | config *apisixConfig 27 | home string 28 | bin string 29 | runArgs []string 30 | logger *log.Logger 31 | done chan struct{} 32 | process *os.Process 33 | } 34 | 35 | type apisixConfig struct { 36 | SSLPort int 37 | NodeListen int 38 | GRPCListen string 39 | EtcdKeyPrefix string 40 | } 41 | 42 | func (ar *apisixRunner) run(wg *sync.WaitGroup) error { 43 | if err := ar.renderConfig(); err != nil { 44 | return err 45 | } 46 | 47 | errCh := make(chan error, 1) 48 | cmd := exec.Command(ar.bin, ar.runArgs...) 
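// Run the command in a goroutine; an exit within the first two seconds
// (see the select below) is treated as a startup failure, otherwise the
// *os.Process handle is kept so shutdown() can signal it later.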
49 | wg.Add(1) 50 | go func() { 51 | defer wg.Done() 52 | stderr := bytes.NewBuffer(nil) 53 | stdout := bytes.NewBuffer(nil) 54 | cmd.Stderr = stderr 55 | cmd.Stdout = stdout 56 | if err := cmd.Run(); err != nil { 57 | ar.logger.Warnw("apisix running failure", 58 | zap.Error(err), 59 | zap.String("bin", ar.bin), 60 | zap.String("stderr", stderr.String()), 61 | zap.String("stdout", stdout.String()), 62 | ) 63 | errCh <- err 64 | } else { 65 | ar.logger.Infow("apisix exited", 66 | zap.String("stderr", stderr.String()), 67 | zap.String("stdout", stdout.String()), 68 | ) 69 | } 70 | }() 71 | select { 72 | case err := <-errCh: 73 | return err 74 | case <-time.After(2 * time.Second): 75 | ar.process = cmd.Process 76 | break 77 | } 78 | ar.logger.Infow("launch apisix", 79 | zap.Int("master_pid", cmd.Process.Pid), 80 | ) 81 | return nil 82 | } 83 | 84 | func (ar *apisixRunner) renderConfig() error { 85 | temp, err := template.New("apisix-config").Parse(_configYaml) 86 | if err != nil { 87 | return err 88 | } 89 | var output bytes.Buffer 90 | if err := temp.Execute(&output, ar.config); err != nil { 91 | return err 92 | } 93 | filename := filepath.Join(ar.home, "conf", "config.yaml") 94 | if err := ioutil.WriteFile(filename, output.Bytes(), 0644); err != nil { 95 | return err 96 | } 97 | return nil 98 | } 99 | 100 | func (ar *apisixRunner) shutdown() { 101 | if ar.process == nil { 102 | return 103 | } 104 | ar.logger.Info("closing apisix") 105 | if err := ar.process.Signal(syscall.SIGINT); err != nil { 106 | ar.logger.Fatalw("failed to send SIGINT signal to apisix master process", 107 | zap.Int("master_pid", ar.process.Pid), 108 | zap.Error(err), 109 | ) 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /pkg/sidecar/apisix/config.yaml: -------------------------------------------------------------------------------- 1 | apisix: 2 | node_listen: {{ .NodeListen }} 3 | enable_admin: true 4 | enable_admin_cors: true 5 | enable_debug: true 6 | port_admin: 9999 7 | enable_dev_mode: true 8 | allow_admin: 9 | - 0.0.0.0/0 10 | ssl: 11 | enable: false 12 | listen_port: {{ .SSLPort }} 13 | nginx_config: # config for render the template to generate nginx.conf 14 | error_log_level: "info" 15 | main_configuration_snippet: | 16 | daemon off; 17 | http_configuration_snippet: | 18 | server { 19 | access_log on; 20 | listen 9081 reuseport; 21 | location / { 22 | proxy_http_version 1.1; 23 | proxy_set_header Connection ""; 24 | proxy_set_header Host $http_host; 25 | proxy_pass http://$connection_original_dst; 26 | add_header Via APISIX always; 27 | } 28 | } 29 | etcd: 30 | host: 31 | - "http://{{ .GRPCListen }}" # multiple etcd address, if your etcd cluster enables TLS, please use https scheme, 32 | # e.g. "https://127.0.0.1:2379". 
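# With the values used in TestConfigRender below (GRPCListen "127.0.0.1:2379"),
# this line renders to: - "http://127.0.0.1:2379".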
33 | prefix: "{{ .EtcdKeyPrefix }}" # apisix configurations prefix 34 | plugins: 35 | - cors 36 | - request-id 37 | -------------------------------------------------------------------------------- /pkg/sidecar/apisix_test.go: -------------------------------------------------------------------------------- 1 | package sidecar 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "sync" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | 11 | "github.com/api7/apisix-mesh-agent/pkg/log" 12 | ) 13 | 14 | func TestConfigRender(t *testing.T) { 15 | assert.Nil(t, os.Mkdir("./testdata/conf", 0755)) 16 | defer func() { 17 | assert.Nil(t, os.RemoveAll("./testdata/conf")) 18 | }() 19 | ar := &apisixRunner{ 20 | config: &apisixConfig{ 21 | SSLPort: 9443, 22 | NodeListen: 9080, 23 | GRPCListen: "127.0.0.1:2379", 24 | EtcdKeyPrefix: "/apisix", 25 | }, 26 | runArgs: []string{"start"}, 27 | home: "./testdata", 28 | } 29 | err := ar.renderConfig() 30 | assert.Nil(t, err) 31 | 32 | data, err := ioutil.ReadFile("./testdata/conf/config.yaml") 33 | assert.Nil(t, err) 34 | assert.Contains(t, string(data), "node_listen: 9080") 35 | assert.Contains(t, string(data), "prefix: \"/apisix\"") 36 | assert.Contains(t, string(data), "- \"http://127.0.0.1:2379\"") 37 | } 38 | 39 | func TestApisixRunner(t *testing.T) { 40 | assert.Nil(t, os.Mkdir("./testdata/conf", 0755)) 41 | defer func() { 42 | assert.Nil(t, os.RemoveAll("./testdata/conf")) 43 | }() 44 | ar := &apisixRunner{ 45 | logger: log.DefaultLogger, 46 | config: &apisixConfig{ 47 | SSLPort: 9443, 48 | NodeListen: 9080, 49 | GRPCListen: "127.0.0.1:2379", 50 | EtcdKeyPrefix: "/apisix", 51 | }, 52 | runArgs: []string{"3600"}, 53 | home: "./testdata", 54 | bin: "sleep", 55 | } 56 | var wg sync.WaitGroup 57 | assert.Nil(t, ar.run(&wg)) 58 | pid := ar.process.Pid 59 | assert.NotEqual(t, pid, 0) 60 | ar.shutdown() 61 | } 62 | -------------------------------------------------------------------------------- /pkg/sidecar/cachereflection.go: -------------------------------------------------------------------------------- 1 | package sidecar 2 | 3 | import ( 4 | "errors" 5 | "sync/atomic" 6 | 7 | "go.uber.org/zap" 8 | 9 | "github.com/api7/apisix-mesh-agent/pkg/types" 10 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 11 | ) 12 | 13 | var ( 14 | _errUnknownEventObject = errors.New("unknown event object type") 15 | ) 16 | 17 | func (s *Sidecar) reflectToCache(events []types.Event) { 18 | for i, ev := range events { 19 | var err error 20 | switch ev.Type { 21 | case types.EventAdd, types.EventUpdate: 22 | switch obj := ev.Object.(type) { 23 | case *apisix.Route: 24 | s.logger.Debugw("insert route cache", 25 | zap.Any("route", obj), 26 | zap.String("event", string(ev.Type)), 27 | ) 28 | err = s.cache.Route().Insert(obj) 29 | case *apisix.Upstream: 30 | s.logger.Debugw("insert upstream cache", 31 | zap.Any("upstream", obj), 32 | zap.String("event", string(ev.Type)), 33 | ) 34 | err = s.cache.Upstream().Insert(obj) 35 | default: 36 | err = _errUnknownEventObject 37 | } 38 | default: // types.EventDelete 39 | switch obj := ev.Tombstone.(type) { 40 | case *apisix.Route: 41 | s.logger.Debugw("delete route cache", 42 | zap.Any("route", obj), 43 | zap.String("event", string(ev.Type)), 44 | ) 45 | err = s.cache.Route().Delete(obj.GetId()) 46 | case *apisix.Upstream: 47 | s.logger.Debugw("delete upstream cache", 48 | zap.Any("upstream", obj), 49 | zap.String("event", string(ev.Type)), 50 | ) 51 | err = s.cache.Upstream().Delete(obj.GetId()) 52 | default: 53 | err = 
_errUnknownEventObject 54 | } 55 | } 56 | if err != nil { 57 | s.logger.Errorw("failed to reflect event to cache", 58 | zap.Any("event", ev), 59 | zap.Error(err), 60 | ) 61 | } 62 | for { 63 | rev := atomic.LoadInt64(&s.revision) 64 | if atomic.CompareAndSwapInt64(&s.revision, rev, rev+1) { 65 | s.logger.Debugw("bump revision", 66 | zap.Any("revision", rev+1), 67 | ) 68 | events[i].Revision = rev 69 | break 70 | } 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /pkg/sidecar/cachereflection_test.go: -------------------------------------------------------------------------------- 1 | package sidecar 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | "github.com/api7/apisix-mesh-agent/pkg/cache" 9 | "github.com/api7/apisix-mesh-agent/pkg/config" 10 | "github.com/api7/apisix-mesh-agent/pkg/types" 11 | "github.com/api7/apisix-mesh-agent/pkg/types/apisix" 12 | ) 13 | 14 | func TestReflectToCache(t *testing.T) { 15 | cfg := config.NewDefaultConfig() 16 | cfg.XDSWatchFiles = append(cfg.XDSWatchFiles, "/tmp") 17 | cfg.GRPCListen = "127.0.0.1:10001" 18 | s, err := NewSidecar(cfg) 19 | assert.Nil(t, err) 20 | assert.NotNil(t, s) 21 | 22 | events := []types.Event{ 23 | { 24 | Type: types.EventAdd, 25 | Object: &apisix.Route{ 26 | Id: "1", 27 | }, 28 | }, 29 | { 30 | Type: types.EventAdd, 31 | Object: &apisix.Route{ 32 | Id: "2", 33 | }, 34 | }, 35 | { 36 | Type: types.EventUpdate, 37 | Object: &apisix.Upstream{ 38 | Id: "133", 39 | }, 40 | }, 41 | { 42 | Type: types.EventDelete, 43 | Tombstone: &apisix.Upstream{ 44 | Id: "21", 45 | }, 46 | }, 47 | } 48 | err = s.cache.Upstream().Insert(&apisix.Upstream{Id: "21"}) 49 | assert.Nil(t, err) 50 | s.reflectToCache(events) 51 | r1, err := s.cache.Route().Get("1") 52 | assert.NotNil(t, r1) 53 | assert.Nil(t, err) 54 | 55 | r2, err := s.cache.Route().Get("2") 56 | assert.NotNil(t, r2) 57 | assert.Nil(t, err) 58 | 59 | u1, err := s.cache.Upstream().Get("133") 60 | assert.NotNil(t, u1) 61 | assert.Nil(t, err) 62 | 63 | u2, err := s.cache.Upstream().Get("21") 64 | assert.Nil(t, u2) 65 | assert.Equal(t, err, cache.ErrObjectNotFound) 66 | } 67 | -------------------------------------------------------------------------------- /pkg/sidecar/testdata/cluster.json: -------------------------------------------------------------------------------- 1 | { 2 | "versionInfo": "0", 3 | "resources": [ 4 | { 5 | "@type": "type.googleapis.com/envoy.config.cluster.v3.Cluster", 6 | "name": "httpbin.default.svc.cluster.local", 7 | "type": "EDS" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /pkg/sidecar/types.go: -------------------------------------------------------------------------------- 1 | package sidecar 2 | 3 | import ( 4 | "context" 5 | "net" 6 | "sync" 7 | "sync/atomic" 8 | "time" 9 | 10 | "go.uber.org/zap" 11 | 12 | "github.com/api7/apisix-mesh-agent/pkg/cache" 13 | "github.com/api7/apisix-mesh-agent/pkg/config" 14 | "github.com/api7/apisix-mesh-agent/pkg/etcdv3" 15 | "github.com/api7/apisix-mesh-agent/pkg/log" 16 | "github.com/api7/apisix-mesh-agent/pkg/provisioner" 17 | xdsv3file "github.com/api7/apisix-mesh-agent/pkg/provisioner/xds/v3/file" 18 | xdsv3grpc "github.com/api7/apisix-mesh-agent/pkg/provisioner/xds/v3/grpc" 19 | "github.com/api7/apisix-mesh-agent/pkg/types" 20 | ) 21 | 22 | // Sidecar is the entity to joint provisioner, cache, etcd and launch 23 | // the program. 
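// Roughly, configuration flows provisioner (xDS) -> in-memory cache ->
// embedded etcd v3 API server -> APISIX (launched by apisixRunner when the
// agent runs in bundle mode); see Run for the exact wiring.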
24 | type Sidecar struct { 25 | runId string 26 | logger *log.Logger 27 | provisioner provisioner.Provisioner 28 | cache cache.Cache 29 | grpcListener net.Listener 30 | etcdSrv etcdv3.EtcdV3 31 | revision int64 32 | apisixRunner *apisixRunner 33 | waitGroup sync.WaitGroup 34 | } 35 | 36 | // NewSidecar creates a Sidecar object. 37 | func NewSidecar(cfg *config.Config) (*Sidecar, error) { 38 | p, err := newProvisioner(cfg) 39 | if err != nil { 40 | return nil, err 41 | } 42 | logger, err := log.NewLogger( 43 | log.WithContext("sidecar"), 44 | log.WithLogLevel(cfg.LogLevel), 45 | log.WithOutputFile(cfg.LogOutput), 46 | ) 47 | if err != nil { 48 | return nil, err 49 | } 50 | 51 | li, err := net.Listen("tcp", cfg.GRPCListen) 52 | if err != nil { 53 | return nil, err 54 | } 55 | 56 | var ar *apisixRunner 57 | if cfg.RunMode == config.BundleMode { 58 | ar = &apisixRunner{ 59 | home: cfg.APISIXHomePath, 60 | bin: cfg.APISIXBinPath, 61 | done: make(chan struct{}), 62 | logger: logger, 63 | runArgs: []string{"start"}, 64 | config: &apisixConfig{ 65 | NodeListen: 9080, 66 | GRPCListen: cfg.GRPCListen, 67 | EtcdKeyPrefix: cfg.EtcdKeyPrefix, 68 | }, 69 | } 70 | } 71 | 72 | s := &Sidecar{ 73 | runId: cfg.RunId, 74 | grpcListener: li, 75 | logger: logger, 76 | provisioner: p, 77 | cache: cache.NewInMemoryCache(), 78 | apisixRunner: ar, 79 | } 80 | etcd, err := etcdv3.NewEtcdV3Server(cfg, s.cache, s) 81 | if err != nil { 82 | return nil, err 83 | } 84 | s.etcdSrv = etcd 85 | return s, nil 86 | } 87 | 88 | // Run runs the sidecar program. 89 | func (s *Sidecar) Run(stop chan struct{}) error { 90 | s.logger.Infow("sidecar started", 91 | zap.String("id", s.runId), 92 | ) 93 | defer s.logger.Info("sidecar exited") 94 | 95 | go func() { 96 | if err := s.provisioner.Run(stop); err != nil { 97 | s.logger.Fatalw("provisioner run failed", 98 | zap.Error(err), 99 | ) 100 | } 101 | }() 102 | 103 | s.waitGroup.Add(1) 104 | go func() { 105 | defer s.waitGroup.Done() 106 | if err := s.etcdSrv.Serve(s.grpcListener); err != nil { 107 | s.logger.Fatalw("etcd v3 server run failed", 108 | zap.Error(err), 109 | ) 110 | } 111 | }() 112 | time.Sleep(time.Second) 113 | 114 | if s.apisixRunner != nil { 115 | // Launch Apache APISIX after the main logic of apisix-mesh-agent was started, 116 | // so that once APISIX started, it can fetch configuration from apisix-mesh-agent. 117 | if err := s.apisixRunner.run(&s.waitGroup); err != nil { 118 | return err 119 | } 120 | } 121 | 122 | loop: 123 | for { 124 | events, ok := <-s.provisioner.Channel() 125 | if !ok { 126 | break loop 127 | } 128 | s.reflectToLog(events) 129 | // TODO may reflect to etcd after cache one by one. 130 | s.reflectToCache(events) 131 | s.reflectToEtcd(events) 132 | // sidecar goroutine doesn't need to watch on stop channel, 133 | // since it can receive the quit signal from the provisioner. 
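// Reflecting to the cache first also bumps s.revision and stamps each
// event's Revision before the same batch is pushed to the etcd server.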
134 | } 135 | 136 | if s.apisixRunner != nil { 137 | s.apisixRunner.shutdown() 138 | } 139 | 140 | shutCtx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) 141 | defer cancel() 142 | if err := s.etcdSrv.Shutdown(shutCtx); err != nil { 143 | s.logger.Errorw("failed to shutdown etcd server", 144 | zap.Error(err), 145 | ) 146 | } 147 | 148 | s.waitGroup.Wait() 149 | return nil 150 | } 151 | 152 | func (s *Sidecar) reflectToLog(events []types.Event) { 153 | s.logger.Debugw("events arrived from provisioner", 154 | zap.Any("events", events), 155 | ) 156 | } 157 | 158 | func (s *Sidecar) reflectToEtcd(events []types.Event) { 159 | s.etcdSrv.PushEvents(events) 160 | } 161 | 162 | // Revision implements etcdv3.Revisioner. 163 | func (s *Sidecar) Revision() int64 { 164 | return atomic.LoadInt64(&s.revision) 165 | } 166 | 167 | func newProvisioner(cfg *config.Config) (provisioner.Provisioner, error) { 168 | switch cfg.Provisioner { 169 | case config.XDSV3FileProvisioner: 170 | return xdsv3file.NewXDSProvisioner(cfg) 171 | case config.XDSV3GRPCProvisioner: 172 | return xdsv3grpc.NewXDSProvisioner(cfg) 173 | default: 174 | return nil, config.ErrUnknownProvisioner 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /pkg/sidecar/types_test.go: -------------------------------------------------------------------------------- 1 | package sidecar 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | 9 | "github.com/api7/apisix-mesh-agent/pkg/config" 10 | "github.com/api7/apisix-mesh-agent/pkg/id" 11 | ) 12 | 13 | func TestSidecarRun(t *testing.T) { 14 | cfg := config.NewDefaultConfig() 15 | cfg.XDSWatchFiles = append(cfg.XDSWatchFiles, "testdata/cluster.json") 16 | cfg.GRPCListen = "127.0.0.1:10002" 17 | s, err := NewSidecar(cfg) 18 | assert.Nil(t, err) 19 | assert.NotNil(t, s) 20 | 21 | stop := make(chan struct{}) 22 | finishCh := make(chan struct{}) 23 | go func() { 24 | err := s.Run(stop) 25 | assert.Nil(t, err) 26 | close(finishCh) 27 | }() 28 | 29 | time.Sleep(time.Second) 30 | close(stop) 31 | <-finishCh 32 | 33 | ups, err := s.cache.Upstream().Get(id.GenID("httpbin.default.svc.cluster.local")) 34 | assert.Nil(t, err) 35 | assert.Equal(t, ups.Name, "httpbin.default.svc.cluster.local") 36 | assert.Len(t, ups.Nodes, 0) 37 | } 38 | -------------------------------------------------------------------------------- /pkg/types/apisix/base.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // versions: 3 | // protoc-gen-go v1.25.0-devel 4 | // protoc v3.12.3 5 | // source: base.proto 6 | 7 | package apisix 8 | 9 | import ( 10 | _ "github.com/envoyproxy/protoc-gen-validate/validate" 11 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 12 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 13 | reflect "reflect" 14 | sync "sync" 15 | ) 16 | 17 | const ( 18 | // Verify that this generated code is sufficiently up-to-date. 19 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 20 | // Verify that runtime/protoimpl is sufficiently up-to-date. 21 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 22 | ) 23 | 24 | // Var represents the expression like: 25 | // ["arg_id", "equal", "543"]. 
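// In the APISIX route schema `vars` is a list of such expressions (a
// two-dimensional array), so each Var message carries one row of that list.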
26 | type Var struct { 27 | state protoimpl.MessageState 28 | sizeCache protoimpl.SizeCache 29 | unknownFields protoimpl.UnknownFields 30 | 31 | // vars in Route is an two-dimensional array which cannot be represented 32 | // directly in protobuf, here we use https://github.com/favadi/protoc-go-inject-tag 33 | // to hack the ultimate pb.go. 34 | Vars []string `protobuf:"bytes,1,rep,name=vars,proto3" json:"vars,omitempty"` 35 | } 36 | 37 | func (x *Var) Reset() { 38 | *x = Var{} 39 | if protoimpl.UnsafeEnabled { 40 | mi := &file_base_proto_msgTypes[0] 41 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 42 | ms.StoreMessageInfo(mi) 43 | } 44 | } 45 | 46 | func (x *Var) String() string { 47 | return protoimpl.X.MessageStringOf(x) 48 | } 49 | 50 | func (*Var) ProtoMessage() {} 51 | 52 | func (x *Var) ProtoReflect() protoreflect.Message { 53 | mi := &file_base_proto_msgTypes[0] 54 | if protoimpl.UnsafeEnabled && x != nil { 55 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 56 | if ms.LoadMessageInfo() == nil { 57 | ms.StoreMessageInfo(mi) 58 | } 59 | return ms 60 | } 61 | return mi.MessageOf(x) 62 | } 63 | 64 | // Deprecated: Use Var.ProtoReflect.Descriptor instead. 65 | func (*Var) Descriptor() ([]byte, []int) { 66 | return file_base_proto_rawDescGZIP(), []int{0} 67 | } 68 | 69 | func (x *Var) GetVars() []string { 70 | if x != nil { 71 | return x.Vars 72 | } 73 | return nil 74 | } 75 | 76 | var File_base_proto protoreflect.FileDescriptor 77 | 78 | var file_base_proto_rawDesc = []byte{ 79 | 0x0a, 0x0a, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 80 | 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 81 | 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x25, 0x0a, 0x03, 0x56, 0x61, 0x72, 0x12, 0x1e, 0x0a, 0x04, 82 | 0x76, 0x61, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0x92, 83 | 0x01, 0x04, 0x08, 0x02, 0x10, 0x04, 0x52, 0x04, 0x76, 0x61, 0x72, 0x73, 0x42, 0x0a, 0x5a, 0x08, 84 | 0x2e, 0x3b, 0x61, 0x70, 0x69, 0x73, 0x69, 0x78, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 85 | } 86 | 87 | var ( 88 | file_base_proto_rawDescOnce sync.Once 89 | file_base_proto_rawDescData = file_base_proto_rawDesc 90 | ) 91 | 92 | func file_base_proto_rawDescGZIP() []byte { 93 | file_base_proto_rawDescOnce.Do(func() { 94 | file_base_proto_rawDescData = protoimpl.X.CompressGZIP(file_base_proto_rawDescData) 95 | }) 96 | return file_base_proto_rawDescData 97 | } 98 | 99 | var file_base_proto_msgTypes = make([]protoimpl.MessageInfo, 1) 100 | var file_base_proto_goTypes = []interface{}{ 101 | (*Var)(nil), // 0: Var 102 | } 103 | var file_base_proto_depIdxs = []int32{ 104 | 0, // [0:0] is the sub-list for method output_type 105 | 0, // [0:0] is the sub-list for method input_type 106 | 0, // [0:0] is the sub-list for extension type_name 107 | 0, // [0:0] is the sub-list for extension extendee 108 | 0, // [0:0] is the sub-list for field type_name 109 | } 110 | 111 | func init() { file_base_proto_init() } 112 | func file_base_proto_init() { 113 | if File_base_proto != nil { 114 | return 115 | } 116 | if !protoimpl.UnsafeEnabled { 117 | file_base_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { 118 | switch v := v.(*Var); i { 119 | case 0: 120 | return &v.state 121 | case 1: 122 | return &v.sizeCache 123 | case 2: 124 | return &v.unknownFields 125 | default: 126 | return nil 127 | } 128 | } 129 | } 130 | type x struct{} 131 | out := protoimpl.TypeBuilder{ 132 | File: 
protoimpl.DescBuilder{ 133 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 134 | RawDescriptor: file_base_proto_rawDesc, 135 | NumEnums: 0, 136 | NumMessages: 1, 137 | NumExtensions: 0, 138 | NumServices: 0, 139 | }, 140 | GoTypes: file_base_proto_goTypes, 141 | DependencyIndexes: file_base_proto_depIdxs, 142 | MessageInfos: file_base_proto_msgTypes, 143 | }.Build() 144 | File_base_proto = out.File 145 | file_base_proto_rawDesc = nil 146 | file_base_proto_goTypes = nil 147 | file_base_proto_depIdxs = nil 148 | } 149 | -------------------------------------------------------------------------------- /pkg/types/apisix/base.pb.validate.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-validate. DO NOT EDIT. 2 | // source: base.proto 3 | 4 | package apisix 5 | 6 | import ( 7 | "bytes" 8 | "errors" 9 | "fmt" 10 | "net" 11 | "net/mail" 12 | "net/url" 13 | "regexp" 14 | "strings" 15 | "time" 16 | "unicode/utf8" 17 | 18 | "github.com/golang/protobuf/ptypes" 19 | ) 20 | 21 | // ensure the imports are used 22 | var ( 23 | _ = bytes.MinRead 24 | _ = errors.New("") 25 | _ = fmt.Print 26 | _ = utf8.UTFMax 27 | _ = (*regexp.Regexp)(nil) 28 | _ = (*strings.Reader)(nil) 29 | _ = net.IPv4len 30 | _ = time.Duration(0) 31 | _ = (*url.URL)(nil) 32 | _ = (*mail.Address)(nil) 33 | _ = ptypes.DynamicAny{} 34 | ) 35 | 36 | // define the regex for a UUID once up-front 37 | var _base_uuidPattern = regexp.MustCompile("^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$") 38 | 39 | // Validate checks the field values on Var with the rules defined in the proto 40 | // definition for this message. If any rules are violated, an error is returned. 41 | func (m *Var) Validate() error { 42 | if m == nil { 43 | return nil 44 | } 45 | 46 | if l := len(m.GetVars()); l < 2 || l > 4 { 47 | return VarValidationError{ 48 | field: "Vars", 49 | reason: "value must contain between 2 and 4 items, inclusive", 50 | } 51 | } 52 | 53 | return nil 54 | } 55 | 56 | // VarValidationError is the validation error returned by Var.Validate if the 57 | // designated constraints aren't met. 58 | type VarValidationError struct { 59 | field string 60 | reason string 61 | cause error 62 | key bool 63 | } 64 | 65 | // Field function returns field value. 66 | func (e VarValidationError) Field() string { return e.field } 67 | 68 | // Reason function returns reason value. 69 | func (e VarValidationError) Reason() string { return e.reason } 70 | 71 | // Cause function returns cause value. 72 | func (e VarValidationError) Cause() error { return e.cause } 73 | 74 | // Key function returns key value. 75 | func (e VarValidationError) Key() bool { return e.key } 76 | 77 | // ErrorName returns error name. 
78 | func (e VarValidationError) ErrorName() string { return "VarValidationError" } 79 | 80 | // Error satisfies the builtin error interface 81 | func (e VarValidationError) Error() string { 82 | cause := "" 83 | if e.cause != nil { 84 | cause = fmt.Sprintf(" | caused by: %v", e.cause) 85 | } 86 | 87 | key := "" 88 | if e.key { 89 | key = "key for " 90 | } 91 | 92 | return fmt.Sprintf( 93 | "invalid %sVar.%s: %s%s", 94 | key, 95 | e.field, 96 | e.reason, 97 | cause) 98 | } 99 | 100 | var _ error = VarValidationError{} 101 | 102 | var _ interface { 103 | Field() string 104 | Reason() string 105 | Key() bool 106 | Cause() error 107 | ErrorName() string 108 | } = VarValidationError{} 109 | -------------------------------------------------------------------------------- /pkg/types/apisix/workaround.go: -------------------------------------------------------------------------------- 1 | package apisix 2 | 3 | import "encoding/json" 4 | 5 | // MarshalJSON implements the json.Marshaler interface. 6 | func (v *Var) MarshalJSON() ([]byte, error) { 7 | if v.Vars == nil { 8 | return []byte("[]"), nil 9 | } 10 | return json.Marshal(v.Vars) 11 | } 12 | -------------------------------------------------------------------------------- /pkg/types/event.go: -------------------------------------------------------------------------------- 1 | // Licensed to the Apache Software Foundation (ASF) under one or more 2 | // contributor license agreements. See the NOTICE file distributed with 3 | // this work for additional information regarding copyright ownership. 4 | // The ASF licenses this file to You under the Apache License, Version 2.0 5 | // (the "License"); you may not use this file except in compliance with 6 | // the License. You may obtain a copy of the License at 7 | // 8 | // http://www.apache.org/licenses/LICENSE-2.0 9 | // 10 | // Unless required by applicable law or agreed to in writing, software 11 | // distributed under the License is distributed on an "AS IS" BASIS, 12 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | // See the License for the specific language governing permissions and 14 | // limitations under the License. 15 | 16 | package types 17 | 18 | // EventType is the kind of event. 19 | type EventType string 20 | 21 | var ( 22 | // EventAdd represents the add event. 23 | EventAdd = EventType("add") 24 | // EventUpdate represents the update event. 25 | EventUpdate = EventType("update") 26 | // EventDelete represents the delete event. 27 | EventDelete = EventType("delete") 28 | ) 29 | 30 | // Event describes a specific event generated from the provisioner. 31 | type Event struct { 32 | Type EventType 33 | Object interface{} 34 | // Tombstone is only valid for delete event, 35 | // in such a case it stands for the final state 36 | // of the object. 37 | Tombstone interface{} 38 | 39 | // Revision is the revision that the event happened 40 | Revision int64 41 | } 42 | -------------------------------------------------------------------------------- /pkg/types/iptables.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | var ( 4 | // The inbound chain. 
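// Together with the chains below it is used by the iptables helpers to
// redirect pod traffic through the sidecar proxy (presumably hooked from the
// built-in PREROUTING/OUTPUT chains; see the traffic interception docs).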
5 | InboundChain = "APISIX_INBOUND" 6 | RedirectChain = "APISIX_REDIRECT" 7 | InboundRedirectChain = "APISIX_INBOUND_REDIRECT" 8 | OutputChain = "OUTPUT" 9 | PreRoutingChain = "PREROUTING" 10 | ) 11 | -------------------------------------------------------------------------------- /pkg/types/typeurl.go: -------------------------------------------------------------------------------- 1 | package types 2 | 3 | var ( 4 | // RouteConfigurationUrl is the RDS type url. 5 | RouteConfigurationUrl = "type.googleapis.com/envoy.config.route.v3.RouteConfiguration" 6 | // ClusterUrl is the Cluster type url. 7 | ClusterUrl = "type.googleapis.com/envoy.config.cluster.v3.Cluster" 8 | // ClusterLoadAssignmentUrl is the Cluster type url. 9 | ClusterLoadAssignmentUrl = "type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment" 10 | // ListenerUrl is the Listener type url. 11 | ListenerUrl = "type.googleapis.com/envoy.config.listener.v3.Listener" 12 | ) 13 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "runtime" 7 | "strconv" 8 | "time" 9 | ) 10 | 11 | var ( 12 | // The following fields are populated at build time using -ldflags -X. 13 | _version = "unknown" 14 | _gitRevision = "unknown" 15 | _timestamp = "0" 16 | ) 17 | 18 | // Short returns a short version representation. 19 | func Short() string { 20 | return _version 21 | } 22 | 23 | // String returns a readable version info. 24 | func String() string { 25 | buf := bytes.NewBuffer(nil) 26 | fmt.Fprintf(buf, "Version: %s\n", _version) 27 | fmt.Fprintf(buf, "Git SHA: %s\n", _gitRevision) 28 | fmt.Fprintf(buf, "Go Version: %s\n", runtime.Version()) 29 | fmt.Fprintf(buf, "OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH) 30 | 31 | ts, err := strconv.ParseInt(_timestamp, 10, 32) 32 | if err != nil { 33 | fmt.Fprintln(buf, "Build Date: unknown") 34 | } else { 35 | date := time.Unix(ts, 0) 36 | fmt.Fprintf(buf, "Build Date: %s\n", date.String()) 37 | } 38 | 39 | return buf.String() 40 | } 41 | -------------------------------------------------------------------------------- /pkg/version/version_test.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "fmt" 5 | "runtime" 6 | "testing" 7 | "time" 8 | 9 | "gotest.tools/assert" 10 | ) 11 | 12 | func TestVersion(t *testing.T) { 13 | _version = "x.y.z" 14 | _gitRevision = "9a8bc1dd" 15 | _timestamp = "1613616943" 16 | 17 | ver := String() 18 | expectedVersion := `Version: x.y.z 19 | Git SHA: 9a8bc1dd 20 | Go Version: %s 21 | OS/Arch: %s/%s 22 | Build Date: %s 23 | ` 24 | date := time.Unix(1613616943, 0) 25 | expectedVersion = fmt.Sprintf(expectedVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH, date.String()) 26 | assert.Equal(t, expectedVersion, ver, "bad version") 27 | } 28 | 29 | func TestShort(t *testing.T) { 30 | _version = "1.1.1" 31 | assert.Equal(t, Short(), _version) 32 | } 33 | -------------------------------------------------------------------------------- /scripts/kind-with-registry.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Licensed to the Apache Software Foundation (ASF) under one or more 4 | # contributor license agreements. See the NOTICE file distributed with 5 | # this work for additional information regarding copyright ownership. 
6 | # The ASF licenses this file to You under the Apache License, Version 2.0 7 | # (the "License"); you may not use this file except in compliance with 8 | # the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | set -o errexit 20 | set -o nounset 21 | set -o pipefail 22 | 23 | # desired cluster name; default is "apisix" 24 | KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-apisix}" 25 | 26 | if kind get clusters | grep -q ^apisix$ ; then 27 | echo "cluster already exists, moving on" 28 | exit 0 29 | fi 30 | 31 | # create registry container unless it already exists 32 | kind_version=$(kind version) 33 | kind_network='kind' 34 | reg_name='kind-registry' 35 | reg_port='5000' 36 | case "${kind_version}" in 37 | "kind v0.7."* | "kind v0.6."* | "kind v0.5."*) 38 | kind_network='bridge' 39 | ;; 40 | esac 41 | 42 | # create registry container unless it already exists 43 | running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" 44 | if [ "${running}" != 'true' ]; then 45 | docker run \ 46 | -d --restart=always -p "${reg_port}:5000" --name "${reg_name}" \ 47 | registry:2 48 | fi 49 | 50 | reg_host="${reg_name}" 51 | if [ "${kind_network}" = "bridge" ]; then 52 | reg_host="$(docker inspect -f '{{.NetworkSettings.IPAddress}}' "${reg_name}")" 53 | fi 54 | echo "Registry Host: ${reg_host}" 55 | 56 | # create a cluster with the local registry enabled in containerd 57 | cat <