├── terraform
│ ├── modules
│ │ ├── eks
│ │ │ ├── kubernetes-dashboard-admin.rbac.yaml
│ │ │ ├── README.md
│ │ │ ├── versions.tf
│ │ │ ├── kubernetes.tf
│ │ │ ├── outputs.tf
│ │ │ ├── efs.tf
│ │ │ ├── variables.tf
│ │ │ ├── security-groups.tf
│ │ │ └── main.tf
│ │ ├── rds
│ │ │ ├── versions.tf
│ │ │ ├── variables.tf
│ │ │ ├── outputs.tf
│ │ │ └── main.tf
│ │ └── vpc
│ │   ├── versions.tf
│ │   ├── main.tf
│ │   ├── outputs.tf
│ │   ├── variables.tf
│ │   ├── security_groups.tf
│ │   ├── README.md
│ │   └── subnets.tf
│ └── project
│   ├── s3_backend
│   │ ├── backend
│   │ │ └── terraform.tf
│   │ ├── outputs.tf
│   │ ├── variables.tf
│   │ ├── README.md
│   │ ├── Makefile
│   │ ├── remotestate.tf
│   │ └── make.py
│   └── main
│     ├── README.md
│     ├── Makefile
│     ├── outputs.tf
│     ├── main.tf.template
│     ├── dev.vars.json
│     └── variables.tf
├── viking-voip.png
├── AWS_Service_Quota_Increase.png
├── voip-full-k8s-network-diagram.jpg
├── scripts
│ ├── update-configmap.py
│ ├── consul
│ │ ├── dns-configMap-template.yaml
│ │ ├── dns-configMap.yaml.original
│ │ └── helm-consul-values.yaml
│ ├── k8s-service-account
│ │ └── service-account.yaml
│ ├── dashboard
│ │ └── dashboard-admin.yaml
│ ├── sip-proxy
│ │ └── sip-proxy-deployment.yaml
│ ├── sip-b2bua
│ │ └── sip-b2bua-deployment.yaml
│ ├── config-server
│ │ └── config-server-deployment.yaml
│ ├── database
│ │ └── viking_schema.sql
│ └── init.sh
├── .gitignore
├── persisten-volume.yaml
├── Makefile
└── README.md
/terraform/modules/eks/kubernetes-dashboard-admin.rbac.yaml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/viking-voip.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Viking-VoIP/full-voip-on-k8s/HEAD/viking-voip.png
--------------------------------------------------------------------------------
/AWS_Service_Quota_Increase.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Viking-VoIP/full-voip-on-k8s/HEAD/AWS_Service_Quota_Increase.png
--------------------------------------------------------------------------------
/voip-full-k8s-network-diagram.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Viking-VoIP/full-voip-on-k8s/HEAD/voip-full-k8s-network-diagram.jpg
--------------------------------------------------------------------------------
/terraform/project/s3_backend/backend/terraform.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   backend "s3" {
3 |     key = "terraform-aws/terraform.tfstate"
4 |   }
5 | }
--------------------------------------------------------------------------------
/terraform/modules/rds/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_version = ">= 0.12.26"
3 | 
4 |   required_providers {
5 |     aws = {
6 |       source  = "hashicorp/aws"
7 |       version = ">= 2.49"
8 |     }
9 |   }
10 | }
--------------------------------------------------------------------------------
/terraform/modules/vpc/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_version = ">= 0.12.26"
3 | 
4 |   required_providers {
5 |     aws = {
6 |       source  = "hashicorp/aws"
7 |       version = ">= 3.15"
8 |     }
9 |   }
10 | }
11 |
--------------------------------------------------------------------------------
/scripts/update-configmap.py:
--------------------------------------------------------------------------------
1 | import sys, os
2 | 
3 | # Reads a CoreDNS Corefile on stdin and re-emits it with a "consul" zone
4 | # appended, forwarding *.consul lookups to the Consul DNS service whose
5 | # address is passed in the CONSUL_DNS_IP environment variable.
6 | # NOTE: the match assumes the zone's closing brace is indented four spaces.
7 | for line in sys.stdin:
8 |     if line == "    }\n":
9 |         print("    }}\n    consul {{\n        errors\n        cache 30\n        forward . {}\n    }}".format(os.environ["CONSUL_DNS_IP"]))
10 |     else:
11 |         print(line[:-1])
--------------------------------------------------------------------------------
/scripts/consul/dns-configMap-template.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 |   labels:
5 |     addonmanager.kubernetes.io/mode: EnsureExists
6 |   name: kube-dns
7 |   namespace: kube-system
8 | data:
9 |   stubDomains: |
10 |     {"consul": ["{{ CONSUL_DNS_IP }}"]}
11 |
--------------------------------------------------------------------------------
/scripts/consul/dns-configMap.yaml.original:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 |   labels:
5 |     addonmanager.kubernetes.io/mode: EnsureExists
6 |   name: kube-dns
7 |   namespace: kube-system
8 | data:
9 |   stubDomains: |
10 |     {"consul": ["{{ CONSUL_DNS_IP }}"]}
11 |
--------------------------------------------------------------------------------
/terraform/modules/eks/README.md:
--------------------------------------------------------------------------------
1 | # Learn Terraform - Provision an EKS Cluster
2 |
3 | This repo is a companion repo to the [Provision an EKS Cluster learn guide](https://learn.hashicorp.com/terraform/kubernetes/provision-eks-cluster), containing
4 | Terraform configuration files to provision an EKS cluster on AWS.
--------------------------------------------------------------------------------
/terraform/project/s3_backend/outputs.tf:
--------------------------------------------------------------------------------
1 | output "S3_Bucket" {
2 |   value = "${aws_s3_bucket.tfrmstate.id}"
3 | }
4 | 
5 | output "dynamo_db_lock" {
6 |   value = "${aws_dynamodb_table.terraform_statelock.name}"
7 | }
8 | 
9 | output "ssh_keypair" {
10 |   sensitive = true
11 |   value     = "${tls_private_key.private_key}"
12 | }
--------------------------------------------------------------------------------
/terraform/project/s3_backend/variables.tf:
--------------------------------------------------------------------------------
1 | variable "aws_region" {
2 |   default = "us-east-1"
3 | }
4 | variable "aws_dynamodb_table" {
5 |   default = "tf-remote-state-lock"
6 | }
7 | 
8 | variable "s3_bucket_name" {
9 |   default = "terraform-bucket-lock"
10 | }
11 | 
12 | variable "key_name" {
13 |   default = "ssh_keypair_name"
14 | }
15 |
--------------------------------------------------------------------------------
/terraform/modules/vpc/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 |   region = "${var.region}"
3 | }
4 | 
5 | /*==== The VPC ======*/
6 | resource "aws_vpc" "vpc" {
7 |   cidr_block           = "${var.vpc_cidr}"
8 |   enable_dns_hostnames = true
9 |   enable_dns_support   = true
10 |   tags = {
11 |     Name        = "${var.environment}-vpc"
12 |     Environment = "${var.environment}"
13 |   }
14 | }
15 |
--------------------------------------------------------------------------------
/scripts/k8s-service-account/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 |   name: eks-admin
5 |   namespace: kube-system
6 | ---
7 | apiVersion: rbac.authorization.k8s.io/v1
8 | kind: ClusterRoleBinding
9 | metadata:
10 |   name: eks-admin
11 | roleRef:
12 |   apiGroup: rbac.authorization.k8s.io
13 |   kind: ClusterRole
14 |   name: cluster-admin
15 | subjects:
16 | - kind: ServiceAccount
17 |   name: eks-admin
18 |   namespace: kube-system
19 |
--------------------------------------------------------------------------------
/scripts/dashboard/dashboard-admin.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 |   name: admin-user
5 |   namespace: kubernetes-dashboard
6 | ---
7 | apiVersion: rbac.authorization.k8s.io/v1
8 | kind: ClusterRoleBinding
9 | metadata:
10 |   name: admin-user
11 | roleRef:
12 |   apiGroup: rbac.authorization.k8s.io
13 |   kind: ClusterRole
14 |   name: cluster-admin
15 | subjects:
16 | - kind: ServiceAccount
17 |   name: admin-user
18 |   namespace: kubernetes-dashboard
--------------------------------------------------------------------------------
/terraform/modules/vpc/outputs.tf:
--------------------------------------------------------------------------------
1 | output "vpc_id" {
2 |   value = "${aws_vpc.vpc.id}"
3 | }
4 | 
5 | output "public_subnets_ids" {
6 |   value = "${aws_subnet.public_subnet.*.id}"
7 | }
8 | 
9 | output "private_subnets_ids" {
10 |   value = "${aws_subnet.private_subnet.*.id}"
11 | }
12 | 
13 | output "vpc_cidr_block" {
14 |   description = "The CIDR block of the VPC"
15 |   value       = concat(aws_vpc.vpc.*.cidr_block, [""])[0]
16 | }
17 | 
18 | output "vpc_default_sg_id" {
19 |   value = "${aws_security_group.default.id}"
20 | }
--------------------------------------------------------------------------------
/terraform/modules/rds/variables.tf:
--------------------------------------------------------------------------------
1 | variable "db_instance_type" {
2 |   type = string
3 | }
4 | 
5 | variable "db_version" {
6 |   type = string
7 | }
8 | 
9 | variable "db_disk_size" {
10 |   type = string
11 | }
12 | 
13 | variable "db_username" {
14 |   type = string
15 | }
16 | 
17 | variable "db_password" {
18 |   type = string
19 | }
20 | 
21 | variable "public_subnets" {
22 |   type = list(string)
23 | }
24 | 
25 | variable "vpc_id" {
26 |   type = string
27 | }
28 | 
29 | variable "db_instance_name" {
30 |   type = string
31 | }
32 |
--------------------------------------------------------------------------------
/terraform/project/main/README.md:
--------------------------------------------------------------------------------
1 | # Terraform deployment
2 |
3 | ### Cluster configuration
4 | - Edit `dev.vars.json` and set the parameters as you want them.
5 | - To bring up the complete cluster, just launch it like:
6 | ```
7 | $ terraform init
8 | $ terraform apply -var-file dev.vars.json
9 | ```
10 |
11 | This Terraform configuration will:
12 | - Create a completely new VPC
13 | - Create a MySQL RDS
14 | - Create an EKS with:
15 | - 3 worker node groups:
16 | - 1 for the Backend and Consul - 1 node (http/nginx for xml_curl)
17 | - 1 for Kamailio - 1 node
18 | - 1 for freeSWITCH
19 |
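20 | Once the apply finishes, you can point `kubectl` at the new cluster. A minimal sketch, assuming the defaults in `dev.vars.json` (adjust the region and cluster name if you changed them):
21 | 
22 | ```
23 | $ aws eks update-kubeconfig --region us-east-1 --name dev-cluster
24 | $ kubectl get nodes
25 | ```
26 | 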
--------------------------------------------------------------------------------
/terraform/project/main/Makefile:
--------------------------------------------------------------------------------
1 | SHELL := /usr/bin/env bash
2 |
3 | #NDEF = $(if $(value $(1)),,$(error $(1) not set))
4 |
5 | .PHONY: init validate apply clean
6 |
7 | all: init validate apply
8 |
9 | init:
10 | 	@terraform show 2> /dev/null || terraform init -upgrade -reconfigure
11 | 
12 | list:
13 | 	@terraform state pull
14 | 
15 | validate:
16 | 	@terraform validate
17 | 
18 | apply:
19 | 	@terraform apply -auto-approve -var-file=dev.vars.json
20 | 
21 | destroy:
22 | 	@terraform destroy -auto-approve -var-file=dev.vars.json
23 | 
24 | clean:
25 | 	@rm -rf .terraform* terraform.tfstate*
--------------------------------------------------------------------------------
/terraform/modules/vpc/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {
2 |   description = "AWS deployment region."
3 |   default     = "us-east-1"
4 | }
5 | 
6 | variable "vpc_cidr" {
7 |   description = "CIDR to assign to this VPC"
8 | }
9 | 
10 | variable "environment" {
11 |   description = "On what environment is this running?"
12 | }
13 | 
14 | variable "availability_zones" {
15 |   description = "Availability zones to spread the subnets across"
16 | }
17 | 
18 | variable "public_subnets_cidr" {
19 |   description = "CIDRs for the public subnets"
20 | }
21 | 
22 | variable "private_subnets_cidr" {
23 |   description = "CIDRs for the private subnets"
24 | }
25 |
--------------------------------------------------------------------------------
/terraform/modules/vpc/security_groups.tf:
--------------------------------------------------------------------------------
1 | 
2 | /*==== VPC's Default Security Group ======*/
3 | resource "aws_security_group" "default" {
4 |   name        = "${var.environment}-default-sg"
5 |   description = "Default security group to allow inbound/outbound from the VPC"
6 |   vpc_id      = "${aws_vpc.vpc.id}"
7 |   depends_on  = [aws_vpc.vpc]
8 |   ingress {
9 |     from_port = 0
10 |     to_port   = 0
11 |     protocol  = "-1"
12 |     self      = true
13 |   }
14 | 
15 |   egress {
16 |     from_port = 0
17 |     to_port   = 0
18 |     protocol  = "-1"
19 |     self      = true
20 |   }
21 |   tags = {
22 |     Environment = "${var.environment}"
23 |   }
24 | }
--------------------------------------------------------------------------------
/scripts/sip-proxy/sip-proxy-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 |   annotations: {}
5 |   labels:
6 |     app: sip-proxy
7 |   name: sip-proxy
8 |   namespace: default
9 | spec:
10 |   minReadySeconds: 10
11 |   selector:
12 |     matchLabels:
13 |       app: sip-proxy
14 |   template:
15 |     metadata:
16 |       labels:
17 |         app: sip-proxy
18 |     spec:
19 |       containers:
20 |       - image: vikingvoip/sip-proxy
21 |         name: sip-proxy
22 |         securityContext:
23 |           privileged: true
24 |       hostNetwork: true
25 |       dnsPolicy: ClusterFirstWithHostNet
26 |       nodeSelector:
27 |         application: proxy
28 |
--------------------------------------------------------------------------------
/terraform/modules/rds/outputs.tf:
--------------------------------------------------------------------------------
1 | output "db_instance_data" {
2 |   description = "All outputs of the RDS module"
3 |   value       = module.rds_mysql
4 | }
5 | 
6 | output "db_instance_address" {
7 |   description = "The address of the RDS instance"
8 |   value       = module.rds_mysql.db_instance_address
9 | }
10 | 
11 | output "db_instance_arn" {
12 |   description = "The ARN of the RDS instance"
13 |   value       = module.rds_mysql.db_instance_arn
14 | }
15 | 
16 | output "db_instance_endpoint" {
17 |   description = "The connection endpoint"
18 |   value       = module.rds_mysql.db_instance_endpoint
19 | }
20 | 
21 | output "db_instance_name" {
22 |   description = "The database name"
23 |   value       = module.rds_mysql.db_instance_name
24 | }
25 |
--------------------------------------------------------------------------------
/terraform/modules/eks/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     aws = {
4 |       source  = "hashicorp/aws"
5 |       version = ">= 3.20.0"
6 |     }
7 | 
8 |     random = {
9 |       source  = "hashicorp/random"
10 |       version = "3.0.0"
11 |     }
12 | 
13 |     local = {
14 |       source  = "hashicorp/local"
15 |       version = "2.0.0"
16 |     }
17 | 
18 |     null = {
19 |       source  = "hashicorp/null"
20 |       version = "3.0.0"
21 |     }
22 | 
23 |     template = {
24 |       source  = "hashicorp/template"
25 |       version = "2.2.0"
26 |     }
27 | 
28 |     kubernetes = {
29 |       source  = "hashicorp/kubernetes"
30 |       version = ">= 2.0.1"
31 |     }
32 |   }
33 | 
34 |   required_version = "> 0.14"
35 | }
36 |
37 |
--------------------------------------------------------------------------------
/terraform/project/s3_backend/README.md:
--------------------------------------------------------------------------------
1 | # Terraform Backend Configuration
2 | S3 Backend Configuration for Terraform State
3 |
4 | Issue this command when initializing the project:
5 |
6 | `make init`
7 |
8 | Once initialized, apply with:
9 |
10 | `make apply`
11 |
12 | This project will create the necessary S3 bucket and DynamoDB backend for Terraform.
13 | - It will create an S3 bucket named `terraform-bucket-lock-XXXXXXXXXX`
14 | - It will create a DynamoDB table named `tf-remote-state-lock-XXXXXXXXXX`
15 |
16 | Where `XXXXXXXXXX` will be random numbers (same for both) so as not to conflict with existing buckets out there in the wild.
17 |
18 | Then it will set the s3 backend on the `main` project by sed'ing `../main/main.tf.template` into `../main/main.tf` (see the sketch below).
19 |
20 | __Note:__ The random digits in the bucket name will differ in your deployment.
21 |
22 |
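23 | For reference, the substitution that `make apply` performs looks roughly like this (a sketch; the bucket and table names are read from the Terraform outputs):
24 | 
25 | ```
26 | BUCKET=$(terraform output -json | jq '.S3_Bucket.value')
27 | DYNAMODB=$(terraform output -json | jq '.dynamo_db_lock.value')
28 | sed "s/{{ BUCKET_NAME }}/${BUCKET}/g; s/{{ DYNAMODB_LOCK }}/${DYNAMODB}/g" ../main/main.tf.template > ../main/main.tf
29 | ```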
--------------------------------------------------------------------------------
/terraform/project/main/outputs.tf:
--------------------------------------------------------------------------------
1 | output "vpc_id" {
2 |   value = module.vpc.vpc_id
3 | }
4 | 
5 | output "vpc_default_sg_id" {
6 |   description = "We need the VPC's default SG"
7 |   value       = module.vpc.vpc_default_sg_id
8 | }
9 | 
10 | output "db_instance_data" {
11 |   description = "All outputs of the RDS module"
12 |   value       = module.rds
13 | }
14 | 
15 | output "db_instance_address" {
16 |   description = "The address of the RDS instance"
17 |   value       = module.rds.db_instance_address
18 | }
19 | 
20 | output "db_instance_arn" {
21 |   description = "The ARN of the RDS instance"
22 |   value       = module.rds.db_instance_arn
23 | }
24 | 
25 | output "db_instance_endpoint" {
26 |   description = "The connection endpoint"
27 |   value       = module.rds.db_instance_endpoint
28 | }
29 | 
30 | output "db_instance_name" {
31 |   description = "The database name"
32 |   value       = module.rds.db_instance_name
33 | }
34 |
--------------------------------------------------------------------------------
/scripts/consul/helm-consul-values.yaml:
--------------------------------------------------------------------------------
1 | 
2 | global:
3 |   datacenter: voip-full
4 |   name: consul
5 | 
6 | ui:
7 |   replicas: 1
8 |   service:
9 |     enabled: true
10 |     type: 'LoadBalancer'
11 |     annotations: |
12 |       service.beta.kubernetes.io/aws-load-balancer-scheme: "internal"
13 |   # nodeSelector:
14 |   #   application: "support"
15 | 
16 | server:
17 |   enabled: "-"
18 |   image: null
19 |   replicas: 3
20 |   bootstrapExpect: 3 # Should be <= the replica count
21 | 
22 |   # storage and storageClass are the settings for configuring stateful
23 |   # storage for the server pods. storage should be set to the disk size of
24 |   # the attached volume. storageClass is the class of storage which defaults
25 |   # to null (the Kube cluster will pick the default).
26 |   # storage: 10Gi
27 |   storageClass: null
28 |   nodeSelector: |
29 |     application: "support"
30 | 
31 | client:
32 |   enabled: false
33 |   extraConfig: |
34 |     {"advertise_reconnect_timeout": "15m"}
35 | 
--------------------------------------------------------------------------------
/terraform/project/s3_backend/Makefile:
--------------------------------------------------------------------------------
1 | SHELL := /usr/bin/env bash
2 |
3 | NDEF = $(if $(value $(1)),,$(error $(1) not set))
4 |
5 | .PHONY: init validate apply clean
6 |
7 | all: init validate apply
8 |
9 | init:
10 | 	terraform init -reconfigure
11 | 
12 | validate:
13 | 	@terraform validate
14 | 
15 | apply: init
16 | 	@terraform apply -auto-approve
17 | 	@BUCKET=$$(terraform output -json | jq '.S3_Bucket.value'); \
18 | 	DYNAMODB=$$(terraform output -json | jq '.dynamo_db_lock.value'); \
19 | 	echo $${BUCKET} "/" $${DYNAMODB}; \
20 | 	sed "s/{{ BUCKET_NAME }}/$${BUCKET}/g; s/{{ DYNAMODB_LOCK }}/$${DYNAMODB}/g" ../main/main.tf.template > ../main/main.tf; \
21 | 	terraform state pull | jq --raw-output '.outputs.ssh_keypair.value.private_key_pem' | sed 's/\\n/\n/g' > ../../../ssh_keypair.pem; \
22 | 	echo "done."
23 | 
24 | destroy: init
25 | 	@terraform destroy -auto-approve
26 | 
27 | clean:
28 | 	@rm -rf .terraform* terraform.tfstate*
29 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .terraform*
2 | # Local .terraform directories
3 | **/.terraform/*
4 | .git-davidcsi
5 | # .tfstate files
6 | *.tfstate
7 | *.terraform.lock.hcl
8 | *.tfstate.*
9 | terraform-state.json
10 | # ignore templates output
11 | terraform/project/main/main.tf
12 | # Crash log files
13 | crash.log
14 |
15 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most
16 | # .tfvars files are managed as part of configuration and so should be included in
17 | # version control.
18 | #
19 | # example.tfvars
20 |
21 | # Ignore override files as they are usually used to override resources locally and so
22 | # are not checked in
23 | override.tf
24 | override.tf.json
25 | *_override.tf
26 | *_override.tf.json
27 |
28 | # Include override files you do wish to add to version control using negated pattern
29 | #
30 | # !example_override.tf
31 |
32 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
33 | # example: *tfplan*
34 |
--------------------------------------------------------------------------------
/terraform/modules/eks/kubernetes.tf:
--------------------------------------------------------------------------------
1 | # Kubernetes provider
2 | # https://learn.hashicorp.com/terraform/kubernetes/provision-eks-cluster#optional-configure-terraform-kubernetes-provider
3 | # To learn how to schedule deployments and services using the provider, go here: https://learn.hashicorp.com/terraform/kubernetes/deploy-nginx-kubernetes
4 |
5 | # The Kubernetes provider is included in this file so the EKS module can complete successfully. Otherwise, it throws an error when creating `kubernetes_config_map.aws_auth`.
6 | # You should **not** schedule deployments and services in this workspace. This keeps workspaces modular (one for provision EKS, another for scheduling Kubernetes resources) as per best practices.
7 |
8 | provider "kubernetes" {
9 |   host                   = data.aws_eks_cluster.cluster.endpoint
10 |   token                  = data.aws_eks_cluster_auth.cluster.token
11 |   cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
12 | }
13 |
--------------------------------------------------------------------------------
/terraform/modules/eks/outputs.tf:
--------------------------------------------------------------------------------
1 | output "cluster_id" {
2 |   description = "EKS cluster ID."
3 |   value       = module.eks.cluster_id
4 | }
5 | 
6 | output "cluster_endpoint" {
7 |   description = "Endpoint for EKS control plane."
8 |   value       = module.eks.cluster_endpoint
9 | }
10 | 
11 | output "cluster_security_group_id" {
12 |   description = "Security group ids attached to the cluster control plane."
13 |   value       = module.eks.cluster_security_group_id
14 | }
15 | 
16 | output "kubectl_config" {
17 |   description = "kubectl config as generated by the module."
18 |   value       = module.eks.kubeconfig
19 | }
20 | 
21 | output "config_map_aws_auth" {
22 |   description = "A kubernetes configuration to authenticate to this EKS cluster."
23 |   value       = module.eks.config_map_aws_auth
24 | }
25 | 
26 | output "region" {
27 |   description = "AWS region"
28 |   value       = var.region
29 | }
30 | 
31 | output "cluster_name" {
32 |   description = "Kubernetes Cluster Name"
33 |   value       = var.cluster_name
34 | }
35 |
36 |
--------------------------------------------------------------------------------
/scripts/sip-b2bua/sip-b2bua-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 |   annotations: {}
5 |   labels:
6 |     app: sip-b2bua
7 |   name: sip-b2bua
8 |   namespace: default
9 | spec:
10 |   minReadySeconds: 10
11 |   selector:
12 |     matchLabels:
13 |       app: sip-b2bua
14 |   template:
15 |     metadata:
16 |       labels:
17 |         app: sip-b2bua
18 |     spec:
19 |       containers:
20 |       - image: vikingvoip/sip-b2bua
21 |         imagePullPolicy: Always
22 |         volumeMounts:
23 |         - name: fslogs
24 |           mountPath: /fslogs
25 |         - name: efs
26 |           mountPath: /efs
27 |         name: sip-b2bua
28 |         securityContext:
29 |           privileged: true
30 |       volumes:
31 |       - name: fslogs
32 |         hostPath:
33 |           path: /var/log/freeswitch
34 |           type: DirectoryOrCreate
35 |       - name: efs
36 |         hostPath:
37 |           path: /efs
38 |           type: DirectoryOrCreate
39 |       hostNetwork: true
40 |       dnsPolicy: ClusterFirstWithHostNet
41 |       nodeSelector:
42 |         application: b2bua
43 |
--------------------------------------------------------------------------------
/terraform/project/s3_backend/remotestate.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 |   region = "${var.aws_region}"
3 | }
4 | 
5 | resource "random_id" "tc-rmstate" {
6 |   byte_length = 4
7 | }
8 | 
9 | resource "aws_s3_bucket" "tfrmstate" {
10 |   bucket        = "${var.s3_bucket_name}-${random_id.tc-rmstate.dec}-${random_id.tc-rmstate.dec}"
11 |   acl           = "private"
12 |   force_destroy = true
13 | 
14 |   tags = {
15 |     Name = "tf remote state"
16 |   }
17 | }
18 | 
19 | resource "aws_s3_bucket_object" "rmstate_folder" {
20 |   bucket = "${aws_s3_bucket.tfrmstate.id}"
21 |   key    = "terraform-aws/"
22 | }
23 | 
24 | resource "aws_dynamodb_table" "terraform_statelock" {
25 |   name           = "${var.aws_dynamodb_table}-${random_id.tc-rmstate.dec}"
26 |   read_capacity  = 20
27 |   write_capacity = 20
28 |   hash_key       = "LockID"
29 | 
30 |   attribute {
31 |     name = "LockID"
32 |     type = "S"
33 |   }
34 | }
35 | 
36 | resource "tls_private_key" "private_key" {
37 |   algorithm = "RSA"
38 |   rsa_bits  = 4096
39 | }
40 | 
41 | resource "aws_key_pair" "generated_key" {
42 |   key_name   = var.key_name
43 |   public_key = tls_private_key.private_key.public_key_openssh
44 | }
--------------------------------------------------------------------------------
/scripts/config-server/config-server-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   name: config-server-service
5 |   namespace: default
6 |   annotations:
7 |     service.beta.kubernetes.io/aws-load-balancer-internal: "0.0.0.0/0"
8 | spec:
9 |   selector:
10 |     app: config-server-app
11 |   ports:
12 |   - protocol: "TCP"
13 |     port: 8080
14 |     targetPort: 8080
15 |   type: LoadBalancer
16 | ---
17 | apiVersion: apps/v1
18 | kind: Deployment
19 | metadata:
20 |   annotations: {}
21 |   labels:
22 |     app: config-server-app
23 |   name: config-server-app
24 |   namespace: default
25 | spec:
26 |   minReadySeconds: 10
27 |   replicas: 1
28 |   selector:
29 |     matchLabels:
30 |       app: config-server-app
31 |   strategy:
32 |     rollingUpdate:
33 |       maxSurge: 1
34 |       maxUnavailable: 0
35 |     type: RollingUpdate
36 |   template:
37 |     metadata:
38 |       labels:
39 |         app: config-server-app
40 |     spec:
41 |       containers:
42 |       - image: vikingvoip/config-server
43 |         imagePullPolicy: Always
44 |         name: config-server
45 |         ports:
46 |         - containerPort: 8080
47 |       nodeSelector:
48 |         application: backend
--------------------------------------------------------------------------------
/persisten-volume.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 |   name: gp2-0
5 |   labels:
6 |     type: local
7 | spec:
8 |   storageClassName: gp2
9 |   capacity:
10 |     storage: 10Gi
11 |   volumeMode: Filesystem
12 |   accessModes:
13 |   - ReadWriteOnce
14 |   persistentVolumeReclaimPolicy: Recycle
15 |   claimRef:
16 |     namespace: consul
17 |     name: data-consul-consul-server-0
18 | #  hostPath:
19 | #    path: "/consul/data"
20 | #---
21 | #apiVersion: v1
22 | #kind: PersistentVolume
23 | #metadata:
24 | #  name: gp2-1
25 | #  labels:
26 | #    type: local
27 | #spec:
28 | #  storageClassName: gp2
29 | #  capacity:
30 | #    storage: 10Gi
31 | #  accessModes:
32 | #  - ReadWriteOnce
33 | #  persistentVolumeReclaimPolicy: Recycle
34 | #  claimRef:
35 | #    namespace: consul
36 | #    name: data-consul-consul-server-1
37 | #  hostPath:
38 | #    path: "/consul"
39 | #---
40 | #apiVersion: v1
41 | #kind: PersistentVolume
42 | #metadata:
43 | #  name: gp2-2
44 | #  labels:
45 | #    type: local
46 | #spec:
47 | #  storageClassName: gp2
48 | #  capacity:
49 | #    storage: 10Gi
50 | #  accessModes:
51 | #  - ReadWriteOnce
52 | #  persistentVolumeReclaimPolicy: Recycle
53 | #  claimRef:
54 | #    namespace: consul
55 | #    name: data-consul-consul-server-2
56 | #  hostPath:
57 | #    path: "/consul"
58 | #
59 |
--------------------------------------------------------------------------------
/terraform/project/s3_backend/make.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Run terraform commands and stream their output line by line.
4 | """
5 | 
6 | from subprocess import Popen, PIPE
7 | 
8 | __author__ = "David Villasmil"
9 | __version__ = "0.1.0"
10 | __license__ = "MIT"
11 | 
12 | 
13 | def exec_command(command):
14 |     """Run `command` in a shell and yield its stdout line by line."""
15 |     process = Popen(command, stdout=PIPE, shell=True)
16 |     print("Process: {}".format(process.pid))
17 |     while True:
18 |         line = process.stdout.readline().rstrip()
19 |         if not line:
20 |             break
21 |         yield line
22 | 
23 | 
24 | def init():
25 |     for line in exec_command('terraform init 2>&1'):
26 |         print('RES: {}'.format(line))
27 | 
28 | 
29 | def main():
30 |     """Main entry point of the app."""
31 |     init()
32 | 
33 | 
34 | if __name__ == "__main__":
35 |     # This is executed when run from the command line.
36 |     main()
--------------------------------------------------------------------------------
/terraform/modules/eks/efs.tf:
--------------------------------------------------------------------------------
1 | resource "aws_security_group" "efs_security_group" {
2 |   name        = "efs_security_group"
3 |   description = "Allow NFS from SGs"
4 |   vpc_id      = var.vpc_id
5 |   ingress {
6 |     description = "EFS mount target"
7 |     from_port   = 2049
8 |     to_port     = 2049
9 |     protocol    = "tcp"
10 |     cidr_blocks = ["0.0.0.0/0"]
11 |   }
12 |   egress {
13 |     from_port   = 0
14 |     to_port     = 0
15 |     protocol    = "-1"
16 |     cidr_blocks = ["0.0.0.0/0"]
17 |   }
18 | }
19 | 
20 | resource "aws_efs_file_system" "efs" {
21 |   creation_token   = "EFS Shared Data"
22 |   performance_mode = "generalPurpose"
23 |   tags = {
24 |     Name = "EFS Voicemail Backend"
25 |   }
26 | }
27 | 
28 | resource "aws_efs_mount_target" "efs_0" {
29 |   file_system_id  = "${aws_efs_file_system.efs.id}"
30 |   subnet_id       = flatten(var.private_subnets_ids)[0]
31 |   security_groups = ["${aws_security_group.efs_security_group.id}"]
32 | }
33 | 
34 | resource "aws_efs_mount_target" "efs_1" {
35 |   file_system_id  = "${aws_efs_file_system.efs.id}"
36 |   subnet_id       = flatten(var.private_subnets_ids)[1]
37 |   security_groups = ["${aws_security_group.efs_security_group.id}"]
38 | }
39 | 
40 | resource "aws_efs_mount_target" "efs_2" {
41 |   file_system_id  = "${aws_efs_file_system.efs.id}"
42 |   subnet_id       = flatten(var.private_subnets_ids)[2]
43 |   security_groups = ["${aws_security_group.efs_security_group.id}"]
44 | }
45 |
--------------------------------------------------------------------------------
/scripts/database/viking_schema.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE `cdr` (
2 |   `id` int NOT NULL AUTO_INCREMENT,
3 |   `datetime_start` datetime NOT NULL,
4 |   `sip_call_id` varchar(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
5 |   `sip_from_user` varchar(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
6 |   `sip_from_display` varchar(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
7 |   `sip_to_user` varchar(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
8 |   `datetime_answer` datetime NOT NULL,
9 |   `duration` int NOT NULL DEFAULT 0,
10 |   `rtp_audio_in_mos` DECIMAL(5,2) NOT NULL DEFAULT 0,
11 |   `rtp_audio_in_packet_count` int NOT NULL DEFAULT 0,
12 |   `rtp_audio_in_skip_packet_count` int NOT NULL DEFAULT 0,
13 |   `datetime_end` datetime NOT NULL,
14 |   `hangup_cause` varchar(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
15 |   `hangup_cause_q850` varchar(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
16 |   `remote_media_ip` char(15) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
17 |   `read_codec` char(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
18 |   `local_public_ip` char(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
19 |   `write_codec` char(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
20 |   `context` varchar(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
21 |   `last_app` varchar(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
22 |   `last_arg` char(64) CHARACTER SET utf8mb4 NOT NULL DEFAULT '',
23 |   PRIMARY KEY (id),
24 |   UNIQUE KEY (sip_call_id)
25 | ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
26 |
27 | CREATE INDEX datetime_from_to ON cdr (datetime_start,sip_from_user,sip_to_user);
28 |
--------------------------------------------------------------------------------
/terraform/project/main/main.tf.template:
--------------------------------------------------------------------------------
1 | terraform {
2 |   backend "s3" {
3 |     region         = "us-east-1"
4 |     encrypt        = true
5 |     bucket         = {{ BUCKET_NAME }}
6 |     key            = "terraform-state"
7 |     dynamodb_table = {{ DYNAMODB_LOCK }}
8 |   }
9 | }
10 | 
11 | module "vpc" {
12 |   source               = "../../modules/vpc"
13 |   region               = var.region
14 |   vpc_cidr             = var.vpc_cidr
15 |   environment          = var.environment
16 |   availability_zones   = var.availability_zones
17 |   public_subnets_cidr  = var.public_subnets_cidr
18 |   private_subnets_cidr = var.private_subnets_cidr
19 | }
20 | 
21 | module "rds" {
22 |   source           = "../../modules/rds"
23 |   db_instance_name = var.db_instance_name
24 |   db_instance_type = var.db_instance_type
25 |   db_version       = var.db_version
26 |   db_disk_size     = var.db_disk_size
27 |   db_username      = var.db_username
28 |   db_password      = var.db_password
29 |   public_subnets   = module.vpc.private_subnets_ids
30 |   vpc_id           = module.vpc.vpc_id
31 | }
32 | 
33 | module "eks" {
34 |   source              = "../../modules/eks"
35 |   cluster_name        = var.eks_cluster_name
36 |   kubernetes_version  = var.eks_kubernetes_version
37 |   region              = var.region
38 |   vpc_id              = module.vpc.vpc_id
39 |   vpc_cidr            = var.vpc_cidr
40 |   environment         = var.environment
41 |   availability_zones  = var.availability_zones
42 |   public_subnets_ids  = module.vpc.public_subnets_ids
43 |   private_subnets_ids = module.vpc.private_subnets_ids
44 |   eks_workers         = var.eks_workers
45 |   vpc_default_sg_id   = module.vpc.vpc_default_sg_id
46 | }
--------------------------------------------------------------------------------
/terraform/project/main/dev.vars.json:
--------------------------------------------------------------------------------
1 | {
2 |   "local_subscribers_regexp": "^7[0-9]{8}",
3 | 
4 |   "db_instance_name": "backend-db",
5 |   "db_instance_type": "db.t3.micro",
6 |   "db_disk_size": "40",
7 |   "db_username": "admin",
8 |   "db_password": "AbC123fgh#",
9 |   "db_version": "5.7.39",
10 | 
11 |   "region" : "us-east-1",
12 |   "vpc_cidr" : "10.0.0.0/16",
13 |   "environment" : "dev",
14 |   "availability_zones" : [ "us-east-1a", "us-east-1b", "us-east-1c" ],
15 |   "public_subnets_cidr" : ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"],
16 |   "private_subnets_cidr" : ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"],
17 | 
18 |   "eks_cluster_name": "dev-cluster",
19 |   "eks_kubernetes_version": "1.22",
20 |   "eks_workers": {
21 |     "support": {
22 |       "instance_type": "t3.medium",
23 |       "desired_capacity": "3",
24 |       "min_capacity": "3",
25 |       "max_capacity": "3",
26 |       "disk_size": "60",
27 |       "ssh_key_name": "ssh_keypair_name"
28 |     },
29 |     "backend": {
30 |       "instance_type": "t3.medium",
31 |       "desired_capacity": "1",
32 |       "min_capacity": "1",
33 |       "max_capacity": "1",
34 |       "disk_size": "40",
35 |       "ssh_key_name": "ssh_keypair_name"
36 |     },
37 |     "sip-proxy": {
38 |       "instance_type": "t3.medium",
39 |       "desired_capacity": "1",
40 |       "min_capacity": "1",
41 |       "max_capacity": "1",
42 |       "disk_size": "40",
43 |       "ssh_key_name": "ssh_keypair_name"
44 |     },
45 |     "sip-b2bua": {
46 |       "instance_type": "t3.medium",
47 |       "desired_capacity": "1",
48 |       "min_capacity": "1",
49 |       "max_capacity": "1",
50 |       "disk_size": "40",
51 |       "ssh_key_name": "ssh_keypair_name"
52 |     }
53 |   }
54 | }
--------------------------------------------------------------------------------
/terraform/modules/rds/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 |   region = "us-east-1"
3 | }
4 | 
5 | module "rds_mysql" {
6 |   source            = "git::https://github.com/tmknom/terraform-aws-rds-mysql.git?ref=tags/2.0.0"
7 |   identifier        = var.db_instance_name
8 |   engine_version    = var.db_version
9 |   instance_class    = var.db_instance_type
10 |   allocated_storage = var.db_disk_size
11 |   username          = var.db_username
12 |   password          = var.db_password
13 | 
14 |   subnet_ids = var.public_subnets
15 |   vpc_id     = var.vpc_id
16 |   #source_cidr_blocks = tolist(module.vpc.vpc_cidr_block)
17 |   source_cidr_blocks = ["10.0.0.0/16"]
18 | 
19 |   maintenance_window = "mon:10:10-mon:10:40"
20 |   backup_window      = "09:10-09:40"
21 |   apply_immediately  = false
22 |   multi_az           = false
23 |   port               = 3306
24 |   name               = "viking"
25 |   storage_type       = "gp2"
26 |   iops               = 0
27 |   auto_minor_version_upgrade  = false
28 |   allow_major_version_upgrade = false
29 |   backup_retention_period     = 0
30 |   storage_encrypted           = false
31 |   kms_key_id                  = ""
32 |   deletion_protection         = false
33 |   final_snapshot_identifier   = "final-snapshot"
34 |   skip_final_snapshot         = true
35 |   enabled_cloudwatch_logs_exports = []
36 |   monitoring_interval             = 0
37 |   monitoring_role_arn             = ""
38 |   iam_database_authentication_enabled = false
39 |   copy_tags_to_snapshot               = false
40 |   publicly_accessible                 = true
41 |   license_model                       = "general-public-license"
42 |   major_engine_version                = "5.7"
43 |   description                         = "This is the database backend for the VoIP platform"
44 | 
45 |   tags = {
46 |     Environment = "dev"
47 |   }
48 | }
--------------------------------------------------------------------------------
/terraform/project/main/variables.tf:
--------------------------------------------------------------------------------
1 | variable "eks_cluster_name" {
2 |   type = string
3 | }
4 | variable "eks_kubernetes_version" {
5 |   type = string
6 | }
7 | 
8 | variable "db_instance_name" {
9 |   type = string
10 | }
11 | 
12 | variable "db_instance_type" {
13 |   type = string
14 | }
15 | 
16 | variable "db_disk_size" {
17 |   type = string
18 | }
19 | 
20 | variable "db_username" {
21 |   type = string
22 | }
23 | 
24 | variable "db_password" {
25 |   type = string
26 | }
27 | 
28 | variable "db_version" {
29 |   type = string
30 | }
31 | 
32 | variable "region" {
33 |   type = string
34 | }
35 | 
36 | variable "vpc_cidr" {
37 |   type = string
38 | }
39 | 
40 | variable "environment" {
41 |   type = string
42 | }
43 | 
44 | variable "availability_zones" {
45 |   type = list(string)
46 | }
47 | 
48 | variable "public_subnets_cidr" {
49 |   type = list(string)
50 | }
51 | 
52 | variable "private_subnets_cidr" {
53 |   type = list(string)
54 | }
55 | 
56 | variable "eks_workers" {
57 |   type = object({
58 |     support = object({
59 |       instance_type    = string
60 |       desired_capacity = string
61 |       min_capacity     = string
62 |       max_capacity     = string
63 |       disk_size        = string
64 |       ssh_key_name     = string
65 |     })
66 |     backend = object({
67 |       instance_type    = string
68 |       desired_capacity = string
69 |       min_capacity     = string
70 |       max_capacity     = string
71 |       disk_size        = string
72 |       ssh_key_name     = string
73 |     })
74 |     sip-proxy = object({
75 |       instance_type    = string
76 |       desired_capacity = string
77 |       min_capacity     = string
78 |       max_capacity     = string
79 |       disk_size        = string
80 |       ssh_key_name     = string
81 |     })
82 |     sip-b2bua = object({
83 |       instance_type    = string
84 |       desired_capacity = string
85 |       min_capacity     = string
86 |       max_capacity     = string
87 |       disk_size        = string
88 |       ssh_key_name     = string
89 |     })
90 |   })
91 | }
92 |
--------------------------------------------------------------------------------
/terraform/modules/eks/variables.tf:
--------------------------------------------------------------------------------
1 | variable "cluster_name" {
2 |   description = "Cluster Name"
3 | }
4 | 
5 | variable "kubernetes_version" {
6 |   description = "Kubernetes version as per AWS specs"
7 | }
8 | 
9 | variable "region" {
10 |   description = "AWS deployment region."
11 | }
12 | 
13 | variable "vpc_id" {
14 |   description = "VPC ID where to deploy the EKS"
15 | }
16 | 
17 | variable "vpc_cidr" {
18 |   description = "CIDR to assign to this VPC"
19 | }
20 | 
21 | variable "environment" {
22 |   description = "On what environment is this running?"
23 | }
24 | 
25 | variable "availability_zones" {
26 |   description = "Availability zones to deploy into"
27 | }
28 | 
29 | variable "public_subnets_ids" {
30 |   description = "IDs of the public subnets"
31 | }
32 | 
33 | variable "private_subnets_ids" {
34 |   description = "IDs of the private subnets"
35 | }
36 | 
37 | variable "vpc_default_sg_id" {
38 |   description = "Default VPC security group"
39 | }
40 | 
41 | variable "eks_workers" {
42 |   type = object({
43 |     support = object({
44 |       instance_type    = string
45 |       desired_capacity = string
46 |       min_capacity     = string
47 |       max_capacity     = string
48 |       disk_size        = string
49 |       ssh_key_name     = string
50 |     })
51 |     backend = object({
52 |       instance_type    = string
53 |       desired_capacity = string
54 |       min_capacity     = string
55 |       max_capacity     = string
56 |       disk_size        = string
57 |       ssh_key_name     = string
58 |     })
59 |     sip-proxy = object({
60 |       instance_type    = string
61 |       desired_capacity = string
62 |       min_capacity     = string
63 |       max_capacity     = string
64 |       disk_size        = string
65 |       ssh_key_name     = string
66 |     })
67 |     sip-b2bua = object({
68 |       instance_type    = string
69 |       desired_capacity = string
70 |       min_capacity     = string
71 |       max_capacity     = string
72 |       disk_size        = string
73 |       ssh_key_name     = string
74 |     })
75 |   })
76 | }
77 |
--------------------------------------------------------------------------------
/terraform/modules/vpc/README.md:
--------------------------------------------------------------------------------
1 | # Simple VPC
2 |
3 | Configuration in this directory creates a set of VPC resources which may be sufficient for a development environment.
4 |
5 | There is a public and a private subnet created per availability zone, in addition to a single NAT Gateway shared between all 3 availability zones.
6 |
7 | This configuration uses Availability Zone IDs and Availability Zone names for demonstration purposes. Normally, you need to specify only names or IDs.
8 |
9 | [Read more about AWS regions, availability zones and local zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-regions-availability-zones).
10 |
11 | ## Usage
12 |
13 | To run this example you need to execute:
14 |
15 | ```bash
16 | $ terraform init
17 | $ terraform plan
18 | $ terraform apply
19 | ```
20 |
21 | Note that this example may create resources which can cost money (AWS Elastic IP, for example). Run `terraform destroy` when you don't need these resources.
22 |
23 |
24 | ## Requirements
25 |
26 | | Name | Version |
27 | |------|---------|
28 | | [terraform](#requirement\_terraform) | >= 0.12.26 |
29 | | [aws](#requirement\_aws) | >= 3.15 |
30 |
31 | ## Providers
32 |
33 | No providers.
34 |
35 | ## Modules
36 |
37 | | Name | Source | Version |
38 | |------|--------|---------|
39 | | [vpc](#module\_vpc) | ../../ | |
40 |
41 | ## Resources
42 |
43 | No resources.
44 |
45 | ## Inputs
46 |
47 | No inputs.
48 |
49 | ## Outputs
50 |
51 | | Name | Description |
52 | |------|-------------|
53 | | [azs](#output\_azs) | A list of availability zones specified as argument to this module |
54 | | [nat\_public\_ips](#output\_nat\_public\_ips) | List of public Elastic IPs created for AWS NAT Gateway |
55 | | [private\_subnets](#output\_private\_subnets) | List of IDs of private subnets |
56 | | [public\_subnets](#output\_public\_subnets) | List of IDs of public subnets |
57 | | [vpc\_cidr\_block](#output\_vpc\_cidr\_block) | The CIDR block of the VPC |
58 | | [vpc\_id](#output\_vpc\_id) | The ID of the VPC |
59 |
60 |
--------------------------------------------------------------------------------
/terraform/modules/vpc/subnets.tf:
--------------------------------------------------------------------------------
1 | /* Internet gateway for the public subnet */
2 | resource "aws_internet_gateway" "ig" {
3 |   vpc_id = "${aws_vpc.vpc.id}"
4 |   tags = {
5 |     Name        = "${var.environment}-igw"
6 |     Environment = "${var.environment}"
7 |   }
8 | }
9 | 
10 | /* Elastic IP for NAT */
11 | resource "aws_eip" "nat_eip" {
12 |   vpc        = true
13 |   depends_on = [aws_internet_gateway.ig]
14 | }
15 | 
16 | /* NAT */
17 | resource "aws_nat_gateway" "nat" {
18 |   allocation_id = "${aws_eip.nat_eip.id}"
19 |   subnet_id     = "${element(aws_subnet.public_subnet.*.id, 0)}"
20 |   depends_on    = [aws_internet_gateway.ig]
21 |   tags = {
22 |     Name        = "nat"
23 |     Environment = "${var.environment}"
24 |   }
25 | }
26 | 
27 | /* Public subnet */
28 | resource "aws_subnet" "public_subnet" {
29 |   vpc_id                  = "${aws_vpc.vpc.id}"
30 |   count                   = "${length(var.public_subnets_cidr)}"
31 |   cidr_block              = "${element(var.public_subnets_cidr, count.index)}"
32 |   availability_zone       = "${element(var.availability_zones, count.index)}"
33 |   map_public_ip_on_launch = true
34 |   tags = {
35 |     Name        = "${var.environment}-${element(var.availability_zones, count.index)}-public-subnet"
36 |     Environment = "${var.environment}"
37 |   }
38 | }
39 | 
40 | /* Private subnet */
41 | resource "aws_subnet" "private_subnet" {
42 |   vpc_id                  = "${aws_vpc.vpc.id}"
43 |   count                   = "${length(var.private_subnets_cidr)}"
44 |   cidr_block              = "${element(var.private_subnets_cidr, count.index)}"
45 |   availability_zone       = "${element(var.availability_zones, count.index)}"
46 |   map_public_ip_on_launch = false
47 |   tags = {
48 |     Name        = "${var.environment}-${element(var.availability_zones, count.index)}-private-subnet"
49 |     Environment = "${var.environment}"
50 |   }
51 | }
52 | 
53 | /* Routing table for private subnet */
54 | resource "aws_route_table" "private" {
55 |   vpc_id = "${aws_vpc.vpc.id}"
56 |   tags = {
57 |     Name        = "${var.environment}-private-route-table"
58 |     Environment = "${var.environment}"
59 |   }
60 | }
61 | 
62 | /* Routing table for public subnet */
63 | resource "aws_route_table" "public" {
64 |   vpc_id = "${aws_vpc.vpc.id}"
65 |   tags = {
66 |     Name        = "${var.environment}-public-route-table"
67 |     Environment = "${var.environment}"
68 |   }
69 | }
70 | 
71 | resource "aws_route" "public_internet_gateway" {
72 |   route_table_id         = "${aws_route_table.public.id}"
73 |   destination_cidr_block = "0.0.0.0/0"
74 |   gateway_id             = "${aws_internet_gateway.ig.id}"
75 | }
76 | 
77 | resource "aws_route" "private_nat_gateway" {
78 |   route_table_id         = "${aws_route_table.private.id}"
79 |   destination_cidr_block = "0.0.0.0/0"
80 |   nat_gateway_id         = "${aws_nat_gateway.nat.id}"
81 | }
82 | 
83 | /* Route table associations */
84 | resource "aws_route_table_association" "public" {
85 |   count          = "${length(var.public_subnets_cidr)}"
86 |   subnet_id      = "${element(aws_subnet.public_subnet.*.id, count.index)}"
87 |   route_table_id = "${aws_route_table.public.id}"
88 | }
89 | 
90 | resource "aws_route_table_association" "private" {
91 |   count          = "${length(var.private_subnets_cidr)}"
92 |   subnet_id      = "${element(aws_subnet.private_subnet.*.id, count.index)}"
93 |   route_table_id = "${aws_route_table.private.id}"
94 | }
--------------------------------------------------------------------------------
/terraform/modules/eks/security-groups.tf:
--------------------------------------------------------------------------------
1 | resource "aws_security_group" "sg_ssh_mgmt" {
2 |   name        = "sg_ssh_mgmt"
3 |   description = "Security group to allow SSH management"
4 |   vpc_id      = var.vpc_id
5 | }
6 | 
7 | resource "aws_security_group_rule" "sg_ssh_mgmt_rule" {
8 |   type              = "ingress"
9 |   from_port         = 22
10 |   to_port           = 22
11 |   protocol          = "tcp"
12 |   cidr_blocks       = ["0.0.0.0/0"]
13 |   security_group_id = aws_security_group.sg_ssh_mgmt.id
14 | }
15 | 
16 | ## On the SIP-PROXY we should allow all UDP/TCP on ports 5060 and everything from SIP-B2BUA, BACKEND and SUPPORT and allow everything OUT
17 | resource "aws_security_group" "sg_sip_proxy" {
18 |   name        = "sg_sip_proxy"
19 |   description = "Security group to allow 5060 to the SIP Proxy from the Public Internet"
20 |   vpc_id      = var.vpc_id
21 | }
22 | 
23 | resource "aws_security_group_rule" "sg_sip_proxy_public_5060_rule_tcp" {
24 |   type              = "ingress"
25 |   from_port         = 5060
26 |   to_port           = 5060
27 |   protocol          = "tcp"
28 |   cidr_blocks       = ["0.0.0.0/0"]
29 |   security_group_id = aws_security_group.sg_sip_proxy.id
30 | }
31 | 
32 | resource "aws_security_group_rule" "sg_sip_proxy_public_5060_rule_udp" {
33 |   type              = "ingress"
34 |   from_port         = 5060
35 |   to_port           = 5060
36 |   protocol          = "udp"
37 |   cidr_blocks       = ["0.0.0.0/0"]
38 |   security_group_id = aws_security_group.sg_sip_proxy.id
39 | }
40 | 
41 | resource "aws_security_group_rule" "sg_sip_proxy_egress_rule" {
42 |   type              = "egress"
43 |   from_port         = 0
44 |   to_port           = 0
45 |   protocol          = "-1"
46 |   cidr_blocks       = ["0.0.0.0/0"]
47 |   security_group_id = aws_security_group.sg_sip_proxy.id
48 | }
49 | 
50 | ## On the SIP-B2BUA we should allow all UDP on ports 16000-33000 and everything from SIP-PROXY, BACKEND and SUPPORT
51 | resource "aws_security_group" "sg_sip_b2bua" {
52 |   name        = "sg_sip_b2bua"
53 |   description = "Security group to allow everything from the SIP Proxy SG and rtp from the Public Internet"
54 |   vpc_id      = var.vpc_id
55 | }
56 | 
57 | resource "aws_security_group_rule" "sg_sip_b2bua_rtp_from_public_rule" {
58 |   type              = "ingress"
59 |   from_port         = 16000
60 |   to_port           = 33000
61 |   protocol          = "udp"
62 |   cidr_blocks       = ["0.0.0.0/0"]
63 |   security_group_id = aws_security_group.sg_sip_b2bua.id
64 | }
65 | 
66 | resource "aws_security_group_rule" "sg_sip_b2bua_egress_rule" {
67 |   type              = "egress"
68 |   from_port         = 0
69 |   to_port           = 0
70 |   protocol          = "-1"
71 |   cidr_blocks       = ["0.0.0.0/0"]
72 |   security_group_id = aws_security_group.sg_sip_b2bua.id
73 | }
74 | 
75 | ## On the BACKEND we should allow everything from SIP-PROXY, SIP_B2BUA and SUPPORT
76 | resource "aws_security_group" "sg_backend_sg" {
77 |   name        = "sg_backend_sg"
78 |   description = "Security group for backend instances"
79 |   vpc_id      = var.vpc_id
80 | }
81 | 
82 | resource "aws_security_group_rule" "sg_sip_backend_egress_rule" {
83 |   type              = "egress"
84 |   from_port         = 0
85 |   to_port           = 0
86 |   protocol          = "-1"
87 |   cidr_blocks       = ["0.0.0.0/0"]
88 |   security_group_id = aws_security_group.sg_backend_sg.id
89 | }
90 | 
91 | ## On the SUPPORT we should allow everything from SIP-PROXY, SIP_B2BUA and BACKEND
92 | 
93 | resource "aws_security_group" "sg_support_sg" {
94 |   name        = "sg_support_sg"
95 |   description = "Security group for support instances"
96 |   vpc_id      = var.vpc_id
97 | }
98 | 
99 | resource "aws_security_group_rule" "sg_sip_support_egress_rule" {
100 |   type              = "egress"
101 |   from_port         = 0
102 |   to_port           = 0
103 |   protocol          = "-1"
104 |   cidr_blocks       = ["0.0.0.0/0"]
105 |   security_group_id = aws_security_group.sg_support_sg.id
106 | }
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | SHELL := /usr/bin/env bash
2 |
3 | #NDEF = $(if $(value $(1)),,$(error $(1) not set))
4 |
5 | NO_COLOR=$(shell echo -e "\e[m")
6 | WARN_COLOR=$(shell echo -e "\e[1;7m")
7 | ERROR_COLOR=$(shell echo -e "\e[1;31m")
8 | OK_COLOR=$(shell echo -e "\e[1;34m")
9 | HIGHLIGHT_COLOR=$(shell echo -e "\e[93;1m")
10 |
11 | report_success = "$(OK_COLOR)RESULT:$(NO_COLOR) $(HIGHLIGHT_COLOR)$1$(NO_COLOR) $2 Success"
12 |
13 | report_failure = "$(ERROR_COLOR)RESULT: $(HIGHLIGHT_COLOR)$1$(NO_COLOR) $2 Failed, exiting...$(ERROR_COLOR)"
14 |
15 | .PHONY: init validate apply clean
16 |
17 | help:
18 | 	@echo ""
19 | 	@echo "This makefile:"
20 | 	@echo " - "make init-backend": Will initialize the backend (S3 bucket and DynamoDB Table for terraform state)."
21 | 	@echo " - "make apply-backend": Will initialize + build the backend (S3 bucket and DynamoDB Table for terraform state)."
22 | 	@echo " - "make init-main": Will initialize the main project (This will prepare the project to build)."
23 | 	@echo " - "make apply-main": Will initialize + build the whole voip platform (Will create all required resources on AWS)."
24 | 	@echo " - "make destroy-backend": Will destroy all backend resources."
25 | 	@echo " - "make destroy-main": Will destroy the whole voip platform resources."
26 | 	@echo " - "make destroy-all": Will destroy all resources."
27 | 	@echo " - "make clean": Will remove all terraform files (.terraform, etc)."
28 | 	@echo " - "make init-script": deploy all images and services"
29 | 	@echo ""
30 | 
31 | all: apply-main
32 | 
33 | init-backend:
34 | 	@echo "$(WARN_COLOR)WARNING:$(NO_COLOR) Initializing terraform backend"
35 | 	@cd terraform/project/s3_backend && $(MAKE) init && echo $(call report_success,"Backend","Initialize") || (echo $(call report_failure,"Backend","Initialize") && exit -1)
36 | 
37 | apply-backend: #init-backend
38 | 	@echo "$(WARN_COLOR)WARNING:$(NO_COLOR) Applying Backend will create the S3 bucket and DynamoDB Table."
39 | 	@cd terraform/project/s3_backend && $(MAKE) apply && echo $(call report_success,"Backend","Apply") || (echo $(call report_failure,"Backend","Apply") && exit -1)
40 | 
41 | list-backend: #init-backend
42 | 	@echo "$(WARN_COLOR)INFO:$(NO_COLOR) Showing the Backend terraform state."
43 | 	@cd terraform/project/s3_backend && $(MAKE) list && echo $(call report_success,"Backend","Show") || (echo $(call report_failure,"Backend","Show") && exit -1)
44 | 
45 | init-main: #apply-backend
46 | 	@echo "$(WARN_COLOR)WARNING:$(NO_COLOR) Initializing $(OK_COLOR)Main$(NO_COLOR) project."
47 | 	@cd terraform/project/main && $(MAKE) init && echo $(call report_success,"Main","Initialize") || (echo $(call report_failure,"Main","Initialize") && exit -1)
48 | 
49 | apply-main: #init-main
50 | 	@echo "$(WARN_COLOR)WARNING:$(NO_COLOR) Applying $(OK_COLOR)main$(NO_COLOR) project -- This will create all required resources on AWS."
51 | 	@cd terraform/project/main && $(MAKE) apply && echo $(call report_success,"Main","Apply") || (echo $(call report_failure,"Main","Apply") && exit -1)
52 | #cd scripts; bash ./init.sh && echo $(call report_success,"Scripts","Execute") || (echo $(call report_failure,"Scripts","Execute") && exit -1)
53 | 
54 | list-main: #init-backend
55 | 	@echo "$(WARN_COLOR)INFO:$(NO_COLOR) Showing the Main terraform state."
56 | 	@cd terraform/project/main && $(MAKE) list && echo $(call report_success,"Main","Show") || (echo $(call report_failure,"Main","Show") && exit -1)
57 | 
58 | init-script:
59 | 	cd scripts; bash ./init.sh && echo $(call report_success,"Scripts","Execute") || (echo $(call report_failure,"Scripts","Execute") && exit -1)
60 | 
61 | destroy-backend:
62 | 	@echo '$(ERROR_COLOR)***** WARNING: This will DESTROY all resources!$(ERROR_COLOR) *****'
63 | 
64 | 	@# Let's ask the user if we should continue, since this is going to destroy everything
65 | 
66 | 	@while [ -z "$$CONTINUE" ]; do \
67 | 		read -r -p "Type Y or y to continue, anything else to exit. [y/N] " CONTINUE; \
68 | 	done ; \
69 | 	if [ ! $$CONTINUE == "y" ]; then \
70 | 		if [ ! $$CONTINUE == "Y" ]; then \
71 | 			echo "Exiting." ; exit 1 ; \
72 | 		fi \
73 | 	fi
74 | 	@cd terraform/project/s3_backend && $(MAKE) destroy && echo "$(OK_COLOR)RESULT:$(NO_COLOR) $(HIGHLIGHT_COLOR)Backend$(NO_COLOR) Destroy Success" || echo "$(ERROR_COLOR)RESULT:$(ERROR_COLOR) $(HIGHLIGHT_COLOR)Backend$(NO_COLOR) Destroy Failed, exiting..."
75 | 
76 | destroy-main:
77 | 	@echo '$(ERROR_COLOR)***** WARNING: This will DESTROY all resources!$(ERROR_COLOR) *****'
78 | 
79 | 	@# Let's ask the user if we should continue, since this is going to destroy everything
80 | 
81 | 	@while [ -z "$$CONTINUE" ]; do \
82 | 		read -r -p "Type Y or y to continue, anything else to exit. [y/N] " CONTINUE; \
83 | 	done ; \
84 | 	if [ ! $$CONTINUE == "y" ]; then \
85 | 		if [ ! $$CONTINUE == "Y" ]; then \
86 | 			echo "Exiting." ; exit 1 ; \
87 | 		fi \
88 | 	fi
89 | #@kubectl delete -f scripts/config-server/config-server-deployment.yaml
90 | 	@cd terraform/project/main && $(MAKE) destroy && echo "$(OK_COLOR)RESULT:$(NO_COLOR) $(HIGHLIGHT_COLOR)Main$(NO_COLOR) Destroy Success" || echo "$(ERROR_COLOR)RESULT:$(ERROR_COLOR) $(HIGHLIGHT_COLOR)Main$(NO_COLOR) Destroy Failed, exiting..."
91 | 
92 | destroy-all: destroy-main destroy-backend
93 | 
94 | 
95 | clean:
96 | 	@cd terraform/project/main && rm -rf .terraform/ terraform.tfstate* .terraform*
97 | 	@cd terraform/project/s3_backend && rm -rf .terraform/ terraform.tfstate* .terraform*
98 |
--------------------------------------------------------------------------------
/terraform/modules/eks/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = var.region
3 | }
4 |
5 | resource "random_integer" "priority" {
6 | min = 0
7 | max = 2
8 | }
9 |
10 | module "eks" {
11 | version = "17.24.0"
12 | source = "terraform-aws-modules/eks/aws"
13 | cluster_name = var.cluster_name
14 | cluster_version = var.kubernetes_version
15 | subnets = var.private_subnets_ids
16 |
17 | tags = {
18 | Environment = var.environment
19 | }
20 |
21 | vpc_id = var.vpc_id
22 |
23 | worker_groups = [
24 | {
25 | name = "support"
26 | instance_type = var.eks_workers.support.instance_type
27 | asg_desired_capacity = var.eks_workers.support.desired_capacity
28 | key_name = var.eks_workers.support.ssh_key_name
29 | subnets = var.private_subnets_ids
30 | # I could not find a way of labeling the nodes in the groups, for later use with "nodeSelector",
31 | # so I'm doing it via additional_userdata. We need to wait until the node has attached to the cluster
32 | # before labeling.
33 | additional_userdata = <<-EOT
# ... (node-labeling and EFS-mount userdata, elided in this dump; it ends by appending the mount to /etc/fstab) ...
140 | EOT
141 | associate_public_ip_address = true
142 | public_ip = true
143 | additional_security_group_ids = [
144 | aws_security_group.sg_ssh_mgmt.id,
145 | aws_security_group.sg_sip_b2bua.id,
146 | aws_security_group.efs_security_group.id,
147 | var.vpc_default_sg_id
148 | ]
149 | labels = {
150 | application = "b2bua"
151 | }
152 | }
153 | ]
154 | }
155 |
156 | resource "aws_eks_addon" "ebs" {
157 | cluster_name = var.cluster_name
158 | addon_name = "aws-ebs-csi-driver"
159 | addon_version = "v1.11.4-eksbuild.1"
160 |
161 | # depends_on = [module.eks]
162 | }
163 |
164 | resource "kubernetes_annotations" "default-storageclass" {
165 | api_version = "storage.k8s.io/v1"
166 | kind = "StorageClass"
167 | force = "true"
168 |
169 | metadata {
170 | name = "gp2"
171 | }
172 | annotations = {
173 | "storageclass.kubernetes.io/is-default-class" = "true"
174 | }
175 | }
176 |
177 | data "aws_eks_cluster" "cluster" {
178 | name = module.eks.cluster_id
179 | }
180 |
181 | data "aws_eks_cluster_auth" "cluster" {
182 | name = module.eks.cluster_id
183 | }
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Full VoIP service on Kubernetes
2 |
3 | The goal of this project is to provide people with a complete, fully-fledged VoIP platform based on Kubernetes.
4 |
5 | It is based on AWS EKS and consists of two parts:
6 |
7 | The first is the Terraform project, which will create all resources needed to implement:
8 | - 1 EKS cluster with the following nodes:
9 | - 1 backend (1 node in a private subnet): this will run the Consul service and the Routing Service (based on FS XML_CURL) pods
10 | - 1 Proxy (1 node in a public subnet): this will run Kamailio in a pod.
11 | - 2 B2BUA (2 nodes in a public subnet): These will run freeSWITCH. Signaling will run on the private IPs while RTP will use a public IP.
12 |
13 | The clients will register (if configured) against the Proxy's public IP address and make/receive calls via this address. The Proxy will forward all calls to the freeSWITCH nodes on their private IPs, which then negotiate an RTP connection with the client via their public IPs. An example registration is sketched below.
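
For example, once the platform is up, any standard SIP client can register this way; a minimal sketch using pjsua (the proxy IP is a placeholder, and the credentials match the demo subscriber that `scripts/init.sh` creates):

```
# Register demo subscriber 721110000 against the proxy's public IP.
pjsua --id sip:721110000@<PROXY_PUBLIC_IP> \
      --registrar sip:<PROXY_PUBLIC_IP> \
      --realm '*' \
      --username 721110000 \
      --password whatever
```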
14 |
15 | # Architecture:
16 | ![VoIP on Kubernetes network diagram](voip-full-k8s-network-diagram.jpg)
17 |
18 | # Resources Created by the project
19 |
20 | - VPC (Including IGW, RouteTables, etc)
21 | - EKS cluster
22 | - RDS database
23 | - Support ASG (you can configure all node types in `terraform/project/main/dev.vars.json`; see the sample excerpt below):
24 | - 3 support nodes for consul (required for state consensus, they can be small types)
25 | - SIP-PROXY ASG:
26 | - 1 SIP-PROXY node (Kamailio)
27 | - Backend ASG:
28 | - 1 config-server node
29 | - B2BUA ASG:
30 | - 1 B2BUA node (freeSWITCH)
31 |
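A hedged sample of what the node-type configuration in `terraform/project/main/dev.vars.json` looks like; the key names follow the `eks_workers` structure consumed by the EKS module, while the values here are purely illustrative:

```
"eks_workers": {
  "support": {
    "instance_type": "t3.small",
    "desired_capacity": 3,
    "ssh_key_name": "my-ec2-keypair"
  }
}
```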
32 | ---
33 | # Requirements
35 |
36 | - You need to have an AWS account properly configured in your CLI.
37 | - The AWS account secret_access_key and access_key_id should already be properly configured in your ~/.aws/credentials file, e.g.:
38 | ```
39 | [myawsaccount]
40 | aws_access_key_id = [YOUR-ACCESS-KEY-ID]
41 | aws_secret_access_key = [YOUR-SECRET-ACCESS-KEY]
42 | ```
43 | - And make sure you export the environment variable before calling the _make file_, e.g.:
44 | - `export AWS_PROFILE=myawsaccount`
45 | - You MUST create an SSH key pair (EC2 -> Key Pairs), give it a name, and set that name in the variables file (see "Prepare your deployment")
46 | - Said account must have all permissions to create a VPC, routing tables, EKS cluster, ASGs, etc.
47 | - You must have installed and properly configured the following (see the quick check below):
48 | - helm (https://helm.sh/docs/intro/install/)
49 | - kubectl (https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/)
50 | - AWS cli utility (https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html)
51 |
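A quick way to confirm the tooling above is installed and on your PATH:

```
helm version
kubectl version --client
aws --version
```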
52 |
53 | # Docker Images
54 |
55 | When you create the platform, Kubernetes will pull 3 images off of my Docker Hub repo (https://hub.docker.com/orgs/vikingvoip/repositories).
56 | The images pulled are:
57 | - Kamailio Proxy Server version 5.3
58 | - freeSWITCH version 1.10
59 | - Config Server: an nginx server with some PHP scripts to serve the dialplan to freeSWITCH, as well as a script to parse the CDRs and insert them into the database (see the sketch below).
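
For context, freeSWITCH's mod_xml_curl fetches its configuration by POSTing form parameters to the config server and expects an XML document back; a hedged sketch of such a request (the URL and script name are illustrative, while the parameter names are mod_xml_curl's standard ones):

```
curl -s -X POST http://config-server/dialplan.php \
     -d 'hostname=fs-node-1' \
     -d 'section=dialplan' \
     -d 'tag_name=context' \
     -d 'key_name=name' \
     -d 'key_value=public'
```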
60 |
61 | # Services out of the box
62 |
63 | - User-to-user calling:
64 | When a registered user calls another local user, the call always goes to the B2BUA so that the RTP is anchored there. The call is then forwarded to the proxy, which does the actual location lookup and forwards the call to the B-user.
65 | - User-to-termination provider:
66 | I'll provide a variable in dev.vars.json so that you can do outbound termination.
67 | - Voicemail:
68 | When a local user is called and is not registered, the incoming call goes to Voicemail. This was implemented across all available freeSWITCHes by creating an EFS and attaching it to all SIP-B2BUA nodes, so the recordings are available to all.
69 |
70 | To do:
71 |
72 | - Support for REFER/Transfer, by remembering where a user's call was sent and, if we receive another call for that user, sending it to the same B2BUA.
73 | - Some kind of UI to create new subscribers.
74 | - Some kind of UI to add new termination providers.
75 | - Some kind of UI to add new origination providers.
76 | - Some kind of UI to create a route table based on the number dialed (LCR).
77 |
78 | # Deploy
79 | ## Makefile
80 |
81 | - ```make help```: Will list all available directives:
82 | - ```make init-backend```: Will initialize the s3_backend, meaning preparing all plugins needed by terraform.
83 | - ```make apply-backend```: Will apply the s3_backend terraform. This will create an S3 bucket and a DynamoDB table, which are used to keep track of the state of terraform deployments in relation to resources in AWS. (Includes all previous directives)
84 | - ```make init-main```: Will initialize the main project, meaning preparing all plugins needed by that terraform project.
85 | - ```make apply-main```: Will apply the main terraform project. This will create all resources needed to deploy the whole EKS platform.
86 | - ```make destroy-main```: Will delete all resources previously created by the main project. (The destroy may fail because a previously created ELB is sometimes not destroyed. If that happens, manually delete the ELB and run the destroy again -- see the snippet after this list. We're investigating.)
87 | - ```make destroy-backend```: Will delete the backend resources created for state management.
88 |
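If `make destroy-main` does hit the leftover-ELB problem, a hedged cleanup sketch with the AWS CLI (Kubernetes `LoadBalancer` services create classic ELBs by default, so the `elb` command namespace applies; adjust the region to your deployment):

```
# List classic ELBs, then delete the one left behind by the cluster.
aws elb describe-load-balancers --region us-east-1 \
    --query 'LoadBalancerDescriptions[].LoadBalancerName'
aws elb delete-load-balancer --region us-east-1 \
    --load-balancer-name <NAME-FROM-THE-LIST>
```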
89 | ---
90 |
91 | # Prepare your deployment
93 |
94 | Clone the repo
95 |
96 | ```git clone git@github.com:Viking-VoIP/full-voip-on-k8s.git viking-voip```
97 |
98 | cd into the project folder:
99 |
100 | ```cd viking-voip```
101 |
102 | *IMPORTANT*: The variables file contains all the information needed to deploy the complete solution. There are parameters you will probably want to change.
103 |
104 | Use your favorite editor to edit the variables file:
105 |
106 | `terraform/project/main/dev.vars.json`
107 |
108 | A few variables are special:
109 | - `local_subscribers_regexp: "^7[0-9]{8}"`: This is a regexp representing the numbers you'll be assigning to your subscribers, in this case 700000000-799999999. The SIP-PROXY will detect calls to these numbers and forward them to the B2BUA so the RTP is anchored there and you don't have any audio issues. (You can sanity-check the pattern with the snippet after this list.)
110 | - `region: "us-east-1"`: The AWS region in which to deploy the platform.
111 | - `db_username: "admin"`: The database (RDS) username
112 | - `db_password: "AbC123fgh#"`: The database (RDS) password
113 | - `ssh_key_name: some-key-name`: The name of the SSH key pair you created for this.
114 |
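You can sanity-check your own `local_subscribers_regexp` pattern with `grep -E` before deploying:

```
echo 712345678 | grep -E '^7[0-9]{8}'   # prints the number: treated as a local subscriber
echo 812345678 | grep -E '^7[0-9]{8}'   # prints nothing: not a local subscriber
```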
115 | *Please review the vars file to set the instance type you want*
116 |
117 |
118 | To build the whole project simply execute:
119 |
120 | ```make apply-main```
121 |
122 | This will launch the deployment process.
123 |
124 | If everything goes OK, you will get an output describing your setup; save it somewhere safe.
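
If you misplace that output, you can re-print it at any time; the Makefile drives plain Terraform under the hood, so this should work:

```
cd terraform/project/main
terraform output
```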
125 |
126 | *NOTE*: At a minimum, change the database password in dev.vars.json.
127 |
128 | ---
129 | ## What next?
130 |
131 | Subscribers are not created by default, you will need to add them manually. To do this you can execute something like:
132 |
133 | ### Log on to kubernetes dashboard:
134 |
135 | * Get a token:
136 | ```
137 | kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '{print $1}')
138 | ```
139 |
140 | * Start a kubectl proxy
141 | ```
142 | kubectl proxy
143 | ```
144 |
145 | Copy the token from the output above; you will paste it into the dashboard's login page:
146 |
147 | * Open
148 | http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#/login
149 |
150 |
151 | *Get the proxy's pod*
152 | ```
153 | POD=$(kubectl get pods -o json | jq ".items[]|.metadata|select(.name|test(\"sip-proxy.\"))|.name" | sed "s/\"//g")
154 | ```
155 |
156 | *Set a couple of vars*
157 | ```
158 | USER=721110000
159 | PASS=whatever
160 | ```
161 |
162 | *Get the domain (the proxy's public IP) from Consul*
163 | ```
164 | DOMAIN=$(kubectl exec -t -n consul consul-server-0 -- /bin/consul kv get voice/proxy-public-ip | sed "s/\"//g")
165 | ```
166 |
167 | *Create the user via sip-proxy*
168 | ```
169 | kubectl exec -ti $POD -- kamctl add $USER@$DOMAIN $PASS
170 | ```
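
To verify the subscriber was created, you can dump the table through kamctl (assuming the image's default kamctl configuration):

```
kubectl exec -ti $POD -- kamctl db show subscriber
```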
171 |
172 |
173 | If you're testing with a brand-new AWS account, you will probably have to request a Service Quota Increase, because the default quota is not enough for the 6 VMs we need. Here's a video on how to do that:
174 |
175 | [![AWS Service Quota Increase](AWS_Service_Quota_Increase.png)](https://youtu.be/h03LzJtmXRQ)
176 |
177 | Alternatively, you can skip `kamctl` and add subscribers straight into the database; a minimal sketch (assuming the standard Kamailio `subscriber` schema):
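
```
# Hypothetical direct insert; reuses $POD, $USER, $PASS and $DOMAIN from the
# steps above, plus the DB endpoint and credentials from dev.vars.json.
# kamctl computes ha1 as md5(user:realm:password); it also fills the ha1b
# column, which is omitted here for brevity.
HA1=$(printf '%s:%s:%s' "$USER" "$DOMAIN" "$PASS" | md5sum | awk '{print $1}')
kubectl exec -ti $POD -- mysql -h <DB_ADDRESS> -u <DB_USER> -p<DB_PASSWORD> viking \
  -e "INSERT INTO subscriber (username, domain, password, ha1) VALUES ('$USER', '$DOMAIN', '$PASS', '$HA1');"
```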
178 |
179 | I put together a quick (long) video showing you how to install it.
180 |
181 |
182 | [Installation walkthrough video](https://youtu.be/Vk4def_WvbI)
183 |
--------------------------------------------------------------------------------
/scripts/init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Abort if the given command is not installed.
4 | checkutil () {
5 | if [ -z "$(which "$1")" ]; then
6 | echo "Could not find \"$1\" anywhere, please install it!"
7 | exit 1
8 | fi
9 | }
9 |
10 | # echo "--> Starting initialization script..."
11 | export AWS_REGION=$(jq '.region' ../terraform/project/main/dev.vars.json | sed 's/"//g')
12 | export CLUSTER_NAME=$(cat ../terraform/project/main/dev.vars.json | jq '.eks_cluster_name' | sed 's/"//g')
13 | cd ../terraform/project/main && export DB_ID=$(terraform state pull | jq '.outputs.db_instance_data.value.db_instance_data.db_instance_id' | sed 's/"//g') && cd -
14 | export DB_USER=$(cat ../terraform/project/main/dev.vars.json | jq '.db_username' | sed 's/"//g')
15 | export DB_PASSWD=$(cat ../terraform/project/main/dev.vars.json | jq '.db_password' | sed 's/"//g')
16 | export LOCAL_SUBSCRIBERS_REGEXP=$(cat ../terraform/project/main/dev.vars.json | jq '.local_subscribers_regexp' | sed 's/"//g')
17 |
18 | echo "--> Configuring access to EKS..."
19 | aws eks --region $AWS_REGION update-kubeconfig --name $CLUSTER_NAME
20 | sleep 3 # sleep 5 # read -p "Press enter to continue"
21 |
22 | echo "--> Resolving DB params..."
23 | export DB_ADDRESS=$(aws --region=$AWS_REGION rds describe-db-instances --db-instance-identifier $DB_ID | jq '.DBInstances[].Endpoint.Address' | sed 's/"//g')
24 |
25 | # Check if aws command is available
26 | checkutil "aws"
27 |
28 | # Check if kubectl command is available
29 | checkutil "kubectl"
30 |
31 | # Check if helm command is available
32 | checkutil "helm"
33 |
34 | # Install consul
35 | echo "--> Install consul..."
36 | if [ "$(helm list --all-namespaces | grep 'consul' | wc -l)" -eq "1" ]; then
37 | echo "Consul chart already installed, skipping."
38 | else
39 |
40 | # Verify OIDC exists
41 | oidc_id=$(aws eks describe-cluster --name $CLUSTER_NAME --query "cluster.identity.oidc.issuer" --output text | cut -d '/' -f 5)
42 | if [ "1" -eq "$(aws iam list-open-id-connect-providers | grep $oidc_id | wc -l)" ]; then
43 | echo "ODIC driver already present..."
44 | else
45 | eksctl utils associate-iam-oidc-provider --cluster $CLUSTER_NAME --approve
46 | fi
47 |
48 | # let's add the right permissions to the nodes so they can mount volumes dynamically
49 | aws iam attach-role-policy \
50 | --role-name $(aws iam list-roles | jq '.Roles[] | select( .RoleName | test("dev-cluster.*")) | select( .AssumeRolePolicyDocument | .Statement[] | .Principal | .Service | test("ec2.amazonaws.com")?) | .RoleName' | sed 's/"//g') \
51 | --policy-arn $(aws iam list-policies | jq '.Policies[] | select(.PolicyName | test("AmazonEC2FullAccess")) | .Arn' | sed 's/\"//g')
52 |
53 | #kubectl apply -f consul/storage-class.yaml
54 |
55 | helm repo add hashicorp https://helm.releases.hashicorp.com
56 | #helm install -f consul/helm-consul-values.yaml hashicorp hashicorp/consul
57 | helm install --values consul/helm-consul-values.yaml consul hashicorp/consul --set global.name=consul --create-namespace --namespace consul
58 | sleep 5 # sleep 5 # read -p "Press enter to continue"
59 | # Get kubernetes' DNS IP, configure the configMap and apply it
60 | export CONSUL_DNS_IP=$(kubectl get svc -n consul consul-dns -o jsonpath='{.spec.clusterIP}')
61 | # Replace and apply the configmap for consul-dns
62 | sed "s/{{ CONSUL_DNS_IP }}/$CONSUL_DNS_IP/g" consul/dns-configMap-template.yaml > consul/dns-configMap.yaml
63 | kubectl apply -f consul/dns-configMap.yaml
64 | sleep 5 # sleep 5 # read -p "Press enter to continue"
65 | # Add the forwarding of all .consul resolve requests to consul-dns and apply
66 | kubectl get -n kube-system cm/coredns -o yaml | python3 update-configmap.py | kubectl apply -f -
67 | sleep 5 # sleep 5 # read -p "Press enter to continue"
68 | fi
69 |
70 | # Create service account
71 | echo "--> Configure a Service Account in our new cluster..."
72 | if [ "$(kubectl get serviceaccounts -A | grep 'eks-admin' | wc -l)" -eq "1" ]; then
73 | echo "ServiceAccount already exists, not creating..."
74 | else
75 | kubectl apply -f k8s-service-account/service-account.yaml
76 | fi
77 |
78 | sleep 5 # sleep 5 # read -p "Press enter to continue"
79 |
80 | # Install dashboard and dashboard-admin
81 | echo "--> Install dashboard and dashboard-admin..."
82 | if [ "$(kubectl get pods -A | grep 'kubernetes-dashboard' | wc -l)" -eq "2" ]; then
83 | echo "Dashboard already deployed, not deploying..."
84 | else
85 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.6.1/aio/deploy/recommended.yaml
86 | kubectl apply -f dashboard/dashboard-admin.yaml
87 | fi
88 | sleep 5 # sleep 5 # read -p "Press enter to continue"
89 |
90 | # Just in case the svc is not yet deployed...
91 | echo "--> Just in case the svc is not yet deployed, we'll query for it..."
92 | until [ "$(kubectl get svc -n consul | grep 'consul-ui' | wc -l)" -eq "1" ]; do
93 | echo "Waiting for consul service to be up... (3 seconds)"
94 | sleep 3
95 | done
96 |
97 | sleep 5 # sleep 5 # read -p "Press enter to continue"
98 |
99 | RUNNING_CONSUL=$(kubectl get pod -n consul | grep "Running" | grep "server" | grep -v leader | head -n 1 | awk '{print$1}')
100 | echo "RUNNING_CONSUL=<$RUNNING_CONSUL>"
101 | # Wait until consul-server is running
102 | echo "--> Wait for consul-server to be running..."
103 | until [ "2" -eq "$(kubectl get pod -n consul $RUNNING_CONSUL | wc -l)" ]; do echo "Waiting for consul server to be running..., output: " && kubectl get pod -n consul $RUNNING_CONSUL | wc -l && sleep 30; done
104 |
105 | #if [ "$(echo $data | grep "cluste" | wc -l)" -gt 0 ]; then echo "yes"; fi
106 |
107 | sleep 5 # sleep 5 # read -p "Press enter to continue"
108 |
109 | ### Set all variables on consul
110 | echo "--> Writing backend/db_address $DB_ADDRESS to $RUNNING_CONSUL ..."
111 | while [ "$(kubectl exec -t -n consul $RUNNING_CONSUL -- /bin/consul kv put backend/db_address $DB_ADDRESS | grep 'Success' | wc -l)" -ne "1" ]; do echo "Failed writing! Will retry..."; sleep 30; done
112 |
113 | echo "--> Writing backend/db_user $DB_USER to $RUNNING_CONSUL ..."
114 | while [ "$(kubectl exec -t -n consul $RUNNING_CONSUL -- /bin/consul kv put backend/db_user $DB_USER | grep 'Success' | wc -l)" -ne "1" ]; do echo "Failed writing! Will retry..."; sleep 30; done
115 |
116 | echo "--> Writing backend/db_pass $DB_PASSWD to $RUNNING_CONSUL ..."
117 | while [ "$(kubectl exec -t -n consul $RUNNING_CONSUL -- /bin/consul kv put backend/db_pass $DB_PASSWD | grep 'Success' | wc -l)" -ne "1" ]; do echo "Failed writing! Will retry..."; sleep 30; done
118 |
119 | echo "--> Writing voice/local_subscribers_regexp $LOCAL_SUBSCRIBERS_REGEXP to $RUNNING_CONSUL ..."
120 | while [ "$(kubectl exec -t -n consul $RUNNING_CONSUL -- /bin/consul kv put voice/local_subscribers_regexp $LOCAL_SUBSCRIBERS_REGEXP | grep 'Success' | wc -l)" -ne "1" ]; do echo "Failed writing! Will retry..."; sleep 30; done
121 |
122 | # Deploy sip-proxy
123 | echo "--> Deploy sip-proxy..."
124 | for yaml in $(find sip-proxy/*.yaml); do kubectl apply -f $yaml; done
125 |
126 | sleep 5 # sleep 5 # read -p "Press enter to continue"
127 |
128 | # Deploy sip-b2bua
129 | echo "--> Deploy sip-b2bua..."
130 | for yaml in $(find sip-b2bua/*.yaml); do kubectl apply -f $yaml; done
131 |
132 | sleep 5 # sleep 5 # read -p "Press enter to continue"
133 |
134 | # Deploy config-server
135 | echo "--> Deploy config-server..."
136 | for yaml in $(find config-server/*.yaml); do kubectl apply -f $yaml; done
137 |
138 | export SIP_PUBLIC_IP=$(aws --region $AWS_REGION ec2 describe-instances --filters "Name=tag:aws:autoscaling:groupName,Values=*sip-proxy*" | jq '.Reservations[].Instances[].PublicIpAddress' | sed 's/"//g' | grep -v null | head -n 1)
139 |
140 | # Push the proxy's public IP to consul
141 | echo "--> Push the proxy's public IP to consul..."
142 |
143 | # Now we should have the proxy's public ip, let's set it
144 | kubectl exec -t -n consul $RUNNING_CONSUL -- /bin/consul kv put voice/proxy-public-ip $SIP_PUBLIC_IP
145 |
146 | #DOMAIN=$(kubectl exec -t -n consul $RUNNING_CONSUL -- /bin/consul kv get voice/proxy-public-ip | sed "s/\"//g")
147 |
148 | # Variables
149 | DB_ADDRESS=$(kubectl exec -t -n consul $RUNNING_CONSUL -- /bin/consul kv get backend/db_address)
150 | DB_USER=$(kubectl exec -t -n consul $RUNNING_CONSUL -- /bin/consul kv get backend/db_user)
151 | DB_PASSWD=$(kubectl exec -t -n consul $RUNNING_CONSUL -- /bin/consul kv get backend/db_pass)
152 | # Create CDR table in viking's database via sip-proxy (which has a mysql client)
153 |
154 | until [ "1" -eq "$(kubectl get pods -o json | jq ".items[]|.metadata|select(.name|test(\"sip-proxy.\"))|.name" | wc -l)" ]; do
155 | echo "Waiting for sip-proxy running..." && sleep 30
156 | done
157 |
158 | echo "Waiting for 3 minutes for the dust to settle..."
159 | sleep 180
160 |
161 | # inject database
162 | kubectl exec -t -n default $(kubectl get pods -o json | jq ".items[]|.metadata|select(.name|test(\"sip-proxy.\"))|.name" | sed "s/\"//g") -- /bin/bash -c "/usr/bin/mysql -h $DB_ADDRESS -u $DB_USER -p$DB_PASSWD viking < /etc/kamailio/viking_schema.sql"
163 |
164 | # Let's create a couple of subscribers (721110000, 721110001)
165 | POD=$(kubectl get pods -o json | jq ".items[]|.metadata|select(.name|test(\"sip-proxy.\"))|.name" | sed "s/\"//g")
166 | DOMAIN=$(kubectl exec -t -n consul $RUNNING_CONSUL -- /bin/consul kv get voice/proxy-public-ip | sed "s/\"//g")
167 |
168 | USER=721110000
169 | PASS=whatever
170 | kubectl exec -ti $POD -- kamctl add $USER@$DOMAIN $PASS
171 |
172 | USER=721110001
173 | PASS=whatever
174 | kubectl exec -ti $POD -- kamctl add $USER@$DOMAIN $PASS
175 | #############
176 |
177 | # Output the SIP-PROXY's Public IP Address
178 | echo "--> Output the SIP-PROXY's Public IP Address and we're done."
179 | echo "***************************************************************"
180 | echo "*** Congratulations! Your service should now be running. ***"
181 | echo "*** Your public SIP IP Address is $SIP_PUBLIC_IP ***"
182 | echo "***************************************************************"
183 |
--------------------------------------------------------------------------------