├── .gitignore
├── .gitmodules
├── .jfrog.taskcat.yml
├── .nojekyll
├── .taskcat.yml
├── CODEOWNERS
├── LICENSE.txt
├── Makefile
├── NOTICE.txt
├── README.md
├── cloudInstallerScripts
├── artifactory-ami.yml
├── roles
│ ├── artifactory-ami
│ │ ├── .travis.yml
│ │ ├── defaults
│ │ │ ├── main.yml
│ │ │ └── main.yml.bak
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ ├── exception.yml
│ │ │ ├── main.yml
│ │ │ ├── preferences.yml
│ │ │ └── version.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── artifactory.cluster.license.j2
│ │ │ ├── binarystore.xml.j2
│ │ │ ├── installer-info.json.j2
│ │ │ ├── join.key.j2
│ │ │ ├── master.key.j2
│ │ │ └── system.yaml.j2
│ │ └── vars
│ │ │ └── main.yml
│ ├── artifactory-nginx-ami
│ │ ├── .travis.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ └── nginx.conf
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── artifactory.conf.j2
│ │ ├── tests
│ │ │ ├── inventory
│ │ │ └── test.yml
│ │ └── vars
│ │ │ └── main.yml
│ ├── artifactory-nginx-ssl
│ │ ├── .travis.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── artifactory.conf.j2
│ │ │ ├── certificate.key.j2
│ │ │ └── certificate.pem.j2
│ │ ├── tests
│ │ │ ├── inventory
│ │ │ └── test.yml
│ │ └── vars
│ │ │ └── main.yml
│ ├── artifactory-nginx
│ │ ├── .travis.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ └── nginx.conf
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── artifactory.conf.j2
│ │ ├── tests
│ │ │ ├── inventory
│ │ │ └── test.yml
│ │ └── vars
│ │ │ └── main.yml
│ ├── artifactory
│ │ ├── .travis.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ ├── exception.yml
│ │ │ ├── main.yml
│ │ │ ├── preferences.yml
│ │ │ └── version.yml
│ │ ├── tasks
│ │ │ ├── configure-licenses.yml
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── artifactory.cluster.license.j2
│ │ │ ├── artifactory.pro.license.j2
│ │ │ ├── binarystore.xml.j2
│ │ │ ├── installer-info.json.j2
│ │ │ ├── join.key.j2
│ │ │ ├── master.key.j2
│ │ │ └── system.yaml.j2
│ │ └── vars
│ │ │ └── main.yml
│ ├── xray-ami
│ │ ├── .travis.yml
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── Debian.yml
│ │ │ ├── RedHat.yml
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ ├── installer-info.json.j2
│ │ │ ├── join.key.j2
│ │ │ ├── master.key.j2
│ │ │ └── system.yaml.j2
│ │ ├── tests
│ │ │ ├── inventory
│ │ │ └── test.yml
│ │ └── vars
│ │ │ └── main.yml
│ └── xray
│ │ ├── .travis.yml
│ │ ├── defaults
│ │ └── main.yml
│ │ ├── handlers
│ │ └── main.yml
│ │ ├── meta
│ │ └── main.yml
│ │ ├── tasks
│ │ ├── Debian.yml
│ │ ├── RedHat.yml
│ │ ├── initialize-pg-db.yml
│ │ └── main.yml
│ │ ├── templates
│ │ ├── installer-info.json.j2
│ │ ├── join.key.j2
│ │ ├── master.key.j2
│ │ └── system.yaml.j2
│ │ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ │ └── vars
│ │ └── main.yml
├── site-artifactory.yml
├── site-xray.yml
└── xray-ami.yml
├── docs
├── images
│ ├── architecture_diagram.png
│ ├── jfrog-architecture-diagram.png
│ ├── jfrog-architecture-diagram.pptx
│ ├── jfrog-architecture-diagrams.pptx
│ ├── secret_manager_licenses.png
│ ├── secrets_manager_certificates.png
│ ├── xray_update_1.png
│ ├── xray_update_2.png
│ ├── xray_update_3.png
│ ├── xray_update_4.png
│ └── xray_update_5.png
└── partner_editable
│ ├── _settings.adoc
│ ├── additional_info.adoc
│ ├── architecture.adoc
│ ├── deploy_steps.adoc
│ ├── deployment_options.adoc
│ ├── faq_troubleshooting.adoc
│ ├── licenses.adoc
│ ├── overview_target_and_usage.adoc
│ ├── pre-reqs.adoc
│ ├── product_description.adoc
│ ├── regions.adoc
│ ├── service_limits.adoc
│ └── specialized_knowledge.adoc
├── pipeline-taskcat.yml
├── templates
├── ami-rt-xray-creation.template.yaml
├── ami-rt-xray-main.template.yaml
├── ami-rt-xray-vpc.template.yaml
├── jfrog-ami-creation.template.yaml
├── jfrog-ami-main.template.yaml
├── jfrog-ami-vpc.template.yaml
├── jfrog-artifactory-core-infrastructure.template.yaml
├── jfrog-artifactory-ec2-existing-vpc.template.yaml
├── jfrog-artifactory-ec2-instance.template.yaml
├── jfrog-artifactory-ec2-main.template.yaml
├── jfrog-artifactory-pro-ec2-existing-vpc-main.template.yaml
├── jfrog-artifactory-pro-ec2-new-vpc-main.template.yaml
└── jfrog-xray-ec2-instance.template.yaml
└── xray-setup
├── crhelper-2.0.6.dist-info
├── INSTALLER
├── LICENSE
├── METADATA
├── NOTICE
├── RECORD
├── WHEEL
└── top_level.txt
├── crhelper
├── __init__.py
├── __pycache__
│ ├── __init__.cpython-38.pyc
│ ├── log_helper.cpython-38.pyc
│ ├── resource_helper.cpython-38.pyc
│ └── utils.cpython-38.pyc
├── log_helper.py
├── resource_helper.py
└── utils.py
├── handler.py
├── psycopg2
├── __init__.py
├── _ipaddress.py
├── _json.py
├── _lru_cache.py
├── _psycopg.cpython-37m-x86_64-linux-gnu.so
├── _range.py
├── compat.py
├── errorcodes.py
├── errors.py
├── extensions.py
├── extras.py
├── pool.py
├── psycopg1.py
├── sql.py
└── tz.py
└── tests
├── __init__.py
├── __pycache__
├── __init__.cpython-38.pyc
├── test_log_helper.cpython-38.pyc
├── test_resource_helper.cpython-38.pyc
└── test_utils.cpython-38.pyc
├── test_log_helper.py
├── test_resource_helper.py
├── test_utils.py
└── unit
├── __init__.py
└── __pycache__
└── __init__.cpython-38.pyc
/.gitignore:
--------------------------------------------------------------------------------
1 | .ignore/
2 | venv/
3 | .taskcat_overrides.*
4 | .taskcat/
5 | taskcat_outputs/
6 | templates/.*output
7 | .DS_Store
8 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "submodules/quickstart-aws-vpc"]
2 | path = submodules/quickstart-aws-vpc
3 | url = https://github.com/aws-quickstart/quickstart-aws-vpc.git
4 | branch = main
5 |
6 | [submodule "submodules/quickstart-linux-bastion"]
7 | path = submodules/quickstart-linux-bastion
8 | url = https://github.com/aws-quickstart/quickstart-linux-bastion.git
9 | branch = main
10 |
11 | [submodule "docs/boilerplate"]
12 | path = docs/boilerplate
13 | url = https://github.com/aws-quickstart/quickstart-documentation-base-common.git
14 | branch = main
15 |
16 |
--------------------------------------------------------------------------------
/.jfrog.taskcat.yml:
--------------------------------------------------------------------------------
1 | project:
2 | name: quickstart-jfrog-artifactory
3 | owner: quickstart-eng@amazon.com
4 | s3_object_acl: private
5 | s3_regional_buckets: True
6 | s3_bucket: tcat-qs-ec2 # commercial accounts (also for GovCloud)
7 | #s3_bucket: tcat-qs-ec2 #aws-seller accounts (MarketPlace)
8 |
9 | parameters:
10 | #KeyPairName: "" #Key pair name which should already exist in AWS. This should be set in the global taskcat file.
11 | ArtifactoryVersion: 7.37.14
12 | XrayVersion: 3.46.0
13 | #AccessCidr: "" #CIDR block for accessing the Artifactory load balancer. This should be set in the global taskcat file.
14 | QsS3BucketName: "$[taskcat_autobucket]"
15 | QsS3KeyPrefix: "quickstart-jfrog-artifactory/"
16 | QsS3BucketRegion: "$[taskcat_current_region]"
17 | MasterKey: "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
18 | SmLicenseName: "jfrog-artifactory"
19 | SmCertName: "jfrog.tech-certificates"
20 | DatabasePassword: "$[taskcat_genpass_8A]"
21 | MultiAzDatabase: "true"
22 | ArtifactoryServerName: "artifactory"
23 | XrayDatabasePassword: "$[taskcat_genpass_8A]"
24 | AvailabilityZones: "$[taskcat_genaz_2]"
25 | ProvisionBastionHost: "Enabled"
26 | NumberOfSecondary: 2
27 | InstallXray: "true"
28 | XrayNumberOfSecondary: 1
29 | #RemoteAccessCidr: #Remote CIDR allowed to access the bastion host. This should be set in the global taskcat file.
30 |
31 | tests:
32 | rt-ami:
33 | auth:
34 | us-east-1: seller
35 | us-gov-west-1: gov
36 | us-gov-east-1: gov
37 | parameters:
38 | AvailabilityZone: "$[taskcat_genaz_1]"
39 | template: templates/jfrog-ami-main.template.yaml
40 | regions:
41 | - us-east-1
42 | - us-gov-east-1
43 |
44 | xray-ami:
45 | auth:
46 | us-east-1: seller
47 | us-gov-west-1: gov
48 | us-gov-east-1: gov
49 | parameters:
50 | AvailabilityZone: "$[taskcat_genaz_1]"
51 | template: templates/ami-rt-xray-main.template.yaml
52 | regions:
53 | - us-east-1
54 | - us-gov-east-1
55 |
56 | ent-new-vpc:
57 | auth:
58 | us-gov-west-1: gov
59 | us-gov-east-1: gov
60 | parameters:
61 | AvailabilityZones: "us-east-1a, us-east-1b"
62 | DatabasePreferredAz: us-east-1a
63 | template: templates/jfrog-artifactory-ec2-main.template.yaml
64 | regions:
65 | - us-east-1
66 | # - us-east-2
67 | # - us-west-1
68 | # - us-west-2
69 | # - us-gov-east-1
70 | # - us-gov-west-1
71 | # - ap-south-1
72 |
73 | ent-existing-vpc-e1:
74 | auth:
75 | us-gov-west-1: gov
76 | us-gov-east-1: gov
77 | parameters:
78 | # us-east-1
79 | AvailabilityZones: "us-east-1a, us-east-1b"
80 | VpcId: "vpc-0df1d896364490643"
81 | PublicSubnet1Id: "subnet-05e222926ec99d3c3"
82 | PublicSubnet2Id: "subnet-02a7e4641b2e5bc13"
83 | PrivateSubnet1Id: "subnet-0f0f73fdb8b31271d"
84 | PrivateSubnet2Id: "subnet-02f5e6f3024809a98"
85 |
86 | template: templates/jfrog-artifactory-ec2-existing-vpc.template.yaml
87 | regions:
88 | - us-east-1
89 |
90 | ent-existing-vpc-e2:
91 | auth:
92 | us-gov-west-1: gov
93 | us-gov-east-1: gov
94 | parameters:
95 | # InstanceType : m6g.xlarge
96 | # XrayInstanceType : c6g.2xlarge
97 |
98 | # us-east-2
99 | AvailabilityZones: "us-east-2a, us-east-2b"
100 | VpcId: "vpc-06134dfb53cb98669"
101 | PublicSubnet1Id: "subnet-0f029329115b95a59"
102 | PublicSubnet2Id: "subnet-0583fc3ec5bc47ae4"
103 | PrivateSubnet1Id: "subnet-0e61b51bfe9fdc6ce"
104 | PrivateSubnet2Id: "subnet-0e960a9a68ae9d824"
105 |
106 | template: templates/jfrog-artifactory-ec2-existing-vpc.template.yaml
107 | regions:
108 | - us-east-2
109 |
110 | ent-existing-vpc-w1:
111 | auth:
112 | us-gov-west-1: gov
113 | us-gov-east-1: gov
114 | parameters:
115 | # us-west-1
116 | AvailabilityZones: "us-west-1b, us-west-1c"
117 | VpcId: "vpc-02461d0d92635b8a7"
118 | PublicSubnet1Id: "subnet-0dda0ef33f3fbaaef"
119 | PublicSubnet2Id: "subnet-0dec438020b90312a"
120 | PrivateSubnet1Id: "subnet-0d323306426f255d9"
121 | PrivateSubnet2Id: "subnet-021a85a01196887ac"
122 |
123 | template: templates/jfrog-artifactory-ec2-existing-vpc.template.yaml
124 | regions:
125 | - us-west-1
126 |
127 | ent-existing-vpc-w2:
128 | auth:
129 | us-gov-west-1: gov
130 | us-gov-east-1: gov
131 | parameters:
132 | # us-west-2
133 | AvailabilityZones: "us-west-2a, us-west-2b"
134 | VpcId: "vpc-0459089633112f550"
135 | PublicSubnet1Id: "subnet-0d70e204ab20f8580"
136 | PublicSubnet2Id: "subnet-02480323f018dc593"
137 | PrivateSubnet1Id: "subnet-085574b8abfb79e3c"
138 | PrivateSubnet2Id: "subnet-0a545283b02e1ccdd"
139 |
140 | template: templates/jfrog-artifactory-ec2-existing-vpc.template.yaml
141 | regions:
142 | - us-west-2
143 |
144 | prox-new-vpc:
145 | auth:
146 | us-gov-west-1: gov
147 | us-gov-east-1: gov
148 | parameters:
149 | dummyParam: "needed in case no parameters are needed in this section"
150 | template: templates/jfrog-artifactory-pro-ec2-new-vpc-main.template.yaml
151 | regions:
152 | - us-west-1
153 | - us-gov-west-1
154 |
155 | prox-existing-vpc-e2:
156 | auth:
157 | us-gov-west-1: gov
158 | us-gov-east-1: gov
159 | parameters:
160 | # us-east-1
161 | AvailabilityZones: "us-east-2a, us-east-2b"
162 | DatabasePreferredAz: us-east-2a
163 |
164 | VpcId: "vpc-06134dfb53cb98669"
165 | PublicSubnet1Id: "subnet-0f029329115b95a59"
166 | PublicSubnet2Id: "subnet-0583fc3ec5bc47ae4"
167 | PrivateSubnet1Id: "subnet-0e61b51bfe9fdc6ce"
168 | PrivateSubnet2Id: "subnet-0e960a9a68ae9d824"
169 |
170 | template: templates/jfrog-artifactory-pro-ec2-existing-vpc-main.template.yaml
171 | regions:
172 | - us-east-2
173 |
174 | prox-existing-vpc-w1:
175 | parameters:
176 | # us-west-1
177 | AvailabilityZones: "us-west-1b, us-west-1c"
178 | DatabasePreferredAz: us-west-1b
179 | VpcId: "vpc-02461d0d92635b8a7"
180 | PublicSubnet1Id: "subnet-0dda0ef33f3fbaaef"
181 | PublicSubnet2Id: "subnet-0dec438020b90312a"
182 | PrivateSubnet1Id: "subnet-0d323306426f255d9"
183 | PrivateSubnet2Id: "subnet-021a85a01196887ac"
184 |
185 | template: templates/jfrog-artifactory-pro-ec2-existing-vpc-main.template.yaml
186 | regions:
187 | - us-west-1
188 |
189 | prox-existing-vpc-ge1:
190 | auth:
191 | us-gov-west-1: gov
192 | us-gov-east-1: gov
193 | parameters:
194 | # us-east-1
195 | AvailabilityZones: "us-gov-east-1a, us-gov-east-1b"
196 | DatabasePreferredAz: us-gov-east-1a
197 |
198 | VpcId: "vpc-0767e97df3b88d54e"
199 | PublicSubnet1Id: "subnet-0e42f633560429957"
200 | PublicSubnet2Id: "subnet-0914e3cd6e9e1c3d2"
201 | PrivateSubnet1Id: "subnet-016c0766394fd52ee"
202 | PrivateSubnet2Id: "subnet-0bdd4f787ec39cad3"
203 |
204 | template: templates/jfrog-artifactory-pro-ec2-existing-vpc-main.template.yaml
205 | regions:
206 | - us-gov-east-1
207 |
208 | create-vpc:
209 | auth:
210 | us-east-1: default
211 | us-gov-west-1: gov
212 | us-gov-east-1: gov
213 | parameters:
214 | dummyParam: "needed in case no parameters are needed in this section"
215 | template: submodules/quickstart-aws-vpc/templates/aws-vpc.template.yaml
216 | regions:
217 | # - us-east-1
218 | - us-east-2
219 | # - us-west-1
220 | # - us-west-2
221 | # - us-gov-east-1
222 |
--------------------------------------------------------------------------------
/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/.nojekyll
--------------------------------------------------------------------------------
/.taskcat.yml:
--------------------------------------------------------------------------------
1 | project:
2 | name: quickstart-jfrog-artifactory
3 | owner: quickstart-eng@amazon.com
4 | s3_object_acl: private
5 | s3_regional_buckets: true
6 | shorten_stack_name: true
7 | regions:
8 | - us-east-1
9 | - us-west-2
10 | - us-east-2
11 | - us-west-1
12 | - eu-central-1
13 | - eu-west-1
14 | - eu-west-2
15 | - eu-west-3
16 | - ap-southeast-2
17 | - eu-north-1
18 | - ca-central-1
19 | - sa-east-1
20 | - eu-south-1
21 | - af-south-1
22 | parameters:
23 | ArtifactoryVersion: 7.37.14
24 | XrayVersion: 3.46.0
25 | AccessCidr: "10.0.0.0/0"
26 | QsS3BucketName: "$[taskcat_autobucket]"
27 | QsS3KeyPrefix: "quickstart-jfrog-artifactory/"
28 | QsS3BucketRegion: "$[taskcat_current_region]"
29 | tests:
30 | jfrog-artifactory-ec2-xray:
31 | parameters:
32 | KeyPairName: "$[taskcat_getkeypair]"
33 | RemoteAccessCidr: "10.0.0.0/0"
34 | AvailabilityZones: "$[taskcat_genaz_2]"
35 | DatabasePassword: "$[taskcat_genpass_8A]"
36 | ProvisionBastionHost: "Disabled"
37 | NumberOfSecondary: "2"
38 | ArtifactoryServerName: "localhost"
39 | InstallXray: "true"
40 | XrayDatabasePassword: "$[taskcat_genpass_8A]"
41 | XrayNumberOfSecondary: 1
42 | MasterKey: "override"
43 | SmLicenseName: "override"
44 | SmCertName: "override"
45 | template: templates/jfrog-artifactory-ec2-main.template.yaml
46 | regions:
47 | # - us-east-1
48 | - us-west-2
49 | # - us-east-2
50 | # - us-west-1
51 | # - eu-central-1
52 | # - eu-west-1
53 | # - eu-west-2
54 | # - eu-west-3
55 | # - ap-southeast-2
56 | # - eu-north-1
57 | # - ca-central-1
58 | # - sa-east-1
59 | # - eu-south-1
60 | # - af-south-1
61 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @davmayd @aws-quickstart/aws_quickstart_team
2 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: help run submodules
2 | USEVENV ?= true
3 | VENV=$${PWD}/venv
4 | VENVBIN=${VENV}/bin
5 | SHELL ?= /bin/bash
6 |
7 | test: lint submodules
8 | ifeq ($(USEVENV), true)
9 | $(MAKE) venv
10 | ${VENVBIN}/taskcat test run -n -l
11 | else
12 | taskcat test run -n -l
13 | endif
14 |
15 |
16 | venv/bin/python3:
17 | python3 -m venv ${VENV}
18 |
19 | venv/bin/taskcat: venv/bin/python3
20 | ${VENVBIN}/pip3 install taskcat
21 |
22 | venv/bin/aws: venv/bin/python3
23 | ${VENVBIN}/pip3 install awscli
24 |
25 | venv: venv/bin/taskcat venv/bin/aws
26 |
27 | submodules:
28 | git submodule init
29 | git submodule update --init --recursive
30 |
31 | help:
32 | @echo "make test : executes ${VENVBIN}/taskcat"
33 | @echo "if running in a container without venv please set USEVENV to false"
34 |
35 |
36 | create: venv
37 | ${VENVBIN}/aws cloudformation create-stack --stack-name test --template-body file://$(pwd)/templates/jfrog-artifactory-ec2-new-vpc.template --parameters $(cat .ignore/params) --capabilities CAPABILITY_IAM
38 |
39 | delete: venv
40 | ${VENVBIN}/aws cloudformation delete-stack --stack-name test
41 |
42 | .ONESHELL:
43 |
44 | lint:
45 | ifeq ($(USEVENV), true)
46 | $(MAKE) venv
47 | time ${VENVBIN}/taskcat lint
48 | else
49 | time taskcat lint
50 | endif
51 |
52 | public_repo: venv
53 | ${VENVBIN}/taskcat -c theflash/ci/config.yml -u
54 | #https://${VENVBIN}/taskcat-tag-quickstart-jfrog-artifactory-c2fa9d34.s3-us-west-2.amazonaws.com/quickstart-jfrog-artifactory/templates/jfrog-artifactory-ec2-main.template
55 | #curl https://${VENVBIN}/taskcat-tag-quickstart-jfrog-artifactory-7008506c.s3-us-west-2.amazonaws.com/quickstart-jfrog-artifactory/templates/jfrog-artifactory-ec2-main.template
56 |
57 | get_public_dns: venv
58 | ${VENVBIN}/aws elb describe-load-balancers | jq '.LoadBalancerDescriptions[]| .CanonicalHostedZoneName'
59 |
60 | get_bastion_ip: venv
61 | ${VENVBIN}/aws ec2 describe-instances | jq '.[] | select(.[].Instances[].Tags[].Value == "LinuxBastion") '
62 |
63 | clean:
64 |
65 | realclean:
66 | rm -fr ${VENV} submodules
67 |
--------------------------------------------------------------------------------
/NOTICE.txt:
--------------------------------------------------------------------------------
1 | Copyright 2016-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
4 |
5 | http://aws.amazon.com/apache2.0/
6 |
7 | or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # quickstart-jfrog-artifactory
2 | ## Deprecation Notice
3 |
4 | :x: This repository is subject to deprecation in Q4 2024. For more details, [please review this announcement](https://github.com/aws-ia/.announcements/issues/1).
5 |
6 | ## This repository has been deprecated in favor of https://github.com/aws-ia/cfn-ps-jfrog-artifactory.
7 | ***We will archive this repository and keep it publicly available until May 1, 2024.***
8 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/artifactory-ami.yml:
--------------------------------------------------------------------------------
1 | - hosts: localhost
2 | gather_facts: true
3 | become: true
4 | roles:
5 | - name: artifactory-ami
6 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 |
5 | # Use the new container infrastructure
6 | sudo: false
7 |
8 | # Install ansible
9 | addons:
10 | apt:
11 | packages:
12 | - python-pip
13 |
14 | install:
15 | # Install ansible
16 | - pip install ansible
17 |
18 | # Check ansible version
19 | - ansible --version
20 |
21 | # Create ansible.cfg with correct roles_path
22 | - printf '[defaults]\nroles_path=../' >ansible.cfg
23 |
24 | script:
25 | # Basic role syntax check
26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
27 |
28 | notifications:
29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for artifactory
3 | # indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
4 | ansible_marketplace: standalone
5 |
6 | # whether we are creating an AMI for Marketplace or just configuring an EC2 instance
7 | ami_creation: false
8 |
9 | # The version of Artifactory to install
10 | artifactory_version: 7.15.3
11 |
12 | # licenses file - specify a licenses file or specify up to 5 licenses
13 | artifactory_license1:
14 | artifactory_license2:
15 | artifactory_license3:
16 | artifactory_license4:
17 | artifactory_license5:
18 | artifactory_license6:
19 |
20 | # whether to enable HA
21 | artifactory_ha_enabled: true
22 |
23 | # value for whether a host is primary. this should be set in host vars
24 | artifactory_is_primary: true
25 |
26 | # The location where Artifactory should install.
27 | artifactory_download_directory: /opt/jfrog
28 |
29 | # The location where Artifactory should store data.
30 | artifactory_file_store_dir: /data
31 |
32 | extra_java_opts: -server -Xms2g -Xmx14g -Xss256k -XX:+UseG1GC
33 |
34 | artifactory_tar: https://releases.jfrog.io/artifactory/artifactory-pro/org/artifactory/pro/jfrog-artifactory-pro/{{ artifactory_version }}/jfrog-artifactory-pro-{{ artifactory_version }}-linux.tar.gz
35 |
36 | artifactory_home: "{{ artifactory_download_directory }}/artifactory-pro-{{ artifactory_version }}"
37 | db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
38 |
39 | artifactory_user: artifactory
40 | artifactory_group: artifactory
41 |
42 | # Set the parameters required for the service.
43 | service_list:
44 | - name: artifactory
45 | description: Start script for Artifactory
46 | start_command: "{{ artifactory_home }}/bin/artifactory.sh start"
47 | stop_command: "{{ artifactory_home }}/bin/artifactory.sh stop"
48 | type: forking
49 | status_pattern: artifactory
50 | user_name: "{{ artifactory_user }}"
51 | group_name: "{{ artifactory_group }}"
52 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/defaults/main.yml.bak:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for artifactory
3 | # indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
4 | ansible_marketplace: standalone
5 |
6 | # whether we are creating an AMI for Marketplace or just configuring an EC2 instance
7 | ami_creation: false
8 |
9 | # The version of Artifactory to install
10 | artifactory_version: 7.15.3
11 |
12 | # licenses file - specify a licenses file or specify up to 5 licenses
13 | artifactory_license1:
14 | artifactory_license2:
15 | artifactory_license3:
16 | artifactory_license4:
17 | artifactory_license5:
18 | artifactory_license6:
19 |
20 | # whether to enable HA
21 | artifactory_ha_enabled: true
22 |
23 | # value for whether a host is primary. this should be set in host vars
24 | artifactory_is_primary: true
25 |
26 | # The location where Artifactory should install.
27 | artifactory_download_directory: /opt/jfrog
28 |
29 | # The location where Artifactory should store data.
30 | artifactory_file_store_dir: /data
31 |
32 | extra_java_opts: -server -Xms2g -Xmx14g -Xss256k -XX:+UseG1GC
33 |
34 |
35 |
36 | # Pick the Artifactory flavour to install, can be also cpp-ce, jcr, pro.
37 | # for Artifactory, use following values
38 | artifactory_flavour: pro
39 | artifactory_tar: https://releases.jfrog.io/artifactory/artifactory-pro/org/artifactory/{{ artifactory_flavour }}/jfrog-artifactory-{{ artifactory_flavour }}/{{ artifactory_version }}/jfrog-artifactory-{{ artifactory_flavour }}-{{ artifactory_version }}-linux.tar.gz
40 |
41 | # for JCR, use following values
42 | # artifactory_flavour: jcr
43 | # artifactory_tar: https://dl.bintray.com/jfrog/artifactory/org/artifactory/{{ artifactory_flavour }}/jfrog-artifactory-{{ artifactory_flavour }}/{{ artifactory_version }}/jfrog-artifactory-{{ artifactory_flavour }}-{{ artifactory_version }}-linux.tar.gz
44 |
45 | artifactory_home: "{{ artifactory_download_directory }}/artifactory-{{ artifactory_flavour }}-{{ artifactory_version }}"
46 | db_download_url: "https://jdbc.postgresql.org/download/postgresql-42.2.12.jar"
47 |
48 | artifactory_user: artifactory
49 | artifactory_group: artifactory
50 |
51 | # Set the parameters required for the service.
52 | service_list:
53 | - name: artifactory
54 | description: Start script for Artifactory
55 | start_command: "{{ artifactory_home }}/bin/artifactory.sh start"
56 | stop_command: "{{ artifactory_home }}/bin/artifactory.sh stop"
57 | type: forking
58 | status_pattern: artifactory
59 | user_name: "{{ artifactory_user }}"
60 | group_name: "{{ artifactory_group }}"
61 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for artifactory
3 | - name: systemctl daemon-reload
4 | systemd:
5 | daemon_reload: yes
6 |
7 | - name: restart artifactory
8 | service:
9 | name: artifactory
10 | state: restarted
11 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/meta/exception.yml:
--------------------------------------------------------------------------------
1 | ---
2 | exceptions:
3 | - variation: Alpine
4 | reason: Artifactory start/stop scripts don't properly work.
5 | - variation: amazonlinux:1
6 | reason: "Shutting down artifactory: /usr/bin/java\nfinding\nUsing the default catalina management port (8015) to test shutdown\nArtifactory Tomcat already stopped"
7 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Robert de Bock
4 | role_name: artifactory
5 | description: Install and configure artifactory on your system.
6 | license: Apache-2.0
7 | company: none
8 | min_ansible_version: 2.8
9 |
10 | platforms:
11 | - name: Debian
12 | versions:
13 | - all
14 | - name: EL
15 | versions:
16 | - 7
17 | - 8
18 | - name: Fedora
19 | versions:
20 | - all
21 | - name: OpenSUSE
22 | versions:
23 | - all
24 | - name: Ubuntu
25 | versions:
26 | - bionic
27 |
28 | galaxy_tags:
29 | - artifactory
30 | - centos
31 | - redhat
32 | - server
33 | - system
34 |
35 | dependencies: []
36 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/meta/preferences.yml:
--------------------------------------------------------------------------------
1 | ---
2 | tox_parallel: yes
3 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/meta/version.yml:
--------------------------------------------------------------------------------
1 | ---
2 | project_name: JFrog
3 | reference: "https://github.com/robertdebock/ansible-role-artifactory/blob/master/defaults/main.yml"
4 | versions:
5 | - name: Artifactory
6 | url: "https://releases.jfrog.io/artifactory/"
7 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for artifactory
3 | - name: install nginx
4 | include_role:
5 | name: artifactory-nginx-ami
6 |
7 | - name: create group for artifactory
8 | group:
9 | name: "{{ artifactory_group }}"
10 | state: present
11 | become: yes
12 |
13 | - name: create user for artifactory
14 | user:
15 | name: "{{ artifactory_user }}"
16 | group: "{{ artifactory_group }}"
17 | system: yes
18 | become: yes
19 |
20 | - name: ensure artifactory_download_directory exists
21 | file:
22 | path: "{{ artifactory_download_directory }}"
23 | state: directory
24 | become: yes
25 |
26 | - name: download artifactory
27 | unarchive:
28 | src: "{{ artifactory_tar }}"
29 | dest: "{{ artifactory_download_directory }}"
30 | remote_src: yes
31 | owner: "{{ artifactory_user }}"
32 | group: "{{ artifactory_group }}"
33 | creates: "{{ artifactory_home }}"
34 | become: yes
35 | register: downloadartifactory
36 | until: downloadartifactory is succeeded
37 | retries: 3
38 |
39 | - name: ensure artifactory_file_store_dir exists
40 | file:
41 | path: "{{ artifactory_file_store_dir }}"
42 | state: directory
43 | owner: "{{ artifactory_user }}"
44 | group: "{{ artifactory_group }}"
45 | become: yes
46 |
47 | - name: ensure data subdirectories exist
48 | file:
49 | path: "{{ artifactory_home }}/var/{{ item }}"
50 | state: directory
51 | owner: "{{ artifactory_user }}"
52 | group: "{{ artifactory_group }}"
53 | loop:
54 | - "bootstrap/artifactory/tomcat/lib"
55 | - "etc"
56 | become: yes
57 |
58 | - name: download database driver
59 | get_url:
60 | url: "{{ db_download_url }}"
61 | dest: "{{ artifactory_home }}/var/bootstrap/artifactory/tomcat/lib"
62 | owner: "{{ artifactory_user }}"
63 | group: "{{ artifactory_group }}"
64 | become: yes
65 |
66 | - name: clean up after creating ami
67 | block:
68 | - name: Remove SSH keys
69 | file:
70 | path: "{{ ssh_keys.dir }}"
71 | state: absent
72 | loop:
73 | - dir: "/home/.jfrog_ami/.ssh/authorized_keys"
74 | - dir: "/root/.ssh/authorized_keys"
75 | - dir: "/home/centos/.ssh/authorized_keys"
76 | loop_control:
77 | loop_var: ssh_keys
78 |
79 | - name: shutdown VM
80 | command: /sbin/shutdown -h now
81 | ignore_errors: 'yes'
82 | when: ami_creation
83 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/templates/artifactory.cluster.license.j2:
--------------------------------------------------------------------------------
1 | {% if artifactory_license1 %}
2 | {% if artifactory_license1|length %}
3 | {{ artifactory_license1 }}
4 | {% endif %}
5 | {% endif %}
6 | {% if artifactory_license2 %}
7 |
8 |
9 | {% if artifactory_license2|length %}
10 | {{ artifactory_license2 }}
11 | {% endif %}
12 | {% endif %}
13 | {% if artifactory_license3 %}
14 |
15 |
16 | {% if artifactory_license3|length %}
17 | {{ artifactory_license3 }}
18 | {% endif %}
19 | {% endif %}
20 | {% if artifactory_license4 %}
21 |
22 | {% if artifactory_license4|length %}
23 | {{ artifactory_license4 }}
24 | {% endif %}
25 | {% endif %}
26 | {% if artifactory_license5 %}
27 |
28 | {% if artifactory_license5|length %}
29 | {{ artifactory_license5 }}
30 | {% endif %}
31 | {% endif %}
32 | {% if artifactory_license6 %}
33 |
34 | {% if artifactory_license6|length %}
35 | {{ artifactory_license6 }}
36 | {% endif %}
37 | {% endif %}
38 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/templates/binarystore.xml.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/templates/installer-info.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "productId": "Ansible_artifactory/1.0.0",
3 | "features": [
4 | {
5 | "featureId": "Partner/ACC-006973"
6 | },
7 | {
8 | "featureId": "Channel/{{ ansible_marketplace }}"
9 | }
10 | ]
11 | }
12 |
13 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/templates/join.key.j2:
--------------------------------------------------------------------------------
1 | {{ join_key }}
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/templates/master.key.j2:
--------------------------------------------------------------------------------
1 | {{ master_key }}
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/templates/system.yaml.j2:
--------------------------------------------------------------------------------
1 | ## @formatter:off
2 | ## JFROG ARTIFACTORY SYSTEM CONFIGURATION FILE
3 | ## HOW TO USE: comment-out any field and keep the correct yaml indentation by deleting only the leading '#' character.
4 | configVersion: 1
5 |
6 | ## NOTE: JFROG_HOME is a place holder for the JFrog root directory containing the deployed product, the home directory for all JFrog products.
7 | ## Replace JFROG_HOME with the real path! For example, in RPM install, JFROG_HOME=/opt/jfrog
8 |
9 | ## NOTE: Sensitive information such as passwords and join key are encrypted on first read.
10 | ## NOTE: The provided commented key and value is the default.
11 |
12 | ## SHARED CONFIGURATIONS
13 | ## A shared section for keys across all services in this config
14 | shared:
15 |
16 | ## Node Settings
17 | node:
18 | ## A unique id to identify this node.
19 | ## Default: auto generated at startup.
20 | id: {{ ansible_machine_id }}
21 |
22 | ## Sets this node as primary in HA installation
23 | # primary: {{ artifactory_is_primary }} ## NOTE(review): left commented out even though haEnabled is templated below — confirm primary election is intentional
24 | Affinity: "any" ## NOTE(review): non-standard key — Artifactory system.yaml documents lowercase 'taskAffinity'; verify this capitalized key is recognized
25 |
26 | ## Sets this node as part of HA installation
27 | haEnabled: {{ artifactory_ha_enabled }}
28 |
29 | ## Database Configuration
30 | database:
31 | ## One of: mysql, oracle, mssql, postgresql, mariadb
32 | ## Default: Embedded derby
33 |
34 | ## Example for mysql/postgresql
35 | type: "{{ db_type }}"
36 | driver: "{{ db_driver }}"
37 | url: "{{ db_url }}"
38 | username: "{{ db_user }}"
39 | password: "{{ db_password }}"
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-ami/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ami/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 |
5 | # Use the new container infrastructure
6 | sudo: false
7 |
8 | # Install ansible
9 | addons:
10 | apt:
11 | packages:
12 | - python-pip
13 |
14 | install:
15 | # Install ansible
16 | - pip install ansible
17 |
18 | # Check ansible version
19 | - ansible --version
20 |
21 | # Create ansible.cfg with correct roles_path
22 | - printf '[defaults]\nroles_path=../' >ansible.cfg
23 |
24 | script:
25 | # Basic role syntax check
26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
27 |
28 | notifications:
29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ami/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for artifactory-nginx
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ami/files/nginx.conf:
--------------------------------------------------------------------------------
1 | #user nobody;
2 | worker_processes 1;
3 | error_log /var/log/nginx/error.log info;
4 | #pid logs/nginx.pid;
5 | events {
6 | worker_connections 1024;
7 | }
8 | http {
9 | include mime.types;
10 | variables_hash_max_size 1024;
11 | variables_hash_bucket_size 64;
12 | server_names_hash_max_size 4096;
13 | server_names_hash_bucket_size 128;
14 | types_hash_max_size 2048;
15 | types_hash_bucket_size 64;
16 | proxy_read_timeout 2400s;
17 | client_header_timeout 2400s;
18 | client_body_timeout 2400s;
19 | proxy_connect_timeout 75s;
20 | proxy_send_timeout 2400s;
21 | proxy_buffer_size 32k;
22 | proxy_buffers 40 32k;
23 | proxy_busy_buffers_size 64k;
24 | proxy_temp_file_write_size 250m;
25 | proxy_http_version 1.1;
26 | client_body_buffer_size 128k;
27 | include /etc/nginx/conf.d/*.conf;
28 | default_type application/octet-stream;
29 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
30 | '$status $body_bytes_sent "$http_referer" '
31 | '"$http_user_agent" "$http_x_forwarded_for"';
32 | access_log /var/log/nginx/access.log main;
33 | sendfile on;
34 | #tcp_nopush on;
35 | #keepalive_timeout 0;
36 | keepalive_timeout 65;
37 | }
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ami/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for artifactory-nginx
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ami/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your role description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Choose a valid license ID from https://spdx.org - some suggested licenses:
11 | # - BSD-3-Clause (default)
12 | # - MIT
13 | # - GPL-2.0-or-later
14 | # - GPL-3.0-only
15 | # - Apache-2.0
16 | # - CC-BY-4.0
17 | license: license (GPL-2.0-or-later, MIT, etc)
18 |
19 | min_ansible_version: 2.9
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | #
25 | # Provide a list of supported platforms, and for each platform a list of versions.
26 | # If you don't wish to enumerate all versions for a particular platform, use 'all'.
27 | # To view available platforms and versions (or releases), visit:
28 | # https://galaxy.ansible.com/api/v1/platforms/
29 | #
30 | # platforms:
31 | # - name: Fedora
32 | # versions:
33 | # - all
34 | # - 25
35 | # - name: SomePlatform
36 | # versions:
37 | # - all
38 | # - 1.0
39 | # - 7
40 | # - 99.99
41 |
42 | galaxy_tags: []
43 | # List tags for your role here, one per line. A tag is a keyword that describes
44 | # and categorizes the role. Users find roles by searching for tags. Be sure to
45 | # remove the '[]' above, if you add tags to this list.
46 | #
47 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
48 | # Maximum 20 tags per role.
49 |
50 | dependencies: []
51 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
52 | # if you add dependencies to this list.
53 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ami/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Install and configure nginx as a reverse proxy on the AMI build host.
3 | - name: ensure python2 is installed
4 |   yum:
5 |     name: python2
6 |     state: present
7 |     update_cache: true
8 |   become: yes
9 |
10 | - name: Add epel-release repo
11 |   yum:
12 |     name: epel-release
13 |     state: present
14 |   vars:
15 |     ansible_python_interpreter: /bin/python2
16 |   become: yes  # was missing: package installation requires root, like the other yum tasks
17 |
18 | - name: Install nginx
19 |   yum:
20 |     name: nginx
21 |     state: present
22 |   vars:
23 |     ansible_python_interpreter: /bin/python2
24 |   become: yes  # was missing: package installation requires root
25 |
26 | - name: configure main nginx conf file.
27 |   copy:
28 |     src: nginx.conf
29 |     dest: /etc/nginx/nginx.conf
30 |     owner: root
31 |     group: root
32 |     mode: '0644'  # configuration files need no execute bit
33 |   become: yes
34 |
35 | - name: restart nginx
36 |   service:
37 |     name: nginx
38 |     state: restarted
39 |     enabled: yes
40 |   become: yes
41 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ami/templates/artifactory.conf.j2:
--------------------------------------------------------------------------------
1 | ###########################################################
2 | ## this configuration was generated by JFrog Artifactory ##
3 | ###########################################################
4 |
5 | ## add HA entries when ha is configured
6 | upstream artifactory {
7 | server 127.0.0.1:8082;
8 | }
9 | upstream artifactory-direct {
10 | server 127.0.0.1:8081;
11 | }
12 | ## server configuration
13 | server {
14 | listen 80 ;
15 | server_name _;
16 | if ($http_x_forwarded_proto = '') {
17 | set $http_x_forwarded_proto $scheme;
18 | }
19 | ## Application specific logs
20 | access_log /var/log/nginx/artifactory-access.log;
21 | error_log /var/log/nginx/artifactory-error.log;
22 | rewrite ^/$ /ui/ redirect;
23 | rewrite ^/ui$ /ui/ redirect;
24 | chunked_transfer_encoding on;
25 | client_max_body_size 0;
26 | location / {
27 | proxy_read_timeout 2400s;
28 | proxy_pass_header Server;
29 | proxy_cookie_path ~*^/.* /;
30 | proxy_pass "http://artifactory";
31 | proxy_next_upstream error timeout non_idempotent;
32 | proxy_next_upstream_tries 1;
33 | proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
34 | proxy_set_header X-Forwarded-Port $server_port;
35 | proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
36 | proxy_set_header Host $http_host;
37 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
38 |
39 | location ~ ^/artifactory/ {
40 | proxy_pass http://artifactory-direct;
41 | }
42 | }
43 | }
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ami/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ami/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Syntax-check playbook for the artifactory-nginx-ami role (was pointing at 'artifactory-nginx').
3 | - hosts: localhost
4 |   remote_user: root
5 |   roles:
6 |     - artifactory-nginx-ami
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ami/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for artifactory-nginx
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ssl/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 |
5 | # Use the new container infrastructure
6 | sudo: false
7 |
8 | # Install ansible
9 | addons:
10 | apt:
11 | packages:
12 | - python-pip
13 |
14 | install:
15 | # Install ansible
16 | - pip install ansible
17 |
18 | # Check ansible version
19 | - ansible --version
20 |
21 | # Create ansible.cfg with correct roles_path
22 | - printf '[defaults]\nroles_path=../' >ansible.cfg
23 |
24 | script:
25 | # Basic role syntax check
26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
27 |
28 | notifications:
29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ssl/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for artifactory-nginx
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ssl/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for artifactory-nginx
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ssl/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your role description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Choose a valid license ID from https://spdx.org - some suggested licenses:
11 | # - BSD-3-Clause (default)
12 | # - MIT
13 | # - GPL-2.0-or-later
14 | # - GPL-3.0-only
15 | # - Apache-2.0
16 | # - CC-BY-4.0
17 | license: license (GPL-2.0-or-later, MIT, etc)
18 |
19 | min_ansible_version: 2.9
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | #
25 | # Provide a list of supported platforms, and for each platform a list of versions.
26 | # If you don't wish to enumerate all versions for a particular platform, use 'all'.
27 | # To view available platforms and versions (or releases), visit:
28 | # https://galaxy.ansible.com/api/v1/platforms/
29 | #
30 | # platforms:
31 | # - name: Fedora
32 | # versions:
33 | # - all
34 | # - 25
35 | # - name: SomePlatform
36 | # versions:
37 | # - all
38 | # - 1.0
39 | # - 7
40 | # - 99.99
41 |
42 | galaxy_tags: []
43 | # List tags for your role here, one per line. A tag is a keyword that describes
44 | # and categorizes the role. Users find roles by searching for tags. Be sure to
45 | # remove the '[]' above, if you add tags to this list.
46 | #
47 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
48 | # Maximum 20 tags per role.
49 |
50 | dependencies: []
51 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
52 | # if you add dependencies to this list.
53 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ssl/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for artifactory-nginx-ssl: deploy the proxy config and TLS material
3 | - name: configure the artifactory nginx conf
4 |   template:
5 |     src: artifactory.conf.j2
6 |     dest: /etc/nginx/conf.d/artifactory.conf
7 |     owner: root
8 |     group: root
9 |     mode: '0644'  # configuration files need no execute bit
10 |   become: yes
11 |
12 | - name: ensure nginx dir exists
13 |   file:
14 |     path: "/var/opt/jfrog/nginx/ssl"
15 |     state: directory
16 |   become: yes
17 |
18 | - name: configure certificate
19 |   template:
20 |     src: certificate.pem.j2
21 |     dest: "/var/opt/jfrog/nginx/ssl/cert.pem"
22 |     mode: '0644'  # public certificate; world-readable is acceptable
23 |   become: yes
24 |
25 | - name: ensure pki exists
26 |   file:
27 |     path: "/etc/pki/tls"
28 |     state: directory
29 |   become: yes
30 |
31 | - name: configure key
32 |   template:
33 |     src: certificate.key.j2
34 |     dest: "/etc/pki/tls/cert.key"
35 |     mode: '0600'  # private key: was written with default umask, potentially world-readable
36 |   become: yes
37 |
38 | # Task name fixed: it was a copy-paste ("Allow apache to modify files in /srv/git_repos")
39 | # but actually labels the nginx certificate with an SELinux type nginx can read.
40 | - name: set SELinux context for the nginx SSL certificate
41 |   sefcontext:
42 |     target: '/var/opt/jfrog/nginx/ssl/cert.pem'
43 |     setype: httpd_sys_content_t
44 |     state: present
45 |   vars:
46 |     ansible_python_interpreter: /bin/python2
47 |   become: yes
48 |
49 | - name: Apply new SELinux file context to filesystem
50 |   command: restorecon -v /var/opt/jfrog/nginx/ssl/cert.pem
51 |   become: yes
52 |
53 | - name: restart nginx
54 |   service:
55 |     name: nginx
56 |     state: restarted
57 |     enabled: yes
58 |   become: yes
59 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ssl/templates/artifactory.conf.j2:
--------------------------------------------------------------------------------
1 | ###########################################################
2 | ## this configuration was generated by JFrog Artifactory ##
3 | ###########################################################
4 |
5 | ## add HA entries when ha is configured
6 | upstream artifactory {
7 | server 127.0.0.1:8082;
8 | }
9 | upstream artifactory-direct {
10 | server 127.0.0.1:8081;
11 | }
12 | ssl_protocols TLSv1.1 TLSv1.2; ## NOTE(review): TLSv1.1 is deprecated (RFC 8996); consider restricting to TLSv1.2+ — confirm client requirements
13 | ssl_certificate /var/opt/jfrog/nginx/ssl/cert.pem;
14 | ssl_certificate_key /etc/pki/tls/cert.key;
15 | ssl_session_cache shared:SSL:1m;
16 | ssl_prefer_server_ciphers on;
17 | ## server configuration
18 | server {
19 | listen 80;
20 | listen 443 ssl http2;
21 | server_name _;
22 | if ($http_x_forwarded_proto = '') {
23 | set $http_x_forwarded_proto $scheme;
24 | }
25 | ## Application specific logs
26 | access_log /var/log/nginx/artifactory-access.log;
27 | error_log /var/log/nginx/artifactory-error.log;
28 | rewrite ^/$ /ui/ redirect;
29 | rewrite ^/ui$ /ui/ redirect;
30 | chunked_transfer_encoding on;
31 | client_max_body_size 0;
32 | location / {
33 | proxy_read_timeout 2400s;
34 | proxy_pass_header Server;
35 | proxy_cookie_path ~*^/.* /;
36 | proxy_pass "http://artifactory";
37 | proxy_next_upstream error timeout non_idempotent;
38 | proxy_next_upstream_tries 1;
39 | proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
40 | proxy_set_header X-Forwarded-Port $server_port;
41 | proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
42 | proxy_set_header Host $http_host;
43 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
44 |
45 | ## requests under /artifactory/ bypass the router and hit the service port directly
46 | location ~ ^/artifactory/ {
47 | proxy_pass http://artifactory-direct;
48 | }
49 | }
50 | }
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ssl/templates/certificate.key.j2:
--------------------------------------------------------------------------------
1 | {{ certificate_key | regex_replace('(-+(BEGIN|END) [A-Z ]*-+ ?|[A-Za-z0-9\+=/]* )', '\\1\n') }}
2 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ssl/templates/certificate.pem.j2:
--------------------------------------------------------------------------------
1 | {{ certificate | regex_replace('(-+(BEGIN|END) [A-Z ]*-+ ?|[A-Za-z0-9\+=/]* )', '\\1\n') }}
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ssl/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ssl/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Syntax-check playbook for the artifactory-nginx-ssl role (was pointing at 'artifactory-nginx').
3 | - hosts: localhost
4 |   remote_user: root
5 |   roles:
6 |     - artifactory-nginx-ssl
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx-ssl/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for artifactory-nginx
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 |
5 | # Use the new container infrastructure
6 | sudo: false
7 |
8 | # Install ansible
9 | addons:
10 | apt:
11 | packages:
12 | - python-pip
13 |
14 | install:
15 | # Install ansible
16 | - pip install ansible
17 |
18 | # Check ansible version
19 | - ansible --version
20 |
21 | # Create ansible.cfg with correct roles_path
22 | - printf '[defaults]\nroles_path=../' >ansible.cfg
23 |
24 | script:
25 | # Basic role syntax check
26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
27 |
28 | notifications:
29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for artifactory-nginx
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx/files/nginx.conf:
--------------------------------------------------------------------------------
1 | #user nobody;
2 | worker_processes 1;
3 | error_log /var/log/nginx/error.log info;
4 | #pid logs/nginx.pid;
5 | events {
6 | worker_connections 1024;
7 | }
8 | http {
9 | include mime.types;
10 | variables_hash_max_size 1024;
11 | variables_hash_bucket_size 64;
12 | server_names_hash_max_size 4096;
13 | server_names_hash_bucket_size 128;
14 | types_hash_max_size 2048;
15 | types_hash_bucket_size 64;
16 | proxy_read_timeout 2400s;
17 | client_header_timeout 2400s;
18 | client_body_timeout 2400s;
19 | proxy_connect_timeout 75s;
20 | proxy_send_timeout 2400s;
21 | proxy_buffer_size 32k;
22 | proxy_buffers 40 32k;
23 | proxy_busy_buffers_size 64k;
24 | proxy_temp_file_write_size 250m;
25 | proxy_http_version 1.1;
26 | client_body_buffer_size 128k;
27 | include /etc/nginx/conf.d/*.conf;
28 | default_type application/octet-stream;
29 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
30 | '$status $body_bytes_sent "$http_referer" '
31 | '"$http_user_agent" "$http_x_forwarded_for"';
32 | access_log /var/log/nginx/access.log main;
33 | sendfile on;
34 | #tcp_nopush on;
35 | #keepalive_timeout 0;
36 | keepalive_timeout 65;
37 | }
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for artifactory-nginx
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: your name
3 | description: your role description
4 | company: your company (optional)
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Choose a valid license ID from https://spdx.org - some suggested licenses:
11 | # - BSD-3-Clause (default)
12 | # - MIT
13 | # - GPL-2.0-or-later
14 | # - GPL-3.0-only
15 | # - Apache-2.0
16 | # - CC-BY-4.0
17 | license: license (GPL-2.0-or-later, MIT, etc)
18 |
19 | min_ansible_version: 2.9
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | #
25 | # Provide a list of supported platforms, and for each platform a list of versions.
26 | # If you don't wish to enumerate all versions for a particular platform, use 'all'.
27 | # To view available platforms and versions (or releases), visit:
28 | # https://galaxy.ansible.com/api/v1/platforms/
29 | #
30 | # platforms:
31 | # - name: Fedora
32 | # versions:
33 | # - all
34 | # - 25
35 | # - name: SomePlatform
36 | # versions:
37 | # - all
38 | # - 1.0
39 | # - 7
40 | # - 99.99
41 |
42 | galaxy_tags: []
43 | # List tags for your role here, one per line. A tag is a keyword that describes
44 | # and categorizes the role. Users find roles by searching for tags. Be sure to
45 | # remove the '[]' above, if you add tags to this list.
46 | #
47 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
48 | # Maximum 20 tags per role.
49 |
50 | dependencies: []
51 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
52 | # if you add dependencies to this list.
53 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for artifactory-nginx: install proxy configuration and restart the service.
3 | # The 'configure main nginx conf file.' task appeared twice verbatim; the duplicate was removed.
4 | - name: configure main nginx conf file.
5 |   copy:
6 |     src: nginx.conf
7 |     dest: /etc/nginx/nginx.conf
8 |     owner: root
9 |     group: root
10 |     mode: '0644'  # configuration files need no execute bit
11 |   become: yes
12 |
13 | - name: configure the artifactory nginx conf
14 |   template:
15 |     src: artifactory.conf.j2
16 |     dest: /etc/nginx/conf.d/artifactory.conf
17 |     owner: root
18 |     group: root
19 |     mode: '0644'
20 |   become: yes
21 |
22 | - name: restart nginx
23 |   service:
24 |     name: nginx
25 |     state: restarted
26 |     enabled: yes
27 |   become: yes
28 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx/templates/artifactory.conf.j2:
--------------------------------------------------------------------------------
1 | ###########################################################
2 | ## this configuration was generated by JFrog Artifactory ##
3 | ###########################################################
4 |
5 | ## add HA entries when ha is configured
6 | upstream artifactory {
7 | server 127.0.0.1:8082;
8 | }
9 | upstream artifactory-direct {
10 | server 127.0.0.1:8081;
11 | }
12 | ## server configuration
13 | server {
14 | listen 80 ;
15 | server_name _;
16 | if ($http_x_forwarded_proto = '') {
17 | set $http_x_forwarded_proto $scheme;
18 | }
19 | ## Application specific logs
20 | access_log /var/log/nginx/artifactory-access.log;
21 | error_log /var/log/nginx/artifactory-error.log;
22 | rewrite ^/$ /ui/ redirect;
23 | rewrite ^/ui$ /ui/ redirect;
24 | chunked_transfer_encoding on;
25 | client_max_body_size 0;
26 | location / {
27 | proxy_read_timeout 2400s;
28 | proxy_pass_header Server;
29 | proxy_cookie_path ~*^/.* /;
30 | proxy_pass "http://artifactory";
31 | proxy_next_upstream error timeout non_idempotent;
32 | proxy_next_upstream_tries 1;
33 | proxy_set_header X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
34 | proxy_set_header X-Forwarded-Port $server_port;
35 | proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
36 | proxy_set_header Host $http_host;
37 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
38 |
39 | location ~ ^/artifactory/ {
40 | proxy_pass http://artifactory-direct;
41 | }
42 | }
43 | }
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - artifactory-nginx
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory-nginx/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for artifactory-nginx
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 |
5 | # Use the new container infrastructure
6 | sudo: false
7 |
8 | # Install ansible
9 | addons:
10 | apt:
11 | packages:
12 | - python-pip
13 |
14 | install:
15 | # Install ansible
16 | - pip install ansible
17 |
18 | # Check ansible version
19 | - ansible --version
20 |
21 | # Create ansible.cfg with correct roles_path
22 | - printf '[defaults]\nroles_path=../' >ansible.cfg
23 |
24 | script:
25 | # Basic role syntax check
26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
27 |
28 | notifications:
29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for artifactory
3 | # indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
4 | ansible_marketplace: standalone
5 |
6 | # The version of Artifactory to install
7 | artifactory_version: 7.19.4
8 |
9 | # licenses - cluster license content in json
10 | artifactory_licenses:
11 |
12 | # whether to enable HA
13 | artifactory_ha_enabled: true
14 |
15 | # value for whether a host is primary. this should be set in host vars
16 | artifactory_is_primary: true
17 |
18 | # The location where Artifactory should install.
19 | artifactory_download_directory: /opt/jfrog
20 |
21 | # The location where Artifactory should store data.
22 | artifactory_file_store_dir: /data
23 |
24 | extra_java_opts: -server -Xms2g -Xmx14g -Xss256k -XX:+UseG1GC
25 |
26 | artifactory_tar: https://releases.jfrog.io/artifactory/artifactory-pro/org/artifactory/pro/jfrog-artifactory-pro/{{ artifactory_version }}/jfrog-artifactory-pro-{{ artifactory_version }}-linux.tar.gz
27 | artifactory_home: "{{ artifactory_download_directory }}/artifactory-pro-{{ artifactory_version }}"
28 |
29 | artifactory_user: artifactory
30 | artifactory_group: artifactory
31 |
32 | # Set the parameters required for the service.
33 | service_list:
34 | - name: artifactory
35 | description: Start script for Artifactory
36 | start_command: "{{ artifactory_home }}/bin/artifactory.sh start"
37 | stop_command: "{{ artifactory_home }}/bin/artifactory.sh stop"
38 | type: forking
39 | status_pattern: artifactory
40 | user_name: "{{ artifactory_user }}"
41 | group_name: "{{ artifactory_group }}"
42 |
43 | product_id: CloudFormation_QS_EC2/1.0.0
44 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for artifactory
3 | - name: systemctl daemon-reload
4 | systemd:
5 | daemon_reload: yes
6 |
7 | - name: restart artifactory
8 | service:
9 | name: artifactory
10 | state: restarted
11 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/meta/exception.yml:
--------------------------------------------------------------------------------
1 | ---
2 | exceptions:
3 | - variation: Alpine
4 | reason: Artifactory start/stop scripts don't properly work.
5 | - variation: amazonlinux:1
6 | reason: "Shutting down artifactory: /usr/bin/java\nfinding\nUsing the default catalina management port (8015) to test shutdown\nArtifactory Tomcat already stopped"
7 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Robert de Bock
4 | role_name: artifactory
5 | description: Install and configure artifactory on your system.
6 | license: Apache-2.0
7 | company: none
8 | min_ansible_version: 2.8
9 |
10 | platforms:
11 | - name: Debian
12 | versions:
13 | - all
14 | - name: EL
15 | versions:
16 | - 7
17 | - 8
18 | - name: Fedora
19 | versions:
20 | - all
21 | - name: OpenSUSE
22 | versions:
23 | - all
24 | - name: Ubuntu
25 | versions:
26 | - bionic
27 |
28 | galaxy_tags:
29 | - artifactory
30 | - centos
31 | - redhat
32 | - server
33 | - system
34 |
35 | dependencies: []
36 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/meta/preferences.yml:
--------------------------------------------------------------------------------
1 | ---
2 | tox_parallel: yes
3 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/meta/version.yml:
--------------------------------------------------------------------------------
1 | ---
2 | project_name: JFrog
3 | reference: "https://github.com/robertdebock/ansible-role-artifactory/blob/master/defaults/main.yml"
4 | versions:
5 | - name: Artifactory
6 | url: "https://releases.jfrog.io/artifactory/"
7 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/tasks/configure-licenses.yml:
--------------------------------------------------------------------------------
1 | # Lay down the Artifactory license file(s). HA (Enterprise) installs write a
2 | # multi-license cluster file on the primary node only; Pro installs write a
3 | # single artifactory.lic. force: no keeps any license that already exists.
4 | - name: set license for Enterprise
5 |   block:
6 |     - name: use license file
7 |       copy:
8 |         src: "{{ artifactory_license_file }}"
9 |         dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.cluster.license"
10 |         force: no # only copy if file doesn't exist
11 |       become: yes
12 |       when: artifactory_license_file is defined and artifactory_is_primary == true
13 |
14 |     - name: use license strings
15 |       vars:
16 |         # default({}) must be a dict (not the string '{}') so the template
17 |         # can call .keys() without failing when artifactory_licenses is undefined
18 |         artifactory_licenses_dict: "{{ artifactory_licenses | default({}) }}"
19 |
20 |       template:
21 |         src: artifactory.cluster.license.j2
22 |         dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.cluster.license"
23 |         force: no # only create if file doesn't exist
24 |       become: yes
25 |       when: artifactory_license_file is not defined and artifactory_is_primary == true
26 |   when: artifactory_ha_enabled
27 |
28 | - name: set license for Pro
29 |   block:
30 |     - name: use license file
31 |       copy:
32 |         src: "{{ artifactory_license_file }}"
33 |         dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.lic"
34 |         force: no # only create if file doesn't exist
35 |       become: yes
36 |       when: artifactory_license_file is defined
37 |
38 |     - name: use license strings
39 |       vars:
40 |         # default({}) — see note above: must be a dict, not a string
41 |         artifactory_licenses_dict: "{{ artifactory_licenses | default({}) }}"
42 |
43 |       template:
44 |         src: artifactory.pro.license.j2
45 |         dest: "{{ artifactory_home }}/var/etc/artifactory/artifactory.lic"
46 |         force: no # only create if file doesn't exist
47 |       become: yes
48 |       when: artifactory_license_file is not defined
49 |   when: not artifactory_ha_enabled
50 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for artifactory: install, configure, and start JFrog Artifactory
3 | - name: Set artifactory major version
4 |   set_fact:
5 |     # NOTE(review): fact name is misspelled ("verion"); kept as-is because
6 |     # other tasks/templates may reference it under this name — confirm before renaming.
7 |     artifactory_major_verion: "{{ artifactory_version.split('.')[0] }}"
8 |
9 | - name: create group for artifactory
10 |   group:
11 |     name: "{{ artifactory_group }}"
12 |     state: present
13 |   become: yes
14 |
15 | - name: create user for artifactory
16 |   user:
17 |     name: "{{ artifactory_user }}"
18 |     group: "{{ artifactory_group }}"
19 |     system: yes
20 |   become: yes
21 |
22 | - name: ensure artifactory_download_directory exists
23 |   file:
24 |     path: "{{ artifactory_download_directory }}"
25 |     state: directory
26 |   become: yes
27 |
28 | - name: ensure artifactory_file_store_dir exists
29 |   file:
30 |     path: "{{ artifactory_file_store_dir }}"
31 |     state: directory
32 |     owner: "{{ artifactory_user }}"
33 |     group: "{{ artifactory_group }}"
34 |   become: yes
35 |
36 | - name: ensure data subdirectories exist and have correct ownership
37 |   file:
38 |     path: "{{ artifactory_home }}/var/{{ item }}"
39 |     state: directory
40 |     owner: "{{ artifactory_user }}"
41 |     group: "{{ artifactory_group }}"
42 |   loop:
43 |     - "bootstrap"
44 |     - "etc"
45 |     - "data"
46 |     - "data/artifactory"
47 |     - "data/filebeat"
48 |     - "etc/info"
49 |     - "etc/security"
50 |     - "etc/artifactory"
51 |     - "etc/artifactory/info"
52 |   become: yes
53 |
54 | - name: check if system yaml file exists
55 |   stat:
56 |     path: "{{ artifactory_home }}/var/etc/system.yaml"
57 |   register: system_yaml
58 |
59 | # A user-supplied system.yaml wins; otherwise render the role template.
60 | # Neither task overwrites an existing system.yaml.
61 | - name: use specified system yaml
62 |   copy:
63 |     src: "{{ system_file }}"
64 |     dest: "{{ artifactory_home }}/var/etc/system.yaml"
65 |   become: yes
66 |   when: system_file is defined and not system_yaml.stat.exists
67 |
68 | - name: configure system yaml
69 |   template:
70 |     src: system.yaml.j2
71 |     dest: "{{ artifactory_home }}/var/etc/system.yaml"
72 |   become: yes
73 |   when: system_file is not defined and not system_yaml.stat.exists
74 |
75 | - name: configure master key
76 |   template:
77 |     src: master.key.j2
78 |     dest: "{{ artifactory_home }}/var/etc/security/master.key"
79 |     force: no # only create if file doesn't exist
80 |   become: yes
81 |
82 | - name: configure join key
83 |   template:
84 |     src: join.key.j2
85 |     dest: "{{ artifactory_home }}/var/etc/security/join.key"
86 |     force: no # only create if file doesn't exist
87 |   become: yes
88 |
89 | - name: configure installer info
90 |   template:
91 |     src: installer-info.json.j2
92 |     dest: "{{ artifactory_home }}/var/etc/artifactory/info/installer-info.json"
93 |   become: yes
94 |
95 | - name: use specified binary store file
96 |   copy:
97 |     src: "{{ binary_store_file }}"
98 |     dest: "{{ artifactory_home }}/var/etc/artifactory/binarystore.xml"
99 |     force: no # only copy if file doesn't exist
100 |   become: yes
101 |   when: binary_store_file is defined
102 |
103 | - name: set default binary store
104 |   template:
105 |     src: binarystore.xml.j2
106 |     dest: "{{ artifactory_home }}/var/etc/artifactory/binarystore.xml"
107 |     force: no # only create if file doesn't exist
108 |   become: yes
109 |   when: binary_store_file is not defined
110 |
111 | - name: configure licenses
112 |   include_tasks: configure-licenses.yml
113 |
114 | # NOTE(review): installService.sh is run on every play (not idempotent);
115 | # consider a creates= guard if repeated runs become a problem.
116 | - name: create artifactory service
117 |   shell: "{{ artifactory_home }}/app/bin/installService.sh"
118 |   become: yes
119 |
120 | # become added: this path is created root-owned by the tasks above
121 | - name: Delete plugin folder
122 |   file:
123 |     state: absent
124 |     path: "{{ artifactory_home }}/var/etc/artifactory/plugins"
125 |   become: yes
126 |
127 | - name: ensure efs plugin folder exists
128 |   file:
129 |     path: "/efsmount/plugins"
130 |     state: directory
131 |   become: yes
132 |
133 | # become added: creating the link under artifactory_home requires root,
134 | # matching the surrounding tasks
135 | - name: symlink plugin folder to EFS
136 |   file:
137 |     src: "/efsmount/plugins"
138 |     path: "{{ artifactory_home }}/var/etc/artifactory/plugins"
139 |     state: link
140 |     force: yes
141 |     owner: "{{ artifactory_user }}"
142 |     group: "{{ artifactory_group }}"
143 |   become: yes
144 |
145 | - name: ensure data subdirectories exist and have correct ownership
146 |   file:
147 |     path: "{{ artifactory_home }}/var/{{ item }}"
148 |     state: directory
149 |     owner: "{{ artifactory_user }}"
150 |     group: "{{ artifactory_group }}"
151 |   loop:
152 |     - "etc/artifactory/plugins"
153 |   become: yes
154 |
155 | # NOTE(review): task only restarts; it does not pass enabled: yes despite the name
156 | - name: start and enable the primary node
157 |   service:
158 |     name: artifactory
159 |     state: restarted
160 |   become: yes
161 |   # when: artifactory_is_primary == true
162 |
163 | # - name: random wait before restarting to prevent secondary nodes from hitting DB first
164 | #   pause:
165 | #     seconds: "{{ 120 | random + 10}}"
166 | #   when: artifactory_is_primary == false
167 |
168 | # - name: start and enable the secondary nodes
169 | #   service:
170 | #     name: artifactory
171 | #     state: restarted
172 | #   become: yes
173 | #   when: artifactory_is_primary == false
174 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/templates/artifactory.cluster.license.j2:
--------------------------------------------------------------------------------
1 | {# Cluster license file: emit every ArtifactoryLicense<digit> value, blank-line separated. #}
2 | {% if artifactory_licenses_dict %}
3 | {% for key in (artifactory_licenses_dict.keys() | select('match', '^ArtifactoryLicense\d$')) %}
4 | {{ artifactory_licenses_dict[key] }}
5 |
6 | {% endfor %}
7 | {% endif %}
7 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/templates/artifactory.pro.license.j2:
--------------------------------------------------------------------------------
1 | {# Pro license file: a single license only — emit the first matching ArtifactoryLicense<digit> value. #}
2 | {% if artifactory_licenses_dict %}
3 | {% for key in (artifactory_licenses_dict.keys() | select('match', '^ArtifactoryLicense\d$')) %}
4 | {% if loop.first %}
5 | {{ artifactory_licenses_dict[key] }}
6 | {% endif %}
7 |
8 | {% endfor %}
9 | {% endif %}
9 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/templates/binarystore.xml.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | s3.{{ s3_region }}.amazonaws.com
9 | {{ s3_bucket }}
10 | artifactory/filestore
11 | {{ s3_region }}
12 | true
13 |
14 |
15 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/templates/installer-info.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "productId": "{{ product_id }}",
3 | "features": [
4 | {
5 | "featureId": "Partner/ACC-006973"
6 | }
7 | ]
8 | }
9 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/templates/join.key.j2:
--------------------------------------------------------------------------------
1 | {{ join_key }}
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/templates/master.key.j2:
--------------------------------------------------------------------------------
1 | {{ master_key }}
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/templates/system.yaml.j2:
--------------------------------------------------------------------------------
1 | ## @formatter:off
2 | ## JFROG ARTIFACTORY SYSTEM CONFIGURATION FILE
3 | ## HOW TO USE: comment-out any field and keep the correct yaml indentation by deleting only the leading '#' character.
4 | configVersion: 1
5 |
6 | ## NOTE: JFROG_HOME is a place holder for the JFrog root directory containing the deployed product, the home directory for all JFrog products.
7 | ## Replace JFROG_HOME with the real path! For example, in RPM install, JFROG_HOME=/opt/jfrog
8 |
9 | ## NOTE: Sensitive information such as passwords and join key are encrypted on first read.
10 | ## NOTE: The provided commented key and value is the default.
11 |
12 | ## SHARED CONFIGURATIONS
13 | ## A shared section for keys across all services in this config
14 | shared:
15 |     ## Java options
16 |     extraJavaOpts: "{{ extra_java_opts }}"
17 |
18 |     ## Node Settings
19 |     node:
20 |         ## A unique id to identify this node.
21 |         ## Default: auto generated at startup.
22 |         id: {{ ansible_machine_id }}
23 |
24 |         ## Sets this node as primary in HA installation
25 |         # primary: {{ artifactory_is_primary }}
26 |         ## NOTE(review): 'Affinity' is not a documented Artifactory system.yaml
27 |         ## key — this likely should be 'taskAffinity'; confirm against JFrog docs.
28 |         Affinity: "any"
29 |
30 |         ## Sets this node as part of HA installation
31 |         haEnabled: {{ artifactory_ha_enabled }}
32 |
33 |     ## Database Configuration
34 |     database:
35 |         ## One of: mysql, oracle, mssql, postgresql, mariadb
36 |         ## Default: Embedded derby
37 |
38 |         ## Example for mysql/postgresql
39 |         type: "{{ db_type }}"
40 |         driver: "{{ db_driver }}"
41 |         url: "{{ db_url }}"
42 |         username: "{{ db_user }}"
43 |         password: "{{ db_password }}"
44 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/artifactory/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 |
5 | # Use the new container infrastructure
6 | sudo: false
7 |
8 | # Install ansible
9 | addons:
10 | apt:
11 | packages:
12 | - python-pip
13 |
14 | install:
15 | # Install ansible
16 | - pip install ansible
17 |
18 | # Check ansible version
19 | - ansible --version
20 |
21 | # Create ansible.cfg with correct roles_path
22 | - printf '[defaults]\nroles_path=../' >ansible.cfg
23 |
24 | script:
25 | # Basic role syntax check
26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
27 |
28 | notifications:
29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for xray
3 | # indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
4 | ansible_marketplace: standalone
5 |
6 | # whether we are creating an AMI for Marketplace or just configuring an EC2 instance
7 | ami_creation: false
8 |
9 | # The version of xray to install
10 | xray_version: 3.17.4
11 |
12 | # whether to enable HA
13 | xray_ha_enabled: true
14 |
15 | # The location where xray should install.
16 | xray_download_directory: /opt/jfrog
17 |
18 | # The remote xray download file
19 | xray_tar: https://releases.jfrog.io/artifactory/jfrog-xray/xray-linux/{{ xray_version }}/jfrog-xray-{{ xray_version }}-linux.tar.gz
20 |
21 | # The xray install directory
22 | xray_home: "{{ xray_download_directory }}/jfrog-xray-{{ xray_version }}-linux"
23 |
24 | # xray user and group
25 | xray_user: xray
26 | xray_group: xray
27 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for xray
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 |   author: JFrog
3 |   description: Install and configure JFrog Xray on an AWS EC2 instance / AMI.
4 |   company: JFrog
5 |
6 |   # If the issue tracker for your role is not on github, uncomment the
7 |   # next line and provide a value
8 |   # issue_tracker_url: http://example.com/issue/tracker
9 |
10 |   # Apache-2.0 matches the repository's LICENSE.txt and the sibling
11 |   # artifactory role metadata (the previous value was an invalid placeholder).
12 |   license: Apache-2.0
13 |
14 |   min_ansible_version: 2.9
15 |
16 |   # If this a Container Enabled role, provide the minimum Ansible Container version.
17 |   # min_ansible_container_version:
18 |
19 |   #
20 |   # Provide a list of supported platforms, and for each platform a list of versions.
21 |   # If you don't wish to enumerate all versions for a particular platform, use 'all'.
22 |   # To view available platforms and versions (or releases), visit:
23 |   # https://galaxy.ansible.com/api/v1/platforms/
24 |   #
25 |   # platforms:
26 |   # - name: Fedora
27 |   #   versions:
28 |   #   - all
29 |   #   - 25
30 |   # - name: SomePlatform
31 |   #   versions:
32 |   #   - all
33 |   #   - 1.0
34 |   #   - 7
35 |   #   - 99.99
36 |
37 |   galaxy_tags: []
38 |     # List tags for your role here, one per line. A tag is a keyword that describes
39 |     # and categorizes the role. Users find roles by searching for tags. Be sure to
40 |     # remove the '[]' above, if you add tags to this list.
41 |     #
42 |     # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
43 |     # Maximum 20 tags per role.
44 |
45 | dependencies: []
46 |   # List your role dependencies here, one per line. Be sure to remove the '[]' above,
47 |   # if you add dependencies to this list.
48 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/tasks/Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Debian/Ubuntu prerequisites for Xray's bundled RabbitMQ, installed from
3 | # packages shipped inside the Xray archive. ignore_errors makes the db-util
4 | # and libssl installs best-effort (they may already be present / conflict
5 | # on newer distributions).
6 | - name: Install db5.3-util
7 |   apt:
8 |     deb: "{{ xray_home }}/app/third-party/misc/db5.3-util_5.3.28-3ubuntu3_amd64.deb"
9 |   ignore_errors: yes
10 |   become: yes
11 |
12 | - name: Install db-util
13 |   apt:
14 |     deb: "{{ xray_home }}/app/third-party/misc/db-util_1_3a5.3.21exp1ubuntu1_all.deb"
15 |   ignore_errors: yes
16 |   become: yes
17 |
18 | - name: Install libssl
19 |   apt:
20 |     deb: "{{ xray_home }}/app/third-party/rabbitmq/libssl1.1_1.1.0j-1_deb9u1_amd64.deb"
21 |   ignore_errors: yes
22 |   become: yes
23 |
24 | - name: Install socat
25 |   apt:
26 |     deb: "{{ xray_home }}/app/third-party/rabbitmq/socat_1.7.3.1-2+deb9u1_amd64.deb"
27 |   become: yes
28 |
29 | # libwxbase comes from the distro repositories, not the bundled archive
30 | - name: Install libwxbase3.0-0v5
31 |   apt:
32 |     name: libwxbase3.0-0v5
33 |     update_cache: yes
34 |     state: present
35 |   ignore_errors: yes
36 |   become: yes
37 |
38 | - name: Install erlang
39 |   apt:
40 |     deb: "{{ xray_home }}/app/third-party/rabbitmq/esl-erlang_21.2.1-1~ubuntu~xenial_amd64.deb"
41 |   become: yes
42 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/tasks/RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # RHEL/CentOS prerequisites for Xray's bundled RabbitMQ, installed from RPMs
3 | # shipped inside the Xray archive. become added: yum installs require root
4 | # (the Debian.yml equivalents already escalate).
5 | - name: Install db-util
6 |   yum:
7 |     name: "{{ xray_home }}/app/third-party/misc/libdb-utils-5.3.21-25.el7.x86_64.rpm"
8 |     state: present
9 |   become: yes
10 |   vars:
11 |     ansible_python_interpreter: /bin/python2
12 |
13 | - name: Install socat
14 |   yum:
15 |     name: "{{ xray_home }}/app/third-party/rabbitmq/socat-1.7.3.2-2.el7.x86_64.rpm"
16 |     state: present
17 |   become: yes
18 |   vars:
19 |     ansible_python_interpreter: /bin/python2
20 |
21 | - name: Install erlang
22 |   yum:
23 |     name: "{{ xray_home }}/app/third-party/rabbitmq/erlang-23.2.3-1.el7.x86_64.rpm"
24 |     state: present
25 |   become: yes
26 |   vars:
27 |     ansible_python_interpreter: /bin/python2
28 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for xray-ami: install Xray, then (when ami_creation) strip the
3 | # instance of SSH credentials and power it off for AMI capture.
4 | - name: create group for xray
5 |   group:
6 |     name: "{{ xray_group }}"
7 |     state: present
8 |   become: yes
9 |
10 | - name: create user for xray
11 |   user:
12 |     name: "{{ xray_user }}"
13 |     group: "{{ xray_group }}"
14 |     system: yes
15 |   become: yes
16 |
17 | - name: ensure xray_download_directory exists
18 |   file:
19 |     path: "{{ xray_download_directory }}"
20 |     state: directory
21 |   become: yes
22 |
23 | - name: download xray
24 |   unarchive:
25 |     src: "{{ xray_tar }}"
26 |     dest: "{{ xray_download_directory }}"
27 |     remote_src: yes
28 |     owner: "{{ xray_user }}"
29 |     group: "{{ xray_group }}"
30 |     creates: "{{ xray_home }}" # skip download when already extracted
31 |   become: yes
32 |   register: downloadxray
33 |   until: downloadxray is succeeded
34 |   retries: 3
35 |
36 | - name: perform prerequisite installation
37 |   include_tasks: "{{ ansible_os_family }}.yml"
38 |
39 | - name: ensure etc exists
40 |   file:
41 |     path: "{{ xray_home }}/var/etc"
42 |     state: directory
43 |     owner: "{{ xray_user }}"
44 |     group: "{{ xray_group }}"
45 |   become: yes
46 |
47 | # NOTE(review): '/home/.xray_ami' is an unusual home path — verify it matches
48 | # the AMI's login user before relying on this cleanup.
49 | - name: Remove SSH keys
50 |   file:
51 |     path: "{{ ssh_keys.dir }}"
52 |     state: absent
53 |   loop:
54 |     - dir: "/home/.xray_ami/.ssh/authorized_keys"
55 |     - dir: "/root/.ssh/authorized_keys"
56 |     - dir: "/home/centos/.ssh/authorized_keys"
57 |   loop_control:
58 |     loop_var: ssh_keys
59 |   when: ami_creation
60 |
61 | # ignore_errors is a boolean (was the string 'yes'); the shutdown severs the
62 | # connection, so the task is expected to "fail" from Ansible's point of view.
63 | - name: shutdown VM
64 |   command: /sbin/shutdown -h now
65 |   ignore_errors: yes
66 |   when: ami_creation
67 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/templates/installer-info.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "productId": "Ansible_artifactory/1.0.0",
3 | "features": [
4 | {
5 | "featureId": "Partner/ACC-006973"
6 | },
7 | {
8 | "featureId": "Channel/{{ ansible_marketplace }}"
9 | }
10 | ]
11 | }
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/templates/join.key.j2:
--------------------------------------------------------------------------------
1 | {{ join_key }}
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/templates/master.key.j2:
--------------------------------------------------------------------------------
1 | {{ master_key }}
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/templates/system.yaml.j2:
--------------------------------------------------------------------------------
1 | ## @formatter:off
2 | ## JFROG XRAY SYSTEM CONFIGURATION FILE
3 | ## HOW TO USE: comment-out any field and keep the correct yaml indentation by deleting only the leading '#' character.
4 | configVersion: 1
5 |
6 | ## NOTE: JFROG_HOME is a place holder for the JFrog root directory containing the deployed product, the home directory for all JFrog products.
7 | ## Replace JFROG_HOME with the real path! For example, in RPM install, JFROG_HOME=/opt/jfrog
8 |
9 | ## NOTE: Sensitive information such as passwords and join key are encrypted on first read.
10 | ## NOTE: The provided commented key and value is the default.
11 |
12 | ## SHARED CONFIGURATIONS
13 | ## A shared section for keys across all services in this config
14 | shared:
15 |     ## Base URL of the JFrog Platform Deployment (JPD)
16 |     ## This is the URL to the machine where JFrog Artifactory is deployed, or the load balancer pointing to it. It is recommended to use DNS names rather than direct IPs.
17 |     ## Examples: "http://jfrog.acme.com" or "http://10.20.30.40:8082"
18 |     jfrogUrl: {{ jfrog_url }}
19 |
20 |     ## Node Settings
21 |     node:
22 |         ## A unique id to identify this node.
23 |         ## Default: auto generated at startup.
24 |         id: {{ ansible_machine_id }}
25 |
26 |     ## Database Configuration
27 |     database:
28 |         ## One of: mysql, oracle, mssql, postgresql, mariadb
29 |         ## Default: Embedded derby
30 |
31 |         ## Example for mysql/postgresql
32 |         type: "{{ db_type }}"
33 |         driver: "{{ db_driver }}"
34 |         url: "{{ db_url }}"
35 |         username: "{{ db_user }}"
36 |         password: "{{ db_password }}"
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Syntax-check playbook for this role; the role directory is 'xray-ami'
3 | # (the previous value 'xray' does not resolve with roles_path=../ in a
4 | # standalone checkout of this role).
5 | - hosts: localhost
6 |   remote_user: root
7 |   roles:
8 |     - xray-ami
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray-ami/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for xray
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 |
5 | # Use the new container infrastructure
6 | sudo: false
7 |
8 | # Install ansible
9 | addons:
10 | apt:
11 | packages:
12 | - python-pip
13 |
14 | install:
15 | # Install ansible
16 | - pip install ansible
17 |
18 | # Check ansible version
19 | - ansible --version
20 |
21 | # Create ansible.cfg with correct roles_path
22 | - printf '[defaults]\nroles_path=../' >ansible.cfg
23 |
24 | script:
25 | # Basic role syntax check
26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
27 |
28 | notifications:
29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for xray
3 | # indicates where this collection was downloaded from (galaxy, automation_hub, standalone)
4 | ansible_marketplace: standalone
5 |
6 | # The version of xray to install
7 | xray_version: 3.17.4
8 |
9 | # whether to enable HA
10 | xray_ha_enabled: true
11 |
12 | # The location where xray should install.
13 | xray_download_directory: /opt/jfrog
14 |
15 | # The remote xray download file
16 | xray_tar: https://releases.jfrog.io/artifactory/jfrog-xray/xray-linux/{{ xray_version }}/jfrog-xray-{{ xray_version }}-linux.tar.gz
17 |
18 | # The xray install directory
19 | xray_home: "{{ xray_download_directory }}/jfrog-xray-{{ xray_version }}-linux"
20 |
21 | # xray user and group
22 | xray_user: xray
23 | xray_group: xray
24 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for xray
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 |   author: JFrog
3 |   description: Install and configure JFrog Xray on your system.
4 |   company: JFrog
5 |
6 |   # If the issue tracker for your role is not on github, uncomment the
7 |   # next line and provide a value
8 |   # issue_tracker_url: http://example.com/issue/tracker
9 |
10 |   # Apache-2.0 matches the repository's LICENSE.txt and the sibling
11 |   # artifactory role metadata (the previous value was an invalid placeholder).
12 |   license: Apache-2.0
13 |
14 |   min_ansible_version: 2.9
15 |
16 |   # If this a Container Enabled role, provide the minimum Ansible Container version.
17 |   # min_ansible_container_version:
18 |
19 |   #
20 |   # Provide a list of supported platforms, and for each platform a list of versions.
21 |   # If you don't wish to enumerate all versions for a particular platform, use 'all'.
22 |   # To view available platforms and versions (or releases), visit:
23 |   # https://galaxy.ansible.com/api/v1/platforms/
24 |   #
25 |   # platforms:
26 |   # - name: Fedora
27 |   #   versions:
28 |   #   - all
29 |   #   - 25
30 |   # - name: SomePlatform
31 |   #   versions:
32 |   #   - all
33 |   #   - 1.0
34 |   #   - 7
35 |   #   - 99.99
36 |
37 |   galaxy_tags: []
38 |     # List tags for your role here, one per line. A tag is a keyword that describes
39 |     # and categorizes the role. Users find roles by searching for tags. Be sure to
40 |     # remove the '[]' above, if you add tags to this list.
41 |     #
42 |     # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
43 |     # Maximum 20 tags per role.
44 |
45 | dependencies: []
46 |   # List your role dependencies here, one per line. Be sure to remove the '[]' above,
47 |   # if you add dependencies to this list.
48 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/tasks/Debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Debian/Ubuntu prerequisites for Xray's bundled RabbitMQ, installed from
3 | # packages shipped inside the Xray archive. ignore_errors makes the db-util
4 | # and libssl installs best-effort (they may already be present / conflict
5 | # on newer distributions).
6 | - name: Install db5.3-util
7 |   apt:
8 |     deb: "{{ xray_home }}/app/third-party/misc/db5.3-util_5.3.28-3ubuntu3_amd64.deb"
9 |   ignore_errors: yes
10 |   become: yes
11 |
12 | - name: Install db-util
13 |   apt:
14 |     deb: "{{ xray_home }}/app/third-party/misc/db-util_1_3a5.3.21exp1ubuntu1_all.deb"
15 |   ignore_errors: yes
16 |   become: yes
17 |
18 | - name: Install libssl
19 |   apt:
20 |     deb: "{{ xray_home }}/app/third-party/rabbitmq/libssl1.1_1.1.0j-1_deb9u1_amd64.deb"
21 |   ignore_errors: yes
22 |   become: yes
23 |
24 | - name: Install socat
25 |   apt:
26 |     deb: "{{ xray_home }}/app/third-party/rabbitmq/socat_1.7.3.1-2+deb9u1_amd64.deb"
27 |   become: yes
28 |
29 | # libwxbase comes from the distro repositories, not the bundled archive
30 | - name: Install libwxbase3.0-0v5
31 |   apt:
32 |     name: libwxbase3.0-0v5
33 |     update_cache: yes
34 |     state: present
35 |   ignore_errors: yes
36 |   become: yes
37 |
38 | - name: Install erlang
39 |   apt:
40 |     deb: "{{ xray_home }}/app/third-party/rabbitmq/esl-erlang_21.2.1-1~ubuntu~xenial_amd64.deb"
41 |   become: yes
42 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/tasks/RedHat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # RHEL/CentOS prerequisites for Xray's bundled RabbitMQ, installed from RPMs
3 | # shipped inside the Xray archive. become added: yum installs require root
4 | # (the Debian.yml equivalents already escalate).
5 | - name: Install db-util
6 |   yum:
7 |     name: "{{ xray_home }}/app/third-party/misc/libdb-utils-5.3.21-25.el7.x86_64.rpm"
8 |     state: present
9 |   become: yes
10 |   vars:
11 |     ansible_python_interpreter: /bin/python2
12 |
13 | - name: Install socat
14 |   yum:
15 |     name: "{{ xray_home }}/app/third-party/rabbitmq/socat-1.7.3.2-2.el7.x86_64.rpm"
16 |     state: present
17 |   become: yes
18 |   vars:
19 |     ansible_python_interpreter: /bin/python2
20 |
21 | - name: Install erlang
22 |   yum:
23 |     name: "{{ xray_home }}/app/third-party/rabbitmq/erlang-23.2.3-1.el7.x86_64.rpm"
24 |     state: present
25 |   become: yes
26 |   vars:
27 |     ansible_python_interpreter: /bin/python2
28 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/tasks/initialize-pg-db.yml:
--------------------------------------------------------------------------------
1 | # Bootstrap the Xray PostgreSQL role and database via psql on the DB master.
2 | # NOTE(review): SQL is built by interpolating play vars; acceptable only while
3 | # db_user/db_master_user come from trusted configuration, not external input.
4 | - name: initialize Postgres DB
5 |   block:
6 |     - name: check if user/role exists
7 |       command: psql -A -t {{db_master_url}} -c "SELECT 1 FROM pg_roles WHERE rolname='{{db_user}}'"
8 |       register: user_exists
9 |       changed_when: false # read-only probe; never changes anything
10 |
11 |     - debug:
12 |         var: user_exists.stdout_lines
13 |
14 |     - name: create user/role
15 |       command: psql {{db_master_url}} -c "CREATE USER {{db_user}} WITH PASSWORD '{{db_password}}'"
16 |       register: shell_output
17 |       no_log: true # the command line contains db_password; keep it out of logs
18 |       when: user_exists.stdout != "1"
19 |
20 |     - debug:
21 |         var: shell_output.stdout_lines
22 |       when: user_exists.stdout != "1"
23 |
24 |     # RDS-style setups require the master user to be a member of the new role
25 |     # before it can create a database owned by it
26 |     - name: grant membership role
27 |       command: psql {{db_master_url}} -c "GRANT {{db_user}} TO {{db_master_user}}"
28 |       register: shell_output
29 |       when: user_exists.stdout != "1"
30 |
31 |     - debug:
32 |         var: shell_output.stdout_lines
33 |       when: user_exists.stdout != "1"
34 |
35 |     - name: check if xraydb exists
36 |       command: psql -A -t {{db_master_url}} -c "SELECT 1 FROM pg_database WHERE datname='xraydb'"
37 |       register: db_exists
38 |       changed_when: false # read-only probe; never changes anything
39 |
40 |     - debug:
41 |         var: db_exists.stdout_lines
42 |
43 |     - name: create xraydb database
44 |       command: psql {{db_master_url}} -c "CREATE DATABASE xraydb WITH OWNER={{db_user}} ENCODING='UTF8'"
45 |       register: shell_output
46 |       when: db_exists.stdout != "1"
47 |
48 |     - debug:
49 |         var: shell_output.stdout_lines
50 |       when: db_exists.stdout != "1"
51 |
52 |     - name: grant xraydb privileges to role
53 |       command: psql {{db_master_url}} -c "GRANT ALL PRIVILEGES ON DATABASE xraydb TO {{db_user}}"
54 |       register: shell_output
55 |       when: db_exists.stdout != "1"
56 |
57 |     - debug:
58 |         var: shell_output.stdout_lines
59 |       when: db_exists.stdout != "1"
60 |   become: yes
61 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: initialize postgres database
3 | include_tasks: initialize-pg-db.yml
4 |
5 | - name: create group for xray
6 | group:
7 | name: "{{ xray_group }}"
8 | state: present
9 | become: yes
10 |
# Service account for the Xray daemon; `system: yes` allocates a UID from
# the system range, as is conventional for service users.
- name: create user for xray
  user:
    name: "{{ xray_user }}"
    group: "{{ xray_group }}"
    system: yes
  become: yes
17 |
# Staging directory used by the OS-specific prerequisite tasks for the
# downloaded Xray installer.
- name: ensure xray_download_directory exists
  file:
    path: "{{ xray_download_directory }}"
    state: directory
  become: yes
23 |
# Dispatch to a distribution-family task file (e.g. RedHat.yml or
# Debian.yml) selected by the gathered ansible_os_family fact.
- name: perform prerequisite installation
  include_tasks: "{{ ansible_os_family }}.yml"
26 |
# The configuration tree under <xray_home>/var must exist and be owned by
# the service account before the template tasks below render files into it.
- name: ensure data subdirectories exist and have correct ownership
  file:
    path: "{{ xray_home }}/var/{{ item }}"
    state: directory
    owner: "{{ xray_user }}"
    group: "{{ xray_group }}"
  loop:
    - "etc"
    - "data"
    - "etc/info"
    - "etc/security"
  become: yes
39 |
# Render the main Xray configuration file. system.yaml embeds the database
# password, so own it by the service account and keep it group-readable at
# most, rather than leaving it with root's default umask.
- name: configure system yaml
  template:
    src: system.yaml.j2
    dest: "{{ xray_home }}/var/etc/system.yaml"
    owner: "{{ xray_user }}"
    group: "{{ xray_group }}"
    mode: "0640"
    force: no  # only create if file doesn't exist
  become: yes
46 |
# Render the JFrog master key. This is secret key material: own it by the
# service account (which must read it) and make it owner-only.
- name: configure master key
  template:
    src: master.key.j2
    dest: "{{ xray_home }}/var/etc/security/master.key"
    owner: "{{ xray_user }}"
    group: "{{ xray_group }}"
    mode: "0600"
    force: no  # only create if file doesn't exist
  become: yes
53 |
# Render the JFrog join key used to join the platform. Secret key
# material: service-account owned, owner-only permissions.
- name: configure join key
  template:
    src: join.key.j2
    dest: "{{ xray_home }}/var/etc/security/join.key"
    owner: "{{ xray_user }}"
    group: "{{ xray_group }}"
    mode: "0600"
    force: no  # only create if file doesn't exist
  become: yes
60 |
# Render the installer metadata JSON (product/channel identifiers) that
# JFrog services report for call-home/marketplace attribution.
- name: configure installer info
  template:
    src: installer-info.json.j2
    dest: "{{ xray_home }}/var/etc/info/installer-info.json"
    force: no  # only create if file doesn't exist
  become: yes
67 |
# Register Xray as a system service using JFrog's installer script.
# `command` is sufficient here: the invocation uses no shell features
# (pipes, redirects, globbing), so the shell module adds only risk.
# NOTE(review): the script is re-run on every play; consider adding
# `args: creates: <generated unit file>` once the unit path is confirmed.
- name: create xray service
  command: "{{ xray_home }}/app/bin/installService.sh"
  become: yes
71 |
# (Re)start Xray and enable it at boot. The original task only restarted
# the service despite its name, so a rebooted node would not bring Xray
# back up; `enabled: yes` completes the stated intent.
- name: start and enable xray
  service:
    name: xray
    state: restarted
    enabled: yes
  become: yes
77 |
# Join this node's RabbitMQ to the cluster; the pipe to tee (log capture
# in /root/joinrabbitcluster.log) is why the shell module is required.
# NOTE(review): the pipeline's exit status is tee's, so a failure of the
# join script will not fail this task — confirm whether that is intended.
- name: join rabbitmq cluster
  shell: "/root/create_rabbitmq_cluster.sh 2>&1 | tee /root/joinrabbitcluster.log"
  become: yes
81 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/templates/installer-info.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "productId": "Ansible_artifactory/1.0.0",
3 | "features": [
4 | {
5 | "featureId": "Partner/ACC-006973"
6 | },
7 | {
8 | "featureId": "Channel/{{ ansible_marketplace }}"
9 | }
10 | ]
11 | }
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/templates/join.key.j2:
--------------------------------------------------------------------------------
1 | {{ join_key }}
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/templates/master.key.j2:
--------------------------------------------------------------------------------
1 | {{ master_key }}
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/templates/system.yaml.j2:
--------------------------------------------------------------------------------
## @formatter:off
## JFROG XRAY SYSTEM CONFIGURATION FILE (rendered by the Ansible xray role)
## HOW TO USE: comment-out any field and keep the correct yaml indentation by deleting only the leading '#' character.
configVersion: 1

## NOTE: JFROG_HOME is a place holder for the JFrog root directory containing the deployed product, the home directory for all JFrog products.
## Replace JFROG_HOME with the real path! For example, in RPM install, JFROG_HOME=/opt/jfrog

## NOTE: Sensitive information such as passwords and join key are encrypted on first read.
## NOTE: The provided commented key and value is the default.

## SHARED CONFIGURATIONS
## A shared section for keys across all services in this config
shared:
  ## Base URL of the JFrog Platform Deployment (JPD)
  ## This is the URL to the machine where JFrog Artifactory is deployed, or the load balancer pointing to it. It is recommended to use DNS names rather than direct IPs.
  ## Examples: "http://jfrog.acme.com" or "http://10.20.30.40:8082"
  jfrogUrl: {{ jfrog_url }}

  ## Java options
  extraJavaOpts: "{{ extra_java_opts }}"

  ## Node Settings
  node:
    ## A unique id to identify this node.
    ## Default: auto generated at startup.
    ## Here: the systemd machine-id fact, stable across reboots of the instance.
    id: {{ ansible_machine_id }}

  ## Database Configuration
  database:
    ## One of: mysql, oracle, mssql, postgresql, mariadb
    ## Default: Embedded derby

    ## Example for mysql/postgresql
    type: "{{ db_type }}"
    driver: "{{ db_driver }}"
    url: "{{ db_url }}"
    username: "{{ db_user }}"
    password: "{{ db_password }}"
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/tests/test.yml:
--------------------------------------------------------------------------------
---
# Minimal smoke-test playbook for the xray role (runs against localhost).
- hosts: localhost
  remote_user: root
  roles:
    - xray
--------------------------------------------------------------------------------
/cloudInstallerScripts/roles/xray/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for xray
--------------------------------------------------------------------------------
/cloudInstallerScripts/site-artifactory.yml:
--------------------------------------------------------------------------------
# Entry-point playbook: install Artifactory plus exactly one of the two
# nginx front-end roles, selected by enable_ssl.
# The conditions cast with `| bool` because extra-vars (and values passed
# through from CloudFormation user data) arrive as strings: with the
# original `enable_ssl != true`, the string "true" compared unequal to
# boolean true and both branches misfired.
- hosts: localhost
  gather_facts: true
  become: true
  tasks:
    - include_role:
        name: artifactory
    - include_role:
        name: artifactory-nginx
      when: not (enable_ssl | bool)
    - include_role:
        name: artifactory-nginx-ssl
      when: enable_ssl | bool
13 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/site-xray.yml:
--------------------------------------------------------------------------------
# Entry-point playbook: install and configure JFrog Xray on the local host.
- hosts: localhost
  gather_facts: true
  become: true
  roles:
    - name: xray
6 |
--------------------------------------------------------------------------------
/cloudInstallerScripts/xray-ami.yml:
--------------------------------------------------------------------------------
# Entry-point playbook: bake an Xray AMI via the xray-ami role.
- hosts: localhost
  gather_facts: true
  become: true
  roles:
    - name: xray-ami
6 |
--------------------------------------------------------------------------------
/docs/images/architecture_diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/docs/images/architecture_diagram.png
--------------------------------------------------------------------------------
/docs/images/jfrog-architecture-diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/docs/images/jfrog-architecture-diagram.png
--------------------------------------------------------------------------------
/docs/images/jfrog-architecture-diagram.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/docs/images/jfrog-architecture-diagram.pptx
--------------------------------------------------------------------------------
/docs/images/jfrog-architecture-diagrams.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/docs/images/jfrog-architecture-diagrams.pptx
--------------------------------------------------------------------------------
/docs/images/secret_manager_licenses.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/docs/images/secret_manager_licenses.png
--------------------------------------------------------------------------------
/docs/images/secrets_manager_certificates.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/docs/images/secrets_manager_certificates.png
--------------------------------------------------------------------------------
/docs/images/xray_update_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/docs/images/xray_update_1.png
--------------------------------------------------------------------------------
/docs/images/xray_update_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/docs/images/xray_update_2.png
--------------------------------------------------------------------------------
/docs/images/xray_update_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/docs/images/xray_update_3.png
--------------------------------------------------------------------------------
/docs/images/xray_update_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/docs/images/xray_update_4.png
--------------------------------------------------------------------------------
/docs/images/xray_update_5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/docs/images/xray_update_5.png
--------------------------------------------------------------------------------
/docs/partner_editable/_settings.adoc:
--------------------------------------------------------------------------------
1 | :quickstart-project-name: quickstart-jfrog-artifactory
2 | :partner-product-name: JFrog Artifactory Enterprise and Xray
3 | :partner-product-short-name: Artifactory and Xray
4 | :partner-company-name: JFrog Ltd.
5 | :doc-month: April
6 | :doc-year: 2021
7 | :partner-contributors: Vinay Aggarwal and Alex Hung, JFrog
8 | :quickstart-contributors: Dylan Owen, AWS Quick Start team
9 | :deployment_time: 30 minutes
10 | :default_deployment_region: us-east-1
11 | // Uncomment these two attributes if you are leveraging
12 | // - an AWS Marketplace listing.
13 | // Additional content will be auto-generated based on these attributes.
14 | :marketplace_subscription:
15 | :marketplace_listing_url: https://aws.amazon.com/marketplace/pp/B00O7WM7QW?ref_=aws-mp-console-subscription-detail
16 | :parameters_as_appendix:
--------------------------------------------------------------------------------
/docs/partner_editable/architecture.adoc:
--------------------------------------------------------------------------------
1 | Deploying this Quick Start for a new virtual private cloud (VPC) with
2 | default parameters builds the following {partner-product-name} environment in the
3 | AWS Cloud.
4 |
// Replace this example diagram with your own. Send us your source PowerPoint file. Be sure to follow our guidelines here : http://(we should include these points on our contributors guide)
6 | [#architecture1]
7 | .Quick Start architecture for {partner-product-name} on AWS
8 | [link=images/jfrog-architecture-diagram.png]
9 | image::../images/jfrog-architecture-diagram.png[Architecture,width=648,height=439]
10 |
11 | As shown in Figure 1, the Quick Start sets up the following:
12 |
13 | * A highly available architecture that spans two Availability Zones.*
14 | * A VPC configured with public and private subnets, according to AWS best practices, to
15 | provide you with your own virtual network on AWS.*
16 | * A Network Load Balancer attached to the public subnets connecting via port 80 or 443
17 | to the Artifactory primary and secondary nodes in the private subnets.
18 | * A Network Load Balancer attached to the public subnets for Xray to connect via port 80 to the Artifactory primary and secondary nodes residing in the private subnets.
19 | * A private and encrypted Amazon S3 bucket for repository storage.
20 | * In the public subnets:
21 |
22 | ** Managed network address translation (NAT) gateways to allow outbound
23 | internet access for resources in the private subnets.*
24 | ** A Linux bastion host in an Auto Scaling group to allow inbound Secure
25 | Shell (SSH) access from the RemoteAccess Classless Inter-Domain Routing (CIDR) to the Amazon EC2 instances in public
26 | and private subnets.
27 |
28 | * In the private subnets:
29 |
30 | // Add bullet points for any additional components that are included in the deployment. Make sure that the additional components are also represented in the architecture diagram.
31 | ** Two Amazon EC2 Auto Scaling groups, one for the primary node and one for the secondary nodes.
32 | ** (Optional) One Amazon EC2 Auto Scaling group for the Xray nodes.
** A PostgreSQL instance on Amazon RDS that can be accessed from the private subnets on port 3306 or 5432.
34 |
35 | For more information, see https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_PostgreSQL.html[PostgreSQL on Amazon RDS^].
36 |
37 | NOTE: The purpose of the Auto Scaling groups is for automatic deployment of
38 | the primary node into another Availability Zone if a failure occurs. Do not modify the
39 | number of instances.
40 |
41 | *The template that deploys the Quick Start into an existing VPC skips
42 | the components marked by asterisks and prompts you for your existing VPC
43 | configuration.
44 |
45 | === Auto Scaling groups
46 |
47 | The Auto Scaling groups are designed to have one primary node and multiple secondary
nodes. When an EC2 node or service fails, Auto Scaling groups automatically recreate the
49 | instances. For this reason, all configurations are made on boot and result in a loss of any
50 | data that are not stored in the Amazon RDS instance or S3 bucket.
51 |
52 | For more information, see https://aws.amazon.com/autoscaling/[AWS Auto Scaling^].
53 |
54 | === Ansible init script
55 | Ansible is installed and configured to run only on initial boot. Ansible, in cooperation with
56 | the Auto Scaling group, initiates the required configuration to configure, install, and run Artifactory and Xray. As a part
57 | of this configuration, the nodes automatically join the HA cluster.
58 |
59 | WARNING: Do not change the master key of the stack when updating the stack.
60 | Doing so results in an unsupported configuration that future nodes cannot join.
61 | To update an expired Secure Sockets Layer (SSL) certificate, change the
62 | CloudFormation stack certificate and certificate key inputs, and then redeploy the
63 | nodes (see Updating Artifactory). +
64 | +
65 | If you change the certificate and certificate key
66 | manually on the Amazon EC2 instances (instead of updating the CloudFormation stack), your
67 | manual changes are lost at the next update or reboot, which results in an unwanted
68 | configuration.
--------------------------------------------------------------------------------
/docs/partner_editable/deploy_steps.adoc:
--------------------------------------------------------------------------------
1 | // We need to work around Step numbers here if we are going to potentially exclude the AMI subscription
2 | === Sign in to your AWS account
3 |
4 | . Sign in to your AWS account at https://aws.amazon.com with an IAM user role that has the necessary permissions. For details, see link:#_planning_the_deployment[Planning the deployment] earlier in this guide.
5 | . Make sure that your AWS account is configured correctly, as discussed in the link:#_technical_requirements[Technical requirements] section.
6 |
7 | === Launch the Quick Start
8 |
9 | WARNING: If you’re deploying {partner-product-name} into an existing VPC, make sure that your VPC has two private subnets in different Availability Zones for the workload instances, and that the subnets aren’t shared. This Quick Start doesn’t support https://docs.aws.amazon.com/vpc/latest/userguide/vpc-sharing.html[shared subnets^]. These subnets require https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html[NAT gateways^] in their route tables, to allow the instances to download packages and software without exposing them to the internet.
10 |
11 | Each deployment takes about {deployment_time} to complete.
12 |
13 | . Sign in to your AWS account, and choose one of the following options to launch the AWS CloudFormation template. For help with choosing an option, see link:#_deployment_options[deployment options] earlier in this guide.
14 |
15 | [cols="3,1"]
16 | |===
17 | ^|https://fwd.aws/DzEkv[Deploy {partner-product-name} into a new VPC on AWS^]
18 | ^|https://fwd.aws/5wnqz[View template^]
19 |
20 | ^|https://fwd.aws/Ee88A[Deploy {partner-product-name} into an existing VPC on AWS^]
21 | ^|https://fwd.aws/jMBXD[View template^]
22 | |===
23 |
24 | [start=2]
25 | . Check the AWS Region that’s displayed in the upper-right corner of the navigation bar, and change it if necessary. This Region is where the network infrastructure for {partner-product-name} is built. The template is launched in the {default_deployment_region} Region by default.
26 |
27 | [start=3]
28 | . On the *Create stack* page, keep the default setting for the template URL, and then choose *Next*.
29 | . On the *Specify stack details* page, change the stack name if needed. Review the parameters for the template. Provide values for the parameters that require input. For all other parameters, review the default settings and customize them as necessary. For details on each parameter, see the link:#_parameter_reference[Parameter reference] section of this guide. When you finish reviewing and customizing the parameters, choose *Next*.
--------------------------------------------------------------------------------
/docs/partner_editable/deployment_options.adoc:
--------------------------------------------------------------------------------
1 | // There are generally two deployment options. If additional are required, add them here
2 |
3 | This Quick Start provides two deployment options:
4 |
5 | * *Deploy {partner-product-short-name} into a new VPC*. This option builds a new AWS environment consisting of the VPC, subnets, NAT gateways, security groups, bastion hosts, and other infrastructure components. It then deploys {partner-product-short-name} into this new VPC.
6 | * *Deploy {partner-product-short-name} into an existing VPC*. This option provisions {partner-product-short-name} in your existing AWS infrastructure.
7 |
8 | The Quick Start provides separate templates for these options. It also lets you configure Classless Inter-Domain Routing (CIDR) blocks, instance types, and {partner-product-short-name} settings, as discussed later in this guide.
--------------------------------------------------------------------------------
/docs/partner_editable/faq_troubleshooting.adoc:
--------------------------------------------------------------------------------
1 | // Add any tips or answers to anticipated questions. This could include the following troubleshooting information. If you don’t have any other Q&A to add, change “FAQ” to “Troubleshooting.”
2 |
3 | == FAQ
4 |
5 | *Q.* . I provisioned more secondary nodes than I have licenses, and I cannot access
6 | Artifactory. What do I do?
7 |
8 | *A.* In the AWS CloudFormation console, choose **Update stack**, and reduce the number of
9 | secondary nodes to the number of licenses you purchased, minus one license for the master.
10 |
11 | *Q.* My license ran out and Artifactory is unresponsive. How do I fix this?
12 |
13 | *A.* Reduce the number of secondary nodes to zero, and contact JFrog for a new license.
14 |
15 | *Q.* My certificate is out of date. How do I update it?
16 |
17 | *A.* The certificate is handled via Ansible or Helm. In the AWS CloudFormation console,
18 | choose **Update stack**, change the certificate and certificate key values. Then, by rolling
19 | restart, update the master node first, and then, one at a time, the secondary nodes. This will
20 | rebuild each node with the correct certificate.
21 |
22 |
23 | *Q.* I encountered a *CREATE_FAILED* error when I launched the Quick Start.
24 |
25 | *A.* If AWS CloudFormation fails to create the stack, we recommend that you relaunch the template with *Rollback on failure* set to *No*. This setting is under *Advanced* in the AWS CloudFormation console on the *Configure stack options* page. With this setting, the stack’s state is retained and the instance is left running, so you can troubleshoot the issue. (For Windows, look at the log files in %ProgramFiles%\Amazon\EC2ConfigService and C:\cfn\log.)
26 | // If you’re deploying on Linux instances, provide the location for log files on Linux, or omit this sentence.
27 |
28 | WARNING: When you set *Rollback on failure* to *Disabled*, you continue to incur AWS charges for this stack. Please make sure to delete the stack when you finish troubleshooting.
29 |
30 | For additional information, see https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html[Troubleshooting AWS CloudFormation^] on the AWS website.
31 |
32 | *Q.* I encountered a size limitation error when I deployed the AWS CloudFormation templates.
33 |
34 | *A.* Launch the Quick Start templates from the links in this guide or from another S3 bucket. If you deploy the templates from a local copy on your computer or from a location other than an S3 bucket, you might encounter template size limitations. For more information about AWS CloudFormation quotas, see the http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html[AWS documentation^].
--------------------------------------------------------------------------------
/docs/partner_editable/licenses.adoc:
--------------------------------------------------------------------------------
1 | // Include details about the license and how they can sign up. If no license is required, clarify that.
2 |
3 | // These two paragraphs provide an example of the details you can provide. Provide links as appropriate.
4 |
5 |
6 | // Example content below:
7 |
8 | // _This Quick Start requires a license for {partner-product-name}. To use the Quick Start in your production environment, sign up for a license at . When you launch the Quick Start, place the license key in an S3 bucket and specify its location._
9 |
10 | // _If you don’t have a license, the Quick Start deploys with a trial license. The trial license gives you days of free usage in a non-production environment. After this time, you can upgrade to a production license by following the instructions at ._
11 |
12 | // // Or, if the deployment uses an AMI, update this paragraph. If it doesn’t, remove the paragraph.
13 | // _The Quick Start requires a subscription to the Amazon Machine Image (AMI) for {partner-product-name}, which is available from https://aws.amazon.com/marketplace/[AWS Marketplace^]. Additional pricing, terms, and conditions may apply. For instructions, see link:#step-2.-subscribe-to-the-software-ami[step 2] in the deployment section._
14 |
15 |
This Quick Start requires a subscription to the CentOS AMI and an Enterprise or Enterprise+ license for Artifactory. You can subscribe to the CentOS AMI on the https://aws.amazon.com/marketplace/pp/B00O7WM7QW?ref_=aws-mp-console-subscription-detail[AWS CentOS Marketplace^] page. If you choose to install Xray, you must have an Enterprise+ license or an Enterprise license with the Xray add-on.
17 |
18 | To use the Quick Start in your production environment, sign up for a https://jfrog.com/artifactory/free-trial-aws/#enterprise[free trial license^], which includes three Artifactory Enterprise licenses. Add the license keys to AWS Secrets Manager, as described in the <<#_deployment_steps, Deployment steps>> section in this guide.
19 |
20 | NOTE: Enterprise or Enterprise+ licenses are required for high availability. If the license isn’t an Enterprise or Enterprise+ license, the license is invalid,
21 | or the license is not included, the deployment will fail. Also, ensure that the number
22 | of secondary Artifactory servers is at most the amount licensed minus one, for the
23 | primary server. If you specify too many servers, see the <> section for instructions.
24 |
25 | If you use a free trial, convert to a permanent license before your trial ends,
26 | otherwise the nodes will become unresponsive. You can request a quote by contacting
27 | https://jfrog.com/pricing/[JFrog^].
--------------------------------------------------------------------------------
/docs/partner_editable/overview_target_and_usage.adoc:
--------------------------------------------------------------------------------
1 | // Replace the content in <>
2 | // Identify your target audience and explain how/why they would use this Quick Start.
3 | //Avoid borrowing text from third-party websites (copying text from AWS service documentation is fine). Also, avoid marketing-speak, focusing instead on the technical aspect.
4 |
5 | https://jfrog.com/artifactory/[JFrog’s Artifactory^] is an enterprise universal repository manager, capable of hosting all of
6 | your binaries in one place. This Quick Start deploys Artifactory Enterprise in a highly
7 | available (HA) configuration into AWS.
8 |
9 | https://jfrog.com/xray/[JFrog Xray^] works with JFrog Artifactory to perform universal analysis of binary software components
10 | at any stage of the application lifecycle, providing radical transparency that leads to trust in your software. Xray is an optional installation that you can choose during the Quick Start deployment.
11 |
12 | This Quick Start is for administrators who want the flexibility, scale, and availability of
13 | AWS through products such as virtual private clouds (VPCs), Amazon Elastic Compute
14 | Cloud (Amazon EC2), Amazon Simple Storage Service (Amazon S3), Elastic Load Balancing
15 | (ELB), and Amazon Relational Database Service (Amazon RDS) to deploy Artifactory as
16 | their repository manager.
17 |
18 | Amazon EC2, along with Amazon S3 and Amazon RDS, forms the foundation for the
19 | deployment. By using Amazon S3 and Amazon RDS as persistent storage for artifacts and
20 | the configuration, respectively, Artifactory and Xray can be completely redeployed, scaled up, or
21 | scaled down, depending on your requirements. This configuration allows organizations to
22 | save on costs for multiple secondary nodes and to pay only for storage used.
23 |
24 | The default installation creates two https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html[Amazon EC2 Auto Scaling^] groups:
25 |
26 | * The first Auto Scaling group is responsible for the primary node and ensures that the
27 | node.id for HA is set to `primary` and that there is always only one primary node.
28 | * The second Auto Scaling group is responsible for ensuring that the `node.id` for the
29 | secondaries is unique and therefore sets it to the hostname. This Auto Scaling group is
30 | also responsible for scaling up or down the number of secondaries to the amount
31 | specified by the administrator.
32 |
33 | The optional Xray installation creates two additional Amazon EC2 Auto Scaling groups:
34 |
35 | * The first Auto Scaling group is responsible for the primary node. Xray is installed into the primary subnet.
* The second Auto Scaling group is responsible for installing Xray into the secondary subnet. This Auto Scaling group is also responsible for scaling up or down the number of secondaries to the amount specified by the administrator.
37 |
38 | The Auto Scaling groups are monitored by the Network Load Balancer, which is configured
39 | with https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-healthchecks.html[health checks^] that validate that the Artifactory service is up and running. If the endpoint
40 | returns an error response, a new node is recovered within 10 minutes.
--------------------------------------------------------------------------------
/docs/partner_editable/pre-reqs.adoc:
--------------------------------------------------------------------------------
// If no preparation is required, remove all content from here
2 |
3 | // ==== Prepare your AWS account
4 |
5 | // _Describe any setup required in the AWS account prior to template launch_
6 |
7 | // ==== Prepare your {partner-company-name} account
8 |
9 | // _Describe any setup required in the partner portal/account prior to template launch_
10 |
11 | ==== Prepare for the deployment
12 | // _Describe any preparation required to complete the product build, such as obtaining licenses or placing files in S3_
13 |
14 | ===== Prepare the certificate and certificate key
15 |
16 | Open the certificate into an editor of your choice and copy the certificate and paste it directly in the text box in next step. This results in the entire certificate being on a single line, and it automatically converts all Carriage Return and Line Feed (CRLF) or Line Feed (LF) characters to spaces.
17 | Follow the same process for the certificate key.
18 |
19 |
20 | ===== Add the license keys and certificate to AWS Secrets Manager
21 |
22 | Perform these steps:
23 |
24 | . Open AWS Secrets Manager in the same Region in which you deploy the Quick Start.
25 | . Choose *Store a new secret*.
26 | . Choose *Other type of secret*.
27 | . For the secret key value, create six rows for the Artifactory licenses.
28 | . Add the following key names and Artifactory license keys (see <>):
29 |
30 | * *ArtifactoryLicense1*
31 | * *ArtifactoryLicense2*
32 | * *ArtifactoryLicense3*
33 | * *ArtifactoryLicense4*
34 | * *ArtifactoryLicense5*
35 | * *ArtifactoryLicense6*
36 |
37 | :xrefstyle: short
38 | [#secret_manager_licenses]
39 | .Secrets Manager key-value licenses page
40 | [link=../{quickstart-project-name}/images/secret_manager_licenses.png]
41 | image::../images/secret_manager_licenses.png[image_placeholder]
42 |
43 | [start=6]
44 | . Choose *Next*.
45 | . Provide a secret name to use when deploying this Quick Start.
46 | . Choose *Next* twice.
47 | . Choose *Store*.
48 |
49 | [start=7]
50 | . Repeat the above steps for storing the certificate details in AWS Secrets Manager.
51 | . Create three rows for the certificate information retrieved in the preparation of the certificate in the <> section:
52 |
53 | * *Certificate*
54 | * *CertificateKey*
55 | * *CertificateDomain*
56 |
57 | :xrefstyle: short
58 | [#secret_manager_certificate]
59 | .Secrets Manager key-value certificates page
60 | [link=../{quickstart-project-name}/images/secrets_manager_certificates.png]
61 | image::../images/secrets_manager_certificates.png[image_placeholder]
62 |
63 |
64 | // Optional based on Marketplace listing. Not to be edited
65 | ifdef::marketplace_subscription[]
66 | ===== Subscribe to the CentOS AMI
67 |
68 | This Quick Start requires a subscription to the Amazon Machine Image (AMI) for CentOS in AWS Marketplace.
69 |
70 | Perform the following steps:
71 |
72 | . Sign in to your AWS account.
73 | . {marketplace_listing_url}[Open the page for the CentOS AMI in AWS Marketplace], and then choose *Continue to Subscribe*.
74 | . Review the terms and conditions for software usage, and then choose *Accept Terms*. +
75 | A confirmation page loads, and an email confirmation is sent to the account owner. For detailed subscription instructions, see the https://aws.amazon.com/marketplace/help/200799470[AWS Marketplace documentation^].
76 |
77 | . When the subscription process is complete, exit out of AWS Marketplace without further action. *Do not* provision the software from AWS Marketplace — the Quick Start deploys the AMI for you.
78 | endif::marketplace_subscription[]
79 | // \Not to be edited
--------------------------------------------------------------------------------
/docs/partner_editable/product_description.adoc:
--------------------------------------------------------------------------------
1 | // Replace the content in <>
2 | // Briefly describe the software. Use consistent and clear branding.
3 | // Include the benefits of using the software on AWS, and provide details on usage scenarios.
4 |
5 | Once you deploy JFrog’s Artifactory with the option to install Xray, you can use it as a production service. For more
6 | information about setting up Artifactory, see the <<_get_started_with_jfrog_artifactory, Get started with JFrog Artifactory>> section
7 | later in this guide.
8 |
9 | WARNING: The deployment is configured as _infrastructure as code_. Any changes
10 | to the infrastructure should be done by updating the https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacks.html[CloudFormation stack^]. Any
11 | changes performed on the boxes themselves (including reverse-proxy
12 | configurations) are lost when an instance reboots. By design, upon shutdown of an
13 | instance, or when Artifactory is unavailable, an Auto Scaling group replaces the
14 | node, following a load-balancing health check.
--------------------------------------------------------------------------------
/docs/partner_editable/regions.adoc:
--------------------------------------------------------------------------------
1 | - _us-east-1 (N. Virginia)_ (EXAMPLE)
2 | - _us-east-2 (Ohio)_ (EXAMPLE)
3 | //TODO
--------------------------------------------------------------------------------
/docs/partner_editable/service_limits.adoc:
--------------------------------------------------------------------------------
1 | // Replace the value in each row to specify the number of resources used in this deployment. Remove the rows for resources that aren’t used.
2 | |===
3 | |Resource |This deployment uses
4 |
5 | // Space needed to maintain table headers
6 | |VPCs |1
7 | |Elastic IP addresses |3
8 | |AWS Identity and Access Management (IAM) security groups |1
9 | |IAM roles |2
10 | |Security groups |4
11 | |Auto Scaling groups |3
12 | |Load Balancers |2
13 | |m5.xlarge instances |4
14 | |t3.micro instances |1
15 | |db.m5.large (RDS) |1
16 | |S3 Buckets |1
17 | |===
18 |
--------------------------------------------------------------------------------
/docs/partner_editable/specialized_knowledge.adoc:
--------------------------------------------------------------------------------
1 | // Replace the content in <>
2 | // Describe or link to specific knowledge requirements; for example: “familiarity with basic concepts in the areas of networking, database operations, and data encryption” or “familiarity with the specific software used in this deployment.”
3 |
4 | This Quick Start assumes familiarity with JFrog Artifactory, JFrog Xray, and infrastructure as code. It
5 | also requires a moderate level of familiarity with AWS services. If you’re new to AWS, visit
6 | the Getting Started Resource Center and the AWS Training and Certification website for
7 | materials and programs that can help you develop the skills to design, deploy, and operate
8 | your infrastructure and applications on the AWS Cloud.
--------------------------------------------------------------------------------
/pipeline-taskcat.yml:
--------------------------------------------------------------------------------
1 | project:
2 | name: quickstart-jfrog-artifactory
3 | owner: quickstart-eng@amazon.com
4 | s3_object_acl: private
5 | s3_regional_buckets: True
6 | #s3_bucket: tcat-422383ecc658557f9a377abae675aac0 # commercial accounts (also for GovCloud)
7 | #s3_bucket: tcat-a3e80b6745b2547da1c745b16adf2a66 # aws-seller accounts (MarketPlace)
8 |
9 | parameters:
10 | KeyPairName : "keyname"
11 | ArtifactoryVersion : "rt_ver"
12 | XrayVersion : "xray_ver"
13 | AccessCidr : "0.0.0.0/0"
14 | QsS3BucketName : "$[taskcat_autobucket]"
15 | QsS3KeyPrefix : "quickstart-jfrog-artifactory/"
16 | QsS3BucketRegion : "$[taskcat_current_region]"
17 | MasterKey : "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
18 | SmLicenseName : "jfrog-artifactory"
19 | SmCertName : "jfrog.tech-certificates"
20 | ArtifactoryServerName: "artifactory"
21 | MultiAzDatabase : "true"
22 | ProvisionBastionHost: "Enabled"
23 | DatabasePassword : "$[taskcat_genpass_8A]"
24 | XrayDatabasePassword: "$[taskcat_genpass_8A]"
25 |
26 |   # Set this to your home internet gateway public IP in override file
27 | # e.g. "24.4.228.4/32"
28 | RemoteAccessCidr : "63.238.166.122/29" # JFrog network
29 |
30 |
31 | regions:
32 | - REGION
33 |
34 | tests:
35 | rt-ami:
36 | auth:
37 | REGION: seller
38 | us-gov-east-1 : gov
39 | parameters:
40 | AvailabilityZone: "$[taskcat_genaz_1]"
41 | template: templates/jfrog-ami-main.template.yaml
42 |
43 | xray-ami:
44 | auth:
45 | REGION: seller
46 | us-gov-east-1 : gov
47 | parameters:
48 | AvailabilityZone: "$[taskcat_genaz_1]"
49 | template: templates/ami-rt-xray-main.template.yaml
50 |
51 | create-bastion-with-existing-vpc:
52 | parameters:
53 | # us-west-1
54 | VpcId : "vpc-0d5806917ae9d5aae"
55 | PublicSubnet1Id : "subnet-05e6d5d00b0743b24"
56 | PublicSubnet2Id : "subnet-012e67d537e962d6e"
57 | RemoteAccessCIDR : "24.4.228.4/32"
58 | EnableTCPForwarding : 'true'
59 | template: submodules/quickstart-linux-bastion/templates/linux-bastion.template
60 |
61 | artifactory-enterprise-new-vpc:
62 | auth:
63 | REGION: default
64 | us-gov-west-1: gov
65 | us-gov-east-1: gov
66 | parameters:
67 | InstallXray: "true"
68 | XrayNumberOfSecondary: 1
69 | AvailabilityZones: "$[taskcat_genaz_2]"
70 | template: templates/jfrog-artifactory-ec2-main.template.yaml
71 |
72 |
73 | artifactory-enterprise-existing-vpc:
74 | auth:
75 | REGION: default
76 | us-gov-west-1: gov
77 | us-gov-east-1: gov
78 | parameters:
79 | InstallXray: "true"
80 | XrayNumberOfSecondary: 1
81 |
82 |         # us-east-1
83 | AvailabilityZones : "us-east-1a, us-east-1b"
84 | VpcId : "vpc-0df1d896364490643"
85 | PublicSubnet1Id : "subnet-05e222926ec99d3c3"
86 | PublicSubnet2Id : "subnet-02a7e4641b2e5bc13"
87 | PrivateSubnet1Id : "subnet-0f0f73fdb8b31271d"
88 | PrivateSubnet2Id : "subnet-02f5e6f3024809a98"
89 |
90 | template: templates/jfrog-artifactory-ec2-existing-vpc.template.yaml
91 |
92 |
93 | rt-xray-ec2-marketplace:
94 | parameters:
95 | DatabasePassword: "$[taskcat_genpass_8A]"
96 | DatabaseInstance: "db.m5.large"
97 | NumberOfSecondary: "2"
98 | ArtifactoryServerName: "artifactory"
99 | SmLicenseCertName: "jfrog-artifactory"
100 | MultiAzDatabase: "true"
101 | # InstallXray: "false"
102 | # XrayNumberOfSecondary: 1
103 | AvailabilityZones: "us-east-1a, us-east-1b"
104 | template: templates/jfrog-artifactory-ec2-marketplace-main.template.yaml
105 |
106 | artifactory-pro-new-vpc:
107 | parameters:
108 | InstallXray : "true"
109 | AvailabilityZones : "$[taskcat_genaz_2]"
110 | template : templates/jfrog-artifactory-pro-ec2-new-vpc-main.template.yaml
111 |
112 | artifactory-pro-existing-vpc:
113 | parameters:
114 | InstallXray : "true"
115 |
116 | # us-east-1
117 | AvailabilityZones : "us-east-1a, us-east-1b"
118 | VpcId : "vpc-0df1d896364490643"
119 | PublicSubnet1Id : "subnet-05e222926ec99d3c3"
120 | PublicSubnet2Id : "subnet-02a7e4641b2e5bc13"
121 | PrivateSubnet1Id : "subnet-0f0f73fdb8b31271d"
122 | PrivateSubnet2Id : "subnet-02f5e6f3024809a98"
123 |
124 | PrivateSubnet1Cidr : "10.0.0.0/19"
125 | PrivateSubnet2Cidr : "10.0.32.0/19"
126 | template : templates/jfrog-artifactory-pro-ec2-existing-vpc-main.template.yaml
127 |
128 |
129 |
--------------------------------------------------------------------------------
/templates/ami-rt-xray-main.template.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: 'JFrog Product AMI creation into an existing VPC (qs-xxx)'
3 | Metadata:
4 | AWS::CloudFormation::Interface:
5 | ParameterGroups:
6 | - Label:
7 | default: Security configuration
8 | Parameters:
9 | - KeyPairName
10 | - RemoteAccessCidr
11 | - Label:
12 | default: Network Configuration
13 | Parameters:
14 | - AvailabilityZone
15 | - Label:
16 | default: JFrog Product AMI info
17 | Parameters:
18 | - InstanceType
19 | - XrayVersion
20 | - Label:
21 | default: AWS Quick Start Configuration
22 | Parameters:
23 | - QsS3BucketName
24 | - QsS3KeyPrefix
25 | - QsS3BucketRegion
26 | ParameterLabels:
27 | KeyPairName:
28 | default: SSH key name
29 | RemoteAccessCidr:
30 | default: Remote access CIDR
31 | AvailabilityZone:
32 | default: Availability zone
33 | InstanceType:
34 | default: EC2 instance type
35 | XrayVersion:
36 | default: Xray version
37 | QsS3BucketName:
38 | default: Quick Start S3 bucket name
39 | QsS3KeyPrefix:
40 | default: Quick Start S3 key prefix
41 | QsS3BucketRegion:
42 | default: Quick Start S3 bucket region
43 | Parameters:
44 | AvailabilityZone:
45 | Description: The Availability Zone to use for the public subnet in the VPC.
46 | Default: us-west-2a
47 | Type: AWS::EC2::AvailabilityZone::Name
48 | KeyPairName:
49 | Description: The name of an existing public/private key pair, which allows you
50 | to securely connect to your instance after it launches.
51 | Type: AWS::EC2::KeyPair::KeyName
52 | RemoteAccessCidr:
53 | Description: The remote CIDR range for allowing SSH into the Bastion instance.
54 | We recommend that you set this value to a trusted IP range.
55 | For example, you might want to grant specific ranges inside your corporate network SSH access.
56 | AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))$
57 | Type: String
58 | XrayVersion:
59 |     Description: The version of the JFrog product line that you want to deploy into an AMI.
60 | Please see the release notes to select the version you want to deploy.
61 | https://www.jfrog.com/confluence/display/RTF/Release+Notes
62 | AllowedPattern: ^(([0-9]|[1-9][0-9])\.){2}([1-9][0-9]|[0-9])(-([a-z][0-9][0-9][0-9]))?$
63 | ConstraintDescription: A version that matches X.X.X per Xray releases.
64 | Default: 3.17.4
65 | Type: String
66 | InstanceType:
67 | Description: The EC2 instance type for the AMI.
68 | AllowedValues:
69 | - m5.xlarge
70 | - m5.2xlarge
71 | - m5.4xlarge
72 |       - m5.8xlarge
73 | ConstraintDescription: Must contain valid instance type.
74 | Default: m5.xlarge
75 | Type: String
76 | QsS3BucketName:
77 | Description: S3 bucket name for the Quick Start assets. This string can include
78 | numbers, lowercase letters, uppercase letters, and hyphens (-). It cannot start
79 | or end with a hyphen (-).
80 | AllowedPattern: ^[0-9a-zA-Z]+([0-9a-zA-Z-]*[0-9a-zA-Z])*$
81 | ConstraintDescription: Quick Start bucket name can include numbers, lowercase
82 | letters, uppercase letters, and hyphens (-). It cannot start or end with a hyphen
83 | (-).
84 | Default: aws-quickstart
85 | Type: String
86 | QsS3KeyPrefix:
87 | Description: S3 key prefix for the Quick Start assets. Quick Start key prefix
88 | can include numbers, lowercase letters, uppercase letters, hyphens (-), and
89 | forward slash (/).
90 | AllowedPattern: ^[0-9a-zA-Z-/]*$
91 | ConstraintDescription: Quick Start key prefix can include numbers, lowercase letters,
92 | uppercase letters, hyphens (-), and forward slash (/).
93 | Default: quickstart-jfrog-xray/
94 | Type: String
95 | QsS3BucketRegion:
96 | Default: 'us-east-1'
97 | Description: The AWS Region where the Quick Start S3 bucket (QSS3BucketName) is hosted. When using your own bucket, you must specify this value.
98 | Type: String
99 | Conditions:
100 | UsingDefaultBucket: !Equals [!Ref QsS3BucketName, 'aws-quickstart']
101 | Resources:
102 | VPCStack:
103 | Type: AWS::CloudFormation::Stack
104 | Properties:
105 | TemplateURL: !Sub
106 | - https://${S3Bucket}.s3.${S3Region}.${AWS::URLSuffix}/${QsS3KeyPrefix}templates/jfrog-ami-vpc.template.yaml
107 | - S3Bucket: !If [UsingDefaultBucket, !Sub '${QsS3BucketName}-${AWS::Region}', !Ref 'QsS3BucketName']
108 | S3Region: !If [UsingDefaultBucket, !Ref 'AWS::Region', !Ref 'QsS3BucketRegion']
109 | Parameters:
110 | AvailabilityZone: !Ref AvailabilityZone
111 | AMIStack:
112 | Type: AWS::CloudFormation::Stack
113 | Properties:
114 | TemplateURL: !Sub
115 | - https://${S3Bucket}.s3.${S3Region}.${AWS::URLSuffix}/${QsS3KeyPrefix}templates/ami-rt-xray-creation.template.yaml
116 | - S3Bucket: !If [UsingDefaultBucket, !Sub '${QsS3BucketName}-${AWS::Region}', !Ref 'QsS3BucketName']
117 | S3Region: !If [UsingDefaultBucket, !Ref 'AWS::Region', !Ref 'QsS3BucketRegion']
118 | Parameters:
119 | KeyPairName: !Ref KeyPairName
120 | RemoteAccessCidr: !Ref RemoteAccessCidr
121 | VpcId: !GetAtt VPCStack.Outputs.VpcId
122 | PublicSubnet1Id: !GetAtt VPCStack.Outputs.PublicSubnetID
123 | InstanceType: !Ref InstanceType
124 | XrayVersion: !Ref XrayVersion
125 | QsS3BucketName: !Ref QsS3BucketName
126 | QsS3KeyPrefix: !Ref QsS3KeyPrefix
127 | QsS3BucketRegion: !Ref QsS3BucketRegion
128 |
--------------------------------------------------------------------------------
/templates/ami-rt-xray-vpc.template.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: 'Creates a VPC with a single public subnet. Note: If you require
3 | more than one subnet, please move to the aws-vpc quickstart module (qs-xxx)'
4 | Metadata:
5 | AWS::CloudFormation::Interface:
6 | ParameterGroups:
7 | - Label:
8 | default: VPC Information
9 | Parameters:
10 | - AvailabilityZone
11 | - VpcCidr
12 | - PublicSubnetCidr
13 | - VpcTenancy
14 | ParameterLabels:
15 | AvailabilityZone:
16 | default: Availability Zone
17 | PublicSubnetCidr:
18 | default: Public subnet CIDR
19 | VpcCidr:
20 | default: VPC CIDR
21 | VpcTenancy:
22 | default: VPC Tenancy
23 | Parameters:
24 | AvailabilityZone:
25 | Description: The Availability Zone to use for the public subnet in the VPC.
26 | Default: us-west-2a
27 | Type: AWS::EC2::AvailabilityZone::Name
28 | VpcCidr:
29 | Description: The CIDR block for the VPC.
30 | AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$
31 | ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-28
32 | Default: 10.0.0.0/27
33 | Type: String
34 | PublicSubnetCidr:
35 | Description: The CIDR block for the public (DMZ) subnet 1 located in Availability
36 | Zone 1.
37 | AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$
38 | ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-28
39 | Default: 10.0.0.0/28
40 | Type: String
41 | VpcTenancy:
42 | Description: "The allowed tenancy of instances launched into the VPC"
43 | AllowedValues:
44 | - default
45 | - dedicated
46 | Default: default
47 | Type: String
48 | Conditions:
49 | NVirginiaRegionCondition: !Equals [!Ref 'AWS::Region', 'us-east-1']
50 | Resources:
51 | AmiDHCPOptions:
52 | Type: AWS::EC2::DHCPOptions
53 | Properties:
54 | DomainName: !If
55 | - NVirginiaRegionCondition
56 | - ec2.internal
57 | - !Sub '${AWS::Region}.compute.internal'
58 | DomainNameServers:
59 | - AmazonProvidedDNS
60 | AmiVpc:
61 | Type: AWS::EC2::VPC
62 | Properties:
63 | CidrBlock: !Ref VpcCidr
64 | InstanceTenancy: !Ref VpcTenancy
65 | EnableDnsSupport: true
66 | EnableDnsHostnames: true
67 | Tags:
68 | - Key: Name
69 | Value: !Ref AWS::StackName
70 | VpcDHCPOptionsAssociation:
71 | Type: AWS::EC2::VPCDHCPOptionsAssociation
72 | Properties:
73 | VpcId: !Ref AmiVpc
74 | DhcpOptionsId: !Ref AmiDHCPOptions
75 | InternetGateway:
76 | Type: AWS::EC2::InternetGateway
77 | Properties:
78 | Tags:
79 | - Key: Name
80 | Value: !Ref AWS::StackName
81 | VpcGatewayAttach:
82 | Type: AWS::EC2::VPCGatewayAttachment
83 | Properties:
84 | VpcId: !Ref AmiVpc
85 | InternetGatewayId: !Ref InternetGateway
86 | PublicSubnet:
87 | Type: AWS::EC2::Subnet
88 | Properties:
89 | VpcId: !Ref AmiVpc
90 | CidrBlock: !Ref PublicSubnetCidr
91 | AvailabilityZone: !Ref AvailabilityZone
92 | MapPublicIpOnLaunch: true
93 | Tags:
94 | - Key: Name
95 | Value: !Sub 'Public Subnet for ${AWS::StackName}'
96 | PublicSubnetRouteTable:
97 | Type: AWS::EC2::RouteTable
98 | Properties:
99 | VpcId: !Ref AmiVpc
100 | Tags:
101 | - Key: Name
102 | Value: !Sub 'Public Subnet Route table for ${AWS::StackName}'
103 | PublicSubnetRoute:
104 | DependsOn: VpcGatewayAttach
105 | Type: AWS::EC2::Route
106 | Properties:
107 | RouteTableId: !Ref PublicSubnetRouteTable
108 | DestinationCidrBlock: 0.0.0.0/0
109 | GatewayId: !Ref InternetGateway
110 | PublicSubnetRouteTableAssociation:
111 | Type: AWS::EC2::SubnetRouteTableAssociation
112 | Properties:
113 | SubnetId: !Ref PublicSubnet
114 | RouteTableId: !Ref PublicSubnetRouteTable
115 | S3Endpoint:
116 | Type: AWS::EC2::VPCEndpoint
117 | Metadata:
118 | cfn-lint:
119 | config:
120 | ignore_checks:
121 | - EIAMPolicyWildcardResource
122 | - EPolicyWildcardPrincipal
123 | Properties:
124 | PolicyDocument:
125 | Version: 2012-10-17
126 | Statement:
127 | - Effect: Allow
128 | Principal: '*'
129 | Action:
130 | - s3:AbortMultipartUpload
131 | - s3:BypassGovernanceRetention
132 | - s3:CreateAccessPoint
133 | - s3:CreateAccessPointForObjectLambda
134 | - s3:CreateBucket
135 | - s3:CreateJob
136 | - s3:DeleteAccessPoint
137 | - s3:DeleteAccessPointForObjectLambda
138 | - s3:DeleteAccessPointPolicy
139 | - s3:DeleteAccessPointPolicyForObjectLambda
140 | - s3:DeleteBucket
141 | - s3:DeleteBucketOwnershipControls
142 | - s3:DeleteBucketPolicy
143 | - s3:DeleteBucketWebsite
144 | - s3:DeleteJobTagging
145 | - s3:DeleteObject
146 | - s3:DeleteObjectTagging
147 | - s3:DeleteObjectVersion
148 | - s3:DeleteObjectVersionTagging
149 | - s3:DeleteStorageLensConfiguration
150 | - s3:DeleteStorageLensConfigurationTagging
151 | - s3:DescribeJob
152 | - s3:Get*
153 | - s3:List*
154 | - s3:ObjectOwnerOverrideToBucketOwner
155 | - s3:Put*
156 | - s3:ReplicateDelete
157 | - s3:ReplicateObject
158 | - s3:ReplicateTags
159 | - s3:RestoreObject
160 | - s3:UpdateJobPriority
161 | - s3:UpdateJobStatus
162 | Resource:
163 | - !Sub 'arn:${AWS::Partition}:s3:::*'
164 | RouteTableIds:
165 | - !Ref PublicSubnetRouteTable
166 | ServiceName: !Join
167 | - ''
168 | - - com.amazonaws.
169 | - !Ref 'AWS::Region'
170 | - .s3
171 | VpcId: !Ref AmiVpc
172 |
173 | Outputs:
174 | PublicSubnetCidr:
175 | Description: Public subnet CIDR for the newly created VPC and subnet
176 | Value: !Ref PublicSubnetCidr
177 | Export:
178 |       Name: !Sub '${AWS::StackName}-PublicSubnetCidr'
179 | PublicSubnetID:
180 | Description: Public subnet ID for newly created VPC
181 | Value: !Ref PublicSubnet
182 | Export:
183 | Name: !Sub '${AWS::StackName}-PublicSubnetID'
184 | PublicSubnetRouteTable:
185 | Description: Public subnet route table
186 | Value: !Ref PublicSubnetRouteTable
187 | Export:
188 | Name: !Sub '${AWS::StackName}-PublicSubnetRouteTable'
189 | VpcCidr:
190 | Description: VPC CIDR
191 | Value: !Ref VpcCidr
192 | Export:
193 |       Name: !Sub '${AWS::StackName}-VpcCidr'
194 | VpcId:
195 | Description: Vpc ID
196 | Value: !Ref AmiVpc
197 | Export:
198 | Name: !Sub '${AWS::StackName}-VpcId'
199 |
--------------------------------------------------------------------------------
/templates/jfrog-ami-main.template.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: 'JFrog Product AMI creation into an existing VPC (qs-xxx)'
3 | Metadata:
4 | AWS::CloudFormation::Interface:
5 | ParameterGroups:
6 | - Label:
7 | default: Security configuration
8 | Parameters:
9 | - KeyPairName
10 | - RemoteAccessCidr
11 | - Label:
12 | default: Network Configuration
13 | Parameters:
14 | - AvailabilityZone
15 | - Label:
16 | default: JFrog Product AMI info
17 | Parameters:
18 | - InstanceType
19 | - ArtifactoryVersion
20 | - Label:
21 | default: AWS Quick Start Configuration
22 | Parameters:
23 | - QsS3BucketName
24 | - QsS3KeyPrefix
25 | - QsS3BucketRegion
26 | ParameterLabels:
27 | KeyPairName:
28 | default: SSH key name
29 | RemoteAccessCidr:
30 | default: Remote access CIDR
31 | AvailabilityZone:
32 | default: Availability zone
33 | InstanceType:
34 | default: EC2 instance type
35 | ArtifactoryVersion:
36 | default: Artifactory version
37 | QsS3BucketName:
38 | default: Quick Start S3 bucket name
39 | QsS3KeyPrefix:
40 | default: Quick Start S3 key prefix
41 | QsS3BucketRegion:
42 | default: Quick Start S3 bucket region
43 | Parameters:
44 | AvailabilityZone:
45 | Description: The Availability Zone to use for the public subnet in the VPC.
46 | Default: us-west-2a
47 | Type: AWS::EC2::AvailabilityZone::Name
48 | KeyPairName:
49 | Description: The name of an existing public/private key pair, which allows you
50 | to securely connect to your instance after it launches.
51 | Type: AWS::EC2::KeyPair::KeyName
52 | RemoteAccessCidr:
53 | Description: The remote CIDR range for allowing SSH into the Bastion instance.
54 | We recommend that you set this value to a trusted IP range.
55 | For example, you might want to grant specific ranges inside your corporate network SSH access.
56 | AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))$
57 | Type: String
58 | ArtifactoryVersion:
59 |     Description: The version of the JFrog product line that you want to deploy into an AMI.
60 | Please see the release notes to select the version you want to deploy.
61 | https://www.jfrog.com/confluence/display/RTF/Release+Notes
62 | AllowedPattern: ^(([0-9]|[1-9][0-9])\.){2}([1-9][0-9]|[0-9])(-([a-z][0-9][0-9][0-9]))?$
63 | ConstraintDescription: A version that matches X.X.X per Artifactory releases.
64 | Default: 7.15.3
65 | Type: String
66 | InstanceType:
67 | Description: The EC2 instance type for the AMI.
68 | AllowedValues:
69 | - m5.xlarge
70 | - m5.2xlarge
71 | - m5.4xlarge
72 |       - m5.8xlarge
73 | ConstraintDescription: Must contain valid instance type.
74 | Default: m5.xlarge
75 | Type: String
76 | QsS3BucketName:
77 | Description: S3 bucket name for the Quick Start assets. This string can include
78 | numbers, lowercase letters, uppercase letters, and hyphens (-). It cannot start
79 | or end with a hyphen (-).
80 | AllowedPattern: ^[0-9a-zA-Z]+([0-9a-zA-Z-]*[0-9a-zA-Z])*$
81 | ConstraintDescription: Quick Start bucket name can include numbers, lowercase
82 | letters, uppercase letters, and hyphens (-). It cannot start or end with a hyphen
83 | (-).
84 | Default: aws-quickstart
85 | Type: String
86 | QsS3KeyPrefix:
87 | Description: S3 key prefix for the Quick Start assets. Quick Start key prefix
88 | can include numbers, lowercase letters, uppercase letters, hyphens (-), and
89 | forward slash (/).
90 | AllowedPattern: ^[0-9a-zA-Z-/]*$
91 | ConstraintDescription: Quick Start key prefix can include numbers, lowercase letters,
92 | uppercase letters, hyphens (-), and forward slash (/).
93 | Default: quickstart-jfrog-artifactory/
94 | Type: String
95 | QsS3BucketRegion:
96 | Default: 'us-east-1'
97 | Description: The AWS Region where the Quick Start S3 bucket (QSS3BucketName) is hosted. When using your own bucket, you must specify this value.
98 | Type: String
99 | Conditions:
100 | UsingDefaultBucket: !Equals [!Ref QsS3BucketName, 'aws-quickstart']
101 | Resources:
102 | VPCStack:
103 | Type: AWS::CloudFormation::Stack
104 | Properties:
105 | TemplateURL: !Sub
106 | - https://${S3Bucket}.s3.${S3Region}.${AWS::URLSuffix}/${QsS3KeyPrefix}templates/jfrog-ami-vpc.template.yaml
107 | - S3Bucket: !If [UsingDefaultBucket, !Sub '${QsS3BucketName}-${AWS::Region}', !Ref 'QsS3BucketName']
108 | S3Region: !If [UsingDefaultBucket, !Ref 'AWS::Region', !Ref 'QsS3BucketRegion']
109 | Parameters:
110 | AvailabilityZone: !Ref AvailabilityZone
111 | AMIStack:
112 | Type: AWS::CloudFormation::Stack
113 | Properties:
114 | TemplateURL: !Sub
115 | - https://${S3Bucket}.s3.${S3Region}.${AWS::URLSuffix}/${QsS3KeyPrefix}templates/jfrog-ami-creation.template.yaml
116 | - S3Bucket: !If [UsingDefaultBucket, !Sub '${QsS3BucketName}-${AWS::Region}', !Ref 'QsS3BucketName']
117 | S3Region: !If [UsingDefaultBucket, !Ref 'AWS::Region', !Ref 'QsS3BucketRegion']
118 | Parameters:
119 | KeyPairName: !Ref KeyPairName
120 | RemoteAccessCidr: !Ref RemoteAccessCidr
121 | VpcId: !GetAtt VPCStack.Outputs.VpcId
122 | PublicSubnet1Id: !GetAtt VPCStack.Outputs.PublicSubnetID
123 | InstanceType: !Ref InstanceType
124 | ArtifactoryVersion: !Ref ArtifactoryVersion
125 | QsS3BucketName: !Ref QsS3BucketName
126 | QsS3KeyPrefix: !Ref QsS3KeyPrefix
127 | QsS3BucketRegion: !Ref QsS3BucketRegion
128 |
--------------------------------------------------------------------------------
/templates/jfrog-ami-vpc.template.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: 'Creates a VPC with a single public subnet. Note: If you require
3 | more than one subnet, please move to the aws-vpc quickstart module (qs-xxx)'
4 | Metadata:
5 | AWS::CloudFormation::Interface:
6 | ParameterGroups:
7 | - Label:
8 | default: VPC Information
9 | Parameters:
10 | - AvailabilityZone
11 | - VpcCidr
12 | - PublicSubnetCidr
13 | - VpcTenancy
14 | ParameterLabels:
15 | AvailabilityZone:
16 | default: Availability Zone
17 | PublicSubnetCidr:
18 | default: Public subnet CIDR
19 | VpcCidr:
20 | default: VPC CIDR
21 | VpcTenancy:
22 | default: VPC Tenancy
23 | Parameters:
24 | AvailabilityZone:
25 | Description: The Availability Zone to use for the public subnet in the VPC.
26 | Default: us-west-2a
27 | Type: AWS::EC2::AvailabilityZone::Name
28 | VpcCidr:
29 | Description: The CIDR block for the VPC.
30 | AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$
31 | ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-28
32 | Default: 10.0.0.0/27
33 | Type: String
34 | PublicSubnetCidr:
35 | Description: The CIDR block for the public (DMZ) subnet 1 located in Availability
36 | Zone 1.
37 | AllowedPattern: ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(1[6-9]|2[0-8]))$
38 | ConstraintDescription: CIDR block parameter must be in the form x.x.x.x/16-28
39 | Default: 10.0.0.0/28
40 | Type: String
41 | VpcTenancy:
42 | Description: "The allowed tenancy of instances launched into the VPC"
43 | AllowedValues:
44 | - default
45 | - dedicated
46 | Default: default
47 | Type: String
48 | Conditions:
49 | NVirginiaRegionCondition: !Equals [!Ref 'AWS::Region', 'us-east-1']
50 | Resources:
51 | AmiDHCPOptions:
52 | Type: AWS::EC2::DHCPOptions
53 | Properties:
54 | DomainName: !If
55 | - NVirginiaRegionCondition
56 | - ec2.internal
57 | - !Sub '${AWS::Region}.compute.internal'
58 | DomainNameServers:
59 | - AmazonProvidedDNS
60 | AmiVpc:
61 | Type: AWS::EC2::VPC
62 | Properties:
63 | CidrBlock: !Ref VpcCidr
64 | InstanceTenancy: !Ref VpcTenancy
65 | EnableDnsSupport: true
66 | EnableDnsHostnames: true
67 | Tags:
68 | - Key: Name
69 | Value: !Ref AWS::StackName
70 | VpcDHCPOptionsAssociation:
71 | Type: AWS::EC2::VPCDHCPOptionsAssociation
72 | Properties:
73 | VpcId: !Ref AmiVpc
74 | DhcpOptionsId: !Ref AmiDHCPOptions
75 | InternetGateway:
76 | Type: AWS::EC2::InternetGateway
77 | Properties:
78 | Tags:
79 | - Key: Name
80 | Value: !Ref AWS::StackName
81 | VpcGatewayAttach:
82 | Type: AWS::EC2::VPCGatewayAttachment
83 | Properties:
84 | VpcId: !Ref AmiVpc
85 | InternetGatewayId: !Ref InternetGateway
86 | PublicSubnet:
87 | Type: AWS::EC2::Subnet
88 | Properties:
89 | VpcId: !Ref AmiVpc
90 | CidrBlock: !Ref PublicSubnetCidr
91 | AvailabilityZone: !Ref AvailabilityZone
92 | MapPublicIpOnLaunch: true
93 | Tags:
94 | - Key: Name
95 | Value: !Sub 'Public Subnet for ${AWS::StackName}'
96 | PublicSubnetRouteTable:
97 | Type: AWS::EC2::RouteTable
98 | Properties:
99 | VpcId: !Ref AmiVpc
100 | Tags:
101 | - Key: Name
102 | Value: !Sub 'Public Subnet Route table for ${AWS::StackName}'
103 | PublicSubnetRoute:
104 | DependsOn: VpcGatewayAttach
105 | Type: AWS::EC2::Route
106 | Properties:
107 | RouteTableId: !Ref PublicSubnetRouteTable
108 | DestinationCidrBlock: 0.0.0.0/0
109 | GatewayId: !Ref InternetGateway
110 | PublicSubnetRouteTableAssociation:
111 | Type: AWS::EC2::SubnetRouteTableAssociation
112 | Properties:
113 | SubnetId: !Ref PublicSubnet
114 | RouteTableId: !Ref PublicSubnetRouteTable
115 | S3Endpoint:
116 | Type: AWS::EC2::VPCEndpoint
117 | Metadata:
118 | cfn-lint:
119 | config:
120 | ignore_checks:
121 | - EIAMPolicyWildcardResource
122 | - EPolicyWildcardPrincipal
123 | ignore_reasons:
124 | - EIAMPolicyWildcardResource: excluding for s3:Get*, s3:Put*, s3:List*
125 | Properties:
126 | PolicyDocument:
127 | Version: 2012-10-17
128 | Statement:
129 | - Effect: Allow
130 | Principal: '*'
131 | Action:
132 | - s3:AbortMultipartUpload
133 | - s3:BypassGovernanceRetention
134 | - s3:CreateAccessPoint
135 | - s3:CreateAccessPointForObjectLambda
136 | - s3:CreateBucket
137 | - s3:CreateJob
138 | - s3:DeleteAccessPoint
139 | - s3:DeleteAccessPointForObjectLambda
140 | - s3:DeleteAccessPointPolicy
141 | - s3:DeleteAccessPointPolicyForObjectLambda
142 | - s3:DeleteBucket
143 | - s3:DeleteBucketOwnershipControls
144 | - s3:DeleteBucketPolicy
145 | - s3:DeleteBucketWebsite
146 | - s3:DeleteJobTagging
147 | - s3:DeleteObject
148 | - s3:DeleteObjectTagging
149 | - s3:DeleteObjectVersion
150 | - s3:DeleteObjectVersionTagging
151 | - s3:DeleteStorageLensConfiguration
152 | - s3:DeleteStorageLensConfigurationTagging
153 | - s3:DescribeJob
154 | - s3:Get*
155 | - s3:List*
156 | - s3:ObjectOwnerOverrideToBucketOwner
157 | - s3:Put*
158 | - s3:ReplicateDelete
159 | - s3:ReplicateObject
160 | - s3:ReplicateTags
161 | - s3:RestoreObject
162 | - s3:UpdateJobPriority
163 | - s3:UpdateJobStatus
164 | Resource:
165 | - !Sub 'arn:${AWS::Partition}:s3:::*'
166 | RouteTableIds:
167 | - !Ref PublicSubnetRouteTable
168 | ServiceName: !Join
169 | - ''
170 | - - com.amazonaws.
171 | - !Ref 'AWS::Region'
172 | - .s3
173 | VpcId: !Ref AmiVpc
174 |
175 | Outputs:
176 | PublicSubnetCidr:
177 | Description: Public subnet CIDR for the newly created VPC and subnet
178 | Value: !Ref PublicSubnetCidr
179 | Export:
180 |       Name: !Sub '${AWS::StackName}-PublicSubnetCidr'
181 | PublicSubnetID:
182 | Description: Public subnet ID for newly created VPC
183 | Value: !Ref PublicSubnet
184 | Export:
185 | Name: !Sub '${AWS::StackName}-PublicSubnetID'
186 | PublicSubnetRouteTable:
187 | Description: Public subnet route table
188 | Value: !Ref PublicSubnetRouteTable
189 | Export:
190 | Name: !Sub '${AWS::StackName}-PublicSubnetRouteTable'
191 | VpcCidr:
192 | Description: VPC CIDR
193 | Value: !Ref VpcCidr
194 | Export:
195 |       Name: !Sub '${AWS::StackName}-VpcCidr'
196 | VpcId:
197 | Description: Vpc ID
198 | Value: !Ref AmiVpc
199 | Export:
200 | Name: !Sub '${AWS::StackName}-VpcId'
201 |
--------------------------------------------------------------------------------
/xray-setup/crhelper-2.0.6.dist-info/INSTALLER:
--------------------------------------------------------------------------------
1 | pip
2 |
--------------------------------------------------------------------------------
/xray-setup/crhelper-2.0.6.dist-info/METADATA:
--------------------------------------------------------------------------------
1 | Metadata-Version: 2.1
2 | Name: crhelper
3 | Version: 2.0.6
4 | Summary: crhelper simplifies authoring CloudFormation Custom Resources
5 | Home-page: https://github.com/aws-cloudformation/custom-resource-helper
6 | Author: Jay McConnell
7 | Author-email: jmmccon@amazon.com
8 | License: Apache2
9 | Platform: UNKNOWN
10 | Classifier: Programming Language :: Python :: 3.6
11 | Classifier: Programming Language :: Python :: 3.7
12 | Classifier: License :: OSI Approved :: Apache Software License
13 | Classifier: Operating System :: OS Independent
14 | Description-Content-Type: text/markdown
15 |
16 | ## Custom Resource Helper
17 |
18 | Simplify best practice Custom Resource creation, sending responses to CloudFormation and providing exception, timeout
19 | trapping, and detailed configurable logging.
20 |
21 | [](https://pypi.org/project/crhelper/)
22 | 
23 | [](https://travis-ci.com/aws-cloudformation/custom-resource-helper)
24 | [](https://codecov.io/gh/aws-cloudformation/custom-resource-helper)
25 |
26 | ## Features
27 |
28 | * Dead simple to use, reduces the complexity of writing a CloudFormation custom resource
29 | * Guarantees that CloudFormation will get a response even if an exception is raised
30 | * Returns meaningful errors to CloudFormation Stack events in the case of a failure
31 | * Polling enables run times longer than the lambda 15 minute limit
32 | * JSON logging that includes request id's, stack id's and request type to assist in tracing logs relevant to a
33 | particular CloudFormation event
34 | * Catches function timeouts and sends CloudFormation a failure response
35 | * Static typing (mypy) compatible
36 |
37 | ## Installation
38 |
39 | Install into the root folder of your lambda function
40 |
41 | ```bash
42 | cd my-lambda-function/
43 | pip install crhelper -t .
44 | ```
45 |
46 | ## Example Usage
47 |
48 | [This blog](https://aws.amazon.com/blogs/infrastructure-and-automation/aws-cloudformation-custom-resource-creation-with-python-aws-lambda-and-crhelper/) covers usage in more detail.
49 |
50 | ```python
51 | from __future__ import print_function
52 | from crhelper import CfnResource
53 | import logging
54 |
55 | logger = logging.getLogger(__name__)
56 | # Initialise the helper, all inputs are optional, this example shows the defaults
57 | helper = CfnResource(json_logging=False, log_level='DEBUG', boto_level='CRITICAL', sleep_on_delete=120)
58 |
59 | try:
60 | ## Init code goes here
61 | pass
62 | except Exception as e:
63 | helper.init_failure(e)
64 |
65 |
66 | @helper.create
67 | def create(event, context):
68 | logger.info("Got Create")
69 | # Optionally return an ID that will be used for the resource PhysicalResourceId,
70 | # if None is returned an ID will be generated. If a poll_create function is defined
71 | # return value is placed into the poll event as event['CrHelperData']['PhysicalResourceId']
72 | #
73 | # To add response data update the helper.Data dict
74 | # If poll is enabled data is placed into poll event as event['CrHelperData']
75 | helper.Data.update({"test": "testdata"})
76 |
77 | # To return an error to cloudformation you raise an exception:
78 | if not helper.Data.get("test"):
79 | raise ValueError("this error will show in the cloudformation events log and console.")
80 |
81 | return "MyResourceId"
82 |
83 |
84 | @helper.update
85 | def update(event, context):
86 | logger.info("Got Update")
87 | # If the update resulted in a new resource being created, return an id for the new resource.
88 | # CloudFormation will send a delete event with the old id when stack update completes
89 |
90 |
91 | @helper.delete
92 | def delete(event, context):
93 | logger.info("Got Delete")
94 | # Delete never returns anything. Should not fail if the underlying resources are already deleted.
95 | # Desired state.
96 |
97 |
98 | @helper.poll_create
99 | def poll_create(event, context):
100 | logger.info("Got create poll")
101 | # Return a resource id or True to indicate that creation is complete. if True is returned an id
102 | # will be generated
103 | return True
104 |
105 |
106 | def handler(event, context):
107 | helper(event, context)
108 | ```
109 |
110 | ### Polling
111 |
112 | If you need longer than the max runtime of 15 minutes, you can enable polling by adding additional decorators for
113 | `poll_create`, `poll_update` or `poll_delete`. When a poll function is defined for `create`/`update`/`delete` the
114 | function will not send a response to CloudFormation and instead a CloudWatch Events schedule will be created to
115 | re-invoke the lambda function every 2 minutes. When the function is invoked the matching `@helper.poll_` function will
116 | be called, logic to check for completion should go here, if the function returns `None` then the schedule will run again
117 | in 2 minutes. Once complete either return a PhysicalResourceID or `True` to have one generated. The schedule will be
118 | deleted and a response sent back to CloudFormation. If you use polling the following additional IAM policy must be
119 | attached to the function's IAM role:
120 |
121 | ```json
122 | {
123 | "Version": "2012-10-17",
124 | "Statement": [
125 | {
126 | "Effect": "Allow",
127 | "Action": [
128 | "lambda:AddPermission",
129 | "lambda:RemovePermission",
130 | "events:PutRule",
131 | "events:DeleteRule",
132 | "events:PutTargets",
133 | "events:RemoveTargets"
134 | ],
135 | "Resource": "*"
136 | }
137 | ]
138 | }
139 | ```
140 |
141 | ## Credits
142 |
143 | Decorator implementation inspired by https://github.com/ryansb/cfn-wrapper-python
144 |
145 | Log implementation inspired by https://gitlab.com/hadrien/aws_lambda_logging
146 |
147 | ## License
148 |
149 | This library is licensed under the Apache 2.0 License.
150 |
151 |
152 |
--------------------------------------------------------------------------------
/xray-setup/crhelper-2.0.6.dist-info/NOTICE:
--------------------------------------------------------------------------------
1 | Custom Resource Helper
2 | Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 |
--------------------------------------------------------------------------------
/xray-setup/crhelper-2.0.6.dist-info/RECORD:
--------------------------------------------------------------------------------
1 | crhelper-2.0.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2 | crhelper-2.0.6.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
3 | crhelper-2.0.6.dist-info/METADATA,sha256=0FEfmNkHpgUGUHmR-GGoiZwcGJsEYmJE92mkBI_tQ1Q,5537
4 | crhelper-2.0.6.dist-info/NOTICE,sha256=gDru0mjdrGkrCJfnHTVboKMdS7U85Ha8bV_PQTCckfM,96
5 | crhelper-2.0.6.dist-info/RECORD,,
6 | crhelper-2.0.6.dist-info/WHEEL,sha256=g4nMs7d-Xl9-xC9XovUrsDHGXt-FT0E17Yqo92DEfvY,92
7 | crhelper-2.0.6.dist-info/top_level.txt,sha256=pe_5uNErAyss8aUfseYKAjd3a1-LXM6bPjnkun7vbso,15
8 | crhelper/__init__.py,sha256=VSvHU2MKgP96DHSDXR1OYxnbC8j7yfuVhZubBLU7Pns,66
9 | crhelper/__pycache__/__init__.cpython-38.pyc,,
10 | crhelper/__pycache__/log_helper.cpython-38.pyc,,
11 | crhelper/__pycache__/resource_helper.cpython-38.pyc,,
12 | crhelper/__pycache__/utils.cpython-38.pyc,,
13 | crhelper/log_helper.py,sha256=18n4WKlGgxXL_iiYPqE8dWv9TW4sPZc4Ae3px5dbHmY,2665
14 | crhelper/resource_helper.py,sha256=jlFCL0YMi1lEN9kOqhRtKkMcDovoJJpwq1oTk3W5hX0,12637
15 | crhelper/utils.py,sha256=HX_ZnUy3DP81L5ofOVshhWK9NwYnZ9dzIWUPnOfFm5w,1384
16 | tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
17 | tests/__pycache__/__init__.cpython-38.pyc,,
18 | tests/__pycache__/test_log_helper.cpython-38.pyc,,
19 | tests/__pycache__/test_resource_helper.cpython-38.pyc,,
20 | tests/__pycache__/test_utils.cpython-38.pyc,,
21 | tests/test_log_helper.py,sha256=T25g-RnRYrwp05v__25thYiodWIIDtoSXDFAqe9Z7rQ,3256
22 | tests/test_resource_helper.py,sha256=5BzbcWX49kSZN0GveRpG8Bt3PHAYUGubJMOmbAigFP0,14462
23 | tests/test_utils.py,sha256=HbLMvoXfYbF952AMM-ey8RNasbYHFqfX17rqajluOKM,1407
24 | tests/unit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
25 | tests/unit/__pycache__/__init__.cpython-38.pyc,,
26 |
--------------------------------------------------------------------------------
/xray-setup/crhelper-2.0.6.dist-info/WHEEL:
--------------------------------------------------------------------------------
1 | Wheel-Version: 1.0
2 | Generator: bdist_wheel (0.34.2)
3 | Root-Is-Purelib: true
4 | Tag: py3-none-any
5 |
6 |
--------------------------------------------------------------------------------
/xray-setup/crhelper-2.0.6.dist-info/top_level.txt:
--------------------------------------------------------------------------------
1 | crhelper
2 | tests
3 |
--------------------------------------------------------------------------------
/xray-setup/crhelper/__init__.py:
--------------------------------------------------------------------------------
1 | from crhelper.resource_helper import CfnResource, SUCCESS, FAILED
2 |
--------------------------------------------------------------------------------
/xray-setup/crhelper/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/crhelper/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/xray-setup/crhelper/__pycache__/log_helper.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/crhelper/__pycache__/log_helper.cpython-38.pyc
--------------------------------------------------------------------------------
/xray-setup/crhelper/__pycache__/resource_helper.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/crhelper/__pycache__/resource_helper.cpython-38.pyc
--------------------------------------------------------------------------------
/xray-setup/crhelper/__pycache__/utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/crhelper/__pycache__/utils.cpython-38.pyc
--------------------------------------------------------------------------------
/xray-setup/crhelper/log_helper.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import json
3 | import logging
4 |
5 |
6 | def _json_formatter(obj):
7 | """Formatter for unserialisable values."""
8 | return str(obj)
9 |
10 |
11 | class JsonFormatter(logging.Formatter):
12 | """AWS Lambda Logging formatter.
13 |
14 | Formats the log message as a JSON encoded string. If the message is a
15 | dict it will be used directly. If the message can be parsed as JSON, then
16 | the parsed value is used in the output record.
17 | """
18 |
19 | def __init__(self, **kwargs):
20 | super(JsonFormatter, self).__init__()
21 | self.format_dict = {
22 | 'timestamp': '%(asctime)s',
23 | 'level': '%(levelname)s',
24 | 'location': '%(name)s.%(funcName)s:%(lineno)d',
25 | }
26 | self.format_dict.update(kwargs)
27 | self.default_json_formatter = kwargs.pop(
28 | 'json_default', _json_formatter)
29 |
30 | def format(self, record):
31 | record_dict = record.__dict__.copy()
32 | record_dict['asctime'] = self.formatTime(record)
33 |
34 | log_dict = {
35 | k: v % record_dict
36 | for k, v in self.format_dict.items()
37 | if v
38 | }
39 |
40 | if isinstance(record_dict['msg'], dict):
41 | log_dict['message'] = record_dict['msg']
42 | else:
43 | log_dict['message'] = record.getMessage()
44 |
45 | # Attempt to decode the message as JSON, if so, merge it with the
46 | # overall message for clarity.
47 | try:
48 | log_dict['message'] = json.loads(log_dict['message'])
49 | except (TypeError, ValueError):
50 | pass
51 |
52 | if record.exc_info:
53 | # Cache the traceback text to avoid converting it multiple times
54 | # (it's constant anyway)
55 | # from logging.Formatter:format
56 | if not record.exc_text:
57 | record.exc_text = self.formatException(record.exc_info)
58 |
59 | if record.exc_text:
60 | log_dict['exception'] = record.exc_text
61 |
62 | json_record = json.dumps(log_dict, default=self.default_json_formatter)
63 |
64 | if hasattr(json_record, 'decode'): # pragma: no cover
65 | json_record = json_record.decode('utf-8')
66 |
67 | return json_record
68 |
69 |
70 | def setup(level='DEBUG', formatter_cls=JsonFormatter, boto_level=None, **kwargs):
71 | if formatter_cls:
72 | for handler in logging.root.handlers:
73 | handler.setFormatter(formatter_cls(**kwargs))
74 |
75 | logging.root.setLevel(level)
76 |
77 | if not boto_level:
78 | boto_level = level
79 |
80 | logging.getLogger('boto').setLevel(boto_level)
81 | logging.getLogger('boto3').setLevel(boto_level)
82 | logging.getLogger('botocore').setLevel(boto_level)
83 | logging.getLogger('urllib3').setLevel(boto_level)
84 |
--------------------------------------------------------------------------------
/xray-setup/crhelper/utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import json
3 | import logging as logging
4 | import time
5 | from urllib.parse import urlsplit, urlunsplit
6 | from http.client import HTTPSConnection
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
11 | def _send_response(response_url, response_body):
12 | try:
13 | json_response_body = json.dumps(response_body)
14 | except Exception as e:
15 | msg = "Failed to convert response to json: {}".format(str(e))
16 | logger.error(msg, exc_info=True)
17 | response_body = {'Status': 'FAILED', 'Data': {}, 'Reason': msg}
18 | json_response_body = json.dumps(response_body)
19 | logger.debug("CFN response URL: {}".format(response_url))
20 | logger.debug(json_response_body)
21 | headers = {'content-type': '', 'content-length': str(len(json_response_body))}
22 | split_url = urlsplit(response_url)
23 | host = split_url.netloc
24 | url = urlunsplit(("", "", *split_url[2:]))
25 | while True:
26 | try:
27 | connection = HTTPSConnection(host)
28 | connection.request(method="PUT", url=url, body=json_response_body, headers=headers)
29 | response = connection.getresponse()
30 | logger.info("CloudFormation returned status code: {}".format(response.reason))
31 | break
32 | except Exception as e:
33 | logger.error("Unexpected failure sending response to CloudFormation {}".format(e), exc_info=True)
34 | time.sleep(5)
35 |
--------------------------------------------------------------------------------
/xray-setup/handler.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from crhelper import CfnResource
3 | import logging
4 | import psycopg2
5 | from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
6 |
7 | logger = logging.getLogger(__name__)
8 | # Initialise the helper, all inputs are optional, this example shows the defaults
9 | helper = CfnResource(json_logging=False, log_level='DEBUG', boto_level='CRITICAL', sleep_on_delete=120)
10 |
11 | @helper.create
12 | def create(event, context):
13 | conn = None
14 | try:
15 | logger.info("Got Create. Connecting to db")
16 | conn = psycopg2.connect(
17 | dbname=event['ResourceProperties']['XrayMasterDatabaseUrl'].split("/")[1].split("?")[0],
18 | user=event['ResourceProperties']['DatabaseUser'],
19 | host=event['ResourceProperties']['XrayMasterDatabaseUrl'].split(":")[0],
20 | password=event['ResourceProperties']['DatabasePassword']
21 | )
22 | conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
23 | cur = conn.cursor()
24 | logger.info("Start Queries")
25 | cur.execute(f"CREATE USER {event['ResourceProperties']['XrayDatabaseUser']} WITH PASSWORD \'{event['ResourceProperties']['XrayDatabasePassword']}\';")
26 | cur.execute(f"GRANT {event['ResourceProperties']['XrayDatabaseUser']} to {event['ResourceProperties']['DatabaseUser']};")
27 | cur.execute(f"CREATE DATABASE xraydb WITH OWNER={event['ResourceProperties']['XrayDatabaseUser']};")
28 | cur.execute(f"GRANT ALL PRIVILEGES ON DATABASE xraydb TO {event['ResourceProperties']['XrayDatabaseUser']};")
29 | cur.close()
30 | logger.info("End Queries")
31 | except psycopg2.DatabaseError as e:
32 | raise ValueError(e)
33 | finally:
34 | if conn:
35 | conn.close()
36 |
37 | @helper.update
38 | @helper.delete
39 | def noop(event, context):
40 | pass
41 |
42 | def handler(event, context):
43 | helper(event, context)
44 |
--------------------------------------------------------------------------------
/xray-setup/psycopg2/__init__.py:
--------------------------------------------------------------------------------
1 | """A Python driver for PostgreSQL
2 |
3 | psycopg is a PostgreSQL_ database adapter for the Python_ programming
4 | language. This is version 2, a complete rewrite of the original code to
5 | provide new-style classes for connection and cursor objects and other sweet
6 | candies. Like the original, psycopg 2 was written with the aim of being very
7 | small and fast, and stable as a rock.
8 |
9 | Homepage: http://initd.org/projects/psycopg2
10 |
11 | .. _PostgreSQL: https://www.postgresql.org/
12 | .. _Python: https://www.python.org/
13 |
14 | :Groups:
15 | * `Connections creation`: connect
16 | * `Value objects constructors`: Binary, Date, DateFromTicks, Time,
17 | TimeFromTicks, Timestamp, TimestampFromTicks
18 | """
19 | # psycopg/__init__.py - initialization of the psycopg module
20 | #
21 | # Copyright (C) 2003-2019 Federico Di Gregorio
22 | #
23 | # psycopg2 is free software: you can redistribute it and/or modify it
24 | # under the terms of the GNU Lesser General Public License as published
25 | # by the Free Software Foundation, either version 3 of the License, or
26 | # (at your option) any later version.
27 | #
28 | # In addition, as a special exception, the copyright holders give
29 | # permission to link this program with the OpenSSL library (or with
30 | # modified versions of OpenSSL that use the same license as OpenSSL),
31 | # and distribute linked combinations including the two.
32 | #
33 | # You must obey the GNU Lesser General Public License in all respects for
34 | # all of the code used other than OpenSSL.
35 | #
36 | # psycopg2 is distributed in the hope that it will be useful, but WITHOUT
37 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
38 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
39 | # License for more details.
40 |
41 | # Import modules needed by _psycopg to allow tools like py2exe to do
42 | # their work without bothering about the module dependencies.
43 |
44 | # Note: the first internal import should be _psycopg, otherwise the real cause
45 | # of a failed loading of the C module may get hidden, see
46 | # https://archives.postgresql.org/psycopg/2011-02/msg00044.php
47 |
48 | # Import the DBAPI-2.0 stuff into top-level module.
49 |
50 | from psycopg2._psycopg import ( # noqa
51 | BINARY, NUMBER, STRING, DATETIME, ROWID,
52 |
53 | Binary, Date, Time, Timestamp,
54 | DateFromTicks, TimeFromTicks, TimestampFromTicks,
55 |
56 | Error, Warning, DataError, DatabaseError, ProgrammingError, IntegrityError,
57 | InterfaceError, InternalError, NotSupportedError, OperationalError,
58 |
59 | _connect, apilevel, threadsafety, paramstyle,
60 | __version__, __libpq_version__,
61 | )
62 |
63 | from psycopg2 import tz # noqa
64 |
65 |
66 | # Register default adapters.
67 |
68 | from psycopg2 import extensions as _ext
69 | _ext.register_adapter(tuple, _ext.SQL_IN)
70 | _ext.register_adapter(type(None), _ext.NoneAdapter)
71 |
72 | # Register the Decimal adapter here instead of in the C layer.
73 | # This way a new class is registered for each sub-interpreter.
74 | # See ticket #52
75 | from decimal import Decimal # noqa
76 | from psycopg2._psycopg import Decimal as Adapter # noqa
77 | _ext.register_adapter(Decimal, Adapter)
78 | del Decimal, Adapter
79 |
80 |
81 | def connect(dsn=None, connection_factory=None, cursor_factory=None, **kwargs):
82 | """
83 | Create a new database connection.
84 |
85 | The connection parameters can be specified as a string:
86 |
87 | conn = psycopg2.connect("dbname=test user=postgres password=secret")
88 |
89 | or using a set of keyword arguments:
90 |
91 | conn = psycopg2.connect(database="test", user="postgres", password="secret")
92 |
93 | Or as a mix of both. The basic connection parameters are:
94 |
95 | - *dbname*: the database name
96 | - *database*: the database name (only as keyword argument)
97 | - *user*: user name used to authenticate
98 | - *password*: password used to authenticate
99 | - *host*: database host address (defaults to UNIX socket if not provided)
100 | - *port*: connection port number (defaults to 5432 if not provided)
101 |
102 | Using the *connection_factory* parameter a different class or connections
103 | factory can be specified. It should be a callable object taking a dsn
104 | argument.
105 |
106 | Using the *cursor_factory* parameter, a new default cursor factory will be
107 | used by cursor().
108 |
109 | Using *async*=True an asynchronous connection will be created. *async_* is
110 | a valid alias (for Python versions where ``async`` is a keyword).
111 |
112 | Any other keyword parameter will be passed to the underlying client
113 | library: the list of supported parameters depends on the library version.
114 |
115 | """
116 | kwasync = {}
117 | if 'async' in kwargs:
118 | kwasync['async'] = kwargs.pop('async')
119 | if 'async_' in kwargs:
120 | kwasync['async_'] = kwargs.pop('async_')
121 |
122 | if dsn is None and not kwargs:
123 | raise TypeError('missing dsn and no parameters')
124 |
125 | dsn = _ext.make_dsn(dsn, **kwargs)
126 | conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
127 | if cursor_factory is not None:
128 | conn.cursor_factory = cursor_factory
129 |
130 | return conn
131 |
--------------------------------------------------------------------------------
/xray-setup/psycopg2/_ipaddress.py:
--------------------------------------------------------------------------------
1 | """Implementation of the ipaddress-based network types adaptation
2 | """
3 |
4 | # psycopg/_ipaddress.py - Ipaddress-based network types adaptation
5 | #
6 | # Copyright (C) 2016-2019 Daniele Varrazzo
7 | #
8 | # psycopg2 is free software: you can redistribute it and/or modify it
9 | # under the terms of the GNU Lesser General Public License as published
10 | # by the Free Software Foundation, either version 3 of the License, or
11 | # (at your option) any later version.
12 | #
13 | # In addition, as a special exception, the copyright holders give
14 | # permission to link this program with the OpenSSL library (or with
15 | # modified versions of OpenSSL that use the same license as OpenSSL),
16 | # and distribute linked combinations including the two.
17 | #
18 | # You must obey the GNU Lesser General Public License in all respects for
19 | # all of the code used other than OpenSSL.
20 | #
21 | # psycopg2 is distributed in the hope that it will be useful, but WITHOUT
22 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
24 | # License for more details.
25 |
26 | from psycopg2.extensions import (
27 | new_type, new_array_type, register_type, register_adapter, QuotedString)
28 | from psycopg2.compat import text_type
29 |
30 | # The module is imported on register_ipaddress
31 | ipaddress = None
32 |
33 | # The typecasters are created only once
34 | _casters = None
35 |
36 |
37 | def register_ipaddress(conn_or_curs=None):
38 | """
39 | Register conversion support between `ipaddress` objects and `network types`__.
40 |
41 | :param conn_or_curs: the scope where to register the type casters.
42 | If `!None` register them globally.
43 |
44 | After the function is called, PostgreSQL :sql:`inet` values will be
45 | converted into `~ipaddress.IPv4Interface` or `~ipaddress.IPv6Interface`
46 | objects, :sql:`cidr` values into `~ipaddress.IPv4Network` or
47 | `~ipaddress.IPv6Network`.
48 |
49 | .. __: https://www.postgresql.org/docs/current/static/datatype-net-types.html
50 | """
51 | global ipaddress
52 | import ipaddress
53 |
54 | global _casters
55 | if _casters is None:
56 | _casters = _make_casters()
57 |
58 | for c in _casters:
59 | register_type(c, conn_or_curs)
60 |
61 | for t in [ipaddress.IPv4Interface, ipaddress.IPv6Interface,
62 | ipaddress.IPv4Network, ipaddress.IPv6Network]:
63 | register_adapter(t, adapt_ipaddress)
64 |
65 |
66 | def _make_casters():
67 | inet = new_type((869,), 'INET', cast_interface)
68 | ainet = new_array_type((1041,), 'INET[]', inet)
69 |
70 | cidr = new_type((650,), 'CIDR', cast_network)
71 | acidr = new_array_type((651,), 'CIDR[]', cidr)
72 |
73 | return [inet, ainet, cidr, acidr]
74 |
75 |
76 | def cast_interface(s, cur=None):
77 | if s is None:
78 | return None
79 | # Py2 version force the use of unicode. meh.
80 | return ipaddress.ip_interface(text_type(s))
81 |
82 |
83 | def cast_network(s, cur=None):
84 | if s is None:
85 | return None
86 | return ipaddress.ip_network(text_type(s))
87 |
88 |
89 | def adapt_ipaddress(obj):
90 | return QuotedString(str(obj))
91 |
--------------------------------------------------------------------------------
/xray-setup/psycopg2/_json.py:
--------------------------------------------------------------------------------
1 | """Implementation of the JSON adaptation objects
2 |
3 | This module exists to avoid a circular import problem: psycopg2.extras depends
4 | on psycopg2.extensions, so I can't create the default JSON typecasters in
5 | extensions importing register_json from extras.
6 | """
7 |
8 | # psycopg/_json.py - Implementation of the JSON adaptation objects
9 | #
10 | # Copyright (C) 2012-2019 Daniele Varrazzo
11 | #
12 | # psycopg2 is free software: you can redistribute it and/or modify it
13 | # under the terms of the GNU Lesser General Public License as published
14 | # by the Free Software Foundation, either version 3 of the License, or
15 | # (at your option) any later version.
16 | #
17 | # In addition, as a special exception, the copyright holders give
18 | # permission to link this program with the OpenSSL library (or with
19 | # modified versions of OpenSSL that use the same license as OpenSSL),
20 | # and distribute linked combinations including the two.
21 | #
22 | # You must obey the GNU Lesser General Public License in all respects for
23 | # all of the code used other than OpenSSL.
24 | #
25 | # psycopg2 is distributed in the hope that it will be useful, but WITHOUT
26 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
27 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
28 | # License for more details.
29 |
30 | import json
31 |
32 | from psycopg2._psycopg import ISQLQuote, QuotedString
33 | from psycopg2._psycopg import new_type, new_array_type, register_type
34 | from psycopg2.compat import PY2
35 |
36 |
37 | # oids from PostgreSQL 9.2
38 | JSON_OID = 114
39 | JSONARRAY_OID = 199
40 |
41 | # oids from PostgreSQL 9.4
42 | JSONB_OID = 3802
43 | JSONBARRAY_OID = 3807
44 |
45 |
46 | class Json(object):
47 | """
48 | An `~psycopg2.extensions.ISQLQuote` wrapper to adapt a Python object to
49 | :sql:`json` data type.
50 |
51 | `!Json` can be used to wrap any object supported by the provided *dumps*
52 | function. If none is provided, the standard :py:func:`json.dumps()` is
53 | used.
54 |
55 | """
56 | def __init__(self, adapted, dumps=None):
57 | self.adapted = adapted
58 | self._conn = None
59 | self._dumps = dumps or json.dumps
60 |
61 | def __conform__(self, proto):
62 | if proto is ISQLQuote:
63 | return self
64 |
65 | def dumps(self, obj):
66 | """Serialize *obj* in JSON format.
67 |
68 | The default is to call `!json.dumps()` or the *dumps* function
69 | provided in the constructor. You can override this method to create a
70 | customized JSON wrapper.
71 | """
72 | return self._dumps(obj)
73 |
74 | def prepare(self, conn):
75 | self._conn = conn
76 |
77 | def getquoted(self):
78 | s = self.dumps(self.adapted)
79 | qs = QuotedString(s)
80 | if self._conn is not None:
81 | qs.prepare(self._conn)
82 | return qs.getquoted()
83 |
84 | if PY2:
85 | def __str__(self):
86 | return self.getquoted()
87 | else:
88 | def __str__(self):
89 | # getquoted is binary in Py3
90 | return self.getquoted().decode('ascii', 'replace')
91 |
92 |
93 | def register_json(conn_or_curs=None, globally=False, loads=None,
94 | oid=None, array_oid=None, name='json'):
95 | """Create and register typecasters converting :sql:`json` type to Python objects.
96 |
97 | :param conn_or_curs: a connection or cursor used to find the :sql:`json`
98 | and :sql:`json[]` oids; the typecasters are registered in a scope
99 | limited to this object, unless *globally* is set to `!True`. It can be
100 | `!None` if the oids are provided
101 | :param globally: if `!False` register the typecasters only on
102 | *conn_or_curs*, otherwise register them globally
103 | :param loads: the function used to parse the data into a Python object. If
104 | `!None` use `!json.loads()`, where `!json` is the module chosen
105 | according to the Python version (see above)
106 | :param oid: the OID of the :sql:`json` type if known; If not, it will be
107 | queried on *conn_or_curs*
108 | :param array_oid: the OID of the :sql:`json[]` array type if known;
109 | if not, it will be queried on *conn_or_curs*
110 | :param name: the name of the data type to look for in *conn_or_curs*
111 |
112 | The connection or cursor passed to the function will be used to query the
113 | database and look for the OID of the :sql:`json` type (or an alternative
114 | type if *name* if provided). No query is performed if *oid* and *array_oid*
115 | are provided. Raise `~psycopg2.ProgrammingError` if the type is not found.
116 |
117 | """
118 | if oid is None:
119 | oid, array_oid = _get_json_oids(conn_or_curs, name)
120 |
121 | JSON, JSONARRAY = _create_json_typecasters(
122 | oid, array_oid, loads=loads, name=name.upper())
123 |
124 | register_type(JSON, not globally and conn_or_curs or None)
125 |
126 | if JSONARRAY is not None:
127 | register_type(JSONARRAY, not globally and conn_or_curs or None)
128 |
129 | return JSON, JSONARRAY
130 |
131 |
132 | def register_default_json(conn_or_curs=None, globally=False, loads=None):
133 | """
134 | Create and register :sql:`json` typecasters for PostgreSQL 9.2 and following.
135 |
136 | Since PostgreSQL 9.2 :sql:`json` is a builtin type, hence its oid is known
137 | and fixed. This function allows specifying a customized *loads* function
138 | for the default :sql:`json` type without querying the database.
139 | All the parameters have the same meaning of `register_json()`.
140 | """
141 | return register_json(conn_or_curs=conn_or_curs, globally=globally,
142 | loads=loads, oid=JSON_OID, array_oid=JSONARRAY_OID)
143 |
144 |
def register_default_jsonb(conn_or_curs=None, globally=False, loads=None):
    """
    Create and register :sql:`jsonb` typecasters for PostgreSQL 9.4 and following.

    As in `register_default_json()`, the function allows to register a
    customized *loads* function for the :sql:`jsonb` type at its known oid for
    PostgreSQL 9.4 and following versions. All the parameters have the same
    meaning of `register_json()`.
    """
    # jsonb oids are fixed from PostgreSQL 9.4 onwards: skip the lookup.
    return register_json(
        conn_or_curs=conn_or_curs, globally=globally, loads=loads,
        oid=JSONB_OID, array_oid=JSONBARRAY_OID, name='jsonb')
156 |
157 |
def _create_json_typecasters(oid, array_oid, loads=None, name='JSON'):
    """Create typecasters for json data type.

    Return a (scalar, array) pair of typecasters; the array one is None
    when *array_oid* is not provided.
    """
    # Fall back on the stdlib parser when no custom loads is given.
    if loads is None:
        loads = json.loads

    def typecast_json(s, cur):
        # SQL NULL stays None.
        return None if s is None else loads(s)

    JSON = new_type((oid,), name, typecast_json)
    JSONARRAY = (new_array_type((array_oid,), "%sARRAY" % name, JSON)
                 if array_oid is not None else None)

    return JSON, JSONARRAY
175 |
176 |
def _get_json_oids(conn_or_curs, name='json'):
    """Query the database for the oid (and array oid) of type *name*."""
    # lazy imports to avoid a circular dependency
    from psycopg2.extensions import STATUS_IN_TRANSACTION
    from psycopg2.extras import _solve_conn_curs

    conn, curs = _solve_conn_curs(conn_or_curs)

    # remember the transaction status so it can be restored after the query
    status_before = conn.status

    # the typarray column is only available from PostgreSQL 8.3
    typarray = "typarray" if conn.info.server_version >= 80300 else "NULL"

    # look up the oid (and array oid when available) of the requested type
    curs.execute(
        "SELECT t.oid, %s FROM pg_type t WHERE t.typname = %%s;"
        % typarray, (name,))
    r = curs.fetchone()

    # leave the connection in the transaction state it was found in
    if status_before != STATUS_IN_TRANSACTION and not conn.autocommit:
        conn.rollback()

    if not r:
        raise conn.ProgrammingError("%s data type not found" % name)

    return r
204 |
--------------------------------------------------------------------------------
/xray-setup/psycopg2/_lru_cache.py:
--------------------------------------------------------------------------------
1 | """
2 | LRU cache implementation for Python 2.7
3 |
4 | Ported from http://code.activestate.com/recipes/578078/ and simplified for our
5 | use (only support maxsize > 0 and positional arguments).
6 | """
7 |
8 | from collections import namedtuple
9 | from functools import update_wrapper
10 | from threading import RLock
11 |
12 | _CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
13 |
14 |
def lru_cache(maxsize=100):
    """Least-recently-used cache decorator.

    Arguments to the cached function must be hashable.

    The wrapped function gains ``cache_info()`` and ``cache_clear()``
    callables and a ``__wrapped__`` attribute, mirroring the interface of
    `functools.lru_cache` on Python 3.

    See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used

    """
    def decorating_function(user_function):

        cache = dict()
        stats = [0, 0] # make statistics updateable non-locally
        HITS, MISSES = 0, 1 # names for the stats fields
        cache_get = cache.get # bound method to lookup key or return None
        _len = len # localize the global len() function
        lock = RLock() # linkedlist updates aren't threadsafe
        root = [] # root of the circular doubly linked list
        root[:] = [root, root, None, None] # initialize by pointing to self
        nonlocal_root = [root] # make updateable non-locally
        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields

        # Only a bounded, positive cache size is supported by this port.
        assert maxsize and maxsize > 0, "maxsize %s not supported" % maxsize

        def wrapper(*args):
            # size limited caching that tracks accesses by recency
            key = args
            with lock:
                link = cache_get(key)
                if link is not None:
                    # record recent use of the key by moving it to the
                    # front of the list (the entry just before root is the
                    # most recently used one)
                    root, = nonlocal_root
                    link_prev, link_next, key, result = link
                    link_prev[NEXT] = link_next
                    link_next[PREV] = link_prev
                    last = root[PREV]
                    last[NEXT] = root[PREV] = link
                    link[PREV] = last
                    link[NEXT] = root
                    stats[HITS] += 1
                    return result
            # NOTE: the user function runs outside the lock, so two threads
            # may compute the value for the same missing key concurrently.
            result = user_function(*args)
            with lock:
                root, = nonlocal_root
                if key in cache:
                    # getting here means that this same key was added to the
                    # cache while the lock was released. since the link
                    # update is already done, we need only return the
                    # computed result and update the count of misses.
                    pass
                elif _len(cache) >= maxsize:
                    # use the old root to store the new key and result
                    oldroot = root
                    oldroot[KEY] = key
                    oldroot[RESULT] = result
                    # empty the oldest link and make it the new root
                    root = nonlocal_root[0] = oldroot[NEXT]
                    oldkey = root[KEY]
                    # oldvalue = root[RESULT]
                    root[KEY] = root[RESULT] = None
                    # now update the cache dictionary for the new links
                    del cache[oldkey]
                    cache[key] = oldroot
                else:
                    # put result in a new link at the front of the list
                    last = root[PREV]
                    link = [last, root, key, result]
                    last[NEXT] = root[PREV] = cache[key] = link
                stats[MISSES] += 1
            return result

        def cache_info():
            """Report cache statistics"""
            with lock:
                return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))

        def cache_clear():
            """Clear the cache and cache statistics"""
            with lock:
                cache.clear()
                root = nonlocal_root[0]
                root[:] = [root, root, None, None]
                stats[:] = [0, 0]

        wrapper.__wrapped__ = user_function
        wrapper.cache_info = cache_info
        wrapper.cache_clear = cache_clear
        return update_wrapper(wrapper, user_function)

    return decorating_function
105 |
--------------------------------------------------------------------------------
/xray-setup/psycopg2/_psycopg.cpython-37m-x86_64-linux-gnu.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/psycopg2/_psycopg.cpython-37m-x86_64-linux-gnu.so
--------------------------------------------------------------------------------
/xray-setup/psycopg2/compat.py:
--------------------------------------------------------------------------------
import sys

__all__ = ['string_types', 'text_type', 'lru_cache']

# Interpreter major-version flags.
PY2 = sys.version_info[0] == 2
PY3 = not PY2

if PY2:
    # Python 2: text and byte strings are distinct types.
    string_types = (basestring,)
    text_type = unicode
    from ._lru_cache import lru_cache
else:
    # Python 3: there is a single text type and lru_cache is in the stdlib.
    string_types = (str,)
    text_type = str
    from functools import lru_cache
20 |
--------------------------------------------------------------------------------
/xray-setup/psycopg2/errors.py:
--------------------------------------------------------------------------------
1 | """Error classes for PostgreSQL error codes
2 | """
3 |
4 | # psycopg/errors.py - SQLSTATE and DB-API exceptions
5 | #
6 | # Copyright (C) 2018-2019 Daniele Varrazzo
7 | #
8 | # psycopg2 is free software: you can redistribute it and/or modify it
9 | # under the terms of the GNU Lesser General Public License as published
10 | # by the Free Software Foundation, either version 3 of the License, or
11 | # (at your option) any later version.
12 | #
13 | # In addition, as a special exception, the copyright holders give
14 | # permission to link this program with the OpenSSL library (or with
15 | # modified versions of OpenSSL that use the same license as OpenSSL),
16 | # and distribute linked combinations including the two.
17 | #
18 | # You must obey the GNU Lesser General Public License in all respects for
19 | # all of the code used other than OpenSSL.
20 | #
21 | # psycopg2 is distributed in the hope that it will be useful, but WITHOUT
22 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
24 | # License for more details.
25 |
26 | #
27 | # NOTE: the exceptions are injected into this module by the C extention.
28 | #
29 |
30 |
def lookup(code):
    """Return the exception class registered for a SQLSTATE error *code*.

    Raise `!KeyError` if the code is not found.
    """
    # Deferred import: the C extension injects exceptions into this module,
    # so importing it at the top level would be circular.
    from psycopg2 import _psycopg
    return _psycopg.sqlstate_errors[code]
38 |
--------------------------------------------------------------------------------
/xray-setup/psycopg2/extensions.py:
--------------------------------------------------------------------------------
1 | """psycopg extensions to the DBAPI-2.0
2 |
3 | This module holds all the extensions to the DBAPI-2.0 provided by psycopg.
4 |
5 | - `connection` -- the new-type inheritable connection class
6 | - `cursor` -- the new-type inheritable cursor class
7 | - `lobject` -- the new-type inheritable large object class
8 | - `adapt()` -- exposes the PEP-246_ compatible adapting mechanism used
9 | by psycopg to adapt Python types to PostgreSQL ones
10 |
11 | .. _PEP-246: https://www.python.org/dev/peps/pep-0246/
12 | """
13 | # psycopg/extensions.py - DBAPI-2.0 extensions specific to psycopg
14 | #
15 | # Copyright (C) 2003-2019 Federico Di Gregorio
16 | #
17 | # psycopg2 is free software: you can redistribute it and/or modify it
18 | # under the terms of the GNU Lesser General Public License as published
19 | # by the Free Software Foundation, either version 3 of the License, or
20 | # (at your option) any later version.
21 | #
22 | # In addition, as a special exception, the copyright holders give
23 | # permission to link this program with the OpenSSL library (or with
24 | # modified versions of OpenSSL that use the same license as OpenSSL),
25 | # and distribute linked combinations including the two.
26 | #
27 | # You must obey the GNU Lesser General Public License in all respects for
28 | # all of the code used other than OpenSSL.
29 | #
30 | # psycopg2 is distributed in the hope that it will be useful, but WITHOUT
31 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
32 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
33 | # License for more details.
34 |
35 | import re as _re
36 |
37 | from psycopg2._psycopg import ( # noqa
38 | BINARYARRAY, BOOLEAN, BOOLEANARRAY, BYTES, BYTESARRAY, DATE, DATEARRAY,
39 | DATETIMEARRAY, DECIMAL, DECIMALARRAY, FLOAT, FLOATARRAY, INTEGER,
40 | INTEGERARRAY, INTERVAL, INTERVALARRAY, LONGINTEGER, LONGINTEGERARRAY,
41 | ROWIDARRAY, STRINGARRAY, TIME, TIMEARRAY, UNICODE, UNICODEARRAY,
42 | AsIs, Binary, Boolean, Float, Int, QuotedString, )
43 |
44 | try:
45 | from psycopg2._psycopg import ( # noqa
46 | MXDATE, MXDATETIME, MXDATETIMETZ, MXINTERVAL, MXTIME, MXDATEARRAY,
47 | MXDATETIMEARRAY, MXDATETIMETZARRAY, MXINTERVALARRAY, MXTIMEARRAY,
48 | DateFromMx, TimeFromMx, TimestampFromMx, IntervalFromMx, )
49 | except ImportError:
50 | pass
51 |
52 | from psycopg2._psycopg import ( # noqa
53 | PYDATE, PYDATETIME, PYDATETIMETZ, PYINTERVAL, PYTIME, PYDATEARRAY,
54 | PYDATETIMEARRAY, PYDATETIMETZARRAY, PYINTERVALARRAY, PYTIMEARRAY,
55 | DateFromPy, TimeFromPy, TimestampFromPy, IntervalFromPy, )
56 |
57 | from psycopg2._psycopg import ( # noqa
58 | adapt, adapters, encodings, connection, cursor,
59 | lobject, Xid, libpq_version, parse_dsn, quote_ident,
60 | string_types, binary_types, new_type, new_array_type, register_type,
61 | ISQLQuote, Notify, Diagnostics, Column, ConnectionInfo,
62 | QueryCanceledError, TransactionRollbackError,
63 | set_wait_callback, get_wait_callback, encrypt_password, )
64 |
65 |
66 | """Isolation level values."""
67 | ISOLATION_LEVEL_AUTOCOMMIT = 0
68 | ISOLATION_LEVEL_READ_UNCOMMITTED = 4
69 | ISOLATION_LEVEL_READ_COMMITTED = 1
70 | ISOLATION_LEVEL_REPEATABLE_READ = 2
71 | ISOLATION_LEVEL_SERIALIZABLE = 3
72 | ISOLATION_LEVEL_DEFAULT = None
73 |
74 |
75 | """psycopg connection status values."""
76 | STATUS_SETUP = 0
77 | STATUS_READY = 1
78 | STATUS_BEGIN = 2
79 | STATUS_SYNC = 3 # currently unused
80 | STATUS_ASYNC = 4 # currently unused
81 | STATUS_PREPARED = 5
82 |
83 | # This is a useful mnemonic to check if the connection is in a transaction
84 | STATUS_IN_TRANSACTION = STATUS_BEGIN
85 |
86 |
87 | """psycopg asynchronous connection polling values"""
88 | POLL_OK = 0
89 | POLL_READ = 1
90 | POLL_WRITE = 2
91 | POLL_ERROR = 3
92 |
93 |
94 | """Backend transaction status values."""
95 | TRANSACTION_STATUS_IDLE = 0
96 | TRANSACTION_STATUS_ACTIVE = 1
97 | TRANSACTION_STATUS_INTRANS = 2
98 | TRANSACTION_STATUS_INERROR = 3
99 | TRANSACTION_STATUS_UNKNOWN = 4
100 |
101 |
def register_adapter(typ, callable):
    """Register 'callable' as an ISQLQuote adapter for type 'typ'."""
    # adapt() looks adapters up by (type, protocol) pair; register the
    # callable under the ISQLQuote protocol used for SQL quoting.
    adapters[(typ, ISQLQuote)] = callable
105 |
106 |
107 | # The SQL_IN class is the official adapter for tuples starting from 2.0.6.
class SQL_IN(object):
    """Adapt any iterable to an SQL quotable object."""

    def __init__(self, seq):
        self._seq = seq
        self._conn = None

    def prepare(self, conn):
        # Remember the connection so item adapters that need one can be
        # prepared against it in getquoted().
        self._conn = conn

    def getquoted(self):
        # Adapt every item, prepare each adapter against the connection when
        # supported, then join the quoted forms into a parenthesized list.
        adapted = [adapt(item) for item in self._seq]
        if self._conn is not None:
            for a in adapted:
                if hasattr(a, 'prepare'):
                    a.prepare(self._conn)
        quoted = [a.getquoted() for a in adapted]
        return b'(' + b', '.join(quoted) + b')'

    def __str__(self):
        return str(self.getquoted())
130 |
131 |
class NoneAdapter(object):
    """Adapt None to NULL.

    This adapter is not used normally as a fast path in mogrify uses NULL,
    but it makes easier to adapt composite types.
    """
    def __init__(self, obj):
        # The wrapped object is ignored: None always renders as NULL.
        pass

    def getquoted(self, _null=b"NULL"):
        # Return the SQL NULL literal as bytes.
        return _null
143 |
144 |
def make_dsn(dsn=None, **kwargs):
    """Convert a set of keywords into a connection string."""
    if dsn is None and not kwargs:
        return ''

    # No keyword given: just verify the dsn and hand it back untouched.
    if not kwargs:
        parse_dsn(dsn)
        return dsn

    # 'database' is accepted as an alias of 'dbname', but not both at once.
    if 'database' in kwargs:
        if 'dbname' in kwargs:
            raise TypeError(
                "you can't specify both 'database' and 'dbname' arguments")
        kwargs['dbname'] = kwargs.pop('database')

    # A None value means "not specified": drop it.
    params = {k: v for k, v in kwargs.items() if v is not None}

    # Keyword arguments take precedence over values parsed from the dsn.
    if dsn is not None:
        merged = parse_dsn(dsn)
        merged.update(params)
        params = merged

    dsn = " ".join(
        "%s=%s" % (k, _param_escape(str(v))) for k, v in params.items())

    # verify that the generated dsn parses back
    parse_dsn(dsn)

    return dsn
177 |
178 |
179 | def _param_escape(s,
180 | re_escape=_re.compile(r"([\\'])"),
181 | re_space=_re.compile(r'\s')):
182 | """
183 | Apply the escaping rule required by PQconnectdb
184 | """
185 | if not s:
186 | return "''"
187 |
188 | s = re_escape.sub(r'\\\1', s)
189 | if re_space.search(s):
190 | s = "'" + s + "'"
191 |
192 | return s
193 |
194 |
# Create default json typecasters for PostgreSQL 9.2 oids
from psycopg2._json import register_default_json, register_default_jsonb # noqa

try:
    JSON, JSONARRAY = register_default_json()
    JSONB, JSONBARRAY = register_default_jsonb()
except ImportError:
    # NOTE(review): ImportError is tolerated here, presumably for builds
    # where json support is unavailable — confirm which call can raise it.
    pass

# The registration helpers are not part of this module's public namespace.
del register_default_json, register_default_jsonb


# Create default Range typecasters
from psycopg2. _range import Range # noqa
del Range


# Add the "cleaned" version of the encodings to the key.
# When the encoding is set its name is cleaned up from - and _ and turned
# uppercase, so an encoding not respecting these rules wouldn't be found in the
# encodings keys and would raise an exception with the unicode typecaster
for k, v in list(encodings.items()):
    k = k.replace('_', '').replace('-', '').upper()
    encodings[k] = v

del k, v
221 |
--------------------------------------------------------------------------------
/xray-setup/psycopg2/pool.py:
--------------------------------------------------------------------------------
1 | """Connection pooling for psycopg2
2 |
3 | This module implements thread-safe (and not) connection pools.
4 | """
5 | # psycopg/pool.py - pooling code for psycopg
6 | #
7 | # Copyright (C) 2003-2019 Federico Di Gregorio
8 | #
9 | # psycopg2 is free software: you can redistribute it and/or modify it
10 | # under the terms of the GNU Lesser General Public License as published
11 | # by the Free Software Foundation, either version 3 of the License, or
12 | # (at your option) any later version.
13 | #
14 | # In addition, as a special exception, the copyright holders give
15 | # permission to link this program with the OpenSSL library (or with
16 | # modified versions of OpenSSL that use the same license as OpenSSL),
17 | # and distribute linked combinations including the two.
18 | #
19 | # You must obey the GNU Lesser General Public License in all respects for
20 | # all of the code used other than OpenSSL.
21 | #
22 | # psycopg2 is distributed in the hope that it will be useful, but WITHOUT
23 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
25 | # License for more details.
26 |
27 | import psycopg2
28 | from psycopg2 import extensions as _ext
29 |
30 |
class PoolError(psycopg2.Error):
    """Error raised by the connection pool (closed, exhausted, misuse)."""
    pass
33 |
34 |
class AbstractConnectionPool(object):
    """Generic key-based pooling code.

    Subclasses expose the private ``_getconn``/``_putconn``/``_closeall``
    methods, optionally wrapping them with locking.
    """

    def __init__(self, minconn, maxconn, *args, **kwargs):
        """Initialize the connection pool.

        New 'minconn' connections are created immediately calling 'connfunc'
        with given parameters. The connection pool will support a maximum of
        about 'maxconn' connections.
        """
        self.minconn = int(minconn)
        self.maxconn = int(maxconn)
        self.closed = False

        # Arguments forwarded verbatim to psycopg2.connect().
        self._args = args
        self._kwargs = kwargs

        # Free connections, ready to be handed out.
        self._pool = []
        # Map of key -> connection currently assigned to that key.
        self._used = {}
        self._rused = {} # id(conn) -> key map
        # Monotonic counter used by _getkey() to generate unique keys.
        self._keys = 0

        # Eagerly open the minimum number of connections.
        for i in range(self.minconn):
            self._connect()

    def _connect(self, key=None):
        """Create a new connection and assign it to 'key' if not None."""
        conn = psycopg2.connect(*self._args, **self._kwargs)
        if key is not None:
            # Track the connection both by key and by object identity.
            self._used[key] = conn
            self._rused[id(conn)] = key
        else:
            self._pool.append(conn)
        return conn

    def _getkey(self):
        """Return a new unique key."""
        self._keys += 1
        return self._keys

    def _getconn(self, key=None):
        """Get a free connection and assign it to 'key' if not None."""
        if self.closed:
            raise PoolError("connection pool is closed")
        if key is None:
            key = self._getkey()

        # The same key always maps back to the same connection.
        if key in self._used:
            return self._used[key]

        if self._pool:
            self._used[key] = conn = self._pool.pop()
            self._rused[id(conn)] = key
            return conn
        else:
            # No free connection available: open a new one unless the pool
            # is already at capacity.
            if len(self._used) == self.maxconn:
                raise PoolError("connection pool exhausted")
            return self._connect(key)

    def _putconn(self, conn, key=None, close=False):
        """Put away a connection."""
        if self.closed:
            raise PoolError("connection pool is closed")

        # When no key is given, recover it from the connection identity.
        if key is None:
            key = self._rused.get(id(conn))
            if key is None:
                raise PoolError("trying to put unkeyed connection")

        if len(self._pool) < self.minconn and not close:
            # Return the connection into a consistent state before putting
            # it back into the pool
            if not conn.closed:
                status = conn.info.transaction_status
                if status == _ext.TRANSACTION_STATUS_UNKNOWN:
                    # server connection lost
                    conn.close()
                elif status != _ext.TRANSACTION_STATUS_IDLE:
                    # connection in error or in transaction
                    conn.rollback()
                    self._pool.append(conn)
                else:
                    # regular idle connection
                    self._pool.append(conn)
            # If the connection is closed, we just discard it.
        else:
            conn.close()

        # here we check for the presence of key because it can happen that a
        # thread tries to put back a connection after a call to close
        # NOTE(review): when the pool is open the deletes below run even if
        # 'key' was never registered, which would raise KeyError — confirm
        # callers only put back connections obtained from this pool.
        if not self.closed or key in self._used:
            del self._used[key]
            del self._rused[id(conn)]

    def _closeall(self):
        """Close all connections.

        Note that this can lead to some code fail badly when trying to use
        an already closed connection. If you call .closeall() make sure
        your code can deal with it.
        """
        if self.closed:
            raise PoolError("connection pool is closed")
        # Best-effort close: ignore errors on already-broken connections.
        for conn in self._pool + list(self._used.values()):
            try:
                conn.close()
            except Exception:
                pass
        self.closed = True
144 |
145 |
class SimpleConnectionPool(AbstractConnectionPool):
    """A connection pool that can't be shared across different threads."""

    # Single-threaded use needs no locking: expose the generic
    # implementations directly as the public methods.
    getconn = AbstractConnectionPool._getconn
    putconn = AbstractConnectionPool._putconn
    closeall = AbstractConnectionPool._closeall
152 |
153 |
class ThreadedConnectionPool(AbstractConnectionPool):
    """A connection pool that works with the threading module.

    Every public method serializes access to the shared pool state
    through a single lock.
    """

    def __init__(self, minconn, maxconn, *args, **kwargs):
        """Initialize the threading lock."""
        import threading
        AbstractConnectionPool.__init__(
            self, minconn, maxconn, *args, **kwargs)
        self._lock = threading.Lock()

    def getconn(self, key=None):
        """Get a free connection and assign it to 'key' if not None."""
        with self._lock:
            return self._getconn(key)

    def putconn(self, conn=None, key=None, close=False):
        """Put away an unused connection."""
        with self._lock:
            self._putconn(conn, key, close)

    def closeall(self):
        """Close all connections (even the one currently in use.)"""
        with self._lock:
            self._closeall()
187 |
--------------------------------------------------------------------------------
/xray-setup/psycopg2/psycopg1.py:
--------------------------------------------------------------------------------
1 | """psycopg 1.1.x compatibility module
2 |
3 | This module uses the new style connection and cursor types to build a psycopg
4 | 1.1.1.x compatibility layer. It should be considered a temporary hack to run
5 | old code while porting to psycopg 2. Import it as follows::
6 |
7 | from psycopg2 import psycopg1 as psycopg
8 | """
9 | # psycopg/psycopg1.py - psycopg 1.1.x compatibility module
10 | #
11 | # Copyright (C) 2003-2010 Federico Di Gregorio
12 | #
13 | # psycopg2 is free software: you can redistribute it and/or modify it
14 | # under the terms of the GNU Lesser General Public License as published
15 | # by the Free Software Foundation, either version 3 of the License, or
16 | # (at your option) any later version.
17 | #
18 | # In addition, as a special exception, the copyright holders give
19 | # permission to link this program with the OpenSSL library (or with
20 | # modified versions of OpenSSL that use the same license as OpenSSL),
21 | # and distribute linked combinations including the two.
22 | #
23 | # You must obey the GNU Lesser General Public License in all respects for
24 | # all of the code used other than OpenSSL.
25 | #
26 | # psycopg2 is distributed in the hope that it will be useful, but WITHOUT
27 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
28 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
29 | # License for more details.
30 |
31 | import psycopg2._psycopg as _2psycopg # noqa
32 | from psycopg2.extensions import cursor as _2cursor
33 | from psycopg2.extensions import connection as _2connection
34 |
35 | from psycopg2 import * # noqa
36 | import psycopg2.extensions as _ext
37 | _2connect = connect
38 |
39 |
def connect(*args, **kwargs):
    """connect(dsn, ...) -> new psycopg 1.1.x compatible connection object"""
    # Force the compatibility connection class, then reproduce psycopg 1's
    # default transaction behavior.
    kwargs['connection_factory'] = connection
    c = _2connect(*args, **kwargs)
    c.set_isolation_level(_ext.ISOLATION_LEVEL_READ_COMMITTED)
    return c
46 |
47 |
class connection(_2connection):
    """psycopg 1.1.x connection."""

    def cursor(self):
        """cursor() -> new psycopg 1.1.x compatible cursor object"""
        return _2connection.cursor(self, cursor_factory=cursor)

    def autocommit(self, on_off=1):
        """autocommit(on_off=1) -> switch autocommit on (1) or off (0)"""
        level = (_ext.ISOLATION_LEVEL_AUTOCOMMIT if on_off > 0
                 else _ext.ISOLATION_LEVEL_READ_COMMITTED)
        self.set_isolation_level(level)
61 |
62 |
class cursor(_2cursor):
    """psycopg 1.1.x cursor.

    Note that this cursor implements the exact procedure used by psycopg 1 to
    build dictionaries out of result rows. The DictCursor in the
    psycopg.extras modules implements a much better and faster algorithm.
    """

    def __build_dict(self, row):
        # Pair each column name from the cursor description with its value.
        return {self.description[i][0]: row[i]
                for i in range(len(self.description))}

    def dictfetchone(self):
        row = _2cursor.fetchone(self)
        return self.__build_dict(row) if row else row

    def dictfetchmany(self, size):
        return [self.__build_dict(row)
                for row in _2cursor.fetchmany(self, size)]

    def dictfetchall(self):
        return [self.__build_dict(row)
                for row in _2cursor.fetchall(self)]
97 |
--------------------------------------------------------------------------------
/xray-setup/psycopg2/tz.py:
--------------------------------------------------------------------------------
1 | """tzinfo implementations for psycopg2
2 |
3 | This module holds two different tzinfo implementations that can be used as
4 | the 'tzinfo' argument to datetime constructors, directly passed to psycopg
5 | functions or used to set the .tzinfo_factory attribute in cursors.
6 | """
7 | # psycopg/tz.py - tzinfo implementation
8 | #
9 | # Copyright (C) 2003-2019 Federico Di Gregorio
10 | #
11 | # psycopg2 is free software: you can redistribute it and/or modify it
12 | # under the terms of the GNU Lesser General Public License as published
13 | # by the Free Software Foundation, either version 3 of the License, or
14 | # (at your option) any later version.
15 | #
16 | # In addition, as a special exception, the copyright holders give
17 | # permission to link this program with the OpenSSL library (or with
18 | # modified versions of OpenSSL that use the same license as OpenSSL),
19 | # and distribute linked combinations including the two.
20 | #
21 | # You must obey the GNU Lesser General Public License in all respects for
22 | # all of the code used other than OpenSSL.
23 | #
24 | # psycopg2 is distributed in the hope that it will be useful, but WITHOUT
25 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
27 | # License for more details.
28 |
29 | import datetime
30 | import time
31 |
32 | ZERO = datetime.timedelta(0)
33 |
34 |
class FixedOffsetTimezone(datetime.tzinfo):
    """Fixed offset in minutes east from UTC.

    This is exactly the implementation__ found in Python 2.3.x documentation,
    with a small change to the `!__init__()` method to allow for pickling
    and a default name in the form ``sHH:MM`` (``s`` is the sign.).

    The implementation also caches instances. During creation, if a
    FixedOffsetTimezone instance has previously been created with the same
    offset and name that instance will be returned. This saves memory and
    improves comparability.

    .. __: https://docs.python.org/library/datetime.html
    """
    _name = None
    _offset = datetime.timedelta(0)

    # Cache of instances keyed by (offset, name); see __new__().
    _cache = {}

    def __init__(self, offset=None, name=None):
        """
        :param offset: offset from UTC in minutes east (may be negative).
        :param name: optional timezone name; when not given, tzname()
            generates one from the offset.
        """
        if offset is not None:
            self._offset = datetime.timedelta(minutes=offset)
        if name is not None:
            self._name = name

    def __new__(cls, offset=None, name=None):
        """Return a suitable instance created earlier if it exists
        """
        key = (offset, name)
        try:
            return cls._cache[key]
        except KeyError:
            tz = super(FixedOffsetTimezone, cls).__new__(cls, offset, name)
            cls._cache[key] = tz
            return tz

    def __repr__(self):
        offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
        return "psycopg2.tz.FixedOffsetTimezone(offset=%r, name=%r)" \
            % (offset_mins, self._name)

    def __getinitargs__(self):
        # Support pickling: recreate the instance from (minutes, name).
        offset_mins = self._offset.seconds // 60 + self._offset.days * 24 * 60
        return offset_mins, self._name

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        if self._name is not None:
            return self._name

        # Generate a name in the documented ``sHH:MM`` form.  The previous
        # implementation used true division on Python 3, didn't zero-pad
        # the minutes (offset 5 rendered as "+00:5") and rendered negative
        # offsets confusingly (-330 minutes rendered as "-06:30").
        total = int(self._offset.total_seconds())
        sign = "-" if total < 0 else "+"
        hours, rem = divmod(abs(total), 3600)
        minutes, seconds = divmod(rem, 60)
        if seconds:
            return "%s%02d:%02d:%02d" % (sign, hours, minutes, seconds)
        if minutes:
            return "%s%02d:%02d" % (sign, hours, minutes)
        return "%s%02d" % (sign, hours)

    def dst(self, dt):
        # A fixed-offset zone has no daylight saving component.
        return datetime.timedelta(0)
97 |
98 |
# Offset of local standard time from UTC, per the platform's time module.
STDOFFSET = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
    # The platform defines a DST timezone: use its offset.
    DSTOFFSET = datetime.timedelta(seconds=-time.altzone)
else:
    DSTOFFSET = STDOFFSET
# Extra offset applied while daylight saving time is in effect.
DSTDIFF = DSTOFFSET - STDOFFSET
105 |
106 |
class LocalTimezone(datetime.tzinfo):
    """Platform idea of local timezone.

    This is the exact implementation from the Python 2.3 documentation.
    """

    def utcoffset(self, dt):
        return DSTOFFSET if self._isdst(dt) else STDOFFSET

    def dst(self, dt):
        return DSTDIFF if self._isdst(dt) else ZERO

    def tzname(self, dt):
        return time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # Round-trip the naive datetime through mktime()/localtime() and let
        # the platform decide whether DST is in effect at that local time.
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, -1)
        return time.localtime(time.mktime(tt)).tm_isdst > 0
134 |
135 |
# Shared singleton instance representing the process-local timezone.
LOCAL = LocalTimezone()

# TODO: pre-generate some interesting time zones?
139 |
--------------------------------------------------------------------------------
/xray-setup/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/tests/__init__.py
--------------------------------------------------------------------------------
/xray-setup/tests/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/tests/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------
/xray-setup/tests/__pycache__/test_log_helper.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/tests/__pycache__/test_log_helper.cpython-38.pyc
--------------------------------------------------------------------------------
/xray-setup/tests/__pycache__/test_resource_helper.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/tests/__pycache__/test_resource_helper.cpython-38.pyc
--------------------------------------------------------------------------------
/xray-setup/tests/__pycache__/test_utils.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/tests/__pycache__/test_utils.cpython-38.pyc
--------------------------------------------------------------------------------
/xray-setup/tests/test_log_helper.py:
--------------------------------------------------------------------------------
import json
import logging
import unittest

from crhelper.log_helper import *
4 |
5 |
class TestLogHelper(unittest.TestCase):
    """Tests for crhelper.log_helper.setup() and its JsonFormatter output."""

    @staticmethod
    def _attach_stream_handler(name):
        """Attach a fresh StreamHandler to the named logger and return it.

        Pass name=None to target the root logger.
        """
        logger = logging.getLogger(name)
        logger.addHandler(logging.StreamHandler())
        return logger

    def test_logging_no_formatting(self):
        """setup() with formatter_cls=None must leave root formatters untouched."""
        self._attach_stream_handler('1')
        orig_formatters = [h.formatter for h in logging.root.handlers]
        setup(level='DEBUG', formatter_cls=None, boto_level='CRITICAL')
        new_formatters = [h.formatter for h in logging.root.handlers]
        self.assertEqual(orig_formatters, new_formatters)

    def test_logging_boto_explicit(self):
        """An explicit boto_level must be applied to every AWS/urllib3 logger."""
        self._attach_stream_handler('2')
        setup(level='DEBUG', formatter_cls=None, boto_level='CRITICAL')
        for name in ['boto', 'boto3', 'botocore', 'urllib3']:
            # logging.CRITICAL == 50; named constant instead of magic number
            self.assertEqual(logging.getLogger(name).level, logging.CRITICAL)

    def test_logging_json(self):
        """setup() must install JsonFormatter on every root handler."""
        self._attach_stream_handler('3')
        setup(level='DEBUG', formatter_cls=JsonFormatter, RequestType='ContainerInit')
        for handler in logging.root.handlers:
            self.assertEqual(JsonFormatter, type(handler.formatter))

    def test_logging_boto_implicit(self):
        """Without boto_level, AWS/urllib3 loggers follow the main level."""
        self._attach_stream_handler('4')
        setup(level='DEBUG', formatter_cls=JsonFormatter, RequestType='ContainerInit')
        for name in ['boto', 'boto3', 'botocore', 'urllib3']:
            # logging.DEBUG == 10
            self.assertEqual(logging.getLogger(name).level, logging.DEBUG)

    def test_logging_json_keys(self):
        """A JSON-formatted record exposes exactly the expected keys, in order."""
        with self.assertLogs() as ctx:
            logger = self._attach_stream_handler(None)
            setup(level='DEBUG', formatter_cls=JsonFormatter, RequestType='ContainerInit')
            logger.info("test")
            logs = json.loads(ctx.output[0])
            self.assertEqual(["timestamp", "level", "location", "RequestType", "message"],
                             list(logs.keys()))

    def test_logging_json_parse_message(self):
        """A message that is itself valid JSON is embedded as parsed JSON."""
        with self.assertLogs() as ctx:
            logger = self._attach_stream_handler(None)
            setup(level='DEBUG', formatter_cls=JsonFormatter, RequestType='ContainerInit')
            logger.info("{}")
            logs = json.loads(ctx.output[0])
            self.assertEqual({}, logs["message"])

    def test_logging_json_exception(self):
        """Logging with exc_info=True adds an "exception" key to the record."""
        with self.assertLogs() as ctx:
            logger = self._attach_stream_handler(None)
            setup(level='DEBUG', formatter_cls=JsonFormatter, RequestType='ContainerInit')
            try:
                1 + 't'  # deliberate TypeError to produce a live traceback
            except TypeError:  # was `except Exception as e` with `e` unused
                logger.info("[]", exc_info=True)
            logs = json.loads(ctx.output[0])
            self.assertIn("exception", logs.keys())
79 |
--------------------------------------------------------------------------------
/xray-setup/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import json
2 | from unittest.mock import patch, Mock
3 | from crhelper import utils
4 | import unittest
5 |
6 |
class TestLogHelper(unittest.TestCase):
    """Tests for crhelper.utils._send_response()."""
    # NOTE(review): the class name appears copy-pasted from test_log_helper.py
    # (this file exercises utils, not log_helper); left unchanged so the
    # test-discovery surface is unaffected.

    TEST_URL = "https://test_url/this/is/the/url?query=123#aaa"

    @patch('crhelper.utils.HTTPSConnection', autospec=True)
    def test_send_succeeded_response(self, https_connection_mock):
        """A serializable payload is JSON-encoded and PUT to the callback URL."""
        utils._send_response(self.TEST_URL, {})
        https_connection_mock.assert_called_once_with("test_url")
        conn = https_connection_mock.return_value
        conn.request.assert_called_once_with(
            method="PUT",
            url="/this/is/the/url?query=123#aaa",
            body='{}',
            headers={"content-type": "", "content-length": "2"},
        )

    @patch('crhelper.utils.HTTPSConnection', autospec=True)
    def test_send_failed_response(self, https_connection_mock):
        """A non-serializable payload (a Mock) is reported as Status FAILED."""
        utils._send_response(self.TEST_URL, Mock())
        https_connection_mock.assert_called_once_with("test_url")
        conn = https_connection_mock.return_value
        # The Reason text is generated inside _send_response, so recover it
        # from the actual call before asserting the full request.
        sent = json.loads(conn.request.call_args[1]["body"])
        expected_body = (
            '{"Status": "FAILED", "Data": {}, "Reason": "' + sent["Reason"] + '"}'
        )
        conn.request.assert_called_once_with(
            method="PUT",
            url="/this/is/the/url?query=123#aaa",
            body=expected_body,
            headers={"content-type": "", "content-length": str(len(expected_body))},
        )
33 |
--------------------------------------------------------------------------------
/xray-setup/tests/unit/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/tests/unit/__init__.py
--------------------------------------------------------------------------------
/xray-setup/tests/unit/__pycache__/__init__.cpython-38.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-quickstart/quickstart-jfrog-artifactory/db7f5fce57fd71a8d634f33038100ca5d8494e1a/xray-setup/tests/unit/__pycache__/__init__.cpython-38.pyc
--------------------------------------------------------------------------------