├── .github
│   └── settings.yml
├── .gitignore
├── CODEOWNERS
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── MAINTAINERS.md
├── Makefile
├── README.md
├── SECURITY.md
├── ci
│   ├── azure-pipelines-release.yml
│   ├── azure-pipelines.yml
│   └── scripts
│       └── publish_docker.sh
├── config
│   ├── baseimage
│   │   └── Dockerfile
│   └── baseos
│       └── Dockerfile
├── images
│   ├── couchdb
│   │   ├── 10-docker-default.ini
│   │   ├── 20-fabric-default.ini
│   │   ├── Dockerfile
│   │   ├── docker-entrypoint.sh
│   │   └── vm.args
│   ├── kafka
│   │   ├── Dockerfile
│   │   ├── docker-entrypoint.sh
│   │   └── kafka-run-class.sh
│   └── zookeeper
│       ├── Dockerfile
│       └── docker-entrypoint.sh
└── scripts
    ├── common
    │   ├── cleanup.sh
    │   ├── golang_crossCompileSetup.sh
    │   ├── init.sh
    │   ├── packages.sh
    │   └── setup.sh
    ├── docker
    │   ├── fixup.sh
    │   └── init.sh
    └── multiarch.sh
/.github/settings.yml:
--------------------------------------------------------------------------------
 1 | repository:
 2 |   name: fabric-baseimage
 3 |   description: Deprecated Fabric Base Images
 4 |   homepage: https://wiki.hyperledger.org/display/fabric
 5 |   default_branch: main
 6 |   has_downloads: true
 7 |   has_issues: false
 8 |   has_projects: false
 9 |   has_wiki: false
10 |   archived: true
11 |   private: false
12 |   allow_squash_merge: true
13 |   allow_merge_commit: false
14 |   allow_rebase_merge: true
15 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | build/
2 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 |
3 | # Fabric Maintainers
4 | * @hyperledger/fabric-maintainers
5 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | Code of Conduct Guidelines
2 | ==========================
3 |
4 | Please review the Hyperledger [Code of
5 | Conduct](https://wiki.hyperledger.org/community/hyperledger-project-code-of-conduct)
6 | before participating. It is important that we keep things civil.
7 |
8 | 
This work is licensed under a Creative Commons Attribution 4.0 International License.
9 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributions Welcome!
2 |
3 | This repository is part of the Fabric project.
4 | Please consult [Fabric's CONTRIBUTING documentation](http://hyperledger-fabric.readthedocs.io/en/latest/CONTRIBUTING.html) for information on how to contribute to this repository.
5 |
6 | 
This work is licensed under a Creative Commons Attribution 4.0 International License.
7 | s
8 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/MAINTAINERS.md:
--------------------------------------------------------------------------------
1 | ## Maintainers
2 |
3 | This repository is part of the Fabric project.
4 | Please consult [Fabric's MAINTAINERS documentation](https://github.com/hyperledger/fabric/blob/main/MAINTAINERS.md) for the list of people maintaining this repository.
5 |
6 | 
This work is licensed under a Creative Commons Attribution 4.0 International License.
7 | s
8 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright Greg Haskins All Rights Reserved.
3 | # Copyright IBM Corp. All Rights Reserved.
4 | #
5 | # SPDX-License-Identifier: Apache-2.0
6 | #
7 | # -------------------------------------------------------------
8 | # This makefile defines the following targets
9 | #
10 | #   - all              - Builds the base images and the third-party images
11 | #   - docker           - Builds the base images (baseimage, baseos)
12 | #   - dependent-images - Builds the third-party images (couchdb, kafka, zookeeper)
13 | #   - couchdb          - Builds the couchdb image
14 | #   - kafka            - Builds the kafka image
15 | #   - zookeeper        - Builds the zookeeper image
16 | #   - install          - Builds the base images (baseimage, baseos) and publishes them to Docker Hub
17 | #   - clean            - Cleans all the docker images
18 |
19 | DOCKER_NS ?= hyperledger
20 | BASENAME ?= $(DOCKER_NS)/fabric
21 | VERSION ?= 0.4.22
22 |
23 | ARCH=$(shell go env GOARCH)
24 | DOCKER_TAG ?= $(ARCH)-$(VERSION)
25 |
26 | ifneq ($(http_proxy),)
27 | DOCKER_BUILD_FLAGS+=--build-arg 'http_proxy=$(http_proxy)'
28 | endif
29 | ifneq ($(https_proxy),)
30 | DOCKER_BUILD_FLAGS+=--build-arg 'https_proxy=$(https_proxy)'
31 | endif
32 | ifneq ($(HTTP_PROXY),)
33 | DOCKER_BUILD_FLAGS+=--build-arg 'HTTP_PROXY=$(HTTP_PROXY)'
34 | endif
35 | ifneq ($(HTTPS_PROXY),)
36 | DOCKER_BUILD_FLAGS+=--build-arg 'HTTPS_PROXY=$(HTTPS_PROXY)'
37 | endif
38 | ifneq ($(no_proxy),)
39 | DOCKER_BUILD_FLAGS+=--build-arg 'no_proxy=$(no_proxy)'
40 | endif
41 | ifneq ($(NO_PROXY),)
42 | DOCKER_BUILD_FLAGS+=--build-arg 'NO_PROXY=$(NO_PROXY)'
43 | endif
44 |
45 | DBUILD = docker build $(DOCKER_BUILD_FLAGS)
46 |
47 | # NOTE this is for building the dependent images (kafka, zk, couchdb)
48 | BASE_DOCKER_NS ?= hyperledger
49 |
50 | DOCKER_IMAGES = baseos baseimage
51 | DEPENDENT_IMAGES = couchdb kafka zookeeper
52 | DUMMY = .$(DOCKER_TAG)
53 |
54 | all: docker dependent-images
55 |
56 | build/docker/%/$(DUMMY):
57 | 	$(eval TARGET = ${patsubst build/docker/%/$(DUMMY),%,${@}})
58 | 	$(eval DOCKER_NAME = $(BASENAME)-$(TARGET))
59 | 	@mkdir -p $(@D)
60 | 	@echo "Building docker $(TARGET)"
61 | 	docker build -f config/$(TARGET)/Dockerfile \
62 | 		-t $(DOCKER_NAME) \
63 | 		-t $(DOCKER_NAME):$(DOCKER_TAG) \
64 | 		.
65 | 	@touch $@
66 |
67 | build/docker/%/.push: build/docker/%/$(DUMMY)
68 | 	@docker login \
69 | 		--username=$(DOCKER_HUB_USERNAME) \
70 | 		--password=$(DOCKER_HUB_PASSWORD)
71 | 	@docker push $(BASENAME)-$(patsubst build/docker/%/.push,%,$@):$(DOCKER_TAG)
72 |
73 | docker: $(patsubst %,build/docker/%/$(DUMMY),$(DOCKER_IMAGES))
74 |
75 | install: $(patsubst %,build/docker/%/.push,$(DOCKER_IMAGES))
76 |
77 | dependent-images: $(DEPENDENT_IMAGES)
78 |
79 | dependent-images-install: $(patsubst %,build/image/%/.push,$(DEPENDENT_IMAGES))
80 |
81 | couchdb: build/image/couchdb/$(DUMMY)
82 |
83 | kafka: build/image/kafka/$(DUMMY)
84 |
85 | zookeeper: build/image/zookeeper/$(DUMMY)
86 |
87 | build/image/%/$(DUMMY):
88 | 	@mkdir -p $(@D)
89 | 	$(eval TARGET = ${patsubst build/image/%/$(DUMMY),%,${@}})
90 | 	@echo "Docker: building $(TARGET) image"
91 | 	$(DBUILD) ${BUILD_ARGS} -t $(DOCKER_NS)/fabric-$(TARGET) -f images/${TARGET}/Dockerfile images/${TARGET}
92 | 	docker tag $(DOCKER_NS)/fabric-$(TARGET) $(DOCKER_NS)/fabric-$(TARGET):$(DOCKER_TAG)
93 | 	@touch $@
94 |
95 | build/image/%/.push: build/image/%/$(DUMMY)
96 | 	@docker login \
97 | 		--username=$(DOCKER_HUB_USERNAME) \
98 | 		--password=$(DOCKER_HUB_PASSWORD)
99 | 	@docker push $(BASENAME)-$(patsubst build/image/%/.push,%,$@):$(DOCKER_TAG)
100 |
101 | clean:
102 | 	-rm -rf build
103 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # :warning: Deprecation Notice
2 |
3 | The Fabric-Baseimage repository and the images built from this repository are deprecated. Critical bugs will continue to be patched through the Fabric 1.4.x LTS lifecycle. The images affected by this deprecation are:
4 |
5 | - hyperledger/fabric-baseimage (with a version < 1.0.0)
6 | - hyperledger/fabric-baseos (with a version < 1.0.0)
7 | - hyperledger/fabric-couchdb (all versions)
8 | - hyperledger/fabric-zookeeper (all versions)
9 | - hyperledger/fabric-kafka (all versions)
10 |
11 | Users should migrate to open source versions of these images.
12 |
13 | # Baseimage Introduction
14 |
15 | This directory contains the infrastructure for creating a new baseimage used as the basis for various docker images consumed within the Hyperledger Fabric workflow such as chaincode compilation/execution, unit-testing, and even cluster simulation. It is based on ubuntu-16.04 with various opensource projects added such as golang, grpc, and node.js. The actual Hyperledger code is injected just-in-time before deployment. The resulting images are published to image registries such as [hub.docker.com][fabric-baseimage].
16 |
17 | The purpose of this baseimage is to act as a bridge between a raw ubuntu/xenial configuration and the customizations required for supporting a Hyperledger Fabric environment. Some of the FOSS components that need to be added to Ubuntu do not have convenient native packages. Therefore, they are built from source. However, the build process is generally expensive (often taking in excess of 30 minutes) so it is fairly inefficient to JIT assemble these components on demand.
18 |
19 | Therefore, the expensive FOSS components are built into this baseimage once and subsequently cached on the public repositories so that workflows may simply consume the objects without requiring a local build cycle.
20 |
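In practice, consuming the cached image looks like the following sketch (assuming an amd64 host and the 0.4.22 release tagged by this repository's Makefile; adjust the tag for your architecture and release):

```bash
# Pull the prebuilt baseimage rather than rebuilding the expensive FOSS stack locally.
docker pull hyperledger/fabric-baseimage:amd64-0.4.22

# Downstream images can then start from it, e.g. in a custom Dockerfile:
#   FROM hyperledger/fabric-baseimage:amd64-0.4.22
```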
21 |
22 | ## Intended Audience
23 |
24 | This repository is only intended for release managers curating the base images. Typical developers may safely ignore this completely. Anyone wishing to customize their image is encouraged to do so via downstream means, such as a custom Dockerfile.
25 |
26 | ## Exceptions
27 |
28 | If a component is found to be both broadly applicable and expensive to build JIT, it may be a candidate for inclusion in a future baseimage.
29 |
30 | ## Usage
31 |
32 | * "make docker" will build the docker images and commit it to your local environment; e.g. "hyperledger/fabric-baseimage". The docker image is also tagged with architecture and release details.
33 | * "make install" build build the docker images and push them to Docker Hub.
34 |
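As a concrete sketch of that workflow (the Docker Hub credentials below are placeholders and are only needed for "make install"):

```bash
# Build the base images (baseos, baseimage) locally.
make docker

# Optionally build the third-party images (couchdb, kafka, zookeeper) as well.
make dependent-images

# Inspect the results; tags follow the <arch>-<version> pattern, e.g. amd64-0.4.22.
docker images | grep hyperledger/fabric

# Publish the base images to Docker Hub (requires credentials).
DOCKER_HUB_USERNAME=<user> DOCKER_HUB_PASSWORD=<password> make install
```
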
35 | 
This work is licensed under a Creative Commons Attribution 4.0 International License.
36 |
37 | [fabric-baseimage]: https://hub.docker.com/r/hyperledger/fabric-baseimage/
38 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Hyperledger Security Policy
2 |
3 | ## Reporting a Security Bug
4 |
5 | If you think you have discovered a security issue in any of the Hyperledger projects, we'd love to hear from you. We will take all security bugs seriously and if confirmed upon investigation we will patch it within a reasonable amount of time and release a public security bulletin discussing the impact and credit the discoverer.
6 |
7 | There are two ways to report a security bug. The easiest is to email a description of the flaw and any related information (e.g. reproduction steps, version) to [security at hyperledger dot org](mailto:security@hyperledger.org).
8 |
9 | The other way is to file a confidential security bug in our [JIRA bug tracking system](https://jira.hyperledger.org). Be sure to set the “Security Level” to “Security issue”.
10 |
11 | The process by which the Hyperledger Security Team handles security bugs is documented further in our [Defect Response page](https://wiki.hyperledger.org/display/HYP/Defect+Response) on our [wiki](https://wiki.hyperledger.org).
12 |
13 |
--------------------------------------------------------------------------------
/ci/azure-pipelines-release.yml:
--------------------------------------------------------------------------------
 1 | # Copyright the Hyperledger Fabric contributors. All rights reserved.
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | name: RELEASE-$(Date:yyyyMMdd)$(Rev:.rrr)
6 | trigger: none
7 | pr: none
8 |
 9 | variables:
10 |   - group: credentials
11 |   - name: GOPATH
12 |     value: $(Agent.BuildDirectory)/go
13 |   - name: GOVER
14 |     value: 1.14.12
15 |
16 | stages:
17 |   - stage: BuildAndPushDockerImages
18 |     dependsOn: []
19 |     displayName: "Build and Push Fabric Baseimage Docker Images"
20 |     jobs:
21 |       - job: Docker
22 |         pool:
23 |           vmImage: ubuntu-16.04
24 |         steps:
25 |           - template: install_deps.yml
26 |           - checkout: self
27 |             path: 'go/src/github.com/hyperledger/fabric'
28 |             displayName: Checkout Fabric Code
29 |           - script: ./ci/scripts/publish_docker.sh
30 |             env:
31 |               DOCKER_PASSWORD: $(DockerHub-Password)
32 |               DOCKER_USERNAME: $(DockerHub-Username)
33 |             displayName: Publish Docker Images
34 |
--------------------------------------------------------------------------------
/ci/azure-pipelines.yml:
--------------------------------------------------------------------------------
1 | # Copyright the Hyperledger Fabric contributors. All rights reserved.
2 | #
3 | # SPDX-License-Identifier: Apache-2.0
4 |
5 | name: $(SourceBranchName)-$(Date:yyyyMMdd)$(Rev:.rrr)
6 | trigger:
7 | - main
8 | pr:
9 | - main
10 |
11 | jobs:
12 |   - job: VerifyBuild
13 |     pool:
14 |       vmImage: ubuntu-16.04
15 |     steps:
16 |       - checkout: self
17 |         displayName: Checkout Fabric BaseImage
18 |       - script: make docker dependent-images
19 |         displayName: Build Images
20 |
--------------------------------------------------------------------------------
/ci/scripts/publish_docker.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright IBM Corp. All Rights Reserved.
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 | set -euo pipefail
6 |
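# Note: RELEASE, DOCKER_USERNAME, and DOCKER_PASSWORD are expected to be
# provided by the calling environment (e.g. the CI release pipeline).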
7 | make docker dependent-images
8 |
9 | wget -qO "$PWD/manifest-tool" https://github.com/estesp/manifest-tool/releases/download/v1.0.0/manifest-tool-linux-amd64
10 | chmod +x ./manifest-tool
11 |
12 | for image in baseos baseimage kafka zookeeper couchdb; do
13 | docker login --username "${DOCKER_USERNAME}" --password "${DOCKER_PASSWORD}"
14 | docker tag "hyperledger/fabric-${image}" "hyperledger/fabric-${image}:amd64-${RELEASE}"
15 | docker push "hyperledger/fabric-${image}:amd64-${RELEASE}"
16 |
17 | ./manifest-tool push from-args --platforms linux/amd64 --template "hyperledger/fabric-${image}:amd64-${RELEASE}" --target "hyperledger/fabric-${image}:${RELEASE}"
18 | ./manifest-tool push from-args --platforms linux/amd64 --template "hyperledger/fabric-${image}:amd64-${RELEASE}" --target "hyperledger/fabric-${image}:$(sed 's/..$//' <<< ${RELEASE})"
19 | ./manifest-tool push from-args --platforms linux/amd64 --template "hyperledger/fabric-${image}:amd64-${RELEASE}" --target "hyperledger/fabric-${image}:latest"
20 | done
21 |
--------------------------------------------------------------------------------
/config/baseimage/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright Greg Haskins All Rights Reserved.
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 | #
6 | FROM adoptopenjdk:8u222-b10-jdk-openj9-0.15.1
7 | COPY scripts /tmp/scripts
8 | RUN cd /tmp/scripts && \
9 | common/packages.sh && \
10 | common/setup.sh && \
11 | docker/fixup.sh && \
12 | common/cleanup.sh && \
13 | rm -rf /tmp/scripts
14 | ENV GOPATH=/opt/gopath
15 | ENV GOROOT=/opt/go
16 | ENV PATH=$PATH:$GOROOT/bin:$GOPATH/bin
17 |
--------------------------------------------------------------------------------
/config/baseos/Dockerfile:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright Greg Haskins All Rights Reserved.
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 | #
6 | FROM debian:buster-20190910-slim
7 | COPY scripts /tmp/scripts
8 | RUN cd /tmp/scripts && \
9 | common/init.sh && \
10 | docker/init.sh && \
11 | common/cleanup.sh && \
12 | rm -rf /tmp/scripts
13 |
--------------------------------------------------------------------------------
/images/couchdb/10-docker-default.ini:
--------------------------------------------------------------------------------
1 | ; CouchDB Configuration Settings
2 |
3 | ; Custom settings should be made in this file. They will override settings
4 | ; in default.ini, but unlike changes made to default.ini, this file won't be
5 | ; overwritten on server upgrade.
6 |
7 | [chttpd]
8 | bind_address = any
9 |
10 | [httpd]
11 | bind_address = any
12 |
13 | ; Specify the maximum size of the HTTP request body
14 | max_http_request_size = 4294967296
15 |
--------------------------------------------------------------------------------
/images/couchdb/20-fabric-default.ini:
--------------------------------------------------------------------------------
1 | ; CouchDB Configuration Settings
2 |
3 | ; Custom settings preferred by Fabric
4 |
5 | [couchdb]
6 | ; ensure that uri_file is located in the data volume
7 | uri_file = ./data/couch.uri
8 |
9 | ; Specify the number of database shards that can be open concurrently.
10 | ; CouchDB uses LRU cache to manage open databases, and closes databases
11 | ; as needed. Deployments with large numbers of channels and high
12 | ; concurrency may need to increase this setting.
13 | max_dbs_open = 8000
14 |
15 | ; delayed_commits must remain at the CouchDB default 'false' to ensure data is flushed to disk upon every write
16 |
17 | ; Specify the maximum document body size
18 | max_document_size = 4294967296
19 |
20 | [cluster]
21 | ; peer maintains a single replica
22 | n = 1
23 |
24 | ; adjust q to set the level of parallelism locally
25 | ; recommended to have no more than 10 million documents/shard (q)
26 | ; for 100 million documents, q=10 -- at a minimum
27 | q = 8
28 |
29 | ;This is a default rule for all databases.
30 | ;When database fragmentation (unused versions) reaches 30% of the total
31 | ;file size, the database will be compacted.
32 | [compactions]
33 | _default = [{db_fragmentation, "30%"}, {view_fragmentation, "30%"}]
34 | ;Optional compaction default that will only allow compactions from 11PM to 4AM
35 | ;_default = [{db_fragmentation, "30%"}, {view_fragmentation, "30%"}, {from, "23:00"}, {to, "04:00"}]
36 |
37 | ;Database compaction settings.
38 | ;Databases will be checked every 300s (5min)
39 | ;Databases less than 256K in size will not be compacted
40 | [compaction_daemon]
41 | check_interval = 300
42 | min_file_size = 256000
43 |
44 | [couch_httpd_auth]
45 | iterations = 1000 ; iterations for password hashing
46 |
47 | [attachments]
48 | compressible_types = text/*, application/javascript, application/json, application/xml, application/octet-stream
49 |
--------------------------------------------------------------------------------
/images/couchdb/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright Greg Haskins All Rights Reserved
2 | # SPDX-License-Identifier: Apache-2.0
3 | # Based on https://github.com/apache/couchdb-docker/blob/main/2.1.1/Dockerfile
4 |
5 | FROM debian:stretch-20190910-slim
6 |
7 | # Add CouchDB user account
8 | RUN groupadd -g 5984 -r couchdb && useradd -u 5984 -d /opt/couchdb -g couchdb couchdb
9 |
10 | RUN apt-get update -y && apt-get install -y --no-install-recommends \
11 | ca-certificates \
12 | curl \
13 | erlang-nox \
14 | erlang-reltool \
15 | libicu57 \
16 | libmozjs185-1.0 \
17 | openssl \
18 | dirmngr \
19 | gnupg \
20 | && rm -rf /var/lib/apt/lists/*
21 |
22 | # grab gosu for easy step-down from root and tini for signal handling
23 | # see https://github.com/apache/couchdb-docker/pull/28#discussion_r141112407
24 | ENV GOSU_VERSION 1.10
25 | ENV TINI_VERSION 0.16.1
26 | RUN set -ex; \
27 | \
28 | apt-get update; \
29 | apt-get install -y --no-install-recommends wget; \
30 | rm -rf /var/lib/apt/lists/*; \
31 | \
32 | dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')"; \
33 | \
34 | # install gosu
35 | wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/${GOSU_VERSION}/gosu-$dpkgArch"; \
36 | wget -O /usr/local/bin/gosu.asc "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch.asc"; \
37 | export GNUPGHOME="$(mktemp -d)"; \
38 | for server in $(shuf -e ha.pool.sks-keyservers.net \
39 | hkp://p80.pool.sks-keyservers.net:80 \
40 | keyserver.ubuntu.com \
41 | hkp://keyserver.ubuntu.com:80 \
42 | pgp.mit.edu) ; do \
43 | gpg --keyserver "$server" --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4 && break || : ; \
44 | done; \
45 | gpg --batch --verify /usr/local/bin/gosu.asc /usr/local/bin/gosu; \
46 | rm -r "$GNUPGHOME" || true; rm -r /usr/local/bin/gosu.asc || true; \
47 | chmod +x /usr/local/bin/gosu; \
48 | gosu nobody true; \
49 | \
50 | # install tini
51 | wget -O /usr/local/bin/tini "https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini-$dpkgArch"; \
52 | wget -O /usr/local/bin/tini.asc "https://github.com/krallin/tini/releases/download/v${TINI_VERSION}/tini-$dpkgArch.asc"; \
53 | export GNUPGHOME="$(mktemp -d)"; \
54 | for server in $(shuf -e ha.pool.sks-keyservers.net \
55 | hkp://p80.pool.sks-keyservers.net:80 \
56 | keyserver.ubuntu.com \
57 | hkp://keyserver.ubuntu.com:80 \
58 | pgp.mit.edu) ; do \
59 | gpg --keyserver "$server" --recv-keys 595E85A6B1B4779EA4DAAEC70B588DFF0527A9B7 && break || : ; \
60 | done; \
61 | gpg --batch --verify /usr/local/bin/tini.asc /usr/local/bin/tini; \
62 | rm -r "$GNUPGHOME" || true; rm -r /usr/local/bin/tini.asc || true; \
63 | chmod +x /usr/local/bin/tini; \
64 | tini --version; \
65 | \
66 | apt-get purge -y --auto-remove wget
67 |
68 | # https://www.apache.org/dist/couchdb/KEYS
69 | ENV GPG_KEYS \
70 | 15DD4F3B8AACA54740EB78C7B7B7C53943ECCEE1 \
71 | 1CFBFA43C19B6DF4A0CA3934669C02FFDF3CEBA3 \
72 | 25BBBAC113C1BFD5AA594A4C9F96B92930380381 \
73 | 4BFCA2B99BADC6F9F105BEC9C5E32E2D6B065BFB \
74 | 5D680346FAA3E51B29DBCB681015F68F9DA248BC \
75 | 7BCCEB868313DDA925DF1805ECA5BCB7BB9656B0 \
76 | C3F4DFAEAD621E1C94523AEEC376457E61D50B88 \
77 | D2B17F9DA23C0A10991AF2E3D9EE01E47852AEE4 \
78 | E0AF0A194D55C84E4A19A801CDB0C0F904F4EE9B \
79 | 29E4F38113DF707D722A6EF91FE9AF73118F1A7C \
80 | 2EC788AE3F239FA13E82D215CDE711289384AE37
81 | RUN set -xe \
82 | && for key in $GPG_KEYS; do \
83 | for server in $(shuf -e ha.pool.sks-keyservers.net \
84 | hkp://p80.pool.sks-keyservers.net:80 \
85 | keyserver.ubuntu.com \
86 | hkp://keyserver.ubuntu.com:80 \
87 | pgp.mit.edu) ; do \
88 | gpg --keyserver "$server" --recv-keys "$key" && break || : ; \
89 | done; \
90 | done
91 |
92 | ENV COUCHDB_VERSION 2.3.1
93 |
94 | # Download dev dependencies
95 | RUN buildDeps=' \
96 | apt-transport-https \
97 | gcc \
98 | g++ \
99 | erlang-dev \
100 | libcurl4-openssl-dev \
101 | libicu-dev \
102 | libmozjs185-dev \
103 | make \
104 | ' \
105 | && apt-get update -y -qq && apt-get install -y --no-install-recommends $buildDeps \
106 | # Acquire CouchDB source code
107 | && cd /usr/src && mkdir couchdb \
108 | && curl -fSL https://archive.apache.org/dist/couchdb/source/$COUCHDB_VERSION/apache-couchdb-$COUCHDB_VERSION.tar.gz -o couchdb.tar.gz \
109 | && curl -fSL https://archive.apache.org/dist/couchdb/source/$COUCHDB_VERSION/apache-couchdb-$COUCHDB_VERSION.tar.gz.asc -o couchdb.tar.gz.asc \
110 | && gpg --batch --verify couchdb.tar.gz.asc couchdb.tar.gz \
111 | && tar -xzf couchdb.tar.gz -C couchdb --strip-components=1 \
112 | && cd couchdb \
113 | # Build the release and install into /opt
114 | && ./configure --disable-docs \
115 | && make release \
116 | && mv /usr/src/couchdb/rel/couchdb /opt/ \
117 | # Cleanup build detritus
118 | && apt-get purge -y --auto-remove $buildDeps \
119 | && rm -rf /var/lib/apt/lists/* /usr/src/couchdb* \
120 | && mkdir /opt/couchdb/data \
121 | && chown -R couchdb:couchdb /opt/couchdb
122 |
123 | # Add configuration
124 | COPY ./vm.args /opt/couchdb/etc/
125 | COPY ./10-docker-default.ini /opt/couchdb/etc/default.d/
126 | COPY ./20-fabric-default.ini /opt/couchdb/etc/default.d/
127 |
128 | COPY ./docker-entrypoint.sh /
129 |
130 | # Setup directories and permissions
131 | RUN chmod +x /docker-entrypoint.sh \
132 | && chown -R couchdb:couchdb /opt/couchdb/etc/default.d/ /opt/couchdb/etc/vm.args \
133 | && chmod -R 0770 /opt/couchdb/data \
134 | && chmod 664 /opt/couchdb/etc/*.ini \
135 | && chmod 664 /opt/couchdb/etc/default.d/*.ini \
136 | && chmod 775 /opt/couchdb/etc/*.d
137 |
138 | WORKDIR /opt/couchdb
139 | EXPOSE 5984 4369 9100
140 |
141 | VOLUME ["/opt/couchdb/data"]
142 | VOLUME ["/opt/couchdb/etc/local.d"]
143 |
144 | ENTRYPOINT ["tini", "--", "/docker-entrypoint.sh"]
145 | CMD ["/opt/couchdb/bin/couchdb"]
146 |
--------------------------------------------------------------------------------
/images/couchdb/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
3 | # use this file except in compliance with the License. You may obtain a copy of
4 | # the License at
5 | #
6 | # http://www.apache.org/licenses/LICENSE-2.0
7 | #
8 | # Unless required by applicable law or agreed to in writing, software
9 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
10 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
11 | # License for the specific language governing permissions and limitations under
12 | # the License.
13 |
14 | set -e
15 |
16 | # first arg is `-something` or `+something`
17 | if [ "${1#-}" != "$1" ] || [ "${1#+}" != "$1" ]; then
18 | set -- /opt/couchdb/bin/couchdb "$@"
19 | fi
20 |
21 | # first arg is the bare word `couchdb`
22 | if [ "$1" = 'couchdb' ]; then
23 | shift
24 | set -- /opt/couchdb/bin/couchdb "$@"
25 | fi
26 |
27 | if [ "$1" = '/opt/couchdb/bin/couchdb' ]; then
28 | # Check that we own everything in /opt/couchdb and fix if necessary. We also
29 | # add the `-f` flag in all the following invocations because there may be
30 | # cases where some of these ownership and permissions issues are non-fatal
31 | # (e.g. a config file owned by root with o+r is actually fine), and we don't
32 | # want to be too aggressive about crashing here ...
33 | find /opt/couchdb \! \( -user couchdb -group couchdb \) -exec chown -f couchdb:couchdb '{}' +
34 |
35 | # Ensure that data files have the correct permissions. We were previously
36 | # preventing any access to these files outside of couchdb:couchdb, but it
37 | # turns out that CouchDB itself does not set such restrictive permissions
38 | # when it creates the files. The approach taken here ensures that the
39 | # contents of the datadir have the same permissions as they had when they
40 | # were initially created. This should minimize any startup delay.
41 | find /opt/couchdb/data -type d ! -perm 0755 -exec chmod -f 0755 '{}' +
42 | find /opt/couchdb/data -type f ! -perm 0644 -exec chmod -f 0644 '{}' +
43 |
44 | # Do the same thing for configuration files and directories. Technically
45 | # CouchDB only needs read access to the configuration files as all online
46 | # changes will be applied to the "docker.ini" file below, but we set 644
47 | # for the sake of consistency.
48 | find /opt/couchdb/etc -type d ! -perm 0755 -exec chmod -f 0755 '{}' +
49 | find /opt/couchdb/etc -type f ! -perm 0644 -exec chmod -f 0644 '{}' +
50 |
51 | if [ ! -z "$NODENAME" ] && ! grep "couchdb@" /opt/couchdb/etc/vm.args; then
52 | echo "-name couchdb@$NODENAME" >> /opt/couchdb/etc/vm.args
53 | fi
54 |
55 | # Ensure that CouchDB will write custom settings in this file
56 | touch /opt/couchdb/etc/local.d/docker.ini
57 |
58 | if [ "$COUCHDB_USER" ] && [ "$COUCHDB_PASSWORD" ]; then
59 | # Create admin only if not already present
60 | if ! grep -Pzoqr "\[admins\]\n$COUCHDB_USER =" /opt/couchdb/etc/local.d/*.ini; then
61 | printf "\n[admins]\n%s = %s\n" "$COUCHDB_USER" "$COUCHDB_PASSWORD" >> /opt/couchdb/etc/local.d/docker.ini
62 | fi
63 | fi
64 |
65 | if [ "$COUCHDB_SECRET" ]; then
66 | # Set secret only if not already present
67 | if ! grep -Pzoqr "\[couch_httpd_auth\]\nsecret =" /opt/couchdb/etc/local.d/*.ini; then
68 | printf "\n[couch_httpd_auth]\nsecret = %s\n" "$COUCHDB_SECRET" >> /opt/couchdb/etc/local.d/docker.ini
69 | fi
70 | fi
71 |
72 | chown -f couchdb:couchdb /opt/couchdb/etc/local.d/docker.ini || true
73 |
74 | # if we don't find an [admins] section followed by a non-comment, display a warning
75 | if ! grep -Pzoqr '\[admins\]\n[^;]\w+' /opt/couchdb/etc/default.d/*.ini /opt/couchdb/etc/local.d/*.ini; then
76 | # The - option suppresses leading tabs but *not* spaces. :)
77 | cat >&2 <<-'EOWARN'
78 | ****************************************************
79 | WARNING: CouchDB is running in Admin Party mode.
80 | This will allow anyone with access to the
81 | CouchDB port to access your database. In
82 | Docker's default configuration, this is
83 | effectively any other container on the same
84 | system.
85 | Use "-e COUCHDB_USER=admin -e COUCHDB_PASSWORD=password"
86 | to set it in "docker run".
87 | ****************************************************
88 | EOWARN
89 | fi
90 |
91 |
92 | exec gosu couchdb "$@"
93 | fi
94 |
95 | exec "$@"
96 |
--------------------------------------------------------------------------------
/images/couchdb/vm.args:
--------------------------------------------------------------------------------
1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
2 | # use this file except in compliance with the License. You may obtain a copy of
3 | # the License at
4 | #
5 | # http://www.apache.org/licenses/LICENSE-2.0
6 | #
7 | # Unless required by applicable law or agreed to in writing, software
8 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10 | # License for the specific language governing permissions and limitations under
11 | # the License.
12 |
13 | # Ensure that the Erlang VM listens on a known port
14 | -kernel inet_dist_listen_min 9100
15 | -kernel inet_dist_listen_max 9100
16 |
17 | # Tell kernel and SASL not to log anything
18 | -kernel error_logger silent
19 | -sasl sasl_error_logger false
20 |
21 | # Use kernel poll functionality if supported by emulator
22 | +K true
23 |
24 | # Start a pool of asynchronous IO threads
25 | +A 16
26 |
27 | # Comment this line out to enable the interactive Erlang shell on startup
28 | +Bd -noinput
29 |
--------------------------------------------------------------------------------
/images/kafka/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright Greg Haskins All Rights Reserved
2 | # Copyright IBM Corp. All Rights Reserved.
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 | #
6 | FROM debian:buster-20190910-slim as download
7 | RUN apt-get update \
8 | && apt-get install -y curl \
9 | tar \
10 | gnupg;
11 |
12 | ENV SCALA_VERSION=2.11 \
13 | KAFKA_VERSION=1.0.2 \
14 | KAFKA_DOWNLOAD_SHA1=4B56E63F9E5E69BCAA0E15313F75F1B15F6CF1E4
15 |
16 | RUN curl -fSL "http://archive.apache.org/dist/kafka/${KAFKA_VERSION}/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" -o kafka.tgz
17 |
18 | RUN echo "${KAFKA_DOWNLOAD_SHA1} kafka.tgz" | sha1sum -c - \
19 | && mkdir /opt/kafka \
20 | && tar xfz kafka.tgz -C /opt/kafka --strip-components=1 \
21 | && rm -f kafka.tgz;
22 |
23 | FROM adoptopenjdk:8u222-b10-jre-openj9-0.15.1
24 | COPY --from=download /opt/kafka /opt/kafka
25 | ADD ./kafka-run-class.sh /opt/kafka/bin/kafka-run-class.sh
26 | ADD ./docker-entrypoint.sh /docker-entrypoint.sh
27 |
28 | EXPOSE 9092
29 | EXPOSE 9093
30 |
31 | ENTRYPOINT ["/docker-entrypoint.sh"]
32 | CMD ["/opt/kafka/bin/kafka-server-start.sh"]
33 |
--------------------------------------------------------------------------------
/images/kafka/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Copyright IBM Corp. All Rights Reserved.
4 | #
5 | # SPDX-License-Identifier: Apache-2.0
6 | #
7 |
8 | # This script will either start the kafka server, or run the user
9 | # specified command.
10 |
11 | # Exit immediately if a pipeline returns a non-zero status.
12 | set -e
13 |
14 | KAFKA_HOME=/opt/kafka
15 | KAFKA_EXE=${KAFKA_HOME}/bin/kafka-server-start.sh
16 | KAFKA_SERVER_PROPERTIES=${KAFKA_HOME}/config/server.properties
17 |
18 | # handle starting the kafka server with an option
19 | # (generically handled, but only --override known to me at this time)
20 | if [ "${1:0:1}" = '-' ]; then
21 | set -- ${KAFKA_EXE} ${KAFKA_SERVER_PROPERTIES} "$@"
22 | fi
23 |
24 | # handle default (i.e. no custom options or commands)
25 | if [ "$1" = "${KAFKA_EXE}" ]; then
26 |
27 | # add the server.properties to the command
28 | set -- ${KAFKA_EXE} ${KAFKA_SERVER_PROPERTIES}
29 |
30 | # compute the advertised host name if a command was specified
31 | if [[ -z ${KAFKA_ADVERTISED_HOST_NAME} && -n ${KAFKA_ADVERTISED_HOST_NAME_COMMAND} ]] ; then
32 | export KAFKA_ADVERTISED_HOST_NAME=$(eval ${KAFKA_ADVERTISED_HOST_NAME_COMMAND})
33 | fi
34 |
35 | # compute the advertised port if a command was specified
36 | if [[ -z ${KAFKA_ADVERTISED_PORT} && -n ${KAFKA_ADVERTISED_PORT_COMMAND} ]] ; then
37 | export KAFKA_ADVERTISED_PORT=$(eval ${KAFKA_ADVERTISED_PORT_COMMAND})
38 | fi
39 |
40 | # default to auto set the broker id
41 | if [ -z "$KAFKA_BROKER_ID" ] ; then
42 | export KAFKA_BROKER_ID=-1
43 | fi
44 |
45 | # disable time based log retention by default
46 | if [ -z "$KAFKA_LOG_RETENTION_MS" ] ; then
47 | export KAFKA_LOG_RETENTION_MS=-1
48 | fi
49 |
50 | # add newline to end of server.properties if missing
51 | tail -c 1 ${KAFKA_SERVER_PROPERTIES} | read -r _ || printf "\n" >> ${KAFKA_SERVER_PROPERTIES}
52 |
53 | # update server.properties by searching for environment variables named
54 | # KAFKA_* and converting them to properties in the kafka server properties file.
55 | for ENV_ENTRY in $(env | grep "^KAFKA_") ; do
56 | # skip some entries that do not belong in server.properties
57 | if [[ $ENV_ENTRY =~ ^KAFKA_HOME= ]] ; then continue ; fi
58 | if [[ $ENV_ENTRY =~ ^KAFKA_EXE= ]] ; then continue ; fi
59 | if [[ $ENV_ENTRY =~ ^KAFKA_VERSION= ]] ; then continue ; fi
60 | if [[ $ENV_ENTRY =~ ^KAFKA_DOWNLOAD_SHA1= ]] ; then continue ; fi
61 | if [[ $ENV_ENTRY =~ ^KAFKA_SERVER_PROPERTIES= ]] ; then continue ; fi
62 | if [[ $ENV_ENTRY =~ ^KAFKA_ADVERTISED_HOST_NAME_COMMAND= ]] ; then continue ; fi
63 | if [[ $ENV_ENTRY =~ ^KAFKA_ADVERTISED_PORT_COMMAND= ]] ; then continue ; fi
64 | # transform KAFKA_XXX_YYY to xxx.yyy
65 | KAFKA_PROPERTY_NAME="$(echo ${ENV_ENTRY%%=*} | sed -e 's/^KAFKA_//;s/_/./g' | tr '[:upper:]' '[:lower:]')"
66 | # get property value
67 | KAFKA_PROPERTY_VALUE="${ENV_ENTRY#*=}"
68 | # update server.properties
69 | if grep -q "^\s*#\?\s*${KAFKA_PROPERTY_NAME}" ${KAFKA_SERVER_PROPERTIES} ; then
70 | # the property is already defined (maybe even commented out), so edit the file
71 | sed -i -e "s|^\s*${KAFKA_PROPERTY_NAME}\s*=.*$|${KAFKA_PROPERTY_NAME}=${KAFKA_PROPERTY_VALUE}|" ${KAFKA_SERVER_PROPERTIES}
72 | sed -i -e "s|^\s*#\s*${KAFKA_PROPERTY_NAME}\s*=.*$|${KAFKA_PROPERTY_NAME}=${KAFKA_PROPERTY_VALUE}|" ${KAFKA_SERVER_PROPERTIES}
73 | else
74 | echo "${KAFKA_PROPERTY_NAME}=${KAFKA_PROPERTY_VALUE}">>${KAFKA_SERVER_PROPERTIES}
75 | fi
76 | done
77 | fi
78 |
79 | exec "$@"
80 |
--------------------------------------------------------------------------------
/images/kafka/kafka-run-class.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Licensed to the Apache Software Foundation (ASF) under one or more
3 | # contributor license agreements. See the NOTICE file distributed with
4 | # this work for additional information regarding copyright ownership.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | if [ $# -lt 1 ];
18 | then
19 | echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]"
20 | exit 1
21 | fi
22 |
23 | # CYGWIN == 1 if Cygwin is detected, else 0.
24 | if [[ $(uname -a) =~ "CYGWIN" ]]; then
25 | CYGWIN=1
26 | else
27 | CYGWIN=0
28 | fi
29 |
30 | if [ -z "$INCLUDE_TEST_JARS" ]; then
31 | INCLUDE_TEST_JARS=false
32 | fi
33 |
34 | # Exclude jars not necessary for running commands.
35 | regex="(-(test|src|scaladoc|javadoc)\.jar|jar.asc)$"
36 | should_include_file() {
37 | if [ "$INCLUDE_TEST_JARS" = true ]; then
38 | return 0
39 | fi
40 | file=$1
41 | if [ -z "$(echo "$file" | egrep "$regex")" ] ; then
42 | return 0
43 | else
44 | return 1
45 | fi
46 | }
47 |
48 | base_dir=$(dirname $0)/..
49 |
50 | if [ -z "$SCALA_VERSION" ]; then
51 | SCALA_VERSION=2.11.11
52 | fi
53 |
54 | if [ -z "$SCALA_BINARY_VERSION" ]; then
55 | SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.')
56 | fi
57 |
58 | # run ./gradlew copyDependantLibs to get all dependant jars in a local dir
59 | shopt -s nullglob
60 | for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*;
61 | do
62 | if [ -z "$CLASSPATH" ] ; then
63 | CLASSPATH="$dir/*"
64 | else
65 | CLASSPATH="$CLASSPATH:$dir/*"
66 | fi
67 | done
68 |
69 | for file in "$base_dir"/examples/build/libs/kafka-examples*.jar;
70 | do
71 | if should_include_file "$file"; then
72 | CLASSPATH="$CLASSPATH":"$file"
73 | fi
74 | done
75 |
76 | for file in "$base_dir"/clients/build/libs/kafka-clients*.jar;
77 | do
78 | if should_include_file "$file"; then
79 | CLASSPATH="$CLASSPATH":"$file"
80 | fi
81 | done
82 |
83 | for file in "$base_dir"/streams/build/libs/kafka-streams*.jar;
84 | do
85 | if should_include_file "$file"; then
86 | CLASSPATH="$CLASSPATH":"$file"
87 | fi
88 | done
89 |
90 | for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar;
91 | do
92 | if should_include_file "$file"; then
93 | CLASSPATH="$CLASSPATH":"$file"
94 | fi
95 | done
96 |
97 | for file in "$base_dir"/streams/build/dependant-libs-${SCALA_VERSION}/rocksdb*.jar;
98 | do
99 | CLASSPATH="$CLASSPATH":"$file"
100 | done
101 |
102 | for file in "$base_dir"/tools/build/libs/kafka-tools*.jar;
103 | do
104 | if should_include_file "$file"; then
105 | CLASSPATH="$CLASSPATH":"$file"
106 | fi
107 | done
108 |
109 | for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*;
110 | do
111 | CLASSPATH="$CLASSPATH:$dir/*"
112 | done
113 |
114 | for cc_pkg in "api" "transforms" "runtime" "file" "json" "tools"
115 | do
116 | for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar;
117 | do
118 | if should_include_file "$file"; then
119 | CLASSPATH="$CLASSPATH":"$file"
120 | fi
121 | done
122 | if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ] ; then
123 | CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*"
124 | fi
125 | done
126 |
127 | # classpath addition for release
128 | for file in "$base_dir"/libs/*;
129 | do
130 | if should_include_file "$file"; then
131 | CLASSPATH="$CLASSPATH":"$file"
132 | fi
133 | done
134 |
135 | for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar;
136 | do
137 | if should_include_file "$file"; then
138 | CLASSPATH="$CLASSPATH":"$file"
139 | fi
140 | done
141 | shopt -u nullglob
142 |
143 | if [ -z "$CLASSPATH" ] ; then
144 | echo "Classpath is empty. Please build the project first e.g. by running './gradlew jar -Pscala_version=$SCALA_VERSION'"
145 | exit 1
146 | fi
147 |
148 | # JMX settings
149 | if [ -z "$KAFKA_JMX_OPTS" ]; then
150 | KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
151 | fi
152 |
153 | # JMX port to use
154 | if [ $JMX_PORT ]; then
155 | KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT "
156 | fi
157 |
158 | # Log directory to use
159 | if [ "x$LOG_DIR" = "x" ]; then
160 | LOG_DIR="$base_dir/logs"
161 | fi
162 |
163 | # Log4j settings
164 | if [ -z "$KAFKA_LOG4J_OPTS" ]; then
165 | # Log to console. This is a tool.
166 | LOG4J_DIR="$base_dir/config/tools-log4j.properties"
167 | # If Cygwin is detected, LOG4J_DIR is converted to Windows format.
168 | (( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}")
169 | KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}"
170 | else
171 | # create logs directory
172 | if [ ! -d "$LOG_DIR" ]; then
173 | mkdir -p "$LOG_DIR"
174 | fi
175 | fi
176 |
177 | # If Cygwin is detected, LOG_DIR is converted to Windows format.
178 | (( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}")
179 | KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS"
180 |
181 | # Generic jvm settings you want to add
182 | if [ -z "$KAFKA_OPTS" ]; then
183 | KAFKA_OPTS=""
184 | fi
185 |
186 | # Set Debug options if enabled
187 | if [ "x$KAFKA_DEBUG" != "x" ]; then
188 |
189 | # Use default ports
190 | DEFAULT_JAVA_DEBUG_PORT="5005"
191 |
192 | if [ -z "$JAVA_DEBUG_PORT" ]; then
193 | JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
194 | fi
195 |
196 | # Use the defaults if JAVA_DEBUG_OPTS was not set
197 | DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=$JAVA_DEBUG_PORT"
198 | if [ -z "$JAVA_DEBUG_OPTS" ]; then
199 | JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
200 | fi
201 |
202 | echo "Enabling Java debug options: $JAVA_DEBUG_OPTS"
203 | KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS"
204 | fi
205 |
206 | # Which java to use
207 | if [ -z "$JAVA_HOME" ]; then
208 | JAVA="java"
209 | else
210 | JAVA="$JAVA_HOME/bin/java"
211 | fi
212 |
213 | # Memory options
214 | if [ -z "$KAFKA_HEAP_OPTS" ]; then
215 | KAFKA_HEAP_OPTS="-Xmx256M"
216 | fi
217 |
218 | # JVM performance options
219 | if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
220 | KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true"
221 | fi
222 |
223 |
224 | while [ $# -gt 0 ]; do
225 | COMMAND=$1
226 | case $COMMAND in
227 | -name)
228 | DAEMON_NAME=$2
229 | CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out
230 | shift 2
231 | ;;
232 | -loggc)
233 | if [ -z "$KAFKA_GC_LOG_OPTS" ]; then
234 | GC_LOG_ENABLED="true"
235 | fi
236 | shift
237 | ;;
238 | -daemon)
239 | DAEMON_MODE="true"
240 | shift
241 | ;;
242 | *)
243 | break
244 | ;;
245 | esac
246 | done
247 |
248 | # GC options
249 | GC_FILE_SUFFIX='-gc.log'
250 | GC_LOG_FILE_NAME=''
251 | if [ "x$GC_LOG_ENABLED" = "xtrue" ]; then
252 | GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX
253 | if $JAVA -XshowSettings:properties -version 2>&1 | grep --silent "^\s*java.vendor\s*=\s*IBM Corporation" ; then
254 | # IBM Java uses -Xverbosegclog instead of either -Xloggc or -Xlog:gc*:file
255 | KAFKA_GC_LOG_OPTS="-Xverbosegclog:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
256 | else
257 | # the first segment of the version number, which is '1' for releases before Java 9
258 | # it then becomes '9', '10', ...
259 | JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([^.-]*).*"/\1/p')
260 | if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
261 | KAFKA_GC_LOG_OPTS="-Xlog:gc*:file=$LOG_DIR/$GC_LOG_FILE_NAME:time,tags:filecount=10,filesize=102400"
262 | else
263 | KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
264 | fi
265 | fi
266 | fi
267 |
268 | # If Cygwin is detected, classpath is converted to Windows format.
269 | (( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}")
270 |
271 | # Launch mode
272 | if [ "x$DAEMON_MODE" = "xtrue" ]; then
273 | nohup $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null &
274 | else
275 | exec $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@"
276 | fi
277 |
--------------------------------------------------------------------------------
/images/zookeeper/Dockerfile:
--------------------------------------------------------------------------------
1 | # Copyright Greg Haskins All Rights Reserved
2 | # Copyright IBM Corp. All Rights Reserved.
3 | #
4 | # SPDX-License-Identifier: Apache-2.0
5 | #
6 | FROM debian:buster-20190910-slim as download
7 | # Based on https://github.com/31z4/zookeeper-docker/blob/master/3.4.9/Dockerfile
8 | RUN apt-get update \
9 | && apt-get install -y curl tar git make gcc
10 |
11 | # Install su-exec
12 | RUN set -x \
13 | && git clone https://github.com/ncopa/su-exec /tmp/su-exec/ \
14 | && cd /tmp/su-exec \
15 | && make all \
16 | && cp su-exec /usr/local/bin/
17 |
18 | ENV ZOO_USER=zookeeper \
19 | ZOO_CONF_DIR=/conf
20 |
21 | ARG GPG_KEY=C823E3E5B12AF29C67F81976F5CECB3CB5E9BD2D
22 | ARG DISTRO_NAME=zookeeper-3.4.14
23 |
24 | # Download Apache Zookeeper, verify its PGP signature, untar and clean up
25 | RUN set -x \
26 | && mkdir "$ZOO_CONF_DIR" \
27 | && cd / \
28 | && curl -fsSL "http://archive.apache.org/dist/zookeeper/$DISTRO_NAME/$DISTRO_NAME.tar.gz" | tar -xz \
29 | && mv "$DISTRO_NAME/conf/"* "$ZOO_CONF_DIR"
30 |
31 |
32 | FROM adoptopenjdk:8u222-b10-jre-openj9-0.15.1
33 | ARG DISTRO_NAME=zookeeper-3.4.14
34 | ENV ZOO_USER=zookeeper \
35 | ZOO_CONF_DIR=/conf \
36 | ZOO_DATA_DIR=/data \
37 | ZOO_DATA_LOG_DIR=/datalog \
38 | ZOO_PORT=2181 \
39 | ZOO_TICK_TIME=2000 \
40 | ZOO_INIT_LIMIT=5 \
41 | ZOO_SYNC_LIMIT=2 \
42 | ZOO_AUTOPURGE_SNAPRETAINCOUNT=3 \
43 | ZOO_AUTOPURGE_PURGEINTERVAL=1
44 |
45 | # Add a user and make dirs
46 | RUN set -x \
47 | && useradd "$ZOO_USER" \
48 | && mkdir -p "$ZOO_DATA_LOG_DIR" "$ZOO_DATA_DIR" "$ZOO_CONF_DIR" \
49 | && chown "$ZOO_USER:$ZOO_USER" "$ZOO_DATA_LOG_DIR" "$ZOO_DATA_DIR" "$ZOO_CONF_DIR"
50 |
51 | WORKDIR $DISTRO_NAME
52 |
53 | VOLUME ["$ZOO_DATA_DIR", "$ZOO_DATA_LOG_DIR"]
54 |
55 | EXPOSE $ZOO_PORT 2888 3888
56 |
57 | ENV PATH=$PATH:/$DISTRO_NAME/bin \
58 | ZOOCFGDIR=$ZOO_CONF_DIR
59 |
60 | COPY --from=download /usr/local/bin /usr/bin
61 | COPY --from=download ${ZOO_CONF_DIR} ${ZOO_CONF_DIR}
62 | COPY --from=download /${DISTRO_NAME} /${DISTRO_NAME}
63 | COPY ./docker-entrypoint.sh /
64 |
65 | RUN chown -R "$ZOO_USER:$ZOO_USER" "$ZOO_CONF_DIR"
66 |
67 | ENTRYPOINT ["/docker-entrypoint.sh"]
68 | CMD ["zkServer.sh", "start-foreground"]
69 |
--------------------------------------------------------------------------------
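A minimal sketch of building and running this image locally; the tag matches the fabric-zookeeper name used by scripts/multiarch.sh, while the ZOO_MY_ID and ZOO_SERVERS values are illustrative assumptions rather than defaults from this repository:

    # build from the repository root (assumed working directory)
    docker build -t hyperledger/fabric-zookeeper images/zookeeper

    # run a single-node ensemble; ZOO_MY_ID and ZOO_SERVERS are read by docker-entrypoint.sh below
    docker run -d --name zookeeper \
        -p 2181:2181 -p 2888:2888 -p 3888:3888 \
        -e ZOO_MY_ID=1 \
        -e ZOO_SERVERS="server.1=0.0.0.0:2888:3888" \
        hyperledger/fabric-zookeeper
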
/images/zookeeper/docker-entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright IBM Corp. All Rights Reserved.
4 | #
5 | # SPDX-License-Identifier: Apache-2.0
6 | #
7 |
8 |
9 | set -e
10 |
11 | # Allow the container to be started with `--user`
12 | if [ "$1" = 'zkServer.sh' -a "$(id -u)" = '0' ]; then
13 | chown -R "$ZOO_USER" "$ZOO_DATA_DIR" "$ZOO_DATA_LOG_DIR"
14 | exec su-exec "$ZOO_USER" "$0" "$@"
15 | fi
16 |
17 | # Generate the config only if it doesn't exist
18 | if [ ! -f "$ZOO_CONF_DIR/zoo.cfg" ]; then
19 | CONFIG="$ZOO_CONF_DIR/zoo.cfg"
20 |
21 | echo "clientPort=$ZOO_PORT" >> "$CONFIG"
22 | echo "dataDir=$ZOO_DATA_DIR" >> "$CONFIG"
23 | echo "dataLogDir=$ZOO_DATA_LOG_DIR" >> "$CONFIG"
24 |
25 | echo "tickTime=$ZOO_TICK_TIME" >> "$CONFIG"
26 | echo "initLimit=$ZOO_INIT_LIMIT" >> "$CONFIG"
27 | echo "syncLimit=$ZOO_SYNC_LIMIT" >> "$CONFIG"
28 |
29 | echo "autopurge.snapRetainCount=$ZOO_AUTOPURGE_SNAPRETAINCOUNT" >> "$CONFIG"
30 | echo "autopurge.purgeInterval=$ZOO_AUTOPURGE_PURGEINTERVAL" >> "$CONFIG"
31 |
32 | for server in $ZOO_SERVERS; do
33 | echo "$server" >> "$CONFIG"
34 | done
35 | fi
36 |
37 | # Write myid only if it doesn't exist
38 | if [ ! -f "$ZOO_DATA_DIR/myid" ]; then
39 | echo "${ZOO_MY_ID:-1}" > "$ZOO_DATA_DIR/myid"
40 | fi
41 |
42 | exec "$@"
43 |
--------------------------------------------------------------------------------
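For example (hostnames assumed), starting three containers with ZOO_MY_ID set to 1, 2, and 3 and ZOO_SERVERS="server.1=zk1:2888:3888 server.2=zk2:2888:3888 server.3=zk3:2888:3888" would cause the entrypoint to write roughly this /conf/zoo.cfg on each node, given the environment defaults from the Dockerfile above:

    clientPort=2181
    dataDir=/data
    dataLogDir=/datalog
    tickTime=2000
    initLimit=5
    syncLimit=2
    autopurge.snapRetainCount=3
    autopurge.purgeInterval=1
    server.1=zk1:2888:3888
    server.2=zk2:2888:3888
    server.3=zk3:2888:3888
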
/scripts/common/cleanup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright Greg Haskins All Rights Reserved.
4 | #
5 | # SPDX-License-Identifier: Apache-2.0
6 | #
7 | set -e
8 |
9 | # clean up our environment
10 | apt-get -y autoremove
11 | apt-get clean
12 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
13 |
--------------------------------------------------------------------------------
/scripts/common/golang_crossCompileSetup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright Greg Haskins All Rights Reserved.
4 | #
5 | # SPDX-License-Identifier: Apache-2.0
6 | #
7 |
8 | helpme()
9 | {
10 |   cat <<HELP
61 | cat <<EOF >/etc/profile.d/goroot.sh
62 | export GOROOT=$GOROOT
63 | export GOPATH=$GOPATH
64 | export PATH=\$PATH:$GOROOT/bin:$GOPATH/bin
65 | EOF
66 |
67 | # ----------------------------------------------------------------
68 | # Install NodeJS
69 | # ----------------------------------------------------------------
70 | NODE_VER=8.16.1
71 | NPM_VER=6.11.3
72 |
73 | ARCH=`uname -m | sed 's|i686|x86|' | sed 's|x86_64|x64|'`
74 | NODE_PKG=node-v$NODE_VER-linux-$ARCH.tar.gz
75 | SRC_PATH=/tmp/$NODE_PKG
76 |
77 | # Remove any previously downloaded packages in case an earlier run failed
78 | cd /tmp
79 | rm -f node*.tar.gz
80 | wget --quiet https://nodejs.org/dist/v$NODE_VER/$NODE_PKG
81 | cd /usr/local && sudo tar --strip-components 1 -xzf $SRC_PATH
82 |
83 | # update npm to the pinned version
84 | npm install -g npm@$NPM_VER
85 |
86 | # Install python2.7
87 | apt-get -y install python
88 |
89 | # ----------------------------------------------------------------
90 | # Install protocol buffer support
91 | #
92 | # See https://github.com/google/protobuf
93 | # ----------------------------------------------------------------
94 | PROTOBUF_VER=3.1.0
95 | PROTOBUF_PKG=v$PROTOBUF_VER.tar.gz
96 |
97 | cd /tmp
98 | wget --quiet https://github.com/google/protobuf/archive/$PROTOBUF_PKG
99 | tar xpzf $PROTOBUF_PKG
100 | cd protobuf-$PROTOBUF_VER
101 | ./autogen.sh
102 | # NOTE: By default, the package will be installed to /usr/local. However, on many platforms, /usr/local/lib is not part of LD_LIBRARY_PATH.
103 | # You can add it, but it may be easier to just install to /usr instead.
104 | #
105 | # To do this, invoke configure as follows:
106 | #
107 | # ./configure --prefix=/usr
108 | #
109 | #./configure
110 | ./configure --prefix=/usr
111 |
112 | make
113 | make check
114 | make install
115 | export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
116 | cd ~/
117 |
118 | # Make our versioning persistent
119 | echo $BASEIMAGE_RELEASE > /etc/hyperledger-baseimage-release
120 |
--------------------------------------------------------------------------------
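A quick sanity check of the script's results from a fresh shell, assuming the Go toolchain installed earlier in the script is present (version strings follow the variables set above):

    . /etc/profile.d/goroot.sh       # picks up GOROOT/GOPATH/PATH written above
    go version
    node --version && npm --version  # expect v8.16.1 / 6.11.3
    protoc --version                 # expect libprotoc 3.1.0
    cat /etc/hyperledger-baseimage-release
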
/scripts/docker/fixup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright Greg Haskins All Rights Reserved.
4 | #
5 | # SPDX-License-Identifier: Apache-2.0
6 | #
7 | chgrp -R root /opt/gopath
8 | chmod g+rw /opt/gopath
9 |
10 | mkdir /var/hyperledger
11 | chgrp -R root /var/hyperledger
12 | chmod g+rw /var/hyperledger
13 |
--------------------------------------------------------------------------------
/scripts/docker/init.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Copyright Greg Haskins All Rights Reserved.
4 | #
5 | # SPDX-License-Identifier: Apache-2.0
6 | #
7 | apt-get update
8 | apt-get install -y wget tzdata
9 |
--------------------------------------------------------------------------------
/scripts/multiarch.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # Copyright IBM Corp. All Rights Reserved.
4 | #
5 | # SPDX-License-Identifier: Apache-2.0
6 | #
7 |
8 | usage() {
9 | echo "Usage: $0 "
10 | echo " and credentials for the repository"
11 | echo "ENV:"
12 | echo " NS=$NS"
13 | echo " VERSION=$VERSION"
14 | exit 1
15 | }
16 |
17 | missing() {
18 | echo "Error: some image(s) missing from registry"
19 | echo "ENV:"
20 | echo " NS=$NS"
21 | echo " VERSION=$VERSION"
22 | exit 1
23 | }
24 |
25 | failed() {
26 | echo "Error: multiarch manifest push failed"
27 | echo "ENV:"
28 | echo " NS=$NS"
29 | echo " VERSION=$VERSION"
30 | exit 1
31 | }
32 |
33 | USER=${1:-nobody}
34 | PASSWORD=${2:-nohow}
35 | NS=${NS:-hyperledger}
36 | VERSION=${BASE_VERSION:-0.4.8}
37 |
38 | if [ "$#" -ne 2 ]; then
39 | usage
40 | fi
41 |
42 | # verify that manifest-tool is installed and found on PATH
43 | which manifest-tool
44 | if [ "$?" -ne 0 ]; then
45 | echo "manifest-tool not installed or not found on PATH"
46 | exit 1
47 | fi
48 |
49 | IMAGES="fabric-baseos fabric-baseimage fabric-kafka fabric-zookeeper fabric-couchdb"
50 |
51 | # check that all images have been published
52 | for image in ${IMAGES}; do
53 | docker pull ${NS}/${image}:amd64-${VERSION} || missing
54 | docker pull ${NS}/${image}:s390x-${VERSION} || missing
55 | # docker pull ${NS}/${image}:ppc64le-${VERSION} || missing
56 | done
57 |
58 | # push the multiarch manifest and tag with just $VERSION and 'latest'
59 | for image in ${IMAGES}; do
60 | manifest-tool --username ${USER} --password ${PASSWORD} push from-args\
61 | --platforms linux/amd64,linux/s390x --template "${NS}/${image}:ARCH-${VERSION}"\
62 | --target "${NS}/${image}:${VERSION}"
63 | manifest-tool --username ${USER} --password ${PASSWORD} push from-args\
64 | --platforms linux/amd64,linux/s390x --template "${NS}/${image}:ARCH-${VERSION}"\
65 | --target "${NS}/${image}:latest"
66 | done
67 |
68 | # test that manifest is working as expected
69 | for image in ${IMAGES}; do
70 | docker pull ${NS}/${image}:${VERSION} || failed
71 | docker pull ${NS}/${image}:latest || failed
72 | done
73 |
74 | echo "Successfully pushed multiarch manifest"
75 | exit 0
76 |
--------------------------------------------------------------------------------
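An illustrative invocation (the credential variables are placeholders): NS and BASE_VERSION are read from the environment, and the two positional arguments are the registry credentials checked at the top of the script. docker and manifest-tool must be on PATH.

    NS=hyperledger BASE_VERSION=0.4.8 \
        ./scripts/multiarch.sh "$DOCKER_USER" "$DOCKER_PASSWORD"
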