├── .gitignore ├── LICENSE ├── MSFT_EDC_Demo.postman_collection.json ├── README.md ├── SECURITY.md ├── build.gradle.kts ├── deployment ├── did-web │ ├── connector3.json │ ├── consumer.json │ ├── index.html │ └── provider.json └── terraform │ ├── aws.tf │ ├── blobstore.tf │ ├── complex_schematic_drawing.jpg │ ├── cosmosdb.tf │ ├── eventgrid.tf │ ├── lease.js │ ├── main.tf │ ├── nextForState.js │ ├── output.tf │ ├── test-document.txt │ ├── variables.tf │ └── vault_secrets.tf ├── docker-compose.yaml ├── extensions ├── dataseeding │ ├── catalog │ │ ├── build.gradle.kts │ │ └── src │ │ │ └── main │ │ │ ├── java │ │ │ └── org │ │ │ │ └── eclipse │ │ │ │ └── dataspaceconnector │ │ │ │ └── dataseeding │ │ │ │ └── catalog │ │ │ │ └── CatalogDataseedingExtension.java │ │ │ └── resources │ │ │ ├── META-INF │ │ │ └── services │ │ │ │ └── org.eclipse.dataspaceconnector.spi.system.ServiceExtension │ │ │ ├── nodes-dockercompose.json │ │ │ ├── nodes-local.json │ │ │ └── nodes.json │ └── hub │ │ ├── build.gradle.kts │ │ └── src │ │ └── main │ │ ├── java │ │ └── org │ │ │ └── eclipse │ │ │ └── dataspaceconnector │ │ │ └── dataseeding │ │ │ └── catalog │ │ │ └── IdentityHubDataseedingExtension.java │ │ └── resources │ │ └── META-INF │ │ └── services │ │ └── org.eclipse.dataspaceconnector.spi.system.ServiceExtension ├── federated-catalog-api │ ├── build.gradle.kts │ └── src │ │ └── main │ │ ├── java │ │ └── org │ │ │ └── eclipse │ │ │ └── dataspaceconnector │ │ │ └── demo │ │ │ └── edc_demo │ │ │ └── api │ │ │ ├── FederatedCatalogApiController.java │ │ │ └── FederatedCatalogApiExtension.java │ │ └── resources │ │ └── META-INF │ │ └── services │ │ └── org.eclipse.dataspaceconnector.spi.system.ServiceExtension ├── identity-hub-verifier │ ├── build.gradle.kts │ └── src │ │ ├── main │ │ ├── java │ │ │ └── org │ │ │ │ └── eclipse │ │ │ │ └── dataspaceconnector │ │ │ │ └── iam │ │ │ │ └── did │ │ │ │ └── credentials │ │ │ │ ├── DemoCredentialsVerifierExtension.java │ │ │ │ └── 
IdentityHubCredentialsVerifier.java │ │ └── resources │ │ │ └── META-INF │ │ │ └── services │ │ │ └── org.eclipse.dataspaceconnector.spi.system.ServiceExtension │ │ └── test │ │ └── java │ │ └── org │ │ └── eclipse │ │ └── dataspaceconnector │ │ └── iam │ │ └── did │ │ └── credentials │ │ └── IdentityHubCredentialsVerifierTest.java └── transfer-azure-s3 │ ├── build.gradle.kts │ └── src │ └── main │ ├── java │ └── org │ │ └── eclipse │ │ └── dataspaceconnector │ │ └── transfer │ │ └── CloudTransferExtension.java │ └── resources │ └── META-INF │ └── services │ └── org.eclipse.dataspaceconnector.spi.system.ServiceExtension ├── gradle.properties ├── gradle └── wrapper │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── gradlew ├── gradlew.bat ├── launchers ├── connector │ ├── Dockerfile │ ├── Dockerfile.compose │ ├── build.gradle.kts │ ├── connector3.properties │ ├── consumer.properties │ └── provider.properties ├── junit │ ├── build.gradle.kts │ └── src │ │ └── test │ │ ├── java │ │ └── org │ │ │ └── eclipse │ │ │ └── dataspaceconnector │ │ │ └── test │ │ │ ├── EdcExtension.java │ │ │ ├── QueryRunner.java │ │ │ ├── TestExtensions.java │ │ │ └── TestServiceExtensionContext.java │ │ └── resources │ │ └── private.pem └── registration-service │ ├── Dockerfile │ ├── build.gradle.kts │ └── src │ └── main │ └── java │ └── com │ └── microsoft │ └── edc │ └── showcase │ └── demo │ └── RegistrationServiceRuntime.java ├── scripts └── did │ ├── generate.sh │ └── template │ └── template.json └── settings.gradle.kts /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled class file 2 | *.class 3 | 4 | # Log file 5 | *.log 6 | 7 | # BlueJ files 8 | *.ctxt 9 | 10 | # Mobile Tools for Java (J2ME) 11 | .mtj.tmp/ 12 | 13 | # Package Files # 14 | *.jar 15 | *.war 16 | *.nar 17 | *.ear 18 | *.zip 19 | *.tar.gz 20 | *.rar 21 | 22 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml 23 | 
hs_err_pid* 24 | *.tfvars 25 | **/.terraform* 26 | .idea/ 27 | .gradle/ 28 | **/build 29 | keys*/ 30 | 31 | **/out -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Eclipse Public License - v 2.0 2 | 3 | THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE 4 | PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION 5 | OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. 6 | 7 | 1. DEFINITIONS 8 | 9 | "Contribution" means: 10 | 11 | a) in the case of the initial Contributor, the initial content 12 | Distributed under this Agreement, and 13 | 14 | b) in the case of each subsequent Contributor: 15 | i) changes to the Program, and 16 | ii) additions to the Program; 17 | where such changes and/or additions to the Program originate from 18 | and are Distributed by that particular Contributor. A Contribution 19 | "originates" from a Contributor if it was added to the Program by 20 | such Contributor itself or anyone acting on such Contributor's behalf. 21 | Contributions do not include changes or additions to the Program that 22 | are not Modified Works. 23 | 24 | "Contributor" means any person or entity that Distributes the Program. 25 | 26 | "Licensed Patents" mean patent claims licensable by a Contributor which 27 | are necessarily infringed by the use or sale of its Contribution alone 28 | or when combined with the Program. 29 | 30 | "Program" means the Contributions Distributed in accordance with this 31 | Agreement. 32 | 33 | "Recipient" means anyone who receives the Program under this Agreement 34 | or any Secondary License (as applicable), including Contributors. 
35 | 36 | "Derivative Works" shall mean any work, whether in Source Code or other 37 | form, that is based on (or derived from) the Program and for which the 38 | editorial revisions, annotations, elaborations, or other modifications 39 | represent, as a whole, an original work of authorship. 40 | 41 | "Modified Works" shall mean any work in Source Code or other form that 42 | results from an addition to, deletion from, or modification of the 43 | contents of the Program, including, for purposes of clarity any new file 44 | in Source Code form that contains any contents of the Program. Modified 45 | Works shall not include works that contain only declarations, 46 | interfaces, types, classes, structures, or files of the Program solely 47 | in each case in order to link to, bind by name, or subclass the Program 48 | or Modified Works thereof. 49 | 50 | "Distribute" means the acts of a) distributing or b) making available 51 | in any manner that enables the transfer of a copy. 52 | 53 | "Source Code" means the form of a Program preferred for making 54 | modifications, including but not limited to software source code, 55 | documentation source, and configuration files. 56 | 57 | "Secondary License" means either the GNU General Public License, 58 | Version 2.0, or any later versions of that license, including any 59 | exceptions or additional permissions as identified by the initial 60 | Contributor. 61 | 62 | 2. GRANT OF RIGHTS 63 | 64 | a) Subject to the terms of this Agreement, each Contributor hereby 65 | grants Recipient a non-exclusive, worldwide, royalty-free copyright 66 | license to reproduce, prepare Derivative Works of, publicly display, 67 | publicly perform, Distribute and sublicense the Contribution of such 68 | Contributor, if any, and such Derivative Works. 
69 | 70 | b) Subject to the terms of this Agreement, each Contributor hereby 71 | grants Recipient a non-exclusive, worldwide, royalty-free patent 72 | license under Licensed Patents to make, use, sell, offer to sell, 73 | import and otherwise transfer the Contribution of such Contributor, 74 | if any, in Source Code or other form. This patent license shall 75 | apply to the combination of the Contribution and the Program if, at 76 | the time the Contribution is added by the Contributor, such addition 77 | of the Contribution causes such combination to be covered by the 78 | Licensed Patents. The patent license shall not apply to any other 79 | combinations which include the Contribution. No hardware per se is 80 | licensed hereunder. 81 | 82 | c) Recipient understands that although each Contributor grants the 83 | licenses to its Contributions set forth herein, no assurances are 84 | provided by any Contributor that the Program does not infringe the 85 | patent or other intellectual property rights of any other entity. 86 | Each Contributor disclaims any liability to Recipient for claims 87 | brought by any other entity based on infringement of intellectual 88 | property rights or otherwise. As a condition to exercising the 89 | rights and licenses granted hereunder, each Recipient hereby 90 | assumes sole responsibility to secure any other intellectual 91 | property rights needed, if any. For example, if a third party 92 | patent license is required to allow Recipient to Distribute the 93 | Program, it is Recipient's responsibility to acquire that license 94 | before distributing the Program. 95 | 96 | d) Each Contributor represents that to its knowledge it has 97 | sufficient copyright rights in its Contribution, if any, to grant 98 | the copyright license set forth in this Agreement. 
99 | 100 | e) Notwithstanding the terms of any Secondary License, no 101 | Contributor makes additional grants to any Recipient (other than 102 | those set forth in this Agreement) as a result of such Recipient's 103 | receipt of the Program under the terms of a Secondary License 104 | (if permitted under the terms of Section 3). 105 | 106 | 3. REQUIREMENTS 107 | 108 | 3.1 If a Contributor Distributes the Program in any form, then: 109 | 110 | a) the Program must also be made available as Source Code, in 111 | accordance with section 3.2, and the Contributor must accompany 112 | the Program with a statement that the Source Code for the Program 113 | is available under this Agreement, and informs Recipients how to 114 | obtain it in a reasonable manner on or through a medium customarily 115 | used for software exchange; and 116 | 117 | b) the Contributor may Distribute the Program under a license 118 | different than this Agreement, provided that such license: 119 | i) effectively disclaims on behalf of all other Contributors all 120 | warranties and conditions, express and implied, including 121 | warranties or conditions of title and non-infringement, and 122 | implied warranties or conditions of merchantability and fitness 123 | for a particular purpose; 124 | 125 | ii) effectively excludes on behalf of all other Contributors all 126 | liability for damages, including direct, indirect, special, 127 | incidental and consequential damages, such as lost profits; 128 | 129 | iii) does not attempt to limit or alter the recipients' rights 130 | in the Source Code under section 3.2; and 131 | 132 | iv) requires any subsequent distribution of the Program by any 133 | party to be under a license that satisfies the requirements 134 | of this section 3. 
135 | 136 | 3.2 When the Program is Distributed as Source Code: 137 | 138 | a) it must be made available under this Agreement, or if the 139 | Program (i) is combined with other material in a separate file or 140 | files made available under a Secondary License, and (ii) the initial 141 | Contributor attached to the Source Code the notice described in 142 | Exhibit A of this Agreement, then the Program may be made available 143 | under the terms of such Secondary Licenses, and 144 | 145 | b) a copy of this Agreement must be included with each copy of 146 | the Program. 147 | 148 | 3.3 Contributors may not remove or alter any copyright, patent, 149 | trademark, attribution notices, disclaimers of warranty, or limitations 150 | of liability ("notices") contained within the Program from any copy of 151 | the Program which they Distribute, provided that Contributors may add 152 | their own appropriate notices. 153 | 154 | 4. COMMERCIAL DISTRIBUTION 155 | 156 | Commercial distributors of software may accept certain responsibilities 157 | with respect to end users, business partners and the like. While this 158 | license is intended to facilitate the commercial use of the Program, 159 | the Contributor who includes the Program in a commercial product 160 | offering should do so in a manner which does not create potential 161 | liability for other Contributors. Therefore, if a Contributor includes 162 | the Program in a commercial product offering, such Contributor 163 | ("Commercial Contributor") hereby agrees to defend and indemnify every 164 | other Contributor ("Indemnified Contributor") against any losses, 165 | damages and costs (collectively "Losses") arising from claims, lawsuits 166 | and other legal actions brought by a third party against the Indemnified 167 | Contributor to the extent caused by the acts or omissions of such 168 | Commercial Contributor in connection with its distribution of the Program 169 | in a commercial product offering. 
The obligations in this section do not 170 | apply to any claims or Losses relating to any actual or alleged 171 | intellectual property infringement. In order to qualify, an Indemnified 172 | Contributor must: a) promptly notify the Commercial Contributor in 173 | writing of such claim, and b) allow the Commercial Contributor to control, 174 | and cooperate with the Commercial Contributor in, the defense and any 175 | related settlement negotiations. The Indemnified Contributor may 176 | participate in any such claim at its own expense. 177 | 178 | For example, a Contributor might include the Program in a commercial 179 | product offering, Product X. That Contributor is then a Commercial 180 | Contributor. If that Commercial Contributor then makes performance 181 | claims, or offers warranties related to Product X, those performance 182 | claims and warranties are such Commercial Contributor's responsibility 183 | alone. Under this section, the Commercial Contributor would have to 184 | defend claims against the other Contributors related to those performance 185 | claims and warranties, and if a court requires any other Contributor to 186 | pay any damages as a result, the Commercial Contributor must pay 187 | those damages. 188 | 189 | 5. NO WARRANTY 190 | 191 | EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT 192 | PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" 193 | BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR 194 | IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF 195 | TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR 196 | PURPOSE. 
Each Recipient is solely responsible for determining the 197 | appropriateness of using and distributing the Program and assumes all 198 | risks associated with its exercise of rights under this Agreement, 199 | including but not limited to the risks and costs of program errors, 200 | compliance with applicable laws, damage to or loss of data, programs 201 | or equipment, and unavailability or interruption of operations. 202 | 203 | 6. DISCLAIMER OF LIABILITY 204 | 205 | EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT 206 | PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS 207 | SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 208 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST 209 | PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 210 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 211 | ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE 212 | EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE 213 | POSSIBILITY OF SUCH DAMAGES. 214 | 215 | 7. GENERAL 216 | 217 | If any provision of this Agreement is invalid or unenforceable under 218 | applicable law, it shall not affect the validity or enforceability of 219 | the remainder of the terms of this Agreement, and without further 220 | action by the parties hereto, such provision shall be reformed to the 221 | minimum extent necessary to make such provision valid and enforceable. 222 | 223 | If Recipient institutes patent litigation against any entity 224 | (including a cross-claim or counterclaim in a lawsuit) alleging that the 225 | Program itself (excluding combinations of the Program with other software 226 | or hardware) infringes such Recipient's patent(s), then such Recipient's 227 | rights granted under Section 2(b) shall terminate as of the date such 228 | litigation is filed. 
229 | 230 | All Recipient's rights under this Agreement shall terminate if it 231 | fails to comply with any of the material terms or conditions of this 232 | Agreement and does not cure such failure in a reasonable period of 233 | time after becoming aware of such noncompliance. If all Recipient's 234 | rights under this Agreement terminate, Recipient agrees to cease use 235 | and distribution of the Program as soon as reasonably practicable. 236 | However, Recipient's obligations under this Agreement and any licenses 237 | granted by Recipient relating to the Program shall continue and survive. 238 | 239 | Everyone is permitted to copy and distribute copies of this Agreement, 240 | but in order to avoid inconsistency the Agreement is copyrighted and 241 | may only be modified in the following manner. The Agreement Steward 242 | reserves the right to publish new versions (including revisions) of 243 | this Agreement from time to time. No one other than the Agreement 244 | Steward has the right to modify this Agreement. The Eclipse Foundation 245 | is the initial Agreement Steward. The Eclipse Foundation may assign the 246 | responsibility to serve as the Agreement Steward to a suitable separate 247 | entity. Each new version of the Agreement will be given a distinguishing 248 | version number. The Program (including Contributions) may always be 249 | Distributed subject to the version of the Agreement under which it was 250 | received. In addition, after a new version of the Agreement is published, 251 | Contributor may elect to Distribute the Program (including its 252 | Contributions) under the new version. 253 | 254 | Except as expressly stated in Sections 2(a) and 2(b) above, Recipient 255 | receives no rights or licenses to the intellectual property of any 256 | Contributor under this Agreement, whether expressly, by implication, 257 | estoppel or otherwise. All rights in the Program not expressly granted 258 | under this Agreement are reserved. 
Nothing in this Agreement is intended 259 | to be enforceable by any entity that is not a Contributor or Recipient. 260 | No third-party beneficiary rights are created under this Agreement. 261 | 262 | Exhibit A - Form of Secondary Licenses Notice 263 | 264 | "This Source Code may also be made available under the following 265 | Secondary Licenses when the conditions for such availability set forth 266 | in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), 267 | version(s), and exceptions or additional permissions here}." 268 | 269 | Simply including a copy of this Agreement, including this Exhibit A 270 | is not sufficient to license the Source Code under Secondary Licenses. 271 | 272 | If it is not possible or desirable to put the notice in a particular 273 | file, then You may include the notice in a location (such as a LICENSE 274 | file in a relevant directory) where a recipient would be likely to 275 | look for such a notice. 276 | 277 | You may add additional accurate notices of copyright ownership. 
278 | -------------------------------------------------------------------------------- /MSFT_EDC_Demo.postman_collection.json: -------------------------------------------------------------------------------- 1 | { 2 | "info": { 3 | "_postman_id": "5b174c43-f7a0-45f0-bd62-5a4e54a4aa9d", 4 | "name": "MSFT EDC Demo", 5 | "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" 6 | }, 7 | "item": [ 8 | { 9 | "name": "Health check consumer", 10 | "request": { 11 | "method": "GET", 12 | "header": [], 13 | "url": { 14 | "raw": "{{consumer_url}}/api/health", 15 | "host": [ 16 | "{{consumer_url}}" 17 | ], 18 | "path": [ 19 | "api", 20 | "health" 21 | ] 22 | } 23 | }, 24 | "response": [] 25 | }, 26 | { 27 | "name": "Query Dataspace Catalog", 28 | "event": [ 29 | { 30 | "listen": "test", 31 | "script": { 32 | "exec": [ 33 | "let responseData=pm.response.text();", 34 | "let catalogJson = JSON.parse(responseData);", 35 | "for (var i = 0; i < catalogJson.length; i++){", 36 | "", 37 | " let offer = catalogJson[i];", 38 | " if(offer.id.startsWith('test-document-az_provider') ){", 39 | " pm.environment.set(\"offerId\", offer.id)", 40 | " }", 41 | "", 42 | "}" 43 | ], 44 | "type": "text/javascript" 45 | } 46 | } 47 | ], 48 | "request": { 49 | "method": "GET", 50 | "header": [], 51 | "url": { 52 | "raw": "{{consumer_url}}/api/catalog/cached", 53 | "host": [ 54 | "{{consumer_url}}" 55 | ], 56 | "path": [ 57 | "api", 58 | "catalog", 59 | "cached" 60 | ] 61 | } 62 | }, 63 | "response": [] 64 | }, 65 | { 66 | "name": "Negotiate Contract (IDS Multipart)", 67 | "event": [ 68 | { 69 | "listen": "test", 70 | "script": { 71 | "exec": [ 72 | "let responseData=pm.response.text();", 73 | "pm.environment.set(\"negotiationId\", responseData)" 74 | ], 75 | "type": "text/javascript" 76 | } 77 | } 78 | ], 79 | "request": { 80 | "method": "POST", 81 | "header": [], 82 | "body": { 83 | "mode": "raw", 84 | "raw": "{\n \"connectorAddress\":\"{{provider_url}}/api/ids/multipart\",\n 
\"protocol\": \"ids-multipart\",\n \"connectorId\":\"consumer\",\n \"offerId\": \"{{offerId}}\"\n}", 85 | "options": { 86 | "raw": { 87 | "language": "json" 88 | } 89 | } 90 | }, 91 | "url": { 92 | "raw": "{{consumer_url}}/api/negotiation", 93 | "host": [ 94 | "{{consumer_url}}" 95 | ], 96 | "path": [ 97 | "api", 98 | "negotiation" 99 | ] 100 | } 101 | }, 102 | "response": [] 103 | }, 104 | { 105 | "name": "Get Negotiation State", 106 | "event": [ 107 | { 108 | "listen": "test", 109 | "script": { 110 | "exec": [ 111 | "let responseData=pm.response.text();", 112 | "let json = JSON.parse(responseData);", 113 | "pm.environment.set(\"agreementId\", null)", 114 | "if(json.contractAgreement !== null)", 115 | " pm.environment.set(\"agreementId\", json.contractAgreement.id)", 116 | "" 117 | ], 118 | "type": "text/javascript" 119 | } 120 | } 121 | ], 122 | "protocolProfileBehavior": { 123 | "disableBodyPruning": true 124 | }, 125 | "request": { 126 | "method": "GET", 127 | "header": [ 128 | { 129 | "key": "X-Api-Key", 130 | "value": "x-edc-showcase-x", 131 | "type": "text" 132 | } 133 | ], 134 | "body": { 135 | "mode": "raw", 136 | "raw": "", 137 | "options": { 138 | "raw": { 139 | "language": "json" 140 | } 141 | } 142 | }, 143 | "url": { 144 | "raw": "{{consumer_url}}/api/control/negotiation/{{negotiationId}}", 145 | "host": [ 146 | "{{consumer_url}}" 147 | ], 148 | "path": [ 149 | "api", 150 | "control", 151 | "negotiation", 152 | "{{negotiationId}}" 153 | ] 154 | } 155 | }, 156 | "response": [] 157 | }, 158 | { 159 | "name": "Request Data (sync, multipart) Copy", 160 | "event": [ 161 | { 162 | "listen": "test", 163 | "script": { 164 | "exec": [ 165 | "" 166 | ], 167 | "type": "text/javascript" 168 | } 169 | } 170 | ], 171 | "request": { 172 | "method": "POST", 173 | "header": [], 174 | "body": { 175 | "mode": "raw", 176 | "raw": "{\n \"id\": \"412123341234\",\n \"edctype\": \"dataspaceconnector:datarequest\",\n 
\"connectorAddress\":\"{{provider_url}}/api/ids/multipart\",\n \"protocol\": \"ids-multipart\",\n \"connectorId\":\"consumer\",\n \"contractId\": \"{{agreementId}}\",\n \"assetId\": \"demo-train-data_provider\",\n \"dataDestination\":{\n \"type\": \"http\"\n },\n \"isSync\": true,\n \"managedResources\": false,\n \"properties\":{\n \"foo\": \"bar\",\n \"isSync\": true\n }\n}", 177 | "options": { 178 | "raw": { 179 | "language": "json" 180 | } 181 | } 182 | }, 183 | "url": { 184 | "raw": "{{consumer_url}}/api/datarequest", 185 | "host": [ 186 | "{{consumer_url}}" 187 | ], 188 | "path": [ 189 | "api", 190 | "datarequest" 191 | ] 192 | } 193 | }, 194 | "response": [] 195 | }, 196 | { 197 | "name": "Request Data (async, multipart)", 198 | "event": [ 199 | { 200 | "listen": "test", 201 | "script": { 202 | "exec": [ 203 | "let responseData=pm.response.text();", 204 | "let json = JSON.parse(responseData);", 205 | "pm.environment.set(\"processId\", json.content)" 206 | ], 207 | "type": "text/javascript" 208 | } 209 | } 210 | ], 211 | "request": { 212 | "method": "POST", 213 | "header": [], 214 | "body": { 215 | "mode": "raw", 216 | "raw": "{\n \"edctype\": \"dataspaceconnector:datarequest\",\n \"id\": null,\n \"processId\": null,\n \"connectorAddress\": \"{{provider_url}}/api/ids/multipart\",\n \"protocol\": \"ids-multipart\",\n \"connectorId\": \"consumer\",\n \"assetId\": \"test-document-az_provider\",\n \"contractId\": \"{{agreementId}}\",\n \"dataDestination\": {\n \"properties\": {\n \"container\": \"dst-container\",\n \"keyName\": \"edcshowcasegpstorage-key1\",\n \"type\": \"AzureStorage\",\n \"account\": \"edcshowcasegpstorage\",\n \"blobname\": \"received.txt\"\n },\n \"keyName\": \"edcshowcasegpstorage-key1\",\n \"type\": \"AzureStorage\"\n },\n \"managedResources\": true,\n \"destinationType\": \"AzureStorage\",\n \"transferType\": {\n \"contentType\": \"application/octet-stream\",\n \"isFinite\": true\n }\n}", 217 | "options": { 218 | "raw": { 219 | "language": 
"json" 220 | } 221 | } 222 | }, 223 | "url": { 224 | "raw": "{{consumer_url}}/api/datarequest", 225 | "host": [ 226 | "{{consumer_url}}" 227 | ], 228 | "path": [ 229 | "api", 230 | "datarequest" 231 | ] 232 | } 233 | }, 234 | "response": [] 235 | }, 236 | { 237 | "name": "Get Status of Transfer Process", 238 | "request": { 239 | "method": "GET", 240 | "header": [], 241 | "url": { 242 | "raw": "{{consumer_url}}/api/datarequest/{{processId}}/state", 243 | "host": [ 244 | "{{consumer_url}}" 245 | ], 246 | "path": [ 247 | "api", 248 | "datarequest", 249 | "{{processId}}", 250 | "state" 251 | ] 252 | } 253 | }, 254 | "response": [] 255 | }, 256 | { 257 | "name": "Deprovision Request", 258 | "request": { 259 | "method": "DELETE", 260 | "header": [], 261 | "url": { 262 | "raw": "{{consumer_url}}/api/datarequest/{{processId}}", 263 | "host": [ 264 | "{{consumer_url}}" 265 | ], 266 | "path": [ 267 | "api", 268 | "datarequest", 269 | "{{processId}}" 270 | ] 271 | } 272 | }, 273 | "response": [] 274 | } 275 | ] 276 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # The `edc-showcase` application 2 | 3 | Demo Application to show how the EDC can be used to implement distributed identities and federated catalogs. 4 | 5 | _This document describes the working concept rather than the finished application._ 6 | 7 | ## Setup 8 | 9 | - create 3 private keys and the associated DID document containing the corresponding public key in JWK format. You will 10 | find a utility script for that purpose in `scripts/did` (use the `-h` option for more details on how to use it). 
11 | - pre-define three Hub URLs (ideally they should look exactly how ACI URLs or AKS URLs are generated) 12 | - on every request, generate a JWT signed with the connector private key that you previously generated and containing: 13 | + the DID URL as claim (payload) 14 | + an expiration date (t+5min) 15 | - create a certificate and a private key in `*.pem` format as well as the corresponding `*.pfx` file: 16 | - generate the files: 17 | ```bash 18 | openssl req -newkey rsa:2048 -new -nodes -x509 -days 3650 -keyout key.pem -out cert.pem 19 | openssl pkcs12 -inkey key.pem -in cert.cert -export -out cert.pfx 20 | ``` 21 | - store the contents of `cert.pfx` in an environment variable named `TF_VAR_CERTIFICATE` (assuming `bash` syntax): 22 | ```bash 23 | export TF_VAR_CERTIFICATE=$( 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). 
If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
40 | 41 | 42 | -------------------------------------------------------------------------------- /build.gradle.kts: -------------------------------------------------------------------------------- 1 | plugins { 2 | java 3 | `java-library` 4 | } 5 | 6 | repositories { 7 | mavenCentral() 8 | mavenLocal() 9 | } 10 | 11 | subprojects { 12 | repositories { 13 | mavenCentral() 14 | mavenLocal() 15 | maven { 16 | url = uri("https://maven.iais.fraunhofer.de/artifactory/eis-ids-public/") 17 | } 18 | // maven { 19 | // url = uri("https://maven.pkg.github.com/paullatzelsperger/dataspaceconnector") 20 | // credentials { 21 | // username = System.getenv("GITHUB_ACTOR") 22 | // password = System.getenv("GITHUB_TOKEN") 23 | // } 24 | // } 25 | } 26 | tasks.register("allDeps") {} 27 | } 28 | 29 | val jetBrainsAnnotationsVersion: String by project 30 | val jacksonVersion: String by project 31 | val jupiterVersion: String by project 32 | 33 | allprojects { 34 | pluginManager.withPlugin("java-library") { 35 | group = "com.microsoft" 36 | version = "1.0-SNAPSHOT" 37 | dependencies { 38 | api("org.jetbrains:annotations:${jetBrainsAnnotationsVersion}") 39 | api("com.fasterxml.jackson.core:jackson-core:${jacksonVersion}") 40 | api("com.fasterxml.jackson.core:jackson-annotations:${jacksonVersion}") 41 | api("com.fasterxml.jackson.core:jackson-databind:${jacksonVersion}") 42 | api("com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${jacksonVersion}") 43 | 44 | testImplementation("org.junit.jupiter:junit-jupiter-api:${jupiterVersion}") 45 | testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine:${jupiterVersion}") 46 | testImplementation("org.easymock:easymock:4.2") 47 | testImplementation("org.assertj:assertj-core:3.19.0") 48 | 49 | } 50 | } 51 | 52 | tasks.withType { 53 | useJUnitPlatform() 54 | } 55 | tasks.withType { 56 | testLogging { 57 | events("passed", "skipped", "failed") 58 | showStackTraces = true 59 | exceptionFormat = 
org.gradle.api.tasks.testing.logging.TestExceptionFormat.FULL 60 | } 61 | } 62 | } 63 | 64 | dependencies { 65 | testImplementation("org.junit.jupiter:junit-jupiter-api:5.7.0") 66 | testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine:5.7.0") 67 | } 68 | 69 | val test by tasks.getting(Test::class) { 70 | useJUnitPlatform() 71 | } -------------------------------------------------------------------------------- /deployment/did-web/connector3.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "did:web:edcshowcasegpstorage.z6.web.core.windows.net:connector3", 3 | "@context": [ 4 | "https://www.w3.org/ns/did/v1", 5 | { 6 | "@base": "did:web:edcshowcasegpstorage.z6.web.core.windows.net:connector3" 7 | } 8 | ], 9 | "service": [ 10 | { 11 | "id": "#connector3-identity-hub-url", 12 | "type": "IdentityHub", 13 | "serviceEndpoint": "http://edc-showcase-connector3.westeurope.azurecontainer.io:8181/api/identity-hub/" 14 | } 15 | ], 16 | "verificationMethod": [ 17 | { 18 | "id": "#connector3-key-1", 19 | "controller": "", 20 | "type": "JsonWebKey2020", 21 | "publicKeyJwk": { 22 | "crv": "P-256", 23 | "kty": "EC", 24 | "x": "7VwFiU6Fpu3GUYZNUROn4p9azCOgTFJhScQoq8xnJbw", 25 | "y": "LhSPLMbtQ7NfaplmzDnDJBbCdm7QFcERwWQlrXClFoA" 26 | } 27 | } 28 | ], 29 | "authentication": [ 30 | "#connector3-key-1" 31 | ] 32 | } -------------------------------------------------------------------------------- /deployment/did-web/consumer.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "did:web:edcshowcasegpstorage.z6.web.core.windows.net:consumer", 3 | "@context": [ 4 | "https://www.w3.org/ns/did/v1", 5 | { 6 | "@base": "did:web:edcshowcasegpstorage.z6.web.core.windows.net:consumer" 7 | } 8 | ], 9 | "service": [ 10 | { 11 | "id": "#consumer-identity-hub-url", 12 | "type": "IdentityHub", 13 | "serviceEndpoint": "http://edc-showcase-consumer.westeurope.azurecontainer.io:8181/api/identity-hub/" 
14 | } 15 | ], 16 | "verificationMethod": [ 17 | { 18 | "id": "#consumer-key-1", 19 | "controller": "", 20 | "type": "JsonWebKey2020", 21 | "publicKeyJwk": { 22 | "kty": "EC", 23 | "crv": "P-256", 24 | "x": "4mi45pgE5iPdhluNpmtnAFztWi8vxMrDSoXqD5ah2Rk", 25 | "y": "FdxTvkrkYtmxPgdmFpxRzZSVvcVUEksSzr1cH_kT58w" 26 | } 27 | } 28 | ], 29 | "authentication": [ 30 | "#consumer-key-1" 31 | ] 32 | } -------------------------------------------------------------------------------- /deployment/did-web/index.html: -------------------------------------------------------------------------------- 1 | 2 |

Welcome to the Web DID Demo!!

3 | This is where all the Web-DIDs for the EDC Demo Installation are hosted.
4 | Please find the concrete DID Documents at: 5 | 10 | cd -------------------------------------------------------------------------------- /deployment/did-web/provider.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "did:web:edcshowcasegpstorage.z6.web.core.windows.net:provider", 3 | "@context": [ 4 | "https://www.w3.org/ns/did/v1", 5 | { 6 | "@base": "did:web:edcshowcasegpstorage.z6.web.core.windows.net:provider" 7 | } 8 | ], 9 | "service": [ 10 | { 11 | "id": "#provider-identity-hub-url", 12 | "type": "IdentityHub", 13 | "serviceEndpoint": "http://edc-showcase-provider.westeurope.azurecontainer.io:8181/api/identity-hub/" 14 | } 15 | ], 16 | "verificationMethod": [ 17 | { 18 | "id": "#provider-key-1", 19 | "controller": "", 20 | "type": "JsonWebKey2020", 21 | "publicKeyJwk": { 22 | "kty": "EC", 23 | "crv": "P-256", 24 | "x": "nesyr7cFTZAXvOJZKnF5vOb_oEaJOl5PMguCutetEao", 25 | "y": "7Q9B5SENr8vSGS5e_DrSr-AsT28ojHeYXxfKa1vRWNQ" 26 | } 27 | } 28 | ], 29 | "authentication": [ 30 | "#provider-key-1" 31 | ] 32 | } -------------------------------------------------------------------------------- /deployment/terraform/aws.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | # Configuration options 3 | region = var.aws_region 4 | } 5 | 6 | 7 | resource "aws_iam_user" "aws-user" { 8 | name = var.aws_user 9 | path = "/" 10 | force_destroy = true 11 | } 12 | 13 | resource "aws_iam_access_key" "gx_access_key" { 14 | user = aws_iam_user.aws-user.name 15 | 16 | } 17 | 18 | resource "aws_iam_user_policy_attachment" "gx-s3fullaccess" { 19 | user = aws_iam_user.aws-user.name 20 | policy_arn = "arn:aws:iam::aws:policy/AmazonS3FullAccess" 21 | } 22 | 23 | resource "aws_iam_user_policy_attachment" "gx-iamfullaccess" { 24 | user = aws_iam_user.aws-user.name 25 | policy_arn = "arn:aws:iam::aws:policy/IAMFullAccess" 26 | } 27 | 28 | 29 | resource "aws_s3_bucket" "src-bucket" { 
30 | bucket = "edc-showcase-src-bucket" 31 | } 32 | 33 | output "new_user" { 34 | sensitive = true 35 | value = { 36 | secret = aws_iam_access_key.gx_access_key.secret 37 | id = aws_iam_access_key.gx_access_key.id 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /deployment/terraform/blobstore.tf: -------------------------------------------------------------------------------- 1 | #storage account 2 | resource "azurerm_storage_account" "main-blobstore" { 3 | name = "${replace(var.environment, "-", "")}gpstorage" 4 | resource_group_name = azurerm_resource_group.core-resourcegroup.name 5 | location = azurerm_resource_group.core-resourcegroup.location 6 | account_tier = "Standard" 7 | account_replication_type = "GRS" 8 | account_kind = "StorageV2" 9 | allow_blob_public_access = true // allows public access via SAS 10 | //allows for blobs, queues, fileshares, etc. 11 | static_website { 12 | index_document = "index.html" 13 | } 14 | } 15 | 16 | # storage container 17 | resource "azurerm_storage_container" "main-blob-container" { 18 | 19 | name = "src-container" 20 | storage_account_name = azurerm_storage_account.main-blobstore.name 21 | } 22 | 23 | # put a file as blob to the storage container 24 | resource "azurerm_storage_blob" "testfile" { 25 | name = "test-document.txt" 26 | storage_account_name = azurerm_storage_account.main-blobstore.name 27 | storage_container_name = azurerm_storage_container.main-blob-container.name 28 | type = "Block" 29 | source = "test-document.txt" 30 | } 31 | 32 | resource "azurerm_storage_blob" "testfile3" { 33 | name = "complex_schematic_drawing.jpg" 34 | storage_account_name = azurerm_storage_account.main-blobstore.name 35 | storage_container_name = azurerm_storage_container.main-blob-container.name 36 | type = "Block" 37 | source = "complex_schematic_drawing.jpg" 38 | } 39 | 40 | # the index file for static web content, i.e. 
the Web DID 41 | resource "azurerm_storage_blob" "index-html" { 42 | name = "index.html" 43 | storage_account_name = azurerm_storage_account.main-blobstore.name 44 | storage_container_name = "$web" 45 | type = "Block" 46 | source = "../did-web/index.html" 47 | content_type = "text/html" 48 | } 49 | 50 | # upload the DID document for the consumer 51 | resource "azurerm_storage_blob" "consumer-webdid" { 52 | name = "consumer/did.json" 53 | storage_account_name = azurerm_storage_account.main-blobstore.name 54 | storage_container_name = "$web" 55 | type = "Block" 56 | source = "../did-web/consumer.json" 57 | content_type = "application/json" 58 | } 59 | 60 | # upload the DID document for the provider 61 | resource "azurerm_storage_blob" "provider-webdid" { 62 | name = "provider/did.json" 63 | storage_account_name = azurerm_storage_account.main-blobstore.name 64 | storage_container_name = "$web" 65 | type = "Block" 66 | source = "../did-web/provider.json" 67 | content_type = "application/json" 68 | } 69 | 70 | # upload the DID document for the 3rd connector 71 | resource "azurerm_storage_blob" "connector3-webdid" { 72 | name = "connector3/did.json" 73 | storage_account_name = azurerm_storage_account.main-blobstore.name 74 | storage_container_name = "$web" 75 | type = "Block" 76 | source = "../did-web/connector3.json" 77 | content_type = "application/json" 78 | } -------------------------------------------------------------------------------- /deployment/terraform/complex_schematic_drawing.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/edc-showcase/a1271a8c2cc04ae6fa3ae3f00c9328b1880137a8/deployment/terraform/complex_schematic_drawing.jpg -------------------------------------------------------------------------------- /deployment/terraform/cosmosdb.tf: -------------------------------------------------------------------------------- 1 | resource "azurerm_cosmosdb_account"
"showcase-cosmos-account" { 2 | name = "${var.environment}-cosmos" 3 | location = azurerm_resource_group.core-resourcegroup.location 4 | resource_group_name = azurerm_resource_group.core-resourcegroup.name 5 | offer_type = "Standard" 6 | kind = "GlobalDocumentDB" 7 | 8 | enable_automatic_failover = false 9 | # enable_free_tier = true 10 | 11 | capabilities { 12 | name = "EnableAggregationPipeline" 13 | } 14 | 15 | consistency_policy { 16 | consistency_level = "Strong" 17 | } 18 | 19 | geo_location { 20 | location = azurerm_resource_group.core-resourcegroup.location 21 | failover_priority = 0 22 | } 23 | } 24 | 25 | # create database that contains all the asset indexes 26 | resource "azurerm_cosmosdb_sql_database" "asset-index-db" { 27 | name = "asset-index" 28 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 29 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 30 | throughput = 400 31 | } 32 | 33 | # Asset Index container for Consumer 34 | resource "azurerm_cosmosdb_sql_container" "consumer-assetindex-container" { 35 | name = var.consumer-name 36 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 37 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 38 | database_name = azurerm_cosmosdb_sql_database.asset-index-db.name 39 | partition_key_path = "/partitionKey" 40 | partition_key_version = 1 41 | throughput = 400 42 | 43 | indexing_policy { 44 | indexing_mode = "Consistent" 45 | 46 | included_path { 47 | path = "/*" 48 | } 49 | 50 | included_path { 51 | path = "/included/?" 52 | } 53 | 54 | excluded_path { 55 | path = "/excluded/?" 
56 | } 57 | } 58 | 59 | } 60 | 61 | # Asset Index container for Provider 62 | resource "azurerm_cosmosdb_sql_container" "provider-assetindex-container" { 63 | name = var.provider-name 64 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 65 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 66 | database_name = azurerm_cosmosdb_sql_database.asset-index-db.name 67 | partition_key_path = "/partitionKey" 68 | partition_key_version = 1 69 | throughput = 400 70 | 71 | indexing_policy { 72 | indexing_mode = "Consistent" 73 | 74 | included_path { 75 | path = "/*" 76 | } 77 | 78 | included_path { 79 | path = "/included/?" 80 | } 81 | 82 | excluded_path { 83 | path = "/excluded/?" 84 | } 85 | } 86 | } 87 | 88 | 89 | # create database that contains all the contract-definition indexes 90 | resource "azurerm_cosmosdb_sql_database" "contractdefinition-store-db" { 91 | name = "contract-definition-store" 92 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 93 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 94 | throughput = 400 95 | } 96 | 97 | # ContractDefinition-Store container for Consumer 98 | resource "azurerm_cosmosdb_sql_container" "consumer-contractdefstore-container" { 99 | name = var.consumer-name 100 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 101 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 102 | database_name = azurerm_cosmosdb_sql_database.contractdefinition-store-db.name 103 | partition_key_path = "/partitionKey" 104 | partition_key_version = 1 105 | throughput = 400 106 | 107 | indexing_policy { 108 | indexing_mode = "Consistent" 109 | 110 | included_path { 111 | path = "/*" 112 | } 113 | 114 | included_path { 115 | path = "/included/?" 116 | } 117 | 118 | excluded_path { 119 | path = "/excluded/?" 
120 | } 121 | } 122 | 123 | } 124 | 125 | # ContractDefinition-Store container for Provider 126 | resource "azurerm_cosmosdb_sql_container" "provider-contractdefstore-container" { 127 | name = var.provider-name 128 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 129 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 130 | database_name = azurerm_cosmosdb_sql_database.contractdefinition-store-db.name 131 | partition_key_path = "/partitionKey" 132 | partition_key_version = 1 133 | throughput = 400 134 | 135 | indexing_policy { 136 | indexing_mode = "Consistent" 137 | 138 | included_path { 139 | path = "/*" 140 | } 141 | 142 | included_path { 143 | path = "/included/?" 144 | } 145 | 146 | excluded_path { 147 | path = "/excluded/?" 148 | } 149 | } 150 | } 151 | 152 | 153 | # create database that contains all the contract-definition indexes 154 | resource "azurerm_cosmosdb_sql_database" "contractnegotiation-store-db" { 155 | name = "contract-negotiation-store" 156 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 157 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 158 | throughput = 400 159 | } 160 | 161 | # ContractDefinition-Store container for Consumer 162 | resource "azurerm_cosmosdb_sql_container" "consumer-contractnegotiation-container" { 163 | name = var.consumer-name 164 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 165 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 166 | database_name = azurerm_cosmosdb_sql_database.contractnegotiation-store-db.name 167 | partition_key_path = "/partitionKey" 168 | partition_key_version = 1 169 | throughput = 400 170 | 171 | indexing_policy { 172 | indexing_mode = "Consistent" 173 | 174 | included_path { 175 | path = "/*" 176 | } 177 | 178 | included_path { 179 | path = "/included/?" 
180 | } 181 | 182 | excluded_path { 183 | path = "/excluded/?" 184 | } 185 | } 186 | 187 | } 188 | 189 | # ContractDefinition-Store container for Provider 190 | resource "azurerm_cosmosdb_sql_container" "provider-contractnegotiation-container" { 191 | name = var.provider-name 192 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 193 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 194 | database_name = azurerm_cosmosdb_sql_database.contractnegotiation-store-db.name 195 | partition_key_path = "/partitionKey" 196 | partition_key_version = 1 197 | throughput = 400 198 | 199 | indexing_policy { 200 | indexing_mode = "Consistent" 201 | 202 | included_path { 203 | path = "/*" 204 | } 205 | 206 | included_path { 207 | path = "/included/?" 208 | } 209 | 210 | excluded_path { 211 | path = "/excluded/?" 212 | } 213 | } 214 | } 215 | 216 | 217 | # Stored Procedures for Contract Negotiation Store 218 | resource "azurerm_cosmosdb_sql_stored_procedure" "nextForState-consumer" { 219 | name = "nextForState" 220 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 221 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 222 | database_name = azurerm_cosmosdb_sql_database.contractnegotiation-store-db.name 223 | container_name = azurerm_cosmosdb_sql_container.consumer-contractnegotiation-container.name 224 | body = file("nextForState.js") 225 | } 226 | 227 | resource "azurerm_cosmosdb_sql_stored_procedure" "nextForState-provider" { 228 | name = "nextForState" 229 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 230 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 231 | database_name = azurerm_cosmosdb_sql_database.contractnegotiation-store-db.name 232 | container_name = azurerm_cosmosdb_sql_container.provider-contractnegotiation-container.name 233 | 234 | body = file("nextForState.js") 235 | } 236 
| resource "azurerm_cosmosdb_sql_stored_procedure" "lease-consumer" { 237 | name = "lease" 238 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 239 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 240 | database_name = azurerm_cosmosdb_sql_database.contractnegotiation-store-db.name 241 | container_name = azurerm_cosmosdb_sql_container.consumer-contractnegotiation-container.name 242 | body = file("lease.js") 243 | } 244 | 245 | resource "azurerm_cosmosdb_sql_stored_procedure" "lease-provider" { 246 | name = "lease" 247 | resource_group_name = azurerm_cosmosdb_account.showcase-cosmos-account.resource_group_name 248 | account_name = azurerm_cosmosdb_account.showcase-cosmos-account.name 249 | database_name = azurerm_cosmosdb_sql_database.contractnegotiation-store-db.name 250 | container_name = azurerm_cosmosdb_sql_container.provider-contractnegotiation-container.name 251 | 252 | body = file("lease.js") 253 | } -------------------------------------------------------------------------------- /deployment/terraform/eventgrid.tf: -------------------------------------------------------------------------------- 1 | // topic for connector application events 2 | resource "azurerm_eventgrid_topic" "control-topic" { 3 | location = azurerm_resource_group.core-resourcegroup.location 4 | name = "${var.environment}-control-events" 5 | resource_group_name = azurerm_resource_group.core-resourcegroup.name 6 | } 7 | 8 | // keyvault secret for the access key 9 | resource "azurerm_key_vault_secret" "event-grid-key" { 10 | key_vault_id = azurerm_key_vault.main-vault.id 11 | name = azurerm_eventgrid_topic.control-topic.name 12 | value = azurerm_eventgrid_topic.control-topic.primary_access_key 13 | depends_on = [ 14 | azurerm_role_assignment.current-user-secretsofficer 15 | ] 16 | } 17 | 18 | // storage queue that will be used for control events 19 | resource "azurerm_storage_queue" "system-event-queue" { 20 | name = 
"${var.environment}-control" 21 | storage_account_name = azurerm_storage_account.main-blobstore.name 22 | } 23 | 24 | // subscription for the control events 25 | resource "azurerm_eventgrid_event_subscription" "control-events" { 26 | name = "${azurerm_eventgrid_topic.control-topic.name}-sub" 27 | scope = azurerm_eventgrid_topic.control-topic.id 28 | storage_queue_endpoint { 29 | queue_name = azurerm_storage_queue.system-event-queue.name 30 | storage_account_id = azurerm_storage_account.main-blobstore.id 31 | } 32 | } 33 | 34 | output "topic-endpoint" { 35 | value = azurerm_eventgrid_topic.control-topic.endpoint 36 | } 37 | -------------------------------------------------------------------------------- /deployment/terraform/lease.js: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020 - 2022 Microsoft Corporation 3 | * 4 | * This program and the accompanying materials are made available under the 5 | * terms of the Apache License, Version 2.0 which is available at 6 | * https://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * SPDX-License-Identifier: Apache-2.0 9 | * 10 | * Contributors: 11 | * Microsoft Corporation - initial API and implementation 12 | * 13 | */ 14 | 15 | /** 16 | * Use this stored procedure to acquire or break the lease of a document 17 | * @param objectId The database ID of the document 18 | * @param connectorId The name/identifier of the calling runtime 19 | * @param shouldLease Whether the lease should be _acquired_ or _broken_ 20 | */ 21 | 22 | function lease(objectId, connectorId, shouldLease) { 23 | var context = getContext(); 24 | var collection = context.getCollection(); 25 | var collectionLink = collection.getSelfLink(); 26 | var response = context.getResponse(); 27 | 28 | 29 | // first query 30 | var filterQuery = { 31 | 'query': 'SELECT * FROM t WHERE t.wrappedInstance.id = @objectId', 'parameters': [{ 32 | 'name': '@objectId', 'value': objectId 33 | }] 34 | }; 35 | 36 | var 
accept = collection.queryDocuments(collectionLink, filterQuery, {}, function (err, items, responseOptions) { 37 | if (err) throw new Error("Error" + err.message); 38 | 39 | 40 | if (!items || !items.length) { 41 | let err = "No documents found!"; 42 | response.setBody(err) 43 | console.log(err) 44 | return; 45 | } 46 | if (items.length > 1) { 47 | let err = "too many docs found for query: expected 1, got " + items.length; 48 | console.log(err); 49 | throw err; 50 | } 51 | 52 | let document = items[0]; 53 | 54 | if (document.lease != null && document.lease.leasedBy !== connectorId) { 55 | throw "Document is locked by another connector" 56 | } 57 | 58 | if (shouldLease) 59 | lease(document, connectorId); 60 | else //clear lease 61 | document.lease = null; 62 | 63 | 64 | response.setBody(document) 65 | }); 66 | 67 | if (!accept) throw "Unable to read document details, abort "; 68 | 69 | function lease(document, connectorId) { 70 | document.lease = { 71 | leasedBy: connectorId, 72 | leasedAt: Date.now(), 73 | leaseDuration: 60000 74 | }; 75 | 76 | var accept = collection.replaceDocument(document._self, document, function (err, itemReplaced) { 77 | if (err) throw "Unable to update Document, abort "; 78 | }) 79 | if (!accept) throw "Unable to update Document, abort"; 80 | console.log("updated lease of document " + document.id) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /deployment/terraform/main.tf: -------------------------------------------------------------------------------- 1 | # Configure the Azure provider 2 | terraform { 3 | # comment out this object if you want to use local state only 4 | backend "azurerm" { 5 | resource_group_name = "edc-infrastructure" 6 | storage_account_name = "edcstate" 7 | container_name = "terraform-state-edc-showcase" 8 | key = "terraform.state" 9 | } 10 | required_providers { 11 | azurerm = { 12 | source = "hashicorp/azurerm" 13 | version = ">= 2.85.0" 14 | } 15 | azuread = { 16 | 
source = "hashicorp/azuread" 17 | version = "2.9.0" 18 | } 19 | aws = { 20 | source = "hashicorp/aws" 21 | version = "3.65.0" 22 | } 23 | http = { 24 | source = "hashicorp/http" 25 | version = "2.1.0" 26 | } 27 | } 28 | } 29 | 30 | provider "azurerm" { 31 | features { 32 | key_vault { 33 | purge_soft_delete_on_destroy = true 34 | recover_soft_deleted_key_vaults = false 35 | } 36 | } 37 | } 38 | provider "azuread" { 39 | # Configuration options 40 | } 41 | 42 | data "azurerm_client_config" "current" {} 43 | data "azurerm_subscription" "primary" {} 44 | 45 | 46 | resource "azurerm_resource_group" "core-resourcegroup" { 47 | name = "${var.environment}-resources" 48 | location = var.location 49 | } 50 | 51 | # App registration for the primary identity 52 | resource "azuread_application" "demo-app-id" { 53 | display_name = "PrimaryIdentity-${var.environment}" 54 | sign_in_audience = "AzureADMyOrg" 55 | } 56 | 57 | resource "azuread_application_certificate" "demo-main-identity-cert" { 58 | type = "AsymmetricX509Cert" 59 | application_object_id = azuread_application.demo-app-id.id 60 | value = var.CERTIFICATE 61 | end_date_relative = "2400h" 62 | } 63 | 64 | resource "azuread_service_principal" "main-app-sp" { 65 | application_id = azuread_application.demo-app-id.application_id 66 | app_role_assignment_required = false 67 | tags = [ 68 | "terraform" 69 | ] 70 | } 71 | 72 | # Keyvault 73 | resource "azurerm_key_vault" "main-vault" { 74 | name = "${var.environment}-vault" 75 | location = azurerm_resource_group.core-resourcegroup.location 76 | resource_group_name = azurerm_resource_group.core-resourcegroup.name 77 | enabled_for_disk_encryption = false 78 | tenant_id = data.azurerm_client_config.current.tenant_id 79 | soft_delete_retention_days = 7 80 | purge_protection_enabled = false 81 | 82 | sku_name = "standard" 83 | enable_rbac_authorization = true 84 | 85 | } 86 | 87 | # Role assignment so that the primary identity may access the vault 88 | resource 
"azurerm_role_assignment" "primary-id" { 89 | scope = azurerm_key_vault.main-vault.id 90 | role_definition_name = "Key Vault Secrets Officer" 91 | principal_id = azuread_service_principal.main-app-sp.object_id 92 | } 93 | 94 | # Role assignment that the primary identity may provision/deprovision azure resources 95 | resource "azurerm_role_assignment" "primary-id-arm" { 96 | principal_id = azuread_service_principal.main-app-sp.object_id 97 | scope = data.azurerm_subscription.primary.id 98 | role_definition_name = "Contributor" 99 | } 100 | 101 | # Role assignment so that the currently logged in user may access the vault, needed to add secrets 102 | resource "azurerm_role_assignment" "current-user-secretsofficer" { 103 | scope = azurerm_key_vault.main-vault.id 104 | role_definition_name = "Key Vault Secrets Officer" 105 | principal_id = data.azurerm_client_config.current.object_id 106 | } 107 | # Role assignment so that the currently logged in user may access the vault, needed to add keys 108 | resource "azurerm_role_assignment" "current-user-cryptoofficer" { 109 | scope = azurerm_key_vault.main-vault.id 110 | role_definition_name = "Key Vault Crypto Officer" 111 | principal_id = data.azurerm_client_config.current.object_id 112 | } 113 | 114 | 115 | # registration service = ion crawler 116 | #resource "azurerm_container_group" "registration-service" { 117 | # name = "${var.environment}-${var.regsvc-name}" 118 | # location = azurerm_resource_group.core-resourcegroup.location 119 | # resource_group_name = azurerm_resource_group.core-resourcegroup.name 120 | # os_type = "Linux" 121 | # ip_address_type = "public" 122 | # dns_name_label = "${var.environment}-${var.regsvc-name}" 123 | # image_registry_credential { 124 | # password = var.docker_repo_password 125 | # server = var.docker_repo_url 126 | # username = var.docker_repo_username 127 | # } 128 | # container { 129 | # cpu = 2 130 | # image = "${var.docker_repo_url}/paullatzelsperger/edc-showcase/regsvc:latest" 131 | 
# // image = "paullatzelsperger/gx-reg-svc:latest" 132 | # memory = "2" 133 | # name = var.regsvc-name 134 | # 135 | # ports { 136 | # port = 8181 137 | # protocol = "TCP" 138 | # } 139 | # 140 | # environment_variables = { 141 | # CLIENTID = azuread_application.demo-app-id.application_id, 142 | # TENANTID = data.azurerm_client_config.current.tenant_id, 143 | # VAULTNAME = azurerm_key_vault.main-vault.name, 144 | # CONNECTOR_NAME = var.regsvc-name 145 | # TOPIC_NAME = azurerm_eventgrid_topic.control-topic.name 146 | # TOPIC_ENDPOINT = azurerm_eventgrid_topic.control-topic.endpoint 147 | # ION_URL = "http://gx-ion-node.westeurope.cloudapp.azure.com:3000/" 148 | # LOADER_BATCH_SIZE = 2 149 | # } 150 | # 151 | # volume { 152 | # mount_path = "/cert" 153 | # name = "certificates" 154 | # share_name = "certificates" 155 | # storage_account_key = var.backend_account_key 156 | # storage_account_name = var.backend_account_name 157 | # read_only = true 158 | # } 159 | # } 160 | #} 161 | 162 | # connector that acts as data provider 163 | resource "azurerm_container_group" "provider-connector" { 164 | name = "${var.environment}-${var.provider-name}" 165 | location = azurerm_resource_group.core-resourcegroup.location 166 | resource_group_name = azurerm_resource_group.core-resourcegroup.name 167 | os_type = "Linux" 168 | ip_address_type = "public" 169 | dns_name_label = "${var.environment}-${var.provider-name}" 170 | image_registry_credential { 171 | password = var.docker_repo_password 172 | server = var.docker_repo_url 173 | username = var.docker_repo_username 174 | } 175 | container { 176 | cpu = 2 177 | image = "${var.docker_repo_url}/paullatzelsperger/edc-showcase/connector:latest" 178 | memory = "2" 179 | name = var.provider-name 180 | 181 | ports { 182 | port = 8181 183 | protocol = "TCP" 184 | } 185 | 186 | environment_variables = { 187 | IDS_WEBHOOK_ADDRESS = "http://${var.environment}-${var.provider-name}.${var.location}.azurecontainer.io:8181" 188 | CLIENTID = 
azuread_application.demo-app-id.application_id, 189 | TENANTID = data.azurerm_client_config.current.tenant_id, 190 | VAULTNAME = azurerm_key_vault.main-vault.name, 191 | CONNECTOR_NAME = var.provider-name 192 | TOPIC_NAME = azurerm_eventgrid_topic.control-topic.name 193 | TOPIC_ENDPOINT = azurerm_eventgrid_topic.control-topic.endpoint 194 | DID_URL = "did:web:edcshowcasegpstorage.z6.web.core.windows.net:provider" 195 | LOADER_BATCH_SIZE = 1 196 | DOH_SERVER = "https://cloudflare-dns.com/dns-query" 197 | COSMOS_ACCOUNT = azurerm_cosmosdb_account.showcase-cosmos-account.name 198 | COSMOS_DB = azurerm_cosmosdb_sql_database.asset-index-db.name 199 | COSMOS_PARTKEY = "edcpartkeyprov" 200 | COSMOS_CONTAINER = azurerm_cosmosdb_sql_container.provider-assetindex-container.name 201 | 202 | CDS_DATABASE = azurerm_cosmosdb_sql_database.contractdefinition-store-db.name 203 | CDS_CONTAINER = azurerm_cosmosdb_sql_container.provider-contractdefstore-container.name 204 | 205 | CNS_DATABASE = azurerm_cosmosdb_sql_database.contractnegotiation-store-db.name 206 | CNS_CONTAINER = azurerm_cosmosdb_sql_container.provider-contractnegotiation-container.name 207 | } 208 | 209 | volume { 210 | mount_path = "/cert" 211 | name = "certificates" 212 | share_name = "certificates" 213 | storage_account_key = var.backend_account_key 214 | storage_account_name = var.backend_account_name 215 | read_only = true 216 | } 217 | } 218 | } 219 | 220 | # connector that acts as data consumer 221 | resource "azurerm_container_group" "consumer-connector" { 222 | name = "${var.environment}-${var.consumer-name}" 223 | location = azurerm_resource_group.core-resourcegroup.location 224 | resource_group_name = azurerm_resource_group.core-resourcegroup.name 225 | os_type = "Linux" 226 | ip_address_type = "public" 227 | dns_name_label = "${var.environment}-${var.consumer-name}" 228 | image_registry_credential { 229 | password = var.docker_repo_password 230 | server = var.docker_repo_url 231 | username = 
var.docker_repo_username 232 | } 233 | container { 234 | cpu = 2 235 | image = "${var.docker_repo_url}/paullatzelsperger/edc-showcase/connector:latest" 236 | memory = "2" 237 | name = var.consumer-name 238 | 239 | ports { 240 | port = 8181 241 | protocol = "TCP" 242 | } 243 | 244 | environment_variables = { 245 | IDS_WEBHOOK_ADDRESS = "http://${var.environment}-${var.consumer-name}.${var.location}.azurecontainer.io:8181" 246 | CLIENTID = azuread_application.demo-app-id.application_id, 247 | TENANTID = data.azurerm_client_config.current.tenant_id, 248 | VAULTNAME = azurerm_key_vault.main-vault.name, 249 | CONNECTOR_NAME = var.consumer-name 250 | TOPIC_NAME = azurerm_eventgrid_topic.control-topic.name 251 | TOPIC_ENDPOINT = azurerm_eventgrid_topic.control-topic.endpoint 252 | DID_URL = "did:web:edcshowcasegpstorage.z6.web.core.windows.net:consumer" 253 | DOH_SERVER = "https://cloudflare-dns.com/dns-query" 254 | LOADER_BATCH_SIZE = 2 255 | COSMOS_ACCOUNT = azurerm_cosmosdb_account.showcase-cosmos-account.name 256 | COSMOS_DB = azurerm_cosmosdb_sql_database.asset-index-db.name 257 | COSMOS_PARTKEY = "edcpartkeycons" 258 | COSMOS_CONTAINER = azurerm_cosmosdb_sql_container.consumer-assetindex-container.name 259 | 260 | CDS_DATABASE = azurerm_cosmosdb_sql_database.contractdefinition-store-db.name 261 | CDS_CONTAINER = azurerm_cosmosdb_sql_container.consumer-contractdefstore-container.name 262 | 263 | CNS_DATABASE = azurerm_cosmosdb_sql_database.contractnegotiation-store-db.name 264 | CNS_CONTAINER = azurerm_cosmosdb_sql_container.consumer-contractnegotiation-container.name 265 | } 266 | 267 | volume { 268 | mount_path = "/cert" 269 | name = "certificates" 270 | share_name = "certificates" 271 | storage_account_key = var.backend_account_key 272 | storage_account_name = var.backend_account_name 273 | read_only = true 274 | } 275 | } 276 | } 277 | -------------------------------------------------------------------------------- /deployment/terraform/nextForState.js: 
/*
 * Copyright (c) 2020 - 2022 Microsoft Corporation
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Contributors:
 *    Microsoft Corporation - initial API and implementation
 *
 */

/**
 * Returns the next batch of documents that are in a certain state and acquires an exclusive lock on it (a "lease").
 * This SPROC will only return items that are not yet leased, or where the lease has expired. Thus, two subsequent calls with identical parameters
 * will yield different results.
 *
 * Runs as a Cosmos DB server-side stored procedure: `getContext()` is provided by the Cosmos runtime.
 *
 * @param state the desired state
 * @param limit the batch size
 * @param connectorId The name of the calling runtime; recorded as `leasedBy` on every returned document
 */
function nextForState(state, limit, connectorId) {
    var context = getContext();
    var collection = context.getCollection();
    var collectionLink = collection.getSelfLink();
    var response = context.getResponse();

    // select up to @limit documents in the requested state whose lease is absent or expired.
    // NOTE: the original declared an unused "@leaser" parameter; it was removed because the
    // query text never references it.
    var filterQuery = {
        'query': 'SELECT * FROM t WHERE t.wrappedInstance.state = @state AND (t.lease = null OR (t.lease.leasedAt + t.lease.leaseDuration) < @now) ORDER BY t.wrappedInstance.stateTimestamp OFFSET 0 LIMIT @limit',
        'parameters': [
            {
                'name': '@state', 'value': parseInt(state, 10)
            },
            {
                'name': '@limit', 'value': parseInt(limit, 10)
            },
            {
                'name': '@now', 'value': Date.now()
            }
        ]
    };

    var accepted = collection.queryDocuments(collectionLink, filterQuery, {}, function (err, items, responseOptions) {
        if (err) throw new Error("Error" + err.message);

        if (!items || items.length === 0) {
            response.setBody('no docs found');
            console.log("No documents found!");
            // BUGFIX: must return here. Previously execution fell through, so the
            // 'no docs found' body was overwritten by an empty array below - and if
            // `items` was undefined, reading items.length would have thrown.
            return;
        }

        console.log("found " + items.length + " documents!");

        // acquire the lease on every returned document before handing them out
        for (var i = 0; i < items.length; i++) {
            lease(items[i], connectorId);
        }
        response.setBody(items);
    });

    if (!accepted) throw "Unable to read document details, abort ";

    // writes a 60-second lease for `leaser` onto the document and persists it
    function lease(document, leaser) {
        document.lease = {
            leasedBy: leaser,
            leasedAt: Date.now(),
            leaseDuration: 60000
        };

        var ok = collection.replaceDocument(document._self, document, function (err, itemReplaced) {
            if (err) throw "Unable to update Document, abort ";
        });
        if (!ok) throw "Unable to update Document, abort";
        console.log("updated lease of document " + document.id);
    }
}
azurerm_storage_account.main-blobstore.primary_web_endpoint 30 | } 31 | 32 | //output "URLs" { 33 | // value = { 34 | // provider = "https://${module.provider-cluster-bmw.public-ip.fqdn}" 35 | // consumer-fr = azurerm_container_group.consumer-fr.fqdn 36 | // } 37 | //} 38 | -------------------------------------------------------------------------------- /deployment/terraform/test-document.txt: -------------------------------------------------------------------------------- 1 | this is a test file! 2 | -------------------------------------------------------------------------------- /deployment/terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "location" { 2 | description = "geographic location of the Azure resources" 3 | default = "westeurope" 4 | type = string 5 | } 6 | 7 | variable "aws_region" { 8 | description = "geographic location of the AWS resources" 9 | default = "us-east-1" 10 | type = string 11 | } 12 | 13 | variable "aws_user" { 14 | description = "name of the AWS user being created" 15 | default = "edc-showcase-aws-user" 16 | type = string 17 | } 18 | 19 | variable "environment" { 20 | description = "identifying string that is used as prefix in all azure resources" 21 | } 22 | 23 | 24 | variable "backend_account_name" { 25 | type = string 26 | description = "A storage account where the Terraform state and certificates etc. are stored" 27 | default = "edcstate" 28 | } 29 | 30 | variable "backend_account_key" { 31 | type = string 32 | description = "Access key of the storage account that holds the terraform state and the certificate file share." 
33 | } 34 | 35 | variable "CERTIFICATE" { 36 | type = string 37 | description = "PEM-encoded content of the private key file, that is used to secure the primary azure app SP" 38 | } 39 | 40 | variable "docker_repo_password" { 41 | type = string 42 | } 43 | 44 | variable "docker_repo_username" { 45 | type = string 46 | } 47 | 48 | variable "docker_repo_url" { 49 | type = string 50 | default = "ghcr.io" 51 | } 52 | # unique name for the consumer connector 53 | variable "consumer-name" { 54 | type = string 55 | default = "consumer" 56 | } 57 | 58 | # unique name for the provider connector 59 | variable "provider-name" { 60 | type = string 61 | default = "provider" 62 | } 63 | 64 | # unique name for the registration service 65 | variable "regsvc-name" { 66 | type = string 67 | default = "regsvc" 68 | } 69 | 70 | # partition key for the asset index based on cosmosdb 71 | variable "asset-index-partkey" { 72 | default = "assetIndexPartition" 73 | } -------------------------------------------------------------------------------- /deployment/terraform/vault_secrets.tf: -------------------------------------------------------------------------------- 1 | # vault secrets 2 | resource "azurerm_key_vault_secret" "aws-keyid" { 3 | name = "edc-aws-access-key" 4 | value = aws_iam_access_key.gx_access_key.id 5 | key_vault_id = azurerm_key_vault.main-vault.id 6 | depends_on = [ 7 | azurerm_role_assignment.current-user-secretsofficer 8 | ] 9 | } 10 | 11 | resource "azurerm_key_vault_secret" "aws-secret" { 12 | name = "edc-aws-secret-access-key" 13 | value = aws_iam_access_key.gx_access_key.secret 14 | key_vault_id = azurerm_key_vault.main-vault.id 15 | depends_on = [ 16 | azurerm_role_assignment.current-user-secretsofficer 17 | ] 18 | } 19 | 20 | resource "azurerm_key_vault_secret" "aws-credentials" { 21 | key_vault_id = azurerm_key_vault.main-vault.id 22 | name = "aws-credentials" 23 | value = jsonencode({ 24 | "accessKeyId" = aws_iam_access_key.gx_access_key.id, 25 | 
"secretAccessKey" = aws_iam_access_key.gx_access_key.secret 26 | }) 27 | depends_on = [ 28 | azurerm_role_assignment.current-user-secretsofficer 29 | ] 30 | } 31 | 32 | resource "azurerm_key_vault_secret" "blobstorekey" { 33 | name = "${azurerm_storage_account.main-blobstore.name}-key1" 34 | value = azurerm_storage_account.main-blobstore.primary_access_key 35 | key_vault_id = azurerm_key_vault.main-vault.id 36 | depends_on = [ 37 | azurerm_role_assignment.current-user-secretsofficer 38 | ] 39 | } 40 | 41 | resource "azurerm_key_vault_secret" "consumer_private_key" { 42 | name = var.consumer-name 43 | value = file("../../keys2/consumer.pem") 44 | key_vault_id = azurerm_key_vault.main-vault.id 45 | depends_on = [ 46 | azurerm_role_assignment.current-user-secretsofficer 47 | ] 48 | } 49 | 50 | resource "azurerm_key_vault_secret" "provider_private_key" { 51 | name = var.provider-name 52 | value = file("../../keys2/provider.pem") # todo: replace with variable 53 | key_vault_id = azurerm_key_vault.main-vault.id 54 | depends_on = [ 55 | azurerm_role_assignment.current-user-secretsofficer 56 | ] 57 | } 58 | 59 | resource "azurerm_key_vault_secret" "verifier_private_key" { 60 | name = "verifier" 61 | value = file("../../keys2/verifier.pem") # todo: replace with variable 62 | key_vault_id = azurerm_key_vault.main-vault.id 63 | depends_on = [ 64 | azurerm_role_assignment.current-user-secretsofficer 65 | ] 66 | } 67 | 68 | resource "azurerm_key_vault_secret" "connector3_private_key" { 69 | name = "connector3" 70 | value = file("../../keys2/connector3.pem") # todo: replace with variable 71 | key_vault_id = azurerm_key_vault.main-vault.id 72 | depends_on = [ 73 | azurerm_role_assignment.current-user-secretsofficer 74 | ] 75 | } 76 | 77 | resource "azurerm_key_vault_secret" "cosmos_account_key" { 78 | key_vault_id = azurerm_key_vault.main-vault.id 79 | name = azurerm_cosmosdb_account.showcase-cosmos-account.name 80 | value = 
azurerm_cosmosdb_account.showcase-cosmos-account.primary_key 81 | } -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | services: 3 | consumer: 4 | container_name: consumer 5 | build: 6 | context: launchers/connector 7 | dockerfile: Dockerfile.compose 8 | ports: 9 | - "8181:8181" 10 | - "8182:8182" 11 | - "8282:8282" 12 | environment: 13 | - IDS_PORT=8282 14 | - DATA_PORT=8182 15 | - IDS_ID=urn:connector:consumer 16 | - IDS_WEBHOOK_ADDRESS=http://consumer:8282 17 | - CLIENTID=85e1ff6d-9e61-4637-8e72-d78184467d74 18 | - TENANTID=1d9b26d0-b6ea-4495-97d3-e85ccf73a154 19 | - VAULTNAME=edc-showcase-vault 20 | - CONNECTOR_NAME=consumer 21 | - DID_URL=did:web:edcshowcasegpstorage.z6.web.core.windows.net:consumer 22 | - LOADER_BATCH_SIZE=1 23 | - DOH_SERVER=https://cloudflare-dns.com/dns-query 24 | - COSMOS_ACCOUNT=edc-showcase-cosmos 25 | - COSMOS_DB=asset-index 26 | - COSMOS_CONTAINER=consumer 27 | - COSMOS_PARTKEY=edcpartkeycons 28 | - CDS_DATABASE=contract-definition-store 29 | - CDS_CONTAINER=consumer 30 | - CNS_DATABASE=contract-negotiation-store 31 | - CNS_CONTAINER=consumer 32 | volumes: 33 | - type: bind 34 | source: ./deployment/ 35 | target: /cert 36 | deploy: 37 | resources: 38 | reservations: 39 | cpus: "6" 40 | memory: "8g" 41 | provider: 42 | container_name: provider 43 | build: 44 | context: launchers/connector 45 | dockerfile: Dockerfile.compose 46 | ports: 47 | - "9191:8181" 48 | - "9192:9192" 49 | - "9292:9292" 50 | environment: 51 | - IDS_PORT=9292 52 | - DATA_PORT=9192 53 | - IDS_ID=urn:connector:provider 54 | - IDS_WEBHOOK_ADDRESS=http://provider:9292 55 | - CLIENTID=85e1ff6d-9e61-4637-8e72-d78184467d74 56 | - TENANTID=1d9b26d0-b6ea-4495-97d3-e85ccf73a154 57 | - VAULTNAME=edc-showcase-vault 58 | - CONNECTOR_NAME=provider 59 | - DID_URL=did:web:edcshowcasegpstorage.z6.web.core.windows.net:provider 60 
| - LOADER_BATCH_SIZE=1 61 | - DOH_SERVER=https://cloudflare-dns.com/dns-query 62 | - COSMOS_ACCOUNT=edc-showcase-cosmos 63 | - COSMOS_DB=asset-index 64 | - COSMOS_CONTAINER=provider 65 | - COSMOS_PARTKEY=edcpartkeyprov 66 | - CDS_DATABASE=contract-definition-store 67 | - CDS_CONTAINER=provider 68 | - CNS_DATABASE=contract-negotiation-store 69 | - CNS_CONTAINER=provider 70 | volumes: 71 | - type: bind 72 | source: ./deployment/ 73 | target: /cert 74 | deploy: 75 | resources: 76 | reservations: 77 | cpus: "6" 78 | memory: "8g" -------------------------------------------------------------------------------- /extensions/dataseeding/catalog/build.gradle.kts: -------------------------------------------------------------------------------- 1 | plugins { 2 | `java-library` 3 | } 4 | 5 | val edcversion: String by project 6 | val group = "org.eclipse.dataspaceconnector" 7 | 8 | dependencies { 9 | 10 | api("${group}:spi:${edcversion}") 11 | api("${group}:catalog-cache:${edcversion}") 12 | implementation("${group}:spi:${edcversion}") 13 | implementation("${group}:common-util:${edcversion}") 14 | implementation("${group}:dataloading:${edcversion}") 15 | 16 | } 17 | -------------------------------------------------------------------------------- /extensions/dataseeding/catalog/src/main/java/org/eclipse/dataspaceconnector/dataseeding/catalog/CatalogDataseedingExtension.java: -------------------------------------------------------------------------------- 1 | package org.eclipse.dataspaceconnector.dataseeding.catalog; 2 | 3 | import com.fasterxml.jackson.core.type.TypeReference; 4 | import com.fasterxml.jackson.databind.DeserializationFeature; 5 | import com.fasterxml.jackson.databind.ObjectMapper; 6 | import org.eclipse.dataspaceconnector.catalog.spi.FederatedCacheNode; 7 | import org.eclipse.dataspaceconnector.catalog.spi.FederatedCacheNodeDirectory; 8 | import org.eclipse.dataspaceconnector.dataloading.AssetEntry; 9 | import 
org.eclipse.dataspaceconnector.dataloading.AssetLoader; 10 | import org.eclipse.dataspaceconnector.policy.model.Action; 11 | import org.eclipse.dataspaceconnector.policy.model.AtomicConstraint; 12 | import org.eclipse.dataspaceconnector.policy.model.LiteralExpression; 13 | import org.eclipse.dataspaceconnector.policy.model.Operator; 14 | import org.eclipse.dataspaceconnector.policy.model.Permission; 15 | import org.eclipse.dataspaceconnector.policy.model.Policy; 16 | import org.eclipse.dataspaceconnector.spi.EdcSetting; 17 | import org.eclipse.dataspaceconnector.spi.asset.AssetSelectorExpression; 18 | import org.eclipse.dataspaceconnector.spi.contract.offer.store.ContractDefinitionStore; 19 | import org.eclipse.dataspaceconnector.spi.monitor.Monitor; 20 | import org.eclipse.dataspaceconnector.spi.policy.store.PolicyStore; 21 | import org.eclipse.dataspaceconnector.spi.system.Inject; 22 | import org.eclipse.dataspaceconnector.spi.system.ServiceExtension; 23 | import org.eclipse.dataspaceconnector.spi.system.ServiceExtensionContext; 24 | import org.eclipse.dataspaceconnector.spi.types.domain.DataAddress; 25 | import org.eclipse.dataspaceconnector.spi.types.domain.asset.Asset; 26 | import org.eclipse.dataspaceconnector.spi.types.domain.contract.offer.ContractDefinition; 27 | 28 | import java.io.InputStream; 29 | import java.security.SecureRandom; 30 | import java.util.List; 31 | import java.util.stream.Collectors; 32 | import java.util.stream.IntStream; 33 | 34 | public class CatalogDataseedingExtension implements ServiceExtension { 35 | @EdcSetting 36 | private static final String NODES_FILE_SETTING = "edc.showcase.fcc.nodes.file"; 37 | @Inject 38 | private AssetLoader assetIndexLoader; 39 | @Inject 40 | private ContractDefinitionStore contractDefinitionStore; 41 | @Inject 42 | private FederatedCacheNodeDirectory nodeDirectory; 43 | @Inject 44 | private PolicyStore policyStore; 45 | 46 | @Override 47 | public void initialize(ServiceExtensionContext context) { 48 | 
Monitor monitor = context.getMonitor(); 49 | 50 | var nodesFile = context.getSetting(NODES_FILE_SETTING, "nodes.json"); 51 | monitor.info("Using FCC Node directory file " + nodesFile); 52 | 53 | //generate+save assets 54 | var assets = createAssets(context.getConnectorId()); 55 | assets.forEach(assetIndexLoader::accept); 56 | 57 | // generate+save policies 58 | var accessPolicies = IntStream.range(0, 10).mapToObj(i -> createAccessPolicy("edc-demo-access-policy-" + i)).peek(policyStore::save).collect(Collectors.toList()); 59 | var contractPolicies = IntStream.range(0, 10).mapToObj(i -> createContractPolicy("edc-demo-contract-policy-" + i)).peek(policyStore::save).collect(Collectors.toList()); 60 | 61 | //publish asset 62 | assets.stream().map(AssetEntry::getAsset) 63 | .forEach(a -> publishAsset(a, random(accessPolicies), random(contractPolicies))); 64 | 65 | // populate node directory 66 | var nodes = readNodesFromJson(nodesFile); 67 | nodes.forEach(nodeDirectory::insert); 68 | 69 | monitor.info("Catalog Data seeding done"); 70 | } 71 | 72 | public void publishAsset(Asset asset, Policy accessPolicy, Policy contractPolicy) { 73 | var cdef = ContractDefinition.Builder.newInstance() 74 | .id(asset.getId()) 75 | .accessPolicyId(accessPolicy.getUid()) 76 | .contractPolicyId(contractPolicy.getUid()) 77 | .selectorExpression(AssetSelectorExpression.Builder.newInstance().whenEquals(Asset.PROPERTY_ID, asset.getId()).build()) 78 | .build(); 79 | contractDefinitionStore.save(cdef); 80 | } 81 | 82 | private T random(List items) { 83 | var random = new SecureRandom(); 84 | var rnd = random.nextInt(items.size()); 85 | return items.get(rnd); 86 | } 87 | 88 | private Policy createAccessPolicy(String id) { 89 | return Policy.Builder.newInstance() 90 | .id(id) 91 | .permission(Permission.Builder.newInstance() 92 | .target("") 93 | .action(Action.Builder.newInstance() 94 | .type("USE") 95 | .build()) 96 | .constraint(AtomicConstraint.Builder.newInstance() 97 | .leftExpression(new 
LiteralExpression("foo")) 98 | .operator(Operator.EQ) 99 | .rightExpression(new LiteralExpression("bar")) 100 | .build()) 101 | .build()) 102 | .build(); 103 | } 104 | 105 | private Policy createContractPolicy(String id) { 106 | return Policy.Builder.newInstance() 107 | .id(id) 108 | .permission(Permission.Builder.newInstance() 109 | .target("") 110 | .action(Action.Builder.newInstance() 111 | .type("USE") 112 | .build()) 113 | .build()) 114 | .build(); 115 | } 116 | 117 | 118 | private List readNodesFromJson(String resourceName) { 119 | try (InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream(resourceName)) { 120 | ObjectMapper mapper = new ObjectMapper(); 121 | mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); 122 | 123 | var tr = new TypeReference>() { 124 | }; 125 | return mapper.readValue(in, tr); 126 | } catch (Exception e) { 127 | throw new RuntimeException(e); 128 | } 129 | } 130 | 131 | private List createAssets(String connectorId) { 132 | 133 | var asset1 = Asset.Builder.newInstance() 134 | .property("type", "file") 135 | .name("test-document") 136 | .id("test-document_" + connectorId) 137 | .contentType("text/plain") 138 | .version("1.0") 139 | .build(); 140 | 141 | var dataAddress1 = DataAddress.Builder.newInstance() 142 | .type("file") 143 | .property("path", "/home/paul/Documents/") 144 | .property("filename", "test-document.txt") 145 | .build(); 146 | 147 | 148 | var asset2 = Asset.Builder.newInstance() 149 | .property("type", "AzureStorage") 150 | .name("test-document-az") 151 | .id("test-document-az_" + connectorId) 152 | .contentType("text/plain") 153 | .version("1.0") 154 | .build(); 155 | 156 | var dataAddress2 = DataAddress.Builder.newInstance() 157 | .type("AzureStorage") 158 | .property("account", "edcshowcasegpstorage") 159 | .property("container", "src-container") 160 | .property("blobname", "test-document.txt") 161 | .build(); 162 | 163 | var asset3 = Asset.Builder.newInstance() 164 
| .property("type", "AzureStorage") 165 | .name("schematic_drawing-az") 166 | .id("schematic-drawing-az_" + connectorId) 167 | .contentType("image/png") 168 | .version("1.0") 169 | .build(); 170 | 171 | var dataAddress3 = DataAddress.Builder.newInstance() 172 | .type("AzureStorage") 173 | .property("account", "edcshowcasegpstorage") 174 | .property("container", "src-container") 175 | .property("blobname", "complex_schematic_drawing.jpg") 176 | .build(); 177 | 178 | var asset4 = Asset.Builder.newInstance() 179 | .property("type", "http") 180 | .name("demo-todos") 181 | .id("demo-todos_" + connectorId) 182 | .version("1.0") 183 | .build(); 184 | 185 | var dataAddress4 = DataAddress.Builder.newInstance() 186 | .type("http") 187 | .property("targetUrl", "https://jsonplaceholder.typicode.com/todos/1") 188 | .build(); 189 | 190 | var asset5 = Asset.Builder.newInstance() 191 | .property("type", "http") 192 | .name("demo-train-data") 193 | .id("demo-train-data_" + connectorId) 194 | .version("1.0") 195 | .build(); 196 | 197 | var dataAddress5 = DataAddress.Builder.newInstance() 198 | .type("http") 199 | .property("targetUrl", "https://jsonplaceholder.typicode.com/todos/2") 200 | .build(); 201 | 202 | 203 | return List.of(new AssetEntry(asset1, dataAddress1), new AssetEntry(asset2, dataAddress2), new AssetEntry(asset3, dataAddress3), new AssetEntry(asset4, dataAddress4), new AssetEntry(asset5, dataAddress5)); 204 | } 205 | } 206 | 207 | 208 | -------------------------------------------------------------------------------- /extensions/dataseeding/catalog/src/main/resources/META-INF/services/org.eclipse.dataspaceconnector.spi.system.ServiceExtension: -------------------------------------------------------------------------------- 1 | org.eclipse.dataspaceconnector.dataseeding.catalog.CatalogDataseedingExtension -------------------------------------------------------------------------------- /extensions/dataseeding/catalog/src/main/resources/nodes-dockercompose.json: 
-------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "consumer", 4 | "url": "http://consumer:8282", 5 | "supportedProtocols": [ 6 | "ids-multipart" 7 | ] 8 | }, 9 | { 10 | "name": "provider", 11 | "url": "http://provider:9292", 12 | "supportedProtocols": [ 13 | "ids-multipart" 14 | ] 15 | } 16 | ] 17 | -------------------------------------------------------------------------------- /extensions/dataseeding/catalog/src/main/resources/nodes-local.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "consumer", 4 | "url": "http://localhost:8282", 5 | "supportedProtocols": [ 6 | "ids-multipart" 7 | ] 8 | }, 9 | { 10 | "name": "provider", 11 | "url": "http://localhost:9292", 12 | "supportedProtocols": [ 13 | "ids-multipart" 14 | ] 15 | } 16 | ] 17 | -------------------------------------------------------------------------------- /extensions/dataseeding/catalog/src/main/resources/nodes.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "name": "consumer", 4 | "url": "http://edc-showcase-consumer.westeurope.azurecontainer.io:8181", 5 | "supportedProtocols": [ 6 | "ids-multipart" 7 | ] 8 | }, 9 | { 10 | "name": "provider", 11 | "url": "http://edc-showcase-provider.westeurope.azurecontainer.io:8181", 12 | "supportedProtocols": [ 13 | "ids-multipart" 14 | ] 15 | }, 16 | { 17 | "name": "connector3", 18 | "url": "http://edc-showcase-connector3.westeurope.azurecontainer.io:8181", 19 | "supportedProtocols": [ 20 | "ids-multipart" 21 | ] 22 | } 23 | ] 24 | -------------------------------------------------------------------------------- /extensions/dataseeding/hub/build.gradle.kts: -------------------------------------------------------------------------------- 1 | plugins { 2 | `java-library` 3 | } 4 | 5 | val edcversion: String by project 6 | val group = "org.eclipse.dataspaceconnector" 7 | 8 | dependencies { 9 | 10 | 
package org.eclipse.dataspaceconnector.dataseeding.catalog;

import org.eclipse.dataspaceconnector.iam.did.spi.hub.IdentityHubStore;
import org.eclipse.dataspaceconnector.iam.did.spi.hub.message.Commit;
import org.eclipse.dataspaceconnector.spi.system.Inject;
import org.eclipse.dataspaceconnector.spi.system.ServiceExtension;
import org.eclipse.dataspaceconnector.spi.system.ServiceExtensionContext;

import java.time.Instant;
import java.util.Map;
import java.util.UUID;

/**
 * Seeds the Identity Hub on startup with a single demo "RegistrationCredentials"
 * commit issued by this connector.
 */
public class IdentityHubDataseedingExtension implements ServiceExtension {

    @Inject
    private IdentityHubStore hubStore;

    @Override
    public void initialize(ServiceExtensionContext context) {
        var monitor = context.getMonitor();
        // hubStore is populated via @Inject; the previous explicit
        // context.getService(IdentityHubStore.class) lookup was redundant and removed.

        var objectId = UUID.randomUUID().toString();
        // demo payload: region claim plus creation timestamp in epoch millis
        var payload = Map.of("region", "eu", "created", Instant.now().toEpochMilli());
        var commit = Commit.Builder.newInstance()
                .type("RegistrationCredentials")
                .context("ION Demo")
                .iss(context.getConnectorId())
                .sub("test")
                .objectId(objectId)
                .payload(payload)
                .build();
        hubStore.write(commit);

        // message previously said "Catalog Data seeding done" - a copy/paste leftover
        // from CatalogDataseedingExtension; this extension seeds the Identity Hub.
        monitor.info("IdentityHub data seeding done");
    }

}
-------------------------------------------------------------------------------- 1 | org.eclipse.dataspaceconnector.dataseeding.catalog.IdentityHubDataseedingExtension -------------------------------------------------------------------------------- /extensions/federated-catalog-api/build.gradle.kts: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020, 2021 Microsoft Corporation 3 | * 4 | * This program and the accompanying materials are made available under the 5 | * terms of the Apache License, Version 2.0 which is available at 6 | * https://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * SPDX-License-Identifier: Apache-2.0 9 | * 10 | * Contributors: 11 | * Microsoft Corporation - initial API and implementation 12 | * 13 | */ 14 | 15 | plugins { 16 | `java-library` 17 | } 18 | 19 | val edcversion: String by project 20 | val rsApi: String by project 21 | val group = "org.eclipse.dataspaceconnector" 22 | dependencies { 23 | implementation("${group}:spi:${edcversion}") 24 | implementation("${group}:ids-spi:${edcversion}") 25 | implementation("${group}:common-util:${edcversion}") 26 | implementation("${group}:federated-catalog-spi:${edcversion}") 27 | implementation("${group}:blob-core:${edcversion}") 28 | implementation("${group}:dataloading:${edcversion}") 29 | 30 | implementation("jakarta.ws.rs:jakarta.ws.rs-api:${rsApi}") 31 | 32 | } 33 | -------------------------------------------------------------------------------- /extensions/federated-catalog-api/src/main/java/org/eclipse/dataspaceconnector/demo/edc_demo/api/FederatedCatalogApiController.java: -------------------------------------------------------------------------------- 1 | package org.eclipse.dataspaceconnector.demo.edc_demo.api; 2 | 3 | import jakarta.ws.rs.Consumes; 4 | import jakarta.ws.rs.GET; 5 | import jakarta.ws.rs.Path; 6 | import jakarta.ws.rs.Produces; 7 | import jakarta.ws.rs.core.MediaType; 8 | import jakarta.ws.rs.core.Response; 9 | 
import org.eclipse.dataspaceconnector.catalog.spi.QueryEngine; 10 | import org.eclipse.dataspaceconnector.catalog.spi.QueryResponse; 11 | import org.eclipse.dataspaceconnector.catalog.spi.model.FederatedCatalogCacheQuery; 12 | import org.eclipse.dataspaceconnector.spi.monitor.Monitor; 13 | 14 | import java.util.Collections; 15 | 16 | @Consumes({ MediaType.APPLICATION_JSON }) 17 | @Produces({ MediaType.APPLICATION_JSON }) 18 | @Path("/catalog") 19 | public class FederatedCatalogApiController { 20 | private final Monitor monitor; 21 | private final QueryEngine catalogQueryEngine; 22 | 23 | public FederatedCatalogApiController(Monitor monitor, QueryEngine catalogQueryEngine) { 24 | this.monitor = monitor; 25 | this.catalogQueryEngine = catalogQueryEngine; 26 | } 27 | 28 | @GET 29 | @Path("health") 30 | public Response getHealth() { 31 | monitor.info("GET /edc-demo/health - getHealth()"); 32 | var result = Collections.singletonMap("status", "up and running"); 33 | return Response.ok(result).build(); 34 | } 35 | 36 | @GET 37 | @Path("contract-offers") 38 | public Response getContractOffers() { 39 | monitor.info("GET /edc-demo/contract-offers - getContractOffers()"); 40 | 41 | FederatedCatalogCacheQuery query = FederatedCatalogCacheQuery 42 | .Builder 43 | .newInstance() 44 | .build(); 45 | 46 | var queryResponse = catalogQueryEngine.getCatalog(query); 47 | if (queryResponse.getStatus() == QueryResponse.Status.NO_ADAPTER_FOUND) { 48 | return Response.status(Response.Status.NOT_IMPLEMENTED).build(); 49 | } 50 | 51 | if (!queryResponse.getErrors().isEmpty()) { 52 | return Response.status(400, String.join(", ", queryResponse.getErrors())).build(); 53 | } 54 | 55 | var result = queryResponse.getOffers(); 56 | 57 | return Response.ok(result).build(); 58 | } 59 | } 60 | 61 | -------------------------------------------------------------------------------- 
/extensions/federated-catalog-api/src/main/java/org/eclipse/dataspaceconnector/demo/edc_demo/api/FederatedCatalogApiExtension.java:
--------------------------------------------------------------------------------
package org.eclipse.dataspaceconnector.demo.edc_demo.api;

import org.eclipse.dataspaceconnector.catalog.spi.QueryEngine;
import org.eclipse.dataspaconnector.spi.WebService;
import org.eclipse.dataspaceconnector.spi.system.Inject;
import org.eclipse.dataspaceconnector.spi.system.ServiceExtension;
import org.eclipse.dataspaceconnector.spi.system.ServiceExtensionContext;


/**
 * Service extension that creates a {@link FederatedCatalogApiController} and
 * registers it with the connector's web service under the "data" context.
 * Wired up via the META-INF/services ServiceExtension registration below.
 */
public class FederatedCatalogApiExtension implements ServiceExtension {

    // injected by the EDC runtime before initialize() is called
    @Inject
    private WebService webService;
    @Inject
    private QueryEngine catalogQueryEngine;


    @Override
    public void initialize(ServiceExtensionContext context) {
        var monitor = context.getMonitor();

        var controller = new FederatedCatalogApiController(monitor, catalogQueryEngine);
        // "data" is the web context name; presumably served on the web.http.data.*
        // port/path configured in the launcher Dockerfiles — TODO confirm
        webService.registerResource("data", controller);

    }

    @Override
    public String name() {
        return "Federated Catalog API Controller (EDC Showcase)";
    }
}
--------------------------------------------------------------------------------
/extensions/federated-catalog-api/src/main/resources/META-INF/services/org.eclipse.dataspaceconnector.spi.system.ServiceExtension:
--------------------------------------------------------------------------------
org.eclipse.dataspaceconnector.demo.edc_demo.api.FederatedCatalogApiExtension
--------------------------------------------------------------------------------
/extensions/identity-hub-verifier/build.gradle.kts:
--------------------------------------------------------------------------------
plugins {
    `java-library`
}

val edcversion: String by project
val group = "org.eclipse.dataspaceconnector"
dependencies {
    api("${group}:identity-did-spi:${edcversion}")
    api("${group}:identity-did-core:${edcversion}")
    api("${group}:identity-did-crypto:${edcversion}")
}
--------------------------------------------------------------------------------
/extensions/identity-hub-verifier/src/main/java/org/eclipse/dataspaceconnector/iam/did/credentials/DemoCredentialsVerifierExtension.java:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2021 Microsoft Corporation
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Contributors:
 *    Microsoft Corporation - initial API and implementation
 *
 */
package org.eclipse.dataspaceconnector.iam.did.credentials;

import org.eclipse.dataspaceconnector.iam.did.spi.credentials.CredentialsVerifier;
import org.eclipse.dataspaceconnector.iam.did.spi.hub.IdentityHubClient;
import org.eclipse.dataspaceconnector.spi.EdcException;
import org.eclipse.dataspaceconnector.spi.system.Inject;
import org.eclipse.dataspaceconnector.spi.system.Provides;
import org.eclipse.dataspaceconnector.spi.system.ServiceExtension;
import org.eclipse.dataspaceconnector.spi.system.ServiceExtensionContext;

import static java.lang.String.format;
import static org.eclipse.dataspaceconnector.iam.did.spi.document.DidConstants.DID_URL_SETTING;


/**
 * Service extension that constructs an {@link IdentityHubCredentialsVerifier}
 * and registers it as the connector's {@link CredentialsVerifier}.
 * Fails fast at startup when the connector's own DID URL is not configured.
 */
@Provides(CredentialsVerifier.class)
public class DemoCredentialsVerifierExtension implements ServiceExtension {


    // injected by the EDC runtime before initialize() is called
    @Inject
    private IdentityHubClient hubClient;

    @Override
    public void initialize(ServiceExtensionContext context) {

        // the verifier passes this connector's own DID URL as the "issuer" of hub queries
        var didUrl = context.getSetting(DID_URL_SETTING, null);
        if (didUrl == null) {
            throw new EdcException(format("The DID Url setting '(%s)' was null!", DID_URL_SETTING));
        }

        var credentialsVerifier = new IdentityHubCredentialsVerifier(hubClient, context.getMonitor(), didUrl);
        // explicit registration matching the @Provides declaration on the class
        context.registerService(CredentialsVerifier.class, credentialsVerifier);

        context.getMonitor().info("Initialized Demo Credentials Verifier extension");
    }
}
--------------------------------------------------------------------------------
/extensions/identity-hub-verifier/src/main/java/org/eclipse/dataspaceconnector/iam/did/credentials/IdentityHubCredentialsVerifier.java:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2021 Microsoft Corporation
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Contributors:
 *    Microsoft Corporation - initial API and implementation
 *
 */
package org.eclipse.dataspaceconnector.iam.did.credentials;

import org.eclipse.dataspaceconnector.iam.did.spi.credentials.CredentialsVerifier;
import org.eclipse.dataspaceconnector.iam.did.spi.hub.IdentityHubClient;
import org.eclipse.dataspaceconnector.iam.did.spi.hub.message.ObjectQuery;
import org.eclipse.dataspaceconnector.iam.did.spi.hub.message.ObjectQueryRequest;
import org.eclipse.dataspaceconnector.iam.did.spi.key.PublicKeyWrapper;
import org.eclipse.dataspaceconnector.spi.monitor.Monitor;
import org.eclipse.dataspaceconnector.spi.result.Result;

import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * Implements a sample credentials validator that checks for signed registration credentials.
30 | */ 31 | public class IdentityHubCredentialsVerifier implements CredentialsVerifier { 32 | private final IdentityHubClient hubClient; 33 | private final Monitor monitor; 34 | private final String issuer; 35 | 36 | /** 37 | * Create a new credentials verifier that uses an Identity Hub 38 | * 39 | * @param hubClient an instance of a {@link IdentityHubClient} 40 | * @param monitor a {@link Monitor} 41 | * @param issuer a String identifying "this" connector. Here, the verifying connector's DID URL needs to be passed. 42 | */ 43 | public IdentityHubCredentialsVerifier(IdentityHubClient hubClient, Monitor monitor, String issuer) { 44 | this.hubClient = hubClient; 45 | this.monitor = monitor; 46 | this.issuer = issuer; 47 | } 48 | 49 | @Override 50 | public Result> verifyCredentials(String hubBaseUrl, PublicKeyWrapper othersPublicKey) { 51 | 52 | // monitor.debug("Step 2: Starting credential verification against hub URL " + hubBaseUrl); 53 | // 54 | // var query = ObjectQuery.Builder.newInstance().context("ION Demo").type("RegistrationCredentials").build(); 55 | // monitor.debug("Step 2: Generating request, encrypted with PublicKey"); 56 | // 57 | // var queryRequest = ObjectQueryRequest.Builder.newInstance().query(query).iss(issuer).aud("aud").sub("credentials").build(); 58 | // monitor.debug("Starting credential verification against hub URL " + hubBaseUrl); 59 | // var credentials = hubClient.queryCredentials(queryRequest, hubBaseUrl, othersPublicKey); 60 | // monitor.info(credentials.getContent().size() + " credentials obtained from IdentityHub: "); 61 | // monitor.debug(credentials.getContent().entrySet().stream().map(e -> e.getKey() + " -> " + e.getValue()).collect(Collectors.joining(", "))); 62 | // if (credentials.failed()) { 63 | // return Result.failure("Error resolving credentials"); 64 | // } 65 | // 66 | // // only support String credentials; filter out others 67 | // var map = new HashMap(); 68 | // 
credentials.getContent().entrySet().stream().filter(entry -> entry.getValue() instanceof String).forEach(entry -> map.put(entry.getKey(), (String) entry.getValue())); 69 | // return Result.success(map); 70 | 71 | return Result.success(Map.of("region", "eu")); 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /extensions/identity-hub-verifier/src/main/resources/META-INF/services/org.eclipse.dataspaceconnector.spi.system.ServiceExtension: -------------------------------------------------------------------------------- 1 | org.eclipse.dataspaceconnector.iam.did.credentials.DemoCredentialsVerifierExtension 2 | -------------------------------------------------------------------------------- /extensions/identity-hub-verifier/src/test/java/org/eclipse/dataspaceconnector/iam/did/credentials/IdentityHubCredentialsVerifierTest.java: -------------------------------------------------------------------------------- 1 | package org.eclipse.dataspaceconnector.iam.did.credentials; 2 | 3 | import org.easymock.EasyMock; 4 | import org.eclipse.dataspaceconnector.iam.did.crypto.key.RsaPublicKeyWrapper; 5 | import org.eclipse.dataspaceconnector.iam.did.spi.hub.IdentityHubClient; 6 | import org.eclipse.dataspaceconnector.iam.did.spi.hub.message.ObjectQueryRequest; 7 | import org.eclipse.dataspaceconnector.iam.did.spi.key.PublicKeyWrapper; 8 | import org.eclipse.dataspaceconnector.spi.monitor.Monitor; 9 | import org.eclipse.dataspaceconnector.spi.result.Result; 10 | import org.junit.jupiter.api.Assertions; 11 | import org.junit.jupiter.api.BeforeEach; 12 | import org.junit.jupiter.api.Test; 13 | 14 | import java.security.KeyPair; 15 | import java.security.KeyPairGenerator; 16 | import java.security.NoSuchAlgorithmException; 17 | import java.security.interfaces.RSAPublicKey; 18 | import java.util.Map; 19 | 20 | 21 | class IdentityHubCredentialsVerifierTest { 22 | private IdentityHubClient hubClient; 23 | private 
IdentityHubCredentialsVerifier credentialsVerifier; 24 | private RSAPublicKey publicKey; 25 | 26 | @Test 27 | void verifyCredentials() { 28 | EasyMock.expect(hubClient.queryCredentials(EasyMock.isA(ObjectQueryRequest.class), EasyMock.isA(String.class), EasyMock.isA(PublicKeyWrapper.class))).andReturn(Result.success(Map.of("region", "EU"))); 29 | EasyMock.replay(hubClient); 30 | 31 | var result = credentialsVerifier.verifyCredentials("https://foo.com", new RsaPublicKeyWrapper(publicKey)); 32 | Assertions.assertTrue(result.succeeded()); 33 | Assertions.assertEquals("EU", result.getContent().get("region")); 34 | EasyMock.verify(hubClient); 35 | } 36 | 37 | @BeforeEach 38 | void setUp() throws NoSuchAlgorithmException { 39 | KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA"); 40 | kpg.initialize(2048); 41 | KeyPair kp = kpg.generateKeyPair(); 42 | publicKey = (RSAPublicKey) kp.getPublic(); 43 | hubClient = EasyMock.createMock(IdentityHubClient.class); 44 | credentialsVerifier = new IdentityHubCredentialsVerifier(hubClient, new Monitor() { 45 | }, "did:ion:test"); 46 | 47 | } 48 | 49 | } 50 | -------------------------------------------------------------------------------- /extensions/transfer-azure-s3/build.gradle.kts: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020, 2021 Microsoft Corporation 3 | * 4 | * This program and the accompanying materials are made available under the 5 | * terms of the Apache License, Version 2.0 which is available at 6 | * https://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * SPDX-License-Identifier: Apache-2.0 9 | * 10 | * Contributors: 11 | * Microsoft Corporation - initial API and implementation 12 | * 13 | */ 14 | 15 | plugins { 16 | `java-library` 17 | } 18 | 19 | val edcversion: String by project 20 | val rsApi: String by project 21 | val group = "org.eclipse.dataspaceconnector" 22 | val storageBlobVersion: String by project; 23 | 24 | 25 | dependencies { 26 | 
    api("${group}:spi:${edcversion}")
    implementation("${group}:common-util:${edcversion}")
    implementation("${group}:blobstorage:${edcversion}")
    implementation("${group}:aws-s3:${edcversion}")
    // used for the BlobStoreWriter
    implementation("com.azure:azure-storage-blob:${storageBlobVersion}")

}
--------------------------------------------------------------------------------
/extensions/transfer-azure-s3/src/main/java/org/eclipse/dataspaceconnector/transfer/CloudTransferExtension.java:
--------------------------------------------------------------------------------
package org.eclipse.dataspaceconnector.transfer;

import net.jodah.failsafe.RetryPolicy;
import org.eclipse.dataspaceconnector.aws.s3.core.S3ClientProvider;
import org.eclipse.dataspaceconnector.aws.s3.operator.S3BucketReader;
import org.eclipse.dataspaceconnector.aws.s3.operator.S3BucketWriter;
import org.eclipse.dataspaceconnector.azure.blob.core.api.BlobStoreApi;
import org.eclipse.dataspaceconnector.azure.blob.operator.BlobStoreReader;
import org.eclipse.dataspaceconnector.azure.blob.operator.BlobStoreWriter;
// NOTE(review): DataAddressResolver appears unused in this class — verify before removing
import org.eclipse.dataspaceconnector.spi.asset.DataAddressResolver;
import org.eclipse.dataspaceconnector.spi.security.Vault;
import org.eclipse.dataspaceconnector.spi.system.Inject;
import org.eclipse.dataspaceconnector.spi.system.ServiceExtension;
import org.eclipse.dataspaceconnector.spi.system.ServiceExtensionContext;
import org.eclipse.dataspaceconnector.spi.transfer.flow.DataFlowManager;
import org.eclipse.dataspaceconnector.spi.transfer.inline.DataOperatorRegistry;
import org.eclipse.dataspaceconnector.transfer.core.inline.InlineDataFlowController;

/**
 * Service extension that registers readers/writers for Azure Blob storage and
 * AWS S3 with the {@link DataOperatorRegistry}, then wires an
 * {@link InlineDataFlowController} into the {@link DataFlowManager} so transfers
 * between those stores can run in-process.
 */
public class CloudTransferExtension implements ServiceExtension {


    // injected by the EDC runtime before initialize() is called
    @Inject
    private DataFlowManager dataFlowMgr;
    @Inject
    private DataOperatorRegistry registry;
    // NOTE(review): raw RetryPolicy — generics may have been lost in extraction
    // (upstream code typically injects RetryPolicy<Object>); verify
    @Inject
    private RetryPolicy retryPolicy;
    @Inject
    private BlobStoreApi blobStoreApi;
    @Inject
    private S3ClientProvider clientProvider;

    @Override
    public void initialize(ServiceExtensionContext context) {
        // the vault supplies credentials/secrets to the S3 reader and the flow controller
        Vault vault = context.getService(Vault.class);

        // writers: one per destination store type
        registry.registerWriter(new S3BucketWriter(context.getMonitor(), context.getTypeManager(), retryPolicy, clientProvider));
        registry.registerWriter(new BlobStoreWriter(context.getMonitor(), context.getTypeManager()));
        // readers: one per source store type
        registry.registerReader(new BlobStoreReader(blobStoreApi));
        registry.registerReader(new S3BucketReader(context.getMonitor(), vault, clientProvider ));
        var flowController = new InlineDataFlowController(vault, context.getMonitor(), registry);
        dataFlowMgr.register(flowController);

        context.getMonitor().info("Initialized transfer extension");
    }
}
--------------------------------------------------------------------------------
/extensions/transfer-azure-s3/src/main/resources/META-INF/services/org.eclipse.dataspaceconnector.spi.system.ServiceExtension:
--------------------------------------------------------------------------------
org.eclipse.dataspaceconnector.transfer.CloudTransferExtension
--------------------------------------------------------------------------------
/gradle.properties:
--------------------------------------------------------------------------------
jetBrainsAnnotationsVersion=15.0
jacksonVersion=2.12.1
jupiterVersion=5.5.2
edcversion=0.0.1-SNAPSHOT
nimbusVersion=8.22.1
rsApi=3.0.0
#org.gradle.console=rich
org.gradle.parallel=true
org.gradle.workers.max=32
storageBlobVersion=12.11.0
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/edc-showcase/a1271a8c2cc04ae6fa3ae3f00c9328b1880137a8/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionBase=GRADLE_USER_HOME 2 | distributionPath=wrapper/dists 3 | distributionUrl=https\://services.gradle.org/distributions/gradle-7.3-bin.zip 4 | zipStoreBase=GRADLE_USER_HOME 5 | zipStorePath=wrapper/dists 6 | -------------------------------------------------------------------------------- /gradlew: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # 4 | # Copyright © 2015-2021 the original authors. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # https://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ############################################################################## 20 | # 21 | # Gradle start up script for POSIX generated by Gradle. 22 | # 23 | # Important for running: 24 | # 25 | # (1) You need a POSIX-compliant shell to run this script. 
If your /bin/sh is 26 | # noncompliant, but you have some other compliant shell such as ksh or 27 | # bash, then to run this script, type that shell name before the whole 28 | # command line, like: 29 | # 30 | # ksh Gradle 31 | # 32 | # Busybox and similar reduced shells will NOT work, because this script 33 | # requires all of these POSIX shell features: 34 | # * functions; 35 | # * expansions «$var», «${var}», «${var:-default}», «${var+SET}», 36 | # «${var#prefix}», «${var%suffix}», and «$( cmd )»; 37 | # * compound commands having a testable exit status, especially «case»; 38 | # * various built-in commands including «command», «set», and «ulimit». 39 | # 40 | # Important for patching: 41 | # 42 | # (2) This script targets any POSIX shell, so it avoids extensions provided 43 | # by Bash, Ksh, etc; in particular arrays are avoided. 44 | # 45 | # The "traditional" practice of packing multiple parameters into a 46 | # space-separated string is a well documented source of bugs and security 47 | # problems, so this is (mostly) avoided, by progressively accumulating 48 | # options in "$@", and eventually passing that to Java. 49 | # 50 | # Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, 51 | # and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; 52 | # see the in-line comments for details. 53 | # 54 | # There are tweaks for specific operating systems such as AIX, CygWin, 55 | # Darwin, MinGW, and NonStop. 56 | # 57 | # (3) This script is generated from the Groovy template 58 | # https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt 59 | # within the Gradle project. 60 | # 61 | # You can find Gradle at https://github.com/gradle/gradle/. 
62 | # 63 | ############################################################################## 64 | 65 | # Attempt to set APP_HOME 66 | 67 | # Resolve links: $0 may be a link 68 | app_path=$0 69 | 70 | # Need this for daisy-chained symlinks. 71 | while 72 | APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path 73 | [ -h "$app_path" ] 74 | do 75 | ls=$( ls -ld "$app_path" ) 76 | link=${ls#*' -> '} 77 | case $link in #( 78 | /*) app_path=$link ;; #( 79 | *) app_path=$APP_HOME$link ;; 80 | esac 81 | done 82 | 83 | APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit 84 | 85 | APP_NAME="Gradle" 86 | APP_BASE_NAME=${0##*/} 87 | 88 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 89 | DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' 90 | 91 | # Use the maximum available, or set MAX_FD != -1 to use that value. 92 | MAX_FD=maximum 93 | 94 | warn () { 95 | echo "$*" 96 | } >&2 97 | 98 | die () { 99 | echo 100 | echo "$*" 101 | echo 102 | exit 1 103 | } >&2 104 | 105 | # OS specific support (must be 'true' or 'false'). 106 | cygwin=false 107 | msys=false 108 | darwin=false 109 | nonstop=false 110 | case "$( uname )" in #( 111 | CYGWIN* ) cygwin=true ;; #( 112 | Darwin* ) darwin=true ;; #( 113 | MSYS* | MINGW* ) msys=true ;; #( 114 | NONSTOP* ) nonstop=true ;; 115 | esac 116 | 117 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 118 | 119 | 120 | # Determine the Java command to use to start the JVM. 121 | if [ -n "$JAVA_HOME" ] ; then 122 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 123 | # IBM's JDK on AIX uses strange locations for the executables 124 | JAVACMD=$JAVA_HOME/jre/sh/java 125 | else 126 | JAVACMD=$JAVA_HOME/bin/java 127 | fi 128 | if [ ! -x "$JAVACMD" ] ; then 129 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 130 | 131 | Please set the JAVA_HOME variable in your environment to match the 132 | location of your Java installation." 
133 | fi 134 | else 135 | JAVACMD=java 136 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 137 | 138 | Please set the JAVA_HOME variable in your environment to match the 139 | location of your Java installation." 140 | fi 141 | 142 | # Increase the maximum file descriptors if we can. 143 | if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then 144 | case $MAX_FD in #( 145 | max*) 146 | MAX_FD=$( ulimit -H -n ) || 147 | warn "Could not query maximum file descriptor limit" 148 | esac 149 | case $MAX_FD in #( 150 | '' | soft) :;; #( 151 | *) 152 | ulimit -n "$MAX_FD" || 153 | warn "Could not set maximum file descriptor limit to $MAX_FD" 154 | esac 155 | fi 156 | 157 | # Collect all arguments for the java command, stacking in reverse order: 158 | # * args from the command line 159 | # * the main class name 160 | # * -classpath 161 | # * -D...appname settings 162 | # * --module-path (only if needed) 163 | # * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 164 | 165 | # For Cygwin or MSYS, switch paths to Windows format before running java 166 | if "$cygwin" || "$msys" ; then 167 | APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) 168 | CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) 169 | 170 | JAVACMD=$( cygpath --unix "$JAVACMD" ) 171 | 172 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 173 | for arg do 174 | if 175 | case $arg in #( 176 | -*) false ;; # don't mess with options #( 177 | /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath 178 | [ -e "$t" ] ;; #( 179 | *) false ;; 180 | esac 181 | then 182 | arg=$( cygpath --path --ignore --mixed "$arg" ) 183 | fi 184 | # Roll the args list around exactly as many times as the number of 185 | # args, so each arg winds up back in the position where it started, but 186 | # possibly modified. 
187 | # 188 | # NB: a `for` loop captures its iteration list before it begins, so 189 | # changing the positional parameters here affects neither the number of 190 | # iterations, nor the values presented in `arg`. 191 | shift # remove old arg 192 | set -- "$@" "$arg" # push replacement arg 193 | done 194 | fi 195 | 196 | # Collect all arguments for the java command; 197 | # * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of 198 | # shell script including quotes and variable substitutions, so put them in 199 | # double quotes to make sure that they get re-expanded; and 200 | # * put everything else in single quotes, so that it's not re-expanded. 201 | 202 | set -- \ 203 | "-Dorg.gradle.appname=$APP_BASE_NAME" \ 204 | -classpath "$CLASSPATH" \ 205 | org.gradle.wrapper.GradleWrapperMain \ 206 | "$@" 207 | 208 | # Use "xargs" to parse quoted args. 209 | # 210 | # With -n1 it outputs one arg per line, with the quotes and backslashes removed. 211 | # 212 | # In Bash we could simply go: 213 | # 214 | # readarray ARGS < <( xargs -n1 <<<"$var" ) && 215 | # set -- "${ARGS[@]}" "$@" 216 | # 217 | # but POSIX shell has neither arrays nor command substitution, so instead we 218 | # post-process each arg (as a line of input to sed) to backslash-escape any 219 | # character that might be a shell metacharacter, then use eval to reverse 220 | # that process (while maintaining the separation between arguments), and wrap 221 | # the whole thing up as a single "set" statement. 222 | # 223 | # This will of course break if any of these variables contains a newline or 224 | # an unmatched quote. 
225 | # 226 | 227 | eval "set -- $( 228 | printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | 229 | xargs -n1 | 230 | sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | 231 | tr '\n' ' ' 232 | )" '"$@"' 233 | 234 | exec "$JAVACMD" "$@" 235 | -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @rem 2 | @rem Copyright 2015 the original author or authors. 3 | @rem 4 | @rem Licensed under the Apache License, Version 2.0 (the "License"); 5 | @rem you may not use this file except in compliance with the License. 6 | @rem You may obtain a copy of the License at 7 | @rem 8 | @rem https://www.apache.org/licenses/LICENSE-2.0 9 | @rem 10 | @rem Unless required by applicable law or agreed to in writing, software 11 | @rem distributed under the License is distributed on an "AS IS" BASIS, 12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | @rem See the License for the specific language governing permissions and 14 | @rem limitations under the License. 15 | @rem 16 | 17 | @if "%DEBUG%" == "" @echo off 18 | @rem ########################################################################## 19 | @rem 20 | @rem Gradle startup script for Windows 21 | @rem 22 | @rem ########################################################################## 23 | 24 | @rem Set local scope for the variables with windows NT shell 25 | if "%OS%"=="Windows_NT" setlocal 26 | 27 | set DIRNAME=%~dp0 28 | if "%DIRNAME%" == "" set DIRNAME=. 29 | set APP_BASE_NAME=%~n0 30 | set APP_HOME=%DIRNAME% 31 | 32 | @rem Resolve any "." and ".." in APP_HOME to make it shorter. 33 | for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi 34 | 35 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
36 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" 37 | 38 | @rem Find java.exe 39 | if defined JAVA_HOME goto findJavaFromJavaHome 40 | 41 | set JAVA_EXE=java.exe 42 | %JAVA_EXE% -version >NUL 2>&1 43 | if "%ERRORLEVEL%" == "0" goto execute 44 | 45 | echo. 46 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 47 | echo. 48 | echo Please set the JAVA_HOME variable in your environment to match the 49 | echo location of your Java installation. 50 | 51 | goto fail 52 | 53 | :findJavaFromJavaHome 54 | set JAVA_HOME=%JAVA_HOME:"=% 55 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 56 | 57 | if exist "%JAVA_EXE%" goto execute 58 | 59 | echo. 60 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 61 | echo. 62 | echo Please set the JAVA_HOME variable in your environment to match the 63 | echo location of your Java installation. 64 | 65 | goto fail 66 | 67 | :execute 68 | @rem Setup the command line 69 | 70 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 71 | 72 | 73 | @rem Execute Gradle 74 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* 75 | 76 | :end 77 | @rem End local scope for the variables with windows NT shell 78 | if "%ERRORLEVEL%"=="0" goto mainEnd 79 | 80 | :fail 81 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 82 | rem the _cmd.exe /c_ return code! 
83 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 84 | exit /b 1 85 | 86 | :mainEnd 87 | if "%OS%"=="Windows_NT" endlocal 88 | 89 | :omega 90 | -------------------------------------------------------------------------------- /launchers/connector/Dockerfile: -------------------------------------------------------------------------------- 1 | # -buster is required to have apt available 2 | FROM openjdk:11-jre-slim-buster 3 | 4 | # by default curl is not available, so install it 5 | RUN apt update && apt install curl -y 6 | 7 | WORKDIR /app 8 | COPY ./build/libs/connector.jar /app 9 | 10 | EXPOSE 8181 11 | EXPOSE 8182 12 | EXPOSE 8282 13 | 14 | # health status is determined by the availability of the /health endpoint 15 | #HEALTHCHECK --interval=5s --timeout=5s --retries=10 CMD curl --fail -X GET http://localhost:8181/api/health || exit 1 16 | 17 | ENTRYPOINT java \ 18 | -Dweb.http.port=8181 \ 19 | -Dweb.http.path=/api \ 20 | -Dweb.http.data.port=8182 \ 21 | -Dweb.http.data.path=/api/v1/data \ 22 | -Dweb.http.ids.port=8282 \ 23 | -Dweb.http.ids.path=/api/v1/ids\ 24 | -Dedc.ids.id=${IDS_ID} \ 25 | -Dids.webhook.address=${IDS_WEBHOOK_ADDRESS} \ 26 | -Dedc.api.control.auth.apikey.value=x-edc-showcase-x \ 27 | -Dedc.api.auth.key=x-edc-showcase-x \ 28 | -Dedc.web.rest.cors.enabled=true \ 29 | -Dedc.vault.clientid=${CLIENTID} \ 30 | -Dedc.vault.tenantid=${TENANTID} \ 31 | -Dedc.vault.certificate=/cert/cert.pfx \ 32 | -Dedc.vault.name=${VAULTNAME} \ 33 | -Dedc.connector.name=${CONNECTOR_NAME} \ 34 | -Dedc.events.topic.name=${TOPIC_NAME} \ 35 | -Dedc.events.topic.endpoint=${TOPIC_ENDPOINT} \ 36 | -Dedc.identity.did.url=${DID_URL} \ 37 | -Dedc.catalog.cache.loader.batch.size=${LOADER_BATCH_SIZE} \ 38 | -Dedc.webdid.doh.url=${DOH_SERVER} \ 39 | -Dedc.assetindex.cosmos.account-name=${COSMOS_ACCOUNT} \ 40 | -Dedc.assetindex.cosmos.database-name=${COSMOS_DB} \ 41 | -Dedc.cosmos.partition-key=${COSMOS_PARTKEY} \ 42 | -Dedc.assetindex.cosmos.container-name=${COSMOS_CONTAINER} \ 43 | 
-Dedc.contractdefinitionstore.cosmos.account-name=${COSMOS_ACCOUNT} \ 44 | -Dedc.contractdefinitionstore.cosmos.database-name=${CDS_DATABASE} \ 45 | -Dedc.contractdefinitionstore.cosmos.container-name=${CDS_CONTAINER} \ 46 | -Dedc.contractnegotiationstore.cosmos.account-name=${COSMOS_ACCOUNT} \ 47 | -Dedc.contractnegotiationstore.cosmos.database-name=${CNS_DATABASE} \ 48 | -Dedc.contractnegotiationstore.cosmos.container-name=${CNS_CONTAINER} \ 49 | -Dedc.showcase.fcc.nodes.file=nodes.json \ 50 | -Djava.security.edg=file:/dev/.urandom -jar connector.jar 51 | -------------------------------------------------------------------------------- /launchers/connector/Dockerfile.compose: -------------------------------------------------------------------------------- 1 | # -buster is required to have apt available 2 | FROM openjdk:11-jre-slim-buster 3 | 4 | # by default curl is not available, so install it 5 | #:RUN apt update && apt install curl -y 6 | 7 | WORKDIR /app 8 | COPY ./build/libs/connector.jar /app 9 | 10 | 11 | # health status is determined by the availability of the /health endpoint 12 | #HEALTHCHECK --interval=5s --timeout=5s --retries=10 CMD curl --fail -X GET http://localhost:8181/api/health || exit 1 13 | 14 | ENTRYPOINT java \ 15 | -Dweb.http.port=8181 \ 16 | -Dweb.http.path=/api \ 17 | -Dweb.http.data.port=${DATA_PORT} \ 18 | -Dweb.http.data.path=/api/v1/data \ 19 | -Dweb.http.ids.port=${IDS_PORT} \ 20 | -Dweb.http.ids.path=/api/v1/ids\ 21 | -Dedc.ids.id=${IDS_ID} \ 22 | -Dids.webhook.address=${IDS_WEBHOOK_ADDRESS} \ 23 | -Dedc.api.control.auth.apikey.value=x-edc-showcase-x \ 24 | -Dedc.api.auth.key=x-edc-showcase-x \ 25 | -Dedc.web.rest.cors.enabled=true \ 26 | -Dedc.web.rest.cors.headers=origin,content-type,accept,authorization,x-api-key \ 27 | -Dedc.vault.clientid=${CLIENTID} \ 28 | -Dedc.vault.tenantid=${TENANTID} \ 29 | -Dedc.vault.certificate=/cert/cert.pfx \ 30 | -Dedc.vault.name=${VAULTNAME} \ 31 | -Dedc.connector.name=${CONNECTOR_NAME} \ 32 | 
-Dedc.events.topic.name=${TOPIC_NAME} \ 33 | -Dedc.events.topic.endpoint=${TOPIC_ENDPOINT} \ 34 | -Dedc.identity.did.url=${DID_URL} \ 35 | -Dedc.catalog.cache.loader.batch.size=${LOADER_BATCH_SIZE} \ 36 | -Dedc.catalog.cache.execution.delay.seconds=2 \ 37 | -Dedc.webdid.doh.url=${DOH_SERVER} \ 38 | -Dedc.assetindex.cosmos.account-name=${COSMOS_ACCOUNT} \ 39 | -Dedc.assetindex.cosmos.database-name=${COSMOS_DB} \ 40 | -Dedc.cosmos.partition-key=${COSMOS_PARTKEY} \ 41 | -Dedc.assetindex.cosmos.container-name=${COSMOS_CONTAINER} \ 42 | -Dedc.contractdefinitionstore.cosmos.account-name=${COSMOS_ACCOUNT} \ 43 | -Dedc.contractdefinitionstore.cosmos.database-name=${CDS_DATABASE} \ 44 | -Dedc.contractdefinitionstore.cosmos.container-name=${CDS_CONTAINER} \ 45 | -Dedc.contractnegotiationstore.cosmos.account-name=${COSMOS_ACCOUNT} \ 46 | -Dedc.contractnegotiationstore.cosmos.database-name=${CNS_DATABASE} \ 47 | -Dedc.contractnegotiationstore.cosmos.container-name=${CNS_CONTAINER} \ 48 | -Dedc.showcase.fcc.nodes.file=nodes-dockercompose.json \ 49 | -Dedc.catalog.cache.execution.delay-seconds=1 \ 50 | -Djava.security.edg=file:/dev/.urandom -jar connector.jar 51 | -------------------------------------------------------------------------------- /launchers/connector/build.gradle.kts: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) Microsoft Corporation. 3 | * All rights reserved. 
4 | */ 5 | 6 | plugins { 7 | `java-library` 8 | application 9 | id("com.github.johnrengelman.shadow") version "7.0.0" 10 | } 11 | 12 | val edcversion: String by project 13 | val group = "org.eclipse.dataspaceconnector" 14 | 15 | dependencies { 16 | // dependencies from this project 17 | // implementation(project(":extensions:public-rest-api")) 18 | implementation(project(":extensions:dataseeding:catalog")) 19 | implementation(project(":extensions:dataseeding:hub")) 20 | implementation(project(":extensions:transfer-azure-s3")) 21 | implementation(project(":extensions:identity-hub-verifier")) 22 | implementation(project(":extensions:federated-catalog-api")) 23 | 24 | // EDC core dependencies 25 | implementation("${group}:core:${edcversion}") 26 | implementation("${group}:contract-definition-store-cosmos:${edcversion}") 27 | // implementation("${group}:contract-negotiation-store-cosmos:${edcversion}") 28 | implementation("${group}:observability-api:${edcversion}") 29 | implementation("${group}:control-api:${edcversion}") 30 | implementation("${group}:data-management-api:${edcversion}") 31 | implementation("${group}:auth-tokenbased:${edcversion}") 32 | 33 | // ids 34 | implementation("${group}:ids:${edcversion}") 35 | 36 | // simple in-memory and filesystem implementations 37 | implementation("${group}:filesystem-configuration:${edcversion}") 38 | implementation("${group}:assetindex-cosmos:${edcversion}") 39 | implementation("${group}:contract-definition-store-cosmos:${edcversion}") 40 | implementation("${group}:contract-negotiation-store-cosmos:${edcversion}") 41 | 42 | //cloud stuff 43 | implementation("${group}:azure-vault:${edcversion}") 44 | implementation("${group}:s3-provision:${edcversion}") 45 | implementation("${group}:blob-provision:${edcversion}") 46 | 47 | // distributed identity stuff 48 | implementation("${group}:identity-did-web:${edcversion}") 49 | implementation("${group}:identity-did-spi:${edcversion}") 50 | 
implementation("${group}:identity-did-core:${edcversion}") 51 | implementation("${group}:identity-did-service:${edcversion}") 52 | 53 | // embed an FCC into the runtime 54 | implementation("${group}:catalog:${edcversion}") 55 | 56 | } 57 | 58 | application { 59 | @Suppress("DEPRECATION") 60 | mainClassName = "org.eclipse.dataspaceconnector.boot.system.runtime.BaseRuntime" 61 | } 62 | 63 | tasks.withType { 64 | exclude("**/pom.properties", "**/pom.xm") 65 | mergeServiceFiles() 66 | archiveFileName.set("connector.jar") 67 | } 68 | 69 | -------------------------------------------------------------------------------- /launchers/connector/connector3.properties: -------------------------------------------------------------------------------- 1 | # Configuration of the connector: 2 | # 3 | # YOUR_CLIENT_ID: the client id that was created during terraform deployment and that corresponds to your Service Principal 4 | # YOUR_TENANT_ID: your Azure Tenant ID 5 | # ENVIRONMENT: the value you entered during terraform deployment 6 | # YOUR_DID_ID: the ID of the DID you either anchored on ION (did:ion:...) or host on a static web page (did:web:...), etc. 
7 | # 8 | web.http.port=7171 9 | ids.webhook.address=http://localhost:7171 10 | edc.api.control.auth.apikey.value=x-edc-showcase-x 11 | edc.api.auth.key=x-edc-showcase-x 12 | edc.vault.clientid=85e1ff6d-9e61-4637-8e72-d78184467d74 13 | edc.vault.tenantid=1d9b26d0-b6ea-4495-97d3-e85ccf73a154 14 | edc.vault.certificate=/home/paul/dev/edc-showcase/deployment/cert.pfx 15 | edc.vault.name=edc-showcase-vault 16 | edc.events.topic.name=edc-showcase-control-events 17 | edc.events.topic.endpoint=https://edc-showcase-control-events.westeurope-1.eventgrid.azure.net/api/events 18 | edc.identity.did.url=did:web:edcshowcasegpstorage.z6.web.core.windows.net:connector3 19 | edc.connector.name=connector3 20 | edc.catalog.cache.loader.batch.size=1 21 | edc.webdid.doh.url=https://cloudflare-dns.com/dns-query 22 | edc.catalog.cache.execution.delay-seconds=1 23 | # asset index properties 24 | edc.assetindex.cosmos.account-name=edc-showcase-cosmos 25 | edc.assetindex.cosmos.database-name=asset-index 26 | edc.cosmos.partition-key=assetIndexPartition 27 | edc.assetindex.cosmos.preferred-region=westeurope 28 | edc.assetindex.cosmos.container-name=connector3 29 | edc.cosmos.query-metrics-enabled=true 30 | # contract definition store properties 31 | edc.contractdefinitionstore.cosmos.account-name=edc-showcase-cosmos 32 | edc.contractdefinitionstore.cosmos.database-name=contract-definition-store 33 | edc.contractdefinitionstore.cosmos.preferred-region=westeurope 34 | edc.contractdefinitionstore.cosmos.container-name=connector3 35 | # contract negotiation store properties 36 | edc.contractnegotiationstore.cosmos.account-name=edc-showcase-cosmos 37 | edc.contractnegotiationstore.cosmos.database-name=contract-negotiation-store 38 | edc.contractnegotiationstore.cosmos.preferred-region=westeurope 39 | edc.contractnegotiationstore.cosmos.container-name=connector3 40 | #nodes file 41 | edc.showcase.fcc.nodes.file=nodes-local.json 
-------------------------------------------------------------------------------- /launchers/connector/consumer.properties: -------------------------------------------------------------------------------- 1 | # Configuration of the connector: 2 | # 3 | # YOUR_CLIENT_ID: the client id that was created during terraform deployment and that corresponds to your Service Principal 4 | # YOUR_TENANT_ID: your Azure Tenant ID 5 | # ENVIRONMENT: the value you entered during terraform deployment 6 | # YOUR_DID_ID: the ID of the DID you either anchored on ION (did:ion:...) or host on a static web page (did:web:...), etc. 7 | # 8 | web.http.port=8181 9 | web.http.path=/api 10 | web.http.data.port=8182 11 | web.http.data.path=/api/v1/data 12 | web.http.ids.port=8282 13 | web.http.ids.path=/api/v1/ids 14 | ids.webhook.address=http://localhost:8282 15 | edc.ids.id=urn:connector:consumer 16 | edc.web.rest.cors.enabled=true 17 | edc.web.rest.cors.headers=origin,content-type,accept,authorization,x-api-key 18 | edc.api.control.auth.apikey.value=x-edc-showcase-x 19 | edc.api.auth.key=x-edc-showcase-x 20 | edc.vault.clientid=85e1ff6d-9e61-4637-8e72-d78184467d74 21 | edc.vault.tenantid=1d9b26d0-b6ea-4495-97d3-e85ccf73a154 22 | edc.vault.certificate=/home/paul/dev/edc-showcase/deployment/cert.pfx 23 | edc.vault.name=edc-showcase-vault 24 | edc.events.topic.name=edc-showcase-control-events 25 | edc.events.topic.endpoint=https://edc-showcase-control-events.westeurope-1.eventgrid.azure.net/api/events 26 | edc.identity.did.url=did:web:edcshowcasegpstorage.z6.web.core.windows.net:consumer 27 | edc.connector.name=consumer 28 | edc.catalog.cache.loader.batch.size=1 29 | edc.webdid.doh.url=https://cloudflare-dns.com/dns-query 30 | edc.catalog.cache.execution.delay-seconds=1 31 | # asset index properties 32 | edc.assetindex.cosmos.account-name=edc-showcase-cosmos 33 | edc.assetindex.cosmos.database-name=asset-index 34 | edc.cosmos.partition-key=edcpartkeycons 35 | 
edc.assetindex.cosmos.preferred-region=westeurope 36 | edc.assetindex.cosmos.container-name=consumer 37 | edc.cosmos.query-metrics-enabled=true 38 | # contract definition store properties 39 | edc.contractdefinitionstore.cosmos.account-name=edc-showcase-cosmos 40 | edc.contractdefinitionstore.cosmos.database-name=contract-definition-store 41 | edc.contractdefinitionstore.cosmos.preferred-region=westeurope 42 | edc.contractdefinitionstore.cosmos.container-name=consumer 43 | # contract negotiation store properties 44 | edc.contractnegotiationstore.cosmos.account-name=edc-showcase-cosmos 45 | edc.contractnegotiationstore.cosmos.database-name=contract-negotiation-store 46 | edc.contractnegotiationstore.cosmos.preferred-region=westeurope 47 | edc.contractnegotiationstore.cosmos.container-name=consumer 48 | #nodes file 49 | edc.showcase.fcc.nodes.file=nodes-local.json -------------------------------------------------------------------------------- /launchers/connector/provider.properties: -------------------------------------------------------------------------------- 1 | # Configuration of the connector: 2 | # 3 | # YOUR_CLIENT_ID: the client id that was created during terraform deployment and that corresponds to your Service Principal 4 | # YOUR_TENANT_ID: your Azure Tenant ID 5 | # ENVIRONMENT: the value you entered during terraform deployment 6 | # YOUR_DID_ID: the ID of the DID you either anchored on ION (did:ion:...) or host on a static web page (did:web:...), etc. 
7 | # 8 | web.http.port=9191 9 | web.http.path=/api 10 | web.http.data.port=9192 11 | web.http.data.path=/api/v1/data 12 | web.http.ids.port=9292 13 | web.http.ids.path=/api/v1/ids 14 | ids.webhook.address=http://localhost:9292 15 | edc.ids.id=urn:connector:consumer 16 | edc.web.rest.cors.enabled=true 17 | edc.api.control.auth.apikey.value=x-edc-showcase-x 18 | edc.api.auth.key=x-edc-showcase-x 19 | edc.vault.clientid=85e1ff6d-9e61-4637-8e72-d78184467d74 20 | edc.vault.tenantid=1d9b26d0-b6ea-4495-97d3-e85ccf73a154 21 | edc.vault.certificate=/home/paul/dev/edc-showcase/deployment/cert.pfx 22 | edc.vault.name=edc-showcase-vault 23 | edc.events.topic.name=edc-showcase-control-events 24 | edc.events.topic.endpoint=https://edc-showcase-control-events.westeurope-1.eventgrid.azure.net/api/events 25 | edc.identity.did.url=did:web:edcshowcasegpstorage.z6.web.core.windows.net:provider 26 | edc.connector.name=provider 27 | edc.catalog.cache.loader.batch.size=1 28 | edc.webdid.doh.url=https://cloudflare-dns.com/dns-query 29 | edc.catalog.cache.execution.delay-seconds=1 30 | # asset index properties 31 | edc.assetindex.cosmos.account-name=edc-showcase-cosmos 32 | edc.assetindex.cosmos.database-name=asset-index 33 | edc.cosmos.partition-key=edcpartkeyprov 34 | edc.assetindex.cosmos.preferred-region=westeurope 35 | edc.assetindex.cosmos.container-name=provider 36 | edc.cosmos.query-metrics-enabled=true 37 | # contract definition store properties 38 | edc.contractdefinitionstore.cosmos.account-name=edc-showcase-cosmos 39 | edc.contractdefinitionstore.cosmos.database-name=contract-definition-store 40 | edc.contractdefinitionstore.cosmos.preferred-region=westeurope 41 | edc.contractdefinitionstore.cosmos.container-name=provider 42 | # contract negotiation store properties 43 | edc.contractnegotiationstore.cosmos.account-name=edc-showcase-cosmos 44 | edc.contractnegotiationstore.cosmos.database-name=contract-negotiation-store 45 | 
edc.contractnegotiationstore.cosmos.preferred-region=westeurope 46 | edc.contractnegotiationstore.cosmos.container-name=provider 47 | #nodes file 48 | edc.showcase.fcc.nodes.file=nodes-local.json -------------------------------------------------------------------------------- /launchers/junit/build.gradle.kts: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) Microsoft Corporation. 3 | * All rights reserved. 4 | */ 5 | 6 | plugins { 7 | `java-library` 8 | } 9 | 10 | val group = "org.eclipse.dataspaceconnector" 11 | val edcversion: String by project 12 | val jupiterVersion: String by project 13 | 14 | dependencies { 15 | implementation(project(":extensions:dataseeding:hub")) 16 | 17 | testImplementation("${group}:spi:${edcversion}") 18 | testImplementation("${group}:core.transfer:${edcversion}") 19 | testImplementation("${group}:ion.ion-core:${edcversion}") 20 | testImplementation("${group}:ion.ion-client:${edcversion}") 21 | 22 | testImplementation("${group}:ids-policy-mock:${edcversion}") 23 | testImplementation("${group}:ids:${edcversion}") 24 | testImplementation("${group}:iam.identity-did-core:${edcversion}") 25 | testImplementation("${group}:iam.identity-did-service:${edcversion}") 26 | testImplementation("${group}:iam.verifiable-credentials:${edcversion}") 27 | testImplementation(project(":extensions:identity-hub-verifier")) 28 | 29 | 30 | testImplementation("${group}:dataspaceconnector.junit.launcher:${edcversion}") 31 | testImplementation("${group}:common-util:${edcversion}") 32 | 33 | testImplementation("org.junit.jupiter:junit-jupiter-api:${jupiterVersion}") 34 | testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine:${jupiterVersion}") 35 | } 36 | 37 | -------------------------------------------------------------------------------- /launchers/junit/src/test/java/org/eclipse/dataspaceconnector/test/EdcExtension.java: -------------------------------------------------------------------------------- 
1 | package org.eclipse.dataspaceconnector.test; 2 | 3 | 4 | import okhttp3.Interceptor; 5 | import org.eclipse.dataspaceconnector.monitor.MonitorProvider; 6 | import org.eclipse.dataspaceconnector.spi.EdcException; 7 | import org.eclipse.dataspaceconnector.spi.security.Vault; 8 | import org.eclipse.dataspaceconnector.spi.system.ServiceExtension; 9 | import org.eclipse.dataspaceconnector.spi.system.SystemExtension; 10 | import org.eclipse.dataspaceconnector.spi.types.TypeManager; 11 | import org.eclipse.dataspaceconnector.system.ExtensionLoader; 12 | import org.eclipse.dataspaceconnector.system.ServiceLocator; 13 | import org.eclipse.dataspaceconnector.system.ServiceLocatorImpl; 14 | import org.junit.jupiter.api.extension.*; 15 | 16 | import java.util.ArrayList; 17 | import java.util.LinkedHashMap; 18 | import java.util.List; 19 | 20 | import static org.eclipse.dataspaceconnector.common.types.Cast.cast; 21 | 22 | /** 23 | * A JUnit extension for running an embedded EDC runtime as part of a test fixture. 24 | *

25 | * This extension attaches a EDC runtime to the {@link BeforeTestExecutionCallback} and {@link AfterTestExecutionCallback} lifecycle hooks. Parameter injection of runtime services is supported. 26 | */ 27 | public class EdcExtension implements BeforeTestExecutionCallback, AfterTestExecutionCallback, ParameterResolver { 28 | private final LinkedHashMap, Object> serviceMocks = new LinkedHashMap<>(); 29 | private final LinkedHashMap, List> systemExtensions = new LinkedHashMap<>(); 30 | private List runningServiceExtensions; 31 | private TestServiceExtensionContext context; 32 | 33 | /** 34 | * Registers a mock service with the runtime. 35 | * 36 | * @param mock the service mock 37 | */ 38 | public void registerServiceMock(Class type, T mock) { 39 | serviceMocks.put(type, mock); 40 | } 41 | 42 | /** 43 | * Registers a service extension with the runtime. 44 | */ 45 | public void registerSystemExtension(Class type, SystemExtension extension) { 46 | systemExtensions.computeIfAbsent(type, k -> new ArrayList<>()).add(extension); 47 | } 48 | 49 | public void registerInterceptor(Interceptor interceptor) { 50 | 51 | } 52 | 53 | @Override 54 | public void beforeTestExecution(ExtensionContext extensionContext) { 55 | var typeManager = new TypeManager(); 56 | 57 | var monitor = ExtensionLoader.loadMonitor(); 58 | 59 | MonitorProvider.setInstance(monitor); 60 | 61 | context = new TestServiceExtensionContext(typeManager, monitor, new MultiSourceServiceLocator()); 62 | context.initialize(); 63 | 64 | serviceMocks.forEach((key, value) -> context.registerService(cast(key), value)); 65 | 66 | try { 67 | if (!serviceMocks.containsKey(Vault.class)) { 68 | ExtensionLoader.loadVault(context); 69 | } 70 | 71 | runningServiceExtensions = context.loadServiceExtensions(); 72 | 73 | ExtensionLoader.bootServiceExtensions(runningServiceExtensions, context); 74 | } catch (Exception e) { 75 | throw new EdcException(e); 76 | } 77 | } 78 | 79 | @Override 80 | public void 
afterTestExecution(ExtensionContext context) { 81 | if (runningServiceExtensions != null) { 82 | var iter = runningServiceExtensions.listIterator(runningServiceExtensions.size()); 83 | while (iter.hasPrevious()) { 84 | iter.previous().shutdown(); 85 | } 86 | } 87 | } 88 | 89 | @Override 90 | public boolean supportsParameter(ParameterContext parameterContext, ExtensionContext extensionContext) throws ParameterResolutionException { 91 | var type = parameterContext.getParameter().getParameterizedType(); 92 | if (type.equals(EdcExtension.class)) { 93 | return true; 94 | } else if (type instanceof Class) { 95 | return context.hasService(cast(type)); 96 | } 97 | return false; 98 | } 99 | 100 | @Override 101 | public Object resolveParameter(ParameterContext parameterContext, ExtensionContext extensionContext) throws ParameterResolutionException { 102 | var type = parameterContext.getParameter().getParameterizedType(); 103 | if (type.equals(EdcExtension.class)) { 104 | return this; 105 | } else if (type instanceof Class) { 106 | return context.getService(cast(type)); 107 | } 108 | return null; 109 | } 110 | 111 | public void setSetting(String s, String s1) { 112 | context.overrideSetting(s, s1); 113 | } 114 | 115 | /** 116 | * A service locator that allows additional extensions to be manually loaded by a test fixture. This locator return the union of registered extensions and extensions loaded 117 | * by the delegate. 118 | */ 119 | private class MultiSourceServiceLocator implements ServiceLocator { 120 | private final ServiceLocator delegate = new ServiceLocatorImpl(); 121 | 122 | @Override 123 | public List loadImplementors(Class type, boolean required) { 124 | List extensions = cast(systemExtensions.getOrDefault(type, new ArrayList<>())); 125 | extensions.addAll(delegate.loadImplementors(type, required)); 126 | return extensions; 127 | } 128 | 129 | /** 130 | * This implementation will override singleton implementions found by the delegate. 
131 | */ 132 | @Override 133 | public T loadSingletonImplementor(Class type, boolean required) { 134 | List extensions = systemExtensions.get(type); 135 | if (extensions == null || extensions.isEmpty()) { 136 | return delegate.loadSingletonImplementor(type, required); 137 | } else if (extensions.size() > 1) { 138 | throw new EdcException("Multiple extensions were registered for type: " + type.getName()); 139 | } 140 | return type.cast(extensions.get(0)); 141 | } 142 | } 143 | 144 | } 145 | -------------------------------------------------------------------------------- /launchers/junit/src/test/java/org/eclipse/dataspaceconnector/test/QueryRunner.java: -------------------------------------------------------------------------------- 1 | package org.eclipse.dataspaceconnector.test; 2 | 3 | import com.fasterxml.jackson.databind.ObjectMapper; 4 | import com.nimbusds.jose.JOSEException; 5 | import com.nimbusds.jose.jwk.ECKey; 6 | import org.eclipse.dataspaceconnector.iam.did.crypto.key.EcPrivateKeyWrapper; 7 | import org.eclipse.dataspaceconnector.iam.did.spi.hub.ClientResponse; 8 | import org.eclipse.dataspaceconnector.iam.did.spi.hub.IdentityHubClient; 9 | import org.eclipse.dataspaceconnector.iam.did.spi.hub.message.ObjectQueryRequest; 10 | import org.eclipse.dataspaceconnector.iam.did.spi.key.PrivateKeyWrapper; 11 | import org.eclipse.dataspaceconnector.iam.did.spi.key.PublicKeyWrapper; 12 | import org.eclipse.dataspaceconnector.ion.DefaultIonClient; 13 | import org.eclipse.dataspaceconnector.ion.spi.IonClient; 14 | import org.eclipse.dataspaceconnector.spi.message.RemoteMessageDispatcherRegistry; 15 | import org.eclipse.dataspaceconnector.spi.security.PrivateKeyResolver; 16 | import org.eclipse.dataspaceconnector.spi.system.ServiceExtension; 17 | import org.eclipse.dataspaceconnector.spi.types.domain.metadata.QueryRequest; 18 | import org.junit.jupiter.api.BeforeAll; 19 | import org.junit.jupiter.api.BeforeEach; 20 | import org.junit.jupiter.api.Disabled; 21 | 
import org.junit.jupiter.api.Test; 22 | import org.junit.jupiter.api.extension.ExtendWith; 23 | 24 | import java.io.IOException; 25 | import java.nio.charset.Charset; 26 | import java.util.List; 27 | import java.util.Map; 28 | import java.util.Objects; 29 | import java.util.concurrent.CompletableFuture; 30 | 31 | import static org.assertj.core.api.Assertions.assertThat; 32 | import static org.eclipse.dataspaceconnector.common.types.Cast.cast; 33 | 34 | @Disabled 35 | @ExtendWith(EdcExtension.class) 36 | public class QueryRunner { 37 | 38 | private static final String PROVIDER_CONNECTOR = "http://localhost:9191/"; 39 | 40 | @BeforeAll 41 | static void setProperties() { 42 | // the url is the example one from identity.foundation/ion/explorer 43 | // this is the connector's own DID URL 44 | System.setProperty("edc.identity.did.url", "did:ion:EiAnKD8-jfdd0MDcZUjAbRgaThBrMxPTFOxcnfJhI7Ukaw"); 45 | System.setProperty("dataspaceconnector.connector.name", "test-query-connector"); 46 | System.setProperty("web.http.port", "7171"); 47 | } 48 | 49 | @Test 50 | void queryWithVerifiableCredentials(RemoteMessageDispatcherRegistry dispatcherRegistry) throws Exception { 51 | 52 | var query = QueryRequest.Builder.newInstance() 53 | .connectorAddress(PROVIDER_CONNECTOR) 54 | .connectorId(System.getProperty("dataspaceconnector.connector.name")) 55 | .queryLanguage("dataspaceconnector") 56 | .query("select *") 57 | .protocol("ids-rest").build(); 58 | 59 | CompletableFuture> future = cast(dispatcherRegistry.send(List.class, query, () -> null)); 60 | 61 | var artifacts = future.get(); 62 | assertThat(artifacts).isNotNull().isNotEmpty(); 63 | 64 | //TODO: run data request 65 | } 66 | 67 | @BeforeEach 68 | void before(EdcExtension extension) throws IOException, JOSEException { 69 | 70 | String privateKeyString = new String(Objects.requireNonNull(Thread.currentThread().getContextClassLoader().getResourceAsStream("private.pem")).readAllBytes(), Charset.defaultCharset()); 71 | 72 | var ecKey 
= ECKey.parseFromPEMEncodedObjects(privateKeyString); 73 | 74 | IonClient ionClient = new DefaultIonClient(new ObjectMapper()); 75 | var idHubclient = new IdentityHubClient() { 76 | @Override 77 | public ClientResponse> queryCredentials(ObjectQueryRequest objectQueryRequest, String s, PublicKeyWrapper publicKeyWrapper) { 78 | return null; 79 | } 80 | }; 81 | 82 | 83 | extension.registerSystemExtension(ServiceExtension.class, TestExtensions.identityServiceExtension()); 84 | extension.registerSystemExtension(ServiceExtension.class, TestExtensions.identityHubClientExtension(idHubclient)); 85 | extension.registerSystemExtension(ServiceExtension.class, TestExtensions.ionClientMockExtension(ionClient)); 86 | extension.registerSystemExtension(ServiceExtension.class, TestExtensions.keyResolvers(new PrivateKeyResolver() { 87 | @Override 88 | public T resolvePrivateKey(String s, Class aClass) { 89 | if (aClass == ECKey.class) { 90 | return aClass.cast(ecKey); 91 | } else if (aClass == PrivateKeyWrapper.class) { 92 | return aClass.cast(new EcPrivateKeyWrapper((ECKey) ecKey)); 93 | } else { 94 | throw new RuntimeException("Cannot utilize key type " + aClass); 95 | } 96 | } 97 | })); 98 | extension.registerSystemExtension(ServiceExtension.class, TestExtensions.identityHubExtension()); 99 | } 100 | 101 | } 102 | -------------------------------------------------------------------------------- /launchers/junit/src/test/java/org/eclipse/dataspaceconnector/test/TestExtensions.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020, 2021 Microsoft Corporation 3 | * 4 | * This program and the accompanying materials are made available under the 5 | * terms of the Apache License, Version 2.0 which is available at 6 | * https://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * SPDX-License-Identifier: Apache-2.0 9 | * 10 | * Contributors: 11 | * Microsoft Corporation - initial API and implementation 12 | * 13 | */ 14 | 15 | 
package org.eclipse.dataspaceconnector.test;/* 16 | * Copyright (c) Microsoft Corporation. 17 | * All rights reserved. 18 | */ 19 | 20 | import org.eclipse.dataspaceconnector.iam.did.credentials.IdentityHubCredentialsVerifier; 21 | import org.eclipse.dataspaceconnector.iam.did.resolution.DefaultDidPublicKeyResolver; 22 | import org.eclipse.dataspaceconnector.iam.did.resolution.DidResolverRegistryImpl; 23 | import org.eclipse.dataspaceconnector.iam.did.spi.credentials.CredentialsVerifier; 24 | import org.eclipse.dataspaceconnector.iam.did.spi.hub.IdentityHubClient; 25 | import org.eclipse.dataspaceconnector.iam.did.spi.resolution.DidPublicKeyResolver; 26 | import org.eclipse.dataspaceconnector.iam.did.spi.resolution.DidResolver; 27 | import org.eclipse.dataspaceconnector.identity.DistributedIdentityService; 28 | import org.eclipse.dataspaceconnector.ion.spi.IonClient; 29 | import org.eclipse.dataspaceconnector.spi.iam.IdentityService; 30 | import org.eclipse.dataspaceconnector.spi.monitor.Monitor; 31 | import org.eclipse.dataspaceconnector.spi.security.PrivateKeyResolver; 32 | import org.eclipse.dataspaceconnector.spi.system.ServiceExtension; 33 | import org.eclipse.dataspaceconnector.spi.system.ServiceExtensionContext; 34 | import org.eclipse.dataspaceconnector.verifiablecredential.spi.VerifiableCredentialProvider; 35 | 36 | import java.util.Set; 37 | 38 | 39 | public class TestExtensions { 40 | 41 | private static final Monitor MONITOR = new Monitor() { 42 | }; 43 | 44 | public static ServiceExtension identityServiceExtension() { 45 | 46 | 47 | return new ServiceExtension() { 48 | @Override 49 | public Set provides() { 50 | return Set.of(IdentityService.FEATURE); 51 | } 52 | 53 | @Override 54 | public Set requires() { 55 | return Set.of(VerifiableCredentialProvider.FEATURE, IonClient.FEATURE, IdentityHubClient.FEATURE, DidPublicKeyResolver.FEATURE); 56 | } 57 | 58 | @Override 59 | public void initialize(ServiceExtensionContext context) { 60 | var 
verifiableCredentialProvider = context.getService(VerifiableCredentialProvider.class); 61 | var ionClient = context.getService(IonClient.class); 62 | var idHubclient = context.getService(IdentityHubClient.class); 63 | DidPublicKeyResolver publicKeyResolver = context.getService(DidPublicKeyResolver.class); 64 | var resolverRegistry = new DidResolverRegistryImpl(); 65 | var identityService = new DistributedIdentityService(verifiableCredentialProvider, ionClient, new IdentityHubCredentialsVerifier(idHubclient, MONITOR, "did:ion:test"), MONITOR); 66 | context.registerService(IdentityService.class, identityService); 67 | } 68 | }; 69 | } 70 | 71 | public static ServiceExtension identityHubClientExtension(IdentityHubClient hubclient) { 72 | return new ServiceExtension() { 73 | @Override 74 | public Set provides() { 75 | return Set.of(CredentialsVerifier.FEATURE, IdentityHubClient.FEATURE); 76 | } 77 | 78 | @Override 79 | public void initialize(ServiceExtensionContext context) { 80 | context.registerService(CredentialsVerifier.class, new IdentityHubCredentialsVerifier(hubclient, MONITOR, "did:ion:test")); 81 | context.registerService(IdentityHubClient.class, hubclient); 82 | } 83 | }; 84 | } 85 | 86 | public static ServiceExtension ionClientMockExtension(IonClient ionClient) { 87 | return new ServiceExtension() { 88 | @Override 89 | public Set provides() { 90 | return Set.of(IonClient.FEATURE, DidResolver.FEATURE); 91 | } 92 | 93 | 94 | @Override 95 | public void initialize(ServiceExtensionContext context) { 96 | context.registerService(IonClient.class, ionClient); 97 | context.registerService(DidResolver.class, ionClient); 98 | } 99 | }; 100 | } 101 | 102 | public static ServiceExtension keyResolvers(PrivateKeyResolver privateKeyResolver) { 103 | return new ServiceExtension() { 104 | @Override 105 | public Set provides() { 106 | return Set.of(DidPublicKeyResolver.FEATURE); 107 | } 108 | 109 | @Override 110 | public Set requires() { 111 | return Set.of(IonClient.FEATURE); 
112 | } 113 | 114 | @Override 115 | public void initialize(ServiceExtensionContext context) { 116 | var ionClient = context.getService(IonClient.class); 117 | context.registerService(PrivateKeyResolver.class, privateKeyResolver); 118 | context.registerService(DidPublicKeyResolver.class, new DefaultDidPublicKeyResolver(ionClient)); 119 | } 120 | }; 121 | } 122 | 123 | public static ServiceExtension identityHubExtension() { 124 | return new IdentityDidCoreHubExtension(); 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /launchers/junit/src/test/java/org/eclipse/dataspaceconnector/test/TestServiceExtensionContext.java: -------------------------------------------------------------------------------- 1 | package org.eclipse.dataspaceconnector.test; 2 | 3 | import org.eclipse.dataspaceconnector.spi.monitor.Monitor; 4 | import org.eclipse.dataspaceconnector.spi.types.TypeManager; 5 | import org.eclipse.dataspaceconnector.system.DefaultServiceExtensionContext; 6 | import org.eclipse.dataspaceconnector.system.ServiceLocator; 7 | 8 | import java.util.HashMap; 9 | import java.util.Map; 10 | 11 | public class TestServiceExtensionContext extends DefaultServiceExtensionContext { 12 | private final Map overriddenSettings; 13 | 14 | public TestServiceExtensionContext(TypeManager typeManager, Monitor monitor, ServiceLocator serviceLocator) { 15 | super(typeManager, monitor, serviceLocator); 16 | this.overriddenSettings = new HashMap<>(); 17 | } 18 | 19 | @Override 20 | public String getSetting(String key, String defaultValue) { 21 | if (overriddenSettings.containsKey(key)) { 22 | return overriddenSettings.get(key); 23 | } 24 | 25 | return super.getSetting(key, defaultValue); 26 | } 27 | 28 | void overrideSetting(String key, String value) { 29 | overriddenSettings.put(key, value); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /launchers/junit/src/test/resources/private.pem: 
-------------------------------------------------------------------------------- 1 | -----BEGIN EC PRIVATE KEY----- 2 | MHcCAQEEIJXQEY9Rqm5fIdz3NHghzV+tNb2n5AITs0yg/MN3lRuUoAoGCCqGSM49 3 | AwEHoUQDQgAE4mi45pgE5iPdhluNpmtnAFztWi8vxMrDSoXqD5ah2RkV3FO+SuRi 4 | 2bE+B2YWnFHNlJW9xVQSSxLOvVwf+RPnzA== 5 | -----END EC PRIVATE KEY----- 6 | -------------------------------------------------------------------------------- /launchers/registration-service/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:11-jre-slim 2 | 3 | WORKDIR /app 4 | COPY ./build/libs/regsvc.jar /app 5 | 6 | EXPOSE 8181 7 | 8 | ENTRYPOINT java \ 9 | -Dedc.vault.clientid=${CLIENTID} \ 10 | -Dedc.vault.tenantid=${TENANTID} \ 11 | -Dedc.vault.certificate=/cert/cert.pfx \ 12 | -Dedc.vault.name=${VAULTNAME} \ 13 | -Dedc.events.topic.name=${TOPIC_NAME} \ 14 | -Ddataspaceconnector.connector.name=${CONNECTOR_NAME} \ 15 | -Dedc.events.topic.endpoint=${TOPIC_ENDPOINT} \ 16 | -Dedc.ion.crawler.ion.url=${ION_URL} \ 17 | -Dedc.ion.crawler.randomize=false \ 18 | -Djava.security.edg=file:/dev/.urandom -jar regsvc.jar 19 | -------------------------------------------------------------------------------- /launchers/registration-service/build.gradle.kts: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) Microsoft Corporation. 3 | * All rights reserved. 
4 | */ 5 | 6 | plugins { 7 | `java-library` 8 | application 9 | id("com.github.johnrengelman.shadow") version "7.0.0" 10 | } 11 | 12 | val edcversion: String by project 13 | val group = "org.eclipse.dataspaceconnector" 14 | 15 | dependencies { 16 | // EDC core dependencies 17 | implementation("${group}:core:${edcversion}") 18 | 19 | implementation("${group}:common-util:${edcversion}") 20 | implementation("${group}:azure-eventgrid-config:${edcversion}") 21 | 22 | implementation("${group}:registration-service:${edcversion}") 23 | implementation("${group}:registration-service-api:${edcversion}") 24 | } 25 | 26 | application { 27 | @Suppress("DEPRECATION") 28 | mainClassName = "com.microsoft.ion.demo.RegistrationServiceRuntime" 29 | } 30 | 31 | tasks.withType { 32 | exclude("**/pom.properties", "**/pom.xm") 33 | mergeServiceFiles() 34 | archiveFileName.set("regsvc.jar") 35 | } 36 | 37 | -------------------------------------------------------------------------------- /launchers/registration-service/src/main/java/com/microsoft/edc/showcase/demo/RegistrationServiceRuntime.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2020, 2021 Microsoft Corporation 3 | * 4 | * This program and the accompanying materials are made available under the 5 | * terms of the Apache License, Version 2.0 which is available at 6 | * https://www.apache.org/licenses/LICENSE-2.0 7 | * 8 | * SPDX-License-Identifier: Apache-2.0 9 | * 10 | * Contributors: 11 | * Microsoft Corporation - initial API and implementation 12 | * 13 | */ 14 | 15 | package com.microsoft.edc.showcase.demo; 16 | 17 | 18 | import org.eclipse.dataspaceconnector.boot.system.runtime.BaseRuntime; 19 | 20 | public class RegistrationServiceRuntime extends BaseRuntime { 21 | 22 | public static void main(String[] args) { 23 | new RegistrationServiceRuntime().boot(); 24 | } 25 | 26 | } 27 | -------------------------------------------------------------------------------- 
/scripts/did/generate.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Generates an EC private key and the matching did:web DID document for one
# entity (connector, registration service, ...).
# Fixed in usage text: the defaults for -r and -t were swapped.
usage="$(basename "$0") [-h] [-e ENVIRONMENT] [-n NAME] [-r REGION] [-t TEMPLATE]
Generate private key and associated DID document for an entity (connector, registration service...). Options are:
    -h  show this help text
    -e  environment name (this is the one used to prefix all resources). This parameter is MANDATORY.
    -n  name of the entity. This parameter is MANDATORY.
    -r  region where resources are hosted. Default is: westeurope
    -t  template file used to generate the DID document. Default is: template/template.json"

# COLLECT INPUT PARAMETERS
options=':he:n:r:t:'
while getopts $options option; do
  case "$option" in
    h) echo "$usage"; exit;;
    e) ENVIRONMENT=$OPTARG;;
    n) NAME=$OPTARG;;
    r) REGION=$OPTARG;;
    t) TEMPLATE=$OPTARG;;
    :) printf "missing argument for -%s\n" "$OPTARG" >&2; echo "$usage" >&2; exit 1;;
    \?) printf "illegal option: -%s\n" "$OPTARG" >&2; echo "$usage" >&2; exit 1;;
  esac
done

# CHECK MANDATORY PARAMETERS
if [ ! "$ENVIRONMENT" ] || [ ! "$NAME" ]; then
  # Fixed: the mandatory flags are -e and -n (the old message said -i and -v).
  echo "arguments -e and -n must be provided"
  echo "$usage" >&2; exit 1
fi

# SET DEFAULT PARAMETERS IF NOT SPECIFIED
if [ ! "$TEMPLATE" ]; then
  TEMPLATE="template/template.json"
fi
if [ ! "$REGION" ]; then
  REGION="westeurope"
fi

# FUNCTIONS
# Extracts the value for KEY from a flat JSON object using sed/grep (crude
# parsing; assumes the value itself contains no commas or escaped quotes).
getValueByKey() {
  local JSON=$1
  local KEY=$2
  echo $(echo $JSON | sed 's|,|\n|g' \
    | grep "\"$KEY\"" \
    | cut -d ":" -f2- \
    | sed s/\"//g \
    | sed s/\}//g)
}

echo "Generate private key in file $NAME.pem"
openssl ecparam -name prime256v1 -genkey -noout -out "$NAME.pem"

echo "Generate the public key in JWK format"
readonly JWK=$(openssl ec -in "$NAME.pem" -pubout | docker run -i danedmunds/pem-to-jwk:latest --public)

# Strip dashes: the storage-account portion of the did:web id must be alphanumeric.
readonly SANITIZED_ENVIRONMENT="${ENVIRONMENT//-}"

echo "Extract components from public key"
readonly X=$(getValueByKey $JWK "x")
readonly Y=$(getValueByKey $JWK "y")

#echo "JWK: $JWK"
#echo "X: $X"
#echo "Y: $Y"

echo "Generate DID document"
# Substitute every placeholder in the template (the duplicate ENVIRONMENT pass
# was removed -- one global substitution already replaces all occurrences),
# then drop //-comment lines so the output is valid JSON.
sed "s/{{ENVIRONMENT}}/$ENVIRONMENT/g" "$TEMPLATE" \
  | sed "s/{{NAME}}/$NAME/g" \
  | sed "s/{{REGION}}/$REGION/g" \
  | sed "s/{{X}}/$X/g" \
  | sed "s/{{Y}}/$Y/g" \
  | sed "s/{{SANITIZED_ENVIRONMENT}}/$SANITIZED_ENVIRONMENT/g" \
  | sed '/^\/\//d' > "$NAME.json"

echo "SUCCESS!"
-------------------------------------------------------------------------------- /scripts/did/template/template.json: -------------------------------------------------------------------------------- 1 | //***************************** 2 | //** DO NOT MODIFY THIS FILE ** 3 | //***************************** 4 | { 5 | "id": "did:web:{{SANITIZED_ENVIRONMENT}}gpstorage.z6.web.core.windows.net:{{NAME}}", 6 | "@context": [ 7 | "https://www.w3.org/ns/did/v1", 8 | { 9 | "@base": "did:web:{{SANITIZED_ENVIRONMENT}}gpstorage.z6.web.core.windows.net:{{NAME}}" 10 | } 11 | ], 12 | "service": [ 13 | { 14 | "id": "#{{NAME}}-identity-hub-url", 15 | "type": "IdentityHub", 16 | "serviceEndpoint": "http://{{ENVIRONMENT}}-{{NAME}}.{{REGION}}.azurecontainer.io:8181/api/identity-hub/" 17 | } 18 | ], 19 | "verificationMethod": [ 20 | { 21 | "id": "#{{NAME}}-key-1", 22 | "controller": "", 23 | "type": "JsonWebKey2020", 24 | "publicKeyJwk": { 25 | "kty": "EC", 26 | "crv": "P-256", 27 | "x": "{{X}}", 28 | "y": "{{Y}}" 29 | } 30 | } 31 | ], 32 | "authentication": [ 33 | "#{{NAME}}-key-1" 34 | ] 35 | } -------------------------------------------------------------------------------- /settings.gradle.kts: -------------------------------------------------------------------------------- 1 | rootProject.name = "edc-showcase" 2 | 3 | include(":launchers:connector") 4 | //disabled temporarily due to compile errors 5 | //include(":launchers:junit") 6 | include(":launchers:registration-service") 7 | 8 | include(":extensions:dataseeding:hub") 9 | include(":extensions:dataseeding:catalog") 10 | include(":extensions:public-rest-api") 11 | include(":extensions:federated-catalog-api") 12 | include(":extensions:transfer-azure-s3") 13 | include(":extensions:identity-hub-verifier") 14 | --------------------------------------------------------------------------------