├── .blazar-enabled
├── .build-jdk11
├── LICENSE.txt
├── README.md
├── accelerator-api
│   ├── .build-executable
│   ├── pom.xml
│   └── src
│       ├── main
│       │   ├── java
│       │   │   └── com
│       │   │       └── hubspot
│       │   │           └── snapshots
│       │   │               └── api
│       │   │                   ├── AcceleratorConfiguration.java
│       │   │                   ├── AcceleratorDataSourceFactory.java
│       │   │                   ├── AcceleratorService.java
│       │   │                   ├── SnapshotDao.java
│       │   │                   └── SnapshotResource.java
│       │   └── resources
│       │       └── schema.sql
│       └── test
│           ├── java
│           │   └── com
│           │       └── hubspot
│           │           └── snapshots
│           │               └── api
│           │                   └── AcceleratorAcceptanceTest.java
│           └── resources
│               └── test.yaml
├── accelerator-client
│   ├── pom.xml
│   └── src
│       └── main
│           └── java
│               └── com
│                   └── hubspot
│                       └── snapshots
│                           └── client
│                               └── AcceleratorClient.java
├── accelerator-core
│   ├── pom.xml
│   └── src
│       └── main
│           └── java
│               └── com
│                   └── hubspot
│                       └── snapshots
│                           └── core
│                               ├── SnapshotVersion.java
│                               ├── SnapshotVersionCore.java
│                               ├── SnapshotVersionEgg.java
│                               └── Snapshots.java
├── accelerator-maven-extension
│   ├── .build-executable
│   ├── pom.xml
│   └── src
│       └── main
│           ├── java
│           │   ├── com
│           │   │   └── hubspot
│           │   │       └── snapshots
│           │   │           ├── AcceleratorHelper.java
│           │   │           ├── AcceleratorUpdater.java
│           │   │           └── AcceleratorUtils.java
│           │   └── org
│           │       └── eclipse
│           │           └── aether
│           │               ├── RepositorySystemSessionHelper.java
│           │               └── internal
│           │                   └── impl
│           │                       └── DefaultUpdateCheckManager.java
│           └── resources
│               └── META-INF
│                   └── sisu
│                       └── javax.inject.Named
├── accelerator-maven-plugin
│   ├── pom.xml
│   └── src
│       └── main
│           └── java
│               └── com
│                   └── hubspot
│                       └── snapshots
│                           └── plugin
│                               └── ReportMojo.java
└── pom.xml
/.blazar-enabled:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/.build-jdk11:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HubSpot/maven-snapshot-accelerator/5900307c4221d914ee70902a3684b310ff171022/.build-jdk11
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2018 HubSpot, Inc.
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # maven-snapshot-accelerator
2 |
3 | ## Background
4 |
5 | Normally, Maven makes at least two round-trips to the remote repository to resolve each snapshot dependency: one to fetch the `maven-metadata.xml` and one to fetch the `maven-metadata.xml.sha1` (assuming an update policy of `always`). For apps with hundreds of snapshot dependencies, this latency starts to become noticeable. And if there is high latency to the remote repository (if these round-trips are transatlantic, for example), the build becomes downright unusable.
6 |
7 | In the CI use case, the latency between our repository manager and CI server has been low enough that we've just ignored this problem. And for local development, we would either build in offline mode or set a really long interval in our update policy. But eventually you'll need to pick up a new build of a dependency, or your update policy interval will lapse, and then you'll have no choice but to go watch an episode of Curb Your Enthusiasm while your build chugs along. So the goal of this project is to speed up this dependency resolution process when using Maven snapshots.
8 |
9 | ## Design
10 |
11 | Ultimately, the idea is to have a service that can tell you in a single round-trip all of the snapshot dependencies that have changed since your previous build, and then use this information to short-circuit requests to the remote repository (if a dependency hasn't published a new snapshot, there's no need to fetch the `maven-metadata.xml` or `maven-metadata.xml.sha1` for it). If a snapshot has changed, Maven still needs to make multiple round-trips to the remote repository to fetch the new version, so the assumption underlying this design is that only a small percentage of dependencies change between builds. The system consists of three parts:
12 |
13 | - The API, which supports two basic operations: report a new snapshot, and return all snapshots that have changed after a given offset (a minimal client sketch for both operations follows this list). Right now it expects a SQL database for persistence, but it could be made more pluggable.
14 | - The Maven plugin which notifies the API after a snapshot has been published to the remote repository. You can add this to your CI script to make sure it happens for all builds, something like:
15 |
16 | `mvn -B deploy com.hubspot.snapshots:accelerator-maven-plugin:0.3:report`
17 | - The Maven extension which hits the API at the start of a build to find all new snapshots and then short-circuits metadata requests for dependencies that haven't changed.
18 |
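To make those two API operations concrete, here is a rough sketch that drives them with the `AcceleratorClient` from the `accelerator-client` module. The base URL and artifact coordinates are placeholders, and in practice the Maven extension persists the offset between builds rather than always starting from 0:

```java
import java.io.IOException;
import java.util.Iterator;

import com.hubspot.snapshots.client.AcceleratorClient;
import com.hubspot.snapshots.core.SnapshotVersion;
import com.hubspot.snapshots.core.SnapshotVersionEgg;

public class AcceleratorExample {

  public static void main(String[] args) throws IOException {
    // Placeholder URL; point this at a running accelerator API
    AcceleratorClient client = AcceleratorClient.withBaseUrl("http://localhost:8080/accelerator");

    // Operation 1: report a newly published snapshot version
    SnapshotVersionEgg egg = new SnapshotVersionEgg(
        "com.test",             // groupId (placeholder)
        "test",                 // artifactId (placeholder)
        "0.1-SNAPSHOT",         // base version
        "0.1-20171129.222952-1" // resolved (timestamped) version
    );
    SnapshotVersion reported = client.report(egg);
    System.out.println("Reported snapshot was assigned id " + reported.getId());

    // Operation 2: fetch every snapshot that changed since a given offset,
    // remembering the highest id seen so it can be used as the next offset
    int maxId = 0;
    Iterator<SnapshotVersion> delta = client.getDelta(0);
    while (delta.hasNext()) {
      SnapshotVersion snapshot = delta.next();
      maxId = Math.max(maxId, snapshot.getId());
      System.out.println(snapshot.getGroupId() + ":" + snapshot.getArtifactId()
          + " -> " + snapshot.getResolvedVersion());
    }
    System.out.println("Next delta request should use offset " + maxId);
  }
}
```
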
19 | ## Getting Started
20 |
21 | ### Running the API
22 |
23 | The first step is to get the API up and running. We publish a JAR to Maven Central with all of the dependencies bundled. The parts you need to provide are your JDBC driver and a Dropwizard configuration pointing at the database. For testing, you can use an in-memory database like H2 along with [this](https://github.com/HubSpot/maven-snapshot-accelerator/blob/master/accelerator-api/src/test/resources/test.yaml) Dropwizard configuration (which we use for acceptance testing at build time). To get the API up and running in this configuration, you need to download the API JAR, the H2 JAR, and the Dropwizard configuration:
24 | ```bash
25 | curl -L -O https://repo1.maven.org/maven2/com/hubspot/snapshots/accelerator-api/0.3/accelerator-api-0.3-shaded.jar
26 | curl -L -O https://repo1.maven.org/maven2/com/h2database/h2/1.4.196/h2-1.4.196.jar
27 | curl -L -O https://raw.githubusercontent.com/HubSpot/maven-snapshot-accelerator/master/accelerator-api/src/test/resources/test.yaml
28 | ```
29 |
30 | Then you can run the API (Java 7+ required):
31 | ```bash
32 | java -cp accelerator-api-0.3-shaded.jar:h2-1.4.196.jar com.hubspot.snapshots.api.AcceleratorService server test.yaml
33 | ```
34 |
35 | Once the server has started up you can access the Dropwizard admin page at `http://localhost:8080/admin/`. From there you can click on the Healthcheck link to make sure that all healthchecks are passing, or click on the Metrics link to get a JSON dump of all metrics. You can also test the snapshot endpoints (examples are written with [HTTPie](https://github.com/jakubroztocil/httpie)):
36 |
37 | ```bash
38 | # delta should not return any snapshots
39 | ➜ ~ http localhost:8080/accelerator/snapshots/delta offset==0
40 | HTTP/1.1 200 OK
41 | Content-Length: 46
42 | Content-Type: application/json
43 | Date: Thu, 30 Nov 2017 20:50:17 GMT
44 | Vary: Accept-Encoding
45 |
46 | {
47 | "hasMore": false,
48 | "nextOffset": 0,
49 | "versions": []
50 | }
51 |
52 | # report a new snapshot version to the API
53 | ➜ ~ http post localhost:8080/accelerator/snapshots groupId=com.test artifactId=test baseVersion=0.1-SNAPSHOT resolvedVersion=0.1-20171129.222952-1
54 | HTTP/1.1 200 OK
55 | Content-Length: 120
56 | Content-Type: application/json
57 | Date: Thu, 30 Nov 2017 20:50:42 GMT
58 |
59 | {
60 | "artifactId": "test",
61 | "baseVersion": "0.1-SNAPSHOT",
62 | "groupId": "com.test",
63 | "id": 1,
64 | "resolvedVersion": "0.1-20171129.222952-1"
65 | }
66 |
67 | # delta should return the snapshot we reported
68 | ➜ ~ http localhost:8080/accelerator/snapshots/delta offset==0
69 | HTTP/1.1 200 OK
70 | Content-Length: 166
71 | Content-Type: application/json
72 | Date: Thu, 30 Nov 2017 20:50:56 GMT
73 | Vary: Accept-Encoding
74 |
75 | {
76 | "hasMore": false,
77 | "nextOffset": 1,
78 | "versions": [
79 | {
80 | "artifactId": "test",
81 | "baseVersion": "0.1-SNAPSHOT",
82 | "groupId": "com.test",
83 | "id": 1,
84 | "resolvedVersion": "0.1-20171129.222952-1"
85 | }
86 | ]
87 | }
88 |
89 | # delta with an offset of 1 should not return any snapshots
90 | ➜ ~ http localhost:8080/accelerator/snapshots/delta offset==1
91 | HTTP/1.1 200 OK
92 | Content-Length: 46
93 | Content-Type: application/json
94 | Date: Thu, 30 Nov 2017 20:51:09 GMT
95 | Vary: Accept-Encoding
96 |
97 | {
98 | "hasMore": false,
99 | "nextOffset": 1,
100 | "versions": []
101 | }
102 | ```
103 |
104 | #### Setting up the schema
105 |
106 | For convenience, the Dropwizard testing configuration tells the app to initialize the schema itself ([here](https://github.com/HubSpot/maven-snapshot-accelerator/blob/fa6decbf7dcca3dfeef00727580a7e9b51bfb790/accelerator-api/src/test/resources/test.yaml#L12)). You can use this same flag for a real deployment, but to do so the API would need to connect to the database as a user with DDL permissions. Instead, it may be preferable to set up the database schema before running the API. The expected schema (found [here](https://github.com/HubSpot/maven-snapshot-accelerator/blob/master/accelerator-api/src/main/resources/schema.sql)) is pretty simple: just a single table with five columns. You can initialize it with Liquibase (one way to script that is sketched below) or just create the table manually.
107 |
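If you go the Liquibase route, the snippet below is one possible way to script it: a throwaway runner modeled on the schema initialization the API itself performs (see `AcceleratorService`). It assumes a Liquibase 3.x jar and your JDBC driver on the classpath, plus a local copy of `schema.sql` in the working directory; the JDBC URL and credentials are placeholders for an account with DDL permissions:

```java
import java.sql.Connection;
import java.sql.DriverManager;

import liquibase.Contexts;
import liquibase.Liquibase;
import liquibase.database.jvm.JdbcConnection;
import liquibase.resource.FileSystemResourceAccessor;

public class SchemaSetup {

  public static void main(String[] args) throws Exception {
    // Placeholder connection details; use a user that is allowed to run DDL
    try (Connection connection = DriverManager.getConnection(
        "jdbc:mysql://localhost:3306/accelerator", "admin", "password")) {
      // Apply the Liquibase-formatted schema.sql from accelerator-api
      Liquibase liquibase = new Liquibase(
          "schema.sql",
          new FileSystemResourceAccessor("."),
          new JdbcConnection(connection)
      );
      liquibase.update(new Contexts());
    }
  }
}
```
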
108 | ## Running the Maven plugin
109 |
110 | Now that the API is running, you'll want to add the accelerator Maven plugin to your CI builds so that the API gets notified of new snapshot versions. You can add a call to the plugin after the deploy step, to make sure that the publish has succeeded and that the resolved snapshot version is available. You'll also need to pass the base URL of the API as an environment variable or system property so that the plugin knows where to report to. You can do this by creating a `~/.mavenrc` file (which Maven sources before running the build) with the following contents:
111 | ```bash
112 | #!/bin/bash
113 |
114 | export ACCELERATOR_URL='https://myapidomain.com/accelerator'
115 | ```
116 |
117 | And then you can add the plugin to your CI script, for example:
118 | ```bash
119 | mvn -B deploy com.hubspot.snapshots:accelerator-maven-plugin:0.3:report
120 | ```
121 |
122 | By default, failure to notify the API will not fail the build. If you want to change this behavior, you can add `-Daccelerator.failOnError=true` to the Maven arguments.
123 |
124 | ## Using the Maven extension
125 |
126 | Now that the API is running and getting notified of new snapshot versions, the last step is to use the accelerator Maven extension. The extension will hit the API at the start of a Maven build to find out about any new snapshot versions. It keeps track of the API offset (so it only needs to fetch a delta) and the latest version of each snapshot via metadata files stored in your local Maven repository. To install the extension, you just need to download it and copy it to your Maven extensions folder:
127 |
128 | ```bash
129 | curl -L -O https://repo1.maven.org/maven2/com/hubspot/snapshots/accelerator-maven-extension/0.3/accelerator-maven-extension-0.3-shaded.jar
130 | mv accelerator-maven-extension-0.3-shaded.jar $M2_HOME/lib/ext
131 | ```
132 |
133 | Similar to the plugin install, you'll need to set an environment variable or system property that points at the API. You can achieve this using the same `~/.mavenrc` approach. If everything is set up properly, you should see a message like this printed at the start of your next Maven build:
134 | ```
135 | [INFO] Accelerator is healthy, will skip snapshot checks based on accelerator metadata
136 | ```
137 |
138 | ### IDE Compatibility
139 |
140 | For this to work in your IDE, make sure the IDE is set to use the same Maven install where you copied the extension JAR. Unfortunately, if you're using IntelliJ there's a bit more work to do, because it doesn't load extension JARs and we haven't found a way to make it do so (feel free to leave feedback on [this](https://youtrack.jetbrains.com/issue/IDEA-135229#comment=27-2481665) issue if you want to see this changed). To get around this, you need to copy the extension JAR to the Maven install's `lib` folder instead. But then there's no way to make our JAR come first on the classpath, and if it doesn't, the extension won't work. To get around that, we also need to replace the maven-resolver-impl JAR with a modified one that doesn't contain `org.eclipse.aether.internal.impl.DefaultUpdateCheckManager` (the class we override in the extension). We wrote a hacky script (available [here](https://gist.github.com/jhaber/55c0dbcb5d9aa59d53debc70123a2a1e)) to take care of this (quit IntelliJ before running the script, and re-open it after it's done).
141 |
--------------------------------------------------------------------------------
/accelerator-api/.build-executable:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HubSpot/maven-snapshot-accelerator/5900307c4221d914ee70902a3684b310ff171022/accelerator-api/.build-executable
--------------------------------------------------------------------------------
/accelerator-api/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | 4.0.0
4 |
5 |
6 | com.hubspot.snapshots
7 | accelerator-parent
8 | 0.4-SNAPSHOT
9 |
10 |
11 | accelerator-api
12 |
13 |
14 | com.hubspot.snapshots.api.AcceleratorService
15 |
16 |
17 |
18 |
19 | com.hubspot.snapshots
20 | accelerator-core
21 |
22 |
23 | io.dropwizard
24 | dropwizard-core
25 |
26 |
27 | io.dropwizard
28 | dropwizard-jersey
29 |
30 |
31 | io.dropwizard
32 | dropwizard-db
33 |
34 |
35 | io.dropwizard
36 | dropwizard-jdbi
37 |
38 |
39 | org.jdbi
40 | jdbi
41 |
42 |
43 | org.liquibase
44 | liquibase-core
45 |
46 |
47 | com.hubspot.rosetta
48 | RosettaJdbi
49 |
50 |
51 | javax.inject
52 | javax.inject
53 |
54 |
55 | javax.ws.rs
56 | javax.ws.rs-api
57 |
58 |
59 | javax.validation
60 | validation-api
61 |
62 |
63 | com.fasterxml.jackson.core
64 | jackson-annotations
65 |
66 |
67 |
68 | junit
69 | junit
70 | test
71 |
72 |
73 | org.assertj
74 | assertj-core
75 | test
76 |
77 |
78 | io.dropwizard
79 | dropwizard-testing
80 | test
81 |
82 |
83 | com.hubspot.snapshots
84 | accelerator-client
85 | test
86 |
87 |
88 | com.h2database
89 | h2
90 | test
91 |
92 |
93 |
94 |
--------------------------------------------------------------------------------
/accelerator-api/src/main/java/com/hubspot/snapshots/api/AcceleratorConfiguration.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots.api;
2 |
3 | import javax.validation.Valid;
4 | import javax.validation.constraints.NotNull;
5 |
6 | import com.fasterxml.jackson.annotation.JsonProperty;
7 |
8 | import io.dropwizard.Configuration;
9 |
10 | public class AcceleratorConfiguration extends Configuration {
11 |
12 | @Valid
13 | @NotNull
14 | private AcceleratorDataSourceFactory database = new AcceleratorDataSourceFactory();
15 |
16 | @JsonProperty("database")
17 | public AcceleratorDataSourceFactory getDataSourceFactory() {
18 | return database;
19 | }
20 |
21 | @JsonProperty("database")
22 | public void setDataSourceFactory(AcceleratorDataSourceFactory factory) {
23 | this.database = factory;
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/accelerator-api/src/main/java/com/hubspot/snapshots/api/AcceleratorDataSourceFactory.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots.api;
2 |
3 | import com.fasterxml.jackson.annotation.JsonProperty;
4 |
5 | import io.dropwizard.db.DataSourceFactory;
6 |
7 | public class AcceleratorDataSourceFactory extends DataSourceFactory {
8 | private boolean initializeSchema = false;
9 |
10 | @JsonProperty
11 | public boolean getInitializeSchema() {
12 | return initializeSchema;
13 | }
14 |
15 | @JsonProperty
16 | public void setInitializeSchema(boolean initializeSchema) {
17 | this.initializeSchema = initializeSchema;
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/accelerator-api/src/main/java/com/hubspot/snapshots/api/AcceleratorService.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots.api;
2 |
3 | import org.skife.jdbi.v2.DBI;
4 | import org.skife.jdbi.v2.Handle;
5 | import org.skife.jdbi.v2.tweak.HandleCallback;
6 |
7 | import io.dropwizard.Application;
8 | import io.dropwizard.jdbi.DBIFactory;
9 | import io.dropwizard.setup.Environment;
10 | import liquibase.Contexts;
11 | import liquibase.Liquibase;
12 | import liquibase.database.DatabaseConnection;
13 | import liquibase.database.jvm.JdbcConnection;
14 | import liquibase.resource.ClassLoaderResourceAccessor;
15 |
16 | public class AcceleratorService extends Application<AcceleratorConfiguration> {
17 |
18 | public static void main(String... args) throws Exception {
19 | new AcceleratorService().run(args);
20 | }
21 |
22 | @Override
23 | public void run(AcceleratorConfiguration configuration, Environment environment) {
24 | final DBIFactory factory = new DBIFactory();
25 | final DBI jdbi = factory.build(environment, configuration.getDataSourceFactory(), "mysql");
26 |
27 | if (configuration.getDataSourceFactory().getInitializeSchema()) {
28 | jdbi.withHandle(new HandleCallback<Void>() {
29 |
30 | @Override
31 | public Void withHandle(Handle handle) throws Exception {
32 | DatabaseConnection connection = new JdbcConnection(handle.getConnection());
33 | Liquibase liquibase = new Liquibase("schema.sql", new ClassLoaderResourceAccessor(), connection);
34 | liquibase.update(new Contexts());
35 | return null;
36 | }
37 | });
38 | }
39 |
40 | final SnapshotDao dao = jdbi.onDemand(SnapshotDao.class);
41 | environment.jersey().register(new SnapshotResource(dao));
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/accelerator-api/src/main/java/com/hubspot/snapshots/api/SnapshotDao.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots.api;
2 |
3 | import java.util.List;
4 |
5 | import org.skife.jdbi.v2.sqlobject.Bind;
6 | import org.skife.jdbi.v2.sqlobject.GetGeneratedKeys;
7 | import org.skife.jdbi.v2.sqlobject.SqlQuery;
8 | import org.skife.jdbi.v2.sqlobject.SqlUpdate;
9 | import org.skife.jdbi.v2.sqlobject.customizers.RegisterMapperFactory;
10 | import org.skife.jdbi.v2.sqlobject.mixins.Transactional;
11 |
12 | import com.hubspot.snapshots.core.SnapshotVersion;
13 | import com.hubspot.snapshots.core.SnapshotVersionEgg;
14 | import com.hubspot.rosetta.jdbi.BindWithRosetta;
15 | import com.hubspot.rosetta.jdbi.RosettaMapperFactory;
16 |
17 | @RegisterMapperFactory(RosettaMapperFactory.class)
18 | public interface SnapshotDao extends Transactional<SnapshotDao> {
19 | int PAGE_SIZE = 1000;
20 |
21 | @SqlQuery("SELECT * FROM latest_snapshots WHERE id > :offset LIMIT " + PAGE_SIZE)
22 | List<SnapshotVersion> getDelta(@Bind("offset") int offset);
23 |
24 | @SqlUpdate("DELETE FROM latest_snapshots WHERE groupId = :groupId AND artifactId = :artifactId AND baseVersion = :baseVersion")
25 | void delete(@BindWithRosetta SnapshotVersionEgg snapshot);
26 |
27 | @GetGeneratedKeys
28 | @SqlUpdate("INSERT INTO latest_snapshots (groupId, artifactId, baseVersion, resolvedVersion) VALUES (:groupId, :artifactId, :baseVersion, :resolvedVersion)")
29 | int insert(@BindWithRosetta SnapshotVersionEgg snapshot);
30 | }
31 |
--------------------------------------------------------------------------------
/accelerator-api/src/main/java/com/hubspot/snapshots/api/SnapshotResource.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots.api;
2 |
3 | import java.util.List;
4 |
5 | import javax.inject.Inject;
6 | import javax.ws.rs.Consumes;
7 | import javax.ws.rs.GET;
8 | import javax.ws.rs.POST;
9 | import javax.ws.rs.Path;
10 | import javax.ws.rs.Produces;
11 | import javax.ws.rs.QueryParam;
12 | import javax.ws.rs.core.MediaType;
13 |
14 | import org.skife.jdbi.v2.Transaction;
15 | import org.skife.jdbi.v2.TransactionStatus;
16 |
17 | import com.hubspot.snapshots.core.SnapshotVersion;
18 | import com.hubspot.snapshots.core.SnapshotVersionEgg;
19 | import com.hubspot.snapshots.core.Snapshots;
20 |
21 | @Path("/snapshots")
22 | @Consumes(MediaType.APPLICATION_JSON)
23 | @Produces(MediaType.APPLICATION_JSON)
24 | public class SnapshotResource {
25 | private final SnapshotDao snapshotDao;
26 |
27 | @Inject
28 | public SnapshotResource(SnapshotDao snapshotDao) {
29 | this.snapshotDao = snapshotDao;
30 | }
31 |
32 | @GET
33 | @Path("/delta")
34 | public Snapshots getDelta(@QueryParam("offset") int offset) {
35 | List<SnapshotVersion> snapshots = snapshotDao.getDelta(offset);
36 | int nextOffset = nextOffset(snapshots, offset);
37 | return new Snapshots(snapshots, snapshots.size() == SnapshotDao.PAGE_SIZE, nextOffset);
38 | }
39 |
40 | @POST
41 | public SnapshotVersion report(final SnapshotVersionEgg snapshot) {
42 | int id = snapshotDao.inTransaction(new Transaction<Integer, SnapshotDao>() {
43 |
44 | @Override
45 | public Integer inTransaction(SnapshotDao snapshotDao, TransactionStatus status) throws Exception {
46 | snapshotDao.delete(snapshot);
47 | return snapshotDao.insert(snapshot);
48 | }
49 | });
50 |
51 | return new SnapshotVersion(
52 | id,
53 | snapshot.getGroupId(),
54 | snapshot.getArtifactId(),
55 | snapshot.getBaseVersion(),
56 | snapshot.getResolvedVersion()
57 | );
58 | }
59 |
60 | private static int nextOffset(List<SnapshotVersion> snapshots, int previous) {
61 | int offset = previous;
62 |
63 | for (SnapshotVersion snapshot : snapshots) {
64 | if (snapshot.getId() > offset) {
65 | offset = snapshot.getId();
66 | }
67 | }
68 |
69 | return offset;
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/accelerator-api/src/main/resources/schema.sql:
--------------------------------------------------------------------------------
1 | --liquibase formatted sql
2 |
3 | --changeset jhaber:1
4 | CREATE TABLE IF NOT EXISTS latest_snapshots (
5 | id INT UNSIGNED NOT NULL AUTO_INCREMENT,
6 | groupId VARCHAR(64) NOT NULL,
7 | artifactId VARCHAR(64) NOT NULL,
8 | baseVersion VARCHAR(128) NOT NULL,
9 | resolvedVersion VARCHAR(128) NOT NULL,
10 | PRIMARY KEY (id),
11 | UNIQUE INDEX (groupId, artifactId, baseVersion)
12 | ) ENGINE=InnoDB;
13 |
14 | -- changeset jhaber:2 dbms:mysql
15 | ALTER TABLE latest_snapshots DEFAULT CHARSET=ascii COLLATE ascii_bin ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8
16 |
--------------------------------------------------------------------------------
/accelerator-api/src/test/java/com/hubspot/snapshots/api/AcceleratorAcceptanceTest.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots.api;
2 |
3 | import static org.assertj.core.api.Assertions.assertThat;
4 |
5 | import java.io.IOException;
6 | import java.sql.Connection;
7 | import java.sql.SQLException;
8 | import java.util.ArrayList;
9 | import java.util.Iterator;
10 | import java.util.List;
11 | import java.util.concurrent.atomic.AtomicInteger;
12 |
13 | import org.junit.After;
14 | import org.junit.AfterClass;
15 | import org.junit.BeforeClass;
16 | import org.junit.ClassRule;
17 | import org.junit.Test;
18 |
19 | import com.hubspot.snapshots.client.AcceleratorClient;
20 | import com.hubspot.snapshots.core.SnapshotVersion;
21 | import com.hubspot.snapshots.core.SnapshotVersionEgg;
22 |
23 | import io.dropwizard.db.ManagedDataSource;
24 | import io.dropwizard.testing.ConfigOverride;
25 | import io.dropwizard.testing.ResourceHelpers;
26 | import io.dropwizard.testing.junit.DropwizardAppRule;
27 |
28 | public class AcceleratorAcceptanceTest {
29 | private static final AtomicInteger SNAPSHOT_COUNTER = new AtomicInteger(0);
30 |
31 | @ClassRule
32 | public static final DropwizardAppRule<AcceleratorConfiguration> RULE = new DropwizardAppRule<>(
33 | AcceleratorService.class,
34 | ResourceHelpers.resourceFilePath("test.yaml"),
35 | ConfigOverride.config("server.connector.port", "0")
36 | );
37 |
38 | private static ManagedDataSource dataSource;
39 | private static AcceleratorClient client;
40 |
41 | @BeforeClass
42 | public static void setup() throws Exception {
43 | dataSource = RULE.getConfiguration().getDataSourceFactory().build(RULE.getEnvironment().metrics(), "test");
44 | dataSource.start();
45 |
46 | client = AcceleratorClient.withBaseUrl(String.format("http://localhost:%d/accelerator", RULE.getLocalPort()));
47 | }
48 |
49 | @After
50 | public void cleanup() throws SQLException {
51 | try (Connection connection = dataSource.getConnection()) {
52 | connection.prepareStatement("TRUNCATE TABLE latest_snapshots").execute();
53 | }
54 | }
55 |
56 | @AfterClass
57 | public static void teardown() throws Exception {
58 | dataSource.stop();
59 | }
60 |
61 | @Test
62 | public void itReturnsEmptyDeltaWhenNoSnapshotsPresent() throws IOException {
63 | List<SnapshotVersion> snapshots = toList(client.getDelta(0));
64 | assertThat(snapshots).isEmpty();
65 | }
66 |
67 | @Test
68 | public void itReturnsNewSnapshotFromDelta() throws IOException {
69 | SnapshotVersionEgg snapshot = nextSnapshot();
70 | client.report(snapshot);
71 |
72 | List<SnapshotVersion> snapshots = toList(client.getDelta(0));
73 | assertThat(snapshots).hasSize(1);
74 |
75 | SnapshotVersion actual = snapshots.get(0);
76 | assertThat(actual.getGroupId()).isEqualTo(snapshot.getGroupId());
77 | assertThat(actual.getArtifactId()).isEqualTo(snapshot.getArtifactId());
78 | assertThat(actual.getBaseVersion()).isEqualTo(snapshot.getBaseVersion());
79 | assertThat(actual.getResolvedVersion()).isEqualTo(snapshot.getResolvedVersion());
80 |
81 | List<SnapshotVersion> nextPage = toList(client.getDelta(actual.getId()));
82 | assertThat(nextPage).isEmpty();
83 | }
84 |
85 | @Test
86 | public void itOverwritesSnapshotWithSameCoordinates() throws IOException {
87 | SnapshotVersionEgg first = nextSnapshot();
88 | client.report(first);
89 |
90 | SnapshotVersionEgg second = nextSnapshot();
91 | client.report(second);
92 |
93 | List<SnapshotVersion> snapshots = toList(client.getDelta(0));
94 | assertThat(snapshots).hasSize(1);
95 |
96 | SnapshotVersion actual = snapshots.get(0);
97 | assertThat(actual.getGroupId()).isEqualTo(second.getGroupId());
98 | assertThat(actual.getArtifactId()).isEqualTo(second.getArtifactId());
99 | assertThat(actual.getBaseVersion()).isEqualTo(second.getBaseVersion());
100 | assertThat(actual.getResolvedVersion()).isEqualTo(second.getResolvedVersion());
101 |
102 | SnapshotVersionEgg third = nextSnapshot();
103 | client.report(third);
104 |
105 | List<SnapshotVersion> nextPage = toList(client.getDelta(actual.getId()));
106 | assertThat(nextPage).hasSize(1);
107 |
108 | actual = nextPage.get(0);
109 | assertThat(actual.getGroupId()).isEqualTo(third.getGroupId());
110 | assertThat(actual.getArtifactId()).isEqualTo(third.getArtifactId());
111 | assertThat(actual.getBaseVersion()).isEqualTo(third.getBaseVersion());
112 | assertThat(actual.getResolvedVersion()).isEqualTo(third.getResolvedVersion());
113 | }
114 |
115 | private static SnapshotVersionEgg nextSnapshot() {
116 | return new SnapshotVersionEgg(
117 | "com.test",
118 | "test",
119 | "0.1-SNAPSHOT",
120 | "0.1-20171129.222952-" + SNAPSHOT_COUNTER.incrementAndGet()
121 | );
122 | }
123 |
124 | private static List<SnapshotVersion> toList(Iterator<SnapshotVersion> iterator) {
125 | List<SnapshotVersion> list = new ArrayList<>();
126 | while (iterator.hasNext()) {
127 | list.add(iterator.next());
128 | }
129 |
130 | return list;
131 | }
132 | }
133 |
--------------------------------------------------------------------------------
/accelerator-api/src/test/resources/test.yaml:
--------------------------------------------------------------------------------
1 | server:
2 | type: simple
3 | applicationContextPath: /accelerator
4 | connector:
5 | type: http
6 |
7 | database:
8 | driverClass: org.h2.Driver
9 | user: user
10 | password: password
11 | url: jdbc:h2:mem:accelerator;DATABASE_TO_UPPER=false;DB_CLOSE_DELAY=-1;mode=MySQL
12 | initializeSchema: true
13 |
--------------------------------------------------------------------------------
/accelerator-client/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
3 |   <modelVersion>4.0.0</modelVersion>
4 |
5 |   <parent>
6 |     <groupId>com.hubspot.snapshots</groupId>
7 |     <artifactId>accelerator-parent</artifactId>
8 |     <version>0.4-SNAPSHOT</version>
9 |   </parent>
10 |
11 |   <artifactId>accelerator-client</artifactId>
12 |
13 |   <dependencies>
14 |     <dependency>
15 |       <groupId>com.hubspot.snapshots</groupId>
16 |       <artifactId>accelerator-core</artifactId>
17 |     </dependency>
18 |     <dependency>
19 |       <groupId>com.fasterxml.jackson.core</groupId>
20 |       <artifactId>jackson-databind</artifactId>
21 |     </dependency>
22 |     <dependency>
23 |       <groupId>com.squareup.okhttp3</groupId>
24 |       <artifactId>okhttp</artifactId>
25 |     </dependency>
26 |   </dependencies>
27 | </project>
28 |
--------------------------------------------------------------------------------
/accelerator-client/src/main/java/com/hubspot/snapshots/client/AcceleratorClient.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots.client;
2 |
3 | import java.io.IOException;
4 | import java.util.Iterator;
5 | import java.util.NoSuchElementException;
6 | import java.util.concurrent.TimeUnit;
7 |
8 | import com.fasterxml.jackson.databind.DeserializationFeature;
9 | import com.fasterxml.jackson.databind.ObjectMapper;
10 | import com.hubspot.snapshots.core.SnapshotVersion;
11 | import com.hubspot.snapshots.core.SnapshotVersionEgg;
12 | import com.hubspot.snapshots.core.Snapshots;
13 |
14 | import okhttp3.MediaType;
15 | import okhttp3.OkHttpClient;
16 | import okhttp3.Request;
17 | import okhttp3.RequestBody;
18 | import okhttp3.Response;
19 |
20 | public class AcceleratorClient {
21 | private static final String DETECTED_BASE_URL = detectBaseUrl();
22 |
23 | private static String detectBaseUrl() {
24 | String acceleratorUrl = System.getProperty("accelerator.url");
25 | if (acceleratorUrl != null) {
26 | return acceleratorUrl;
27 | }
28 |
29 | return System.getenv("ACCELERATOR_URL");
30 | }
31 |
32 | private final String reportUrl;
33 | private final String deltaUrl;
34 | private final OkHttpClient client;
35 | private final ObjectMapper mapper;
36 |
37 | private AcceleratorClient(String baseUrl) {
38 | this.reportUrl = baseUrl + "/snapshots";
39 | this.deltaUrl = baseUrl + "/snapshots/delta";
40 | this.client = new OkHttpClient.Builder()
41 | .followRedirects(false)
42 | .followSslRedirects(false)
43 | .connectTimeout(5, TimeUnit.SECONDS)
44 | .readTimeout(15, TimeUnit.SECONDS)
45 | .build();
46 | this.mapper = new ObjectMapper().disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);
47 | }
48 |
49 | public static AcceleratorClient detectingBaseUrl() {
50 | if (DETECTED_BASE_URL == null) {
51 | throw new IllegalStateException("Unable to detect base url, set ACCELERATOR_URL environment variable or accelerator.url system property");
52 | }
53 | return withBaseUrl(DETECTED_BASE_URL);
54 | }
55 |
56 | public static AcceleratorClient withBaseUrl(String baseUrl) {
57 | return new AcceleratorClient(baseUrl);
58 | }
59 |
60 | public static String detectedDeltaUrl() {
61 | return DETECTED_BASE_URL + "/snapshots/delta";
62 | }
63 |
64 | public Iterator<SnapshotVersion> getDelta(int offset) {
65 | return new SnapshotIterator(offset);
66 | }
67 |
68 | public SnapshotVersion report(SnapshotVersionEgg snapshot) throws IOException {
69 | MediaType mediaType = MediaType.parse("application/json; charset=utf-8");
70 | RequestBody body = RequestBody.create(mediaType, mapper.writeValueAsString(snapshot));
71 |
72 | Request request = new Request.Builder()
73 | .url(reportUrl)
74 | .post(body)
75 | .build();
76 |
77 | Response response = client.newCall(request).execute();
78 | if (response.code() != 200) {
79 | throw new IOException("Unexpected response code from accelerator API: " + response.code());
80 | }
81 |
82 | return mapper.readValue(response.body().byteStream(), SnapshotVersion.class);
83 | }
84 |
85 | private Snapshots getSinglePage(int offset) throws IOException {
86 | Request request = new Request.Builder()
87 | .url(deltaUrl + "?offset=" + offset)
88 | .build();
89 |
90 | try (Response response = client.newCall(request).execute()) {
91 | if (response.code() != 200) {
92 | throw new IOException("Unexpected response code from accelerator API: " + response.code());
93 | }
94 |
95 | return mapper.readValue(response.body().byteStream(), Snapshots.class);
96 | }
97 | }
98 |
99 | private enum State {
100 | READY, NOT_READY, DONE, FAILED
101 | }
102 |
103 | private class SnapshotIterator implements Iterator<SnapshotVersion> {
104 | private final int initialOffset;
105 | private State state;
106 | private SnapshotVersion next;
107 | private Snapshots snapshots;
108 | private Iterator<SnapshotVersion> iterator;
109 |
110 | public SnapshotIterator(int initialOffset) {
111 | this.initialOffset = initialOffset;
112 | this.state = State.NOT_READY;
113 | this.next = null;
114 | this.snapshots = null;
115 | this.iterator = null;
116 | }
117 |
118 | @Override
119 | public boolean hasNext() {
120 | if (state == State.FAILED) {
121 | throw new IllegalStateException("This iterator is in a failed state");
122 | }
123 | switch (state) {
124 | case DONE:
125 | return false;
126 | case READY:
127 | return true;
128 | default:
129 | return tryToComputeNext();
130 | }
131 | }
132 |
133 | @Override
134 | public SnapshotVersion next() {
135 | if (!hasNext()) {
136 | throw new NoSuchElementException();
137 | }
138 | state = State.NOT_READY;
139 | SnapshotVersion snapshot = next;
140 | next = null;
141 | return snapshot;
142 | }
143 |
144 | @Override
145 | public void remove() {
146 | throw new UnsupportedOperationException("remove");
147 | }
148 |
149 | private boolean tryToComputeNext() {
150 | state = State.FAILED; // temporary pessimism
151 | next = computeNext();
152 | if (state != State.DONE) {
153 | state = State.READY;
154 | return true;
155 | }
156 | return false;
157 | }
158 |
159 | private SnapshotVersion computeNext() {
160 | try {
161 | if (snapshots == null) {
162 | snapshots = getSinglePage(initialOffset);
163 | iterator = snapshots.getVersions().iterator();
164 | }
165 |
166 | while (!iterator.hasNext()) {
167 | if (snapshots.hasMore()) {
168 | snapshots = getSinglePage(snapshots.getNextOffset());
169 | iterator = snapshots.getVersions().iterator();
170 | } else {
171 | state = State.DONE;
172 | return null;
173 | }
174 | }
175 |
176 | return iterator.next();
177 | } catch (IOException e) {
178 | throw new RuntimeException(e);
179 | }
180 | }
181 | }
182 | }
183 |
--------------------------------------------------------------------------------
/accelerator-core/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
3 |   <modelVersion>4.0.0</modelVersion>
4 |
5 |   <parent>
6 |     <groupId>com.hubspot.snapshots</groupId>
7 |     <artifactId>accelerator-parent</artifactId>
8 |     <version>0.4-SNAPSHOT</version>
9 |   </parent>
10 |
11 |   <artifactId>accelerator-core</artifactId>
12 |
13 |   <dependencies>
14 |     <dependency>
15 |       <groupId>com.fasterxml.jackson.core</groupId>
16 |       <artifactId>jackson-annotations</artifactId>
17 |     </dependency>
18 |   </dependencies>
19 | </project>
20 |
--------------------------------------------------------------------------------
/accelerator-core/src/main/java/com/hubspot/snapshots/core/SnapshotVersion.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots.core;
2 |
3 | import com.fasterxml.jackson.annotation.JsonCreator;
4 | import com.fasterxml.jackson.annotation.JsonIgnore;
5 | import com.fasterxml.jackson.annotation.JsonProperty;
6 |
7 | public class SnapshotVersion implements SnapshotVersionCore {
8 | private final int id;
9 | private final String groupId;
10 | private final String artifactId;
11 | private final String baseVersion;
12 | private final String resolvedVersion;
13 |
14 | @JsonCreator
15 | public SnapshotVersion(@JsonProperty("id") int id,
16 | @JsonProperty("groupId") String groupId,
17 | @JsonProperty("artifactId") String artifactId,
18 | @JsonProperty("baseVersion") String baseVersion,
19 | @JsonProperty("resolvedVersion") String resolvedVersion) {
20 | this.id = id;
21 | this.groupId = groupId;
22 | this.artifactId = artifactId;
23 | this.baseVersion = baseVersion;
24 | this.resolvedVersion = resolvedVersion;
25 | }
26 |
27 | public int getId() {
28 | return id;
29 | }
30 |
31 | @Override
32 | public String getGroupId() {
33 | return groupId;
34 | }
35 |
36 | @Override
37 | public String getArtifactId() {
38 | return artifactId;
39 | }
40 |
41 | @Override
42 | public String getBaseVersion() {
43 | return baseVersion;
44 | }
45 |
46 | @Override
47 | public String getResolvedVersion() {
48 | return resolvedVersion;
49 | }
50 |
51 | @JsonIgnore
52 | public String getTimestamp() {
53 | String[] parts = resolvedVersion.split("-");
54 | if (parts.length < 3) {
55 | throw new IllegalStateException();
56 | }
57 |
58 | return parts[parts.length - 2];
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/accelerator-core/src/main/java/com/hubspot/snapshots/core/SnapshotVersionCore.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots.core;
2 |
3 | public interface SnapshotVersionCore {
4 | String getGroupId();
5 | String getArtifactId();
6 | String getBaseVersion();
7 | String getResolvedVersion();
8 | }
9 |
--------------------------------------------------------------------------------
/accelerator-core/src/main/java/com/hubspot/snapshots/core/SnapshotVersionEgg.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots.core;
2 |
3 | import com.fasterxml.jackson.annotation.JsonCreator;
4 | import com.fasterxml.jackson.annotation.JsonProperty;
5 |
6 | public class SnapshotVersionEgg implements SnapshotVersionCore {
7 | private final String groupId;
8 | private final String artifactId;
9 | private final String baseVersion;
10 | private final String resolvedVersion;
11 |
12 | @JsonCreator
13 | public SnapshotVersionEgg(@JsonProperty("groupId") String groupId,
14 | @JsonProperty("artifactId") String artifactId,
15 | @JsonProperty("baseVersion") String baseVersion,
16 | @JsonProperty("resolvedVersion") String resolvedVersion) {
17 | this.groupId = groupId;
18 | this.artifactId = artifactId;
19 | this.baseVersion = baseVersion;
20 | this.resolvedVersion = resolvedVersion;
21 | }
22 |
23 | @Override
24 | public String getGroupId() {
25 | return groupId;
26 | }
27 |
28 | @Override
29 | public String getArtifactId() {
30 | return artifactId;
31 | }
32 |
33 | @Override
34 | public String getBaseVersion() {
35 | return baseVersion;
36 | }
37 |
38 | @Override
39 | public String getResolvedVersion() {
40 | return resolvedVersion;
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/accelerator-core/src/main/java/com/hubspot/snapshots/core/Snapshots.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots.core;
2 |
3 | import java.util.Collection;
4 |
5 | import com.fasterxml.jackson.annotation.JsonCreator;
6 | import com.fasterxml.jackson.annotation.JsonProperty;
7 |
8 | public class Snapshots {
9 | private final Collection<SnapshotVersion> versions;
10 | private final boolean hasMore;
11 | private final int nextOffset;
12 |
13 | @JsonCreator
14 | public Snapshots(@JsonProperty("versions") Collection<SnapshotVersion> versions,
15 | @JsonProperty("hasMore") boolean hasMore,
16 | @JsonProperty("nextOffset") int nextOffset) {
17 | this.versions = versions;
18 | this.hasMore = hasMore;
19 | this.nextOffset = nextOffset;
20 | }
21 |
22 | public Collection<SnapshotVersion> getVersions() {
23 | return versions;
24 | }
25 |
26 | @JsonProperty("hasMore")
27 | public boolean hasMore() {
28 | return hasMore;
29 | }
30 |
31 | public int getNextOffset() {
32 | return nextOffset;
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/accelerator-maven-extension/.build-executable:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HubSpot/maven-snapshot-accelerator/5900307c4221d914ee70902a3684b310ff171022/accelerator-maven-extension/.build-executable
--------------------------------------------------------------------------------
/accelerator-maven-extension/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | 4.0.0
4 |
5 |
6 | com.hubspot.snapshots
7 | accelerator-parent
8 | 0.4-SNAPSHOT
9 |
10 |
11 | accelerator-maven-extension
12 |
13 |
14 | true
15 | true
16 | true
17 |
18 |
19 |
20 |
21 | com.hubspot.snapshots
22 | accelerator-core
23 |
24 |
25 | com.hubspot.snapshots
26 | accelerator-client
27 |
28 |
29 | org.slf4j
30 | slf4j-api
31 | provided
32 |
33 |
34 | javax.inject
35 | javax.inject
36 | provided
37 |
38 |
39 | org.apache.maven.resolver
40 | maven-resolver-api
41 | provided
42 |
43 |
44 | org.apache.maven.resolver
45 | maven-resolver-spi
46 | provided
47 |
48 |
49 | org.apache.maven.resolver
50 | maven-resolver-impl
51 | provided
52 |
53 |
54 | org.apache.maven.resolver
55 | maven-resolver-util
56 | provided
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/accelerator-maven-extension/src/main/java/com/hubspot/snapshots/AcceleratorHelper.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots;
2 |
3 | import java.nio.file.Files;
4 | import java.nio.file.Path;
5 | import java.util.Properties;
6 |
7 | import javax.xml.parsers.DocumentBuilder;
8 | import javax.xml.parsers.DocumentBuilderFactory;
9 |
10 | import org.eclipse.aether.metadata.Metadata;
11 | import org.eclipse.aether.repository.LocalRepository;
12 | import org.eclipse.aether.repository.RemoteRepository;
13 | import org.slf4j.Logger;
14 | import org.slf4j.LoggerFactory;
15 | import org.w3c.dom.Element;
16 | import org.w3c.dom.Node;
17 | import org.w3c.dom.NodeList;
18 |
19 | public enum AcceleratorHelper {
20 | INSTANCE;
21 |
22 | private static final Logger LOG = LoggerFactory.getLogger(AcceleratorHelper.class);
23 | private static final DocumentBuilderFactory DOCUMENT_FACTORY = DocumentBuilderFactory.newInstance();
24 |
25 | public boolean shouldSkipUpdate(LocalRepository localRepository, Metadata metadata, RemoteRepository repository) {
26 | if (!AcceleratorUpdater.INSTANCE.isHealthy(localRepository)) {
27 | return false;
28 | } else if (metadata.getGroupId().isEmpty() || metadata.getArtifactId().isEmpty()) {
29 | return false;
30 | } else if (metadata.getVersion().isEmpty() || !metadata.getVersion().endsWith("SNAPSHOT")) {
31 | return false;
32 | } else if (repository == null) {
33 | return false;
34 | }
35 |
36 | Path acceleratorMetadata = AcceleratorUtils.INSTANCE.snapshotInfoPath(localRepository, metadata);
37 | Path mavenMetadata = AcceleratorUtils.INSTANCE.mavenMetadataPath(localRepository, metadata, repository);
38 |
39 | if (!Files.isDirectory(mavenMetadata.getParent())) {
40 | return false;
41 | }
42 |
43 | String mavenTimestamp = loadMavenMetadataTimestamp(mavenMetadata);
44 | if (mavenTimestamp == null) {
45 | return false;
46 | }
47 |
48 | String acceleratorTimestamp = loadAcceleratorMetadataTimestamp(acceleratorMetadata);
49 | if (acceleratorTimestamp == null) {
50 | return true;
51 | }
52 |
53 | return acceleratorTimestamp.compareTo(mavenTimestamp) <= 0;
54 | }
55 |
56 | private static String loadAcceleratorMetadataTimestamp(Path path) {
57 | Properties acceleratorMetadata = AcceleratorUtils.INSTANCE.readProperties(path);
58 | if (acceleratorMetadata == null) {
59 | return null;
60 | }
61 |
62 | return acceleratorMetadata.getProperty(AcceleratorUtils.LATEST_SNAPSHOT_TIMESTAMP);
63 | }
64 |
65 | private static String loadMavenMetadataTimestamp(Path path) {
66 | try {
67 | DocumentBuilder builder = DOCUMENT_FACTORY.newDocumentBuilder();
68 | Element element = builder.parse(path.toFile()).getDocumentElement();
69 | element.normalize();
70 |
71 | Node versioning = getChild(element, "versioning");
72 | if (versioning == null) {
73 | return null;
74 | }
75 |
76 | Node snapshot = getChild(versioning, "snapshot");
77 | if (snapshot == null) {
78 | return null;
79 | }
80 |
81 | Node timestamp = getChild(snapshot, "timestamp");
82 | if (timestamp == null) {
83 | return null;
84 | }
85 |
86 | return timestamp.getTextContent();
87 | } catch (Exception e) {
88 | LOG.debug("Error parsing maven metadata at path " + path, e);
89 | return null;
90 | }
91 | }
92 |
93 | private static Node getChild(Node node, String name) {
94 | NodeList children = node.getChildNodes();
95 | for (int i = 0; i < children.getLength(); i++) {
96 | Node child = children.item(i);
97 | if (name.equals(child.getNodeName())) {
98 | return child;
99 | }
100 | }
101 |
102 | return null;
103 | }
104 | }
105 |
--------------------------------------------------------------------------------
/accelerator-maven-extension/src/main/java/com/hubspot/snapshots/AcceleratorUpdater.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots;
2 |
3 | import java.nio.file.Files;
4 | import java.nio.file.Path;
5 | import java.util.Arrays;
6 | import java.util.Iterator;
7 | import java.util.List;
8 | import java.util.Properties;
9 | import java.util.concurrent.atomic.AtomicReference;
10 |
11 | import org.eclipse.aether.repository.LocalRepository;
12 | import org.slf4j.Logger;
13 | import org.slf4j.LoggerFactory;
14 |
15 | import com.hubspot.snapshots.client.AcceleratorClient;
16 | import com.hubspot.snapshots.core.SnapshotVersion;
17 |
18 | public enum AcceleratorUpdater {
19 | INSTANCE;
20 |
21 | private static final Logger LOG = LoggerFactory.getLogger(AcceleratorUpdater.class);
22 |
23 | private static final AtomicReference<Boolean> HEALTHY = new AtomicReference<>();
24 |
25 | private static boolean initialize(LocalRepository localRepository) {
26 | int offset = loadAcceleratorOffset(localRepository);
27 | LOG.debug("Loaded accelerator offset " + offset);
28 |
29 | int maxId = offset;
30 | int updated = 0;
31 | try {
32 | Iterator<SnapshotVersion> iter = AcceleratorClient.detectingBaseUrl().getDelta(offset);
33 | while (iter.hasNext()) {
34 | SnapshotVersion snapshot = iter.next();
35 | updateSnapshotInfo(localRepository, snapshot);
36 | maxId = Math.max(maxId, snapshot.getId());
37 | updated++;
38 | }
39 |
40 | LOG.debug("Processed " + updated + " new snapshots");
41 | writeAcceleratorInfo(localRepository, maxId);
42 | LOG.debug("Wrote new accelerator offset " + maxId + " to disk");
43 |
44 | LOG.info("Accelerator is healthy, will skip snapshot checks based on accelerator metadata");
45 | return true;
46 | } catch (Exception e) {
47 | LOG.warn("Unable to connect to the accelerator API at {}", AcceleratorClient.detectedDeltaUrl());
48 | LOG.warn("Will need to check for all snapshot updates");
49 | recordAcceleratorFailure(localRepository, offset, e);
50 | return false;
51 | }
52 | }
53 |
54 | public boolean isHealthy(LocalRepository localRepository) {
55 | if (HEALTHY.get() == null) {
56 | synchronized (this) {
57 | if (HEALTHY.get() == null) {
58 | HEALTHY.set(initialize(localRepository));
59 | }
60 | }
61 | }
62 |
63 | return HEALTHY.get();
64 | }
65 |
66 | private static int loadAcceleratorOffset(LocalRepository localRepository) {
67 | Path acceleratorStatusPath = AcceleratorUtils.INSTANCE.acceleratorStatusPath(localRepository);
68 |
69 | Properties acceleratorProperties = AcceleratorUtils.INSTANCE.readProperties(acceleratorStatusPath);
70 | if (acceleratorProperties == null) {
71 | return 0;
72 | }
73 |
74 | String s = acceleratorProperties.getProperty(AcceleratorUtils.LAST_PROCESSED_ID);
75 | if (s == null) {
76 | LOG.debug("Accelerator file is missing " + AcceleratorUtils.LAST_PROCESSED_ID + " at path " + acceleratorStatusPath);
77 | return 0;
78 | }
79 |
80 | try {
81 | return Integer.parseInt(s);
82 | } catch (NumberFormatException e) {
83 | LOG.debug("Accelerator file has an invalid " + AcceleratorUtils.LAST_PROCESSED_ID + " at path " + acceleratorStatusPath, e);
84 | return 0;
85 | }
86 | }
87 |
88 | private static void updateSnapshotInfo(LocalRepository localRepository, SnapshotVersion snapshot) {
89 | Path snapshotInfoPath = AcceleratorUtils.INSTANCE.snapshotInfoPath(localRepository, snapshot);
90 |
91 | if (Files.isDirectory(snapshotInfoPath.getParent())) {
92 | List<String> lines = Arrays.asList(
93 | AcceleratorUtils.LATEST_SNAPSHOT_VERSION + "=" + snapshot.getResolvedVersion(),
94 | AcceleratorUtils.LATEST_SNAPSHOT_TIMESTAMP + "=" + snapshot.getTimestamp()
95 | );
96 | AcceleratorUtils.INSTANCE.writeToPath(lines, snapshotInfoPath);
97 | } else {
98 | LOG.debug("Skipping update because artifact is not in local repo for path " + snapshotInfoPath);
99 | }
100 | }
101 |
102 | private static void recordAcceleratorFailure(LocalRepository localRepository, int offset, Exception e) {
103 | LOG.debug("Error updating accelerator data", e);
104 |
105 | List<String> lines = Arrays.asList(
106 | AcceleratorUtils.LAST_UPDATE_SUCCESS + "=false",
107 | AcceleratorUtils.LAST_UPDATE_TIMESTAMP + "=" + System.currentTimeMillis(),
108 | AcceleratorUtils.LAST_PROCESSED_ID + "=" + offset
109 | );
110 | try {
111 | AcceleratorUtils.INSTANCE.writeToPath(lines, AcceleratorUtils.INSTANCE.acceleratorStatusPath(localRepository));
112 | } catch (Exception f) {
113 | LOG.debug("Error recording accelerator failure on disk", f);
114 | }
115 | }
116 |
117 | private static void writeAcceleratorInfo(LocalRepository localRepository, int offset) {
118 | List<String> lines = Arrays.asList(
119 | AcceleratorUtils.LAST_UPDATE_SUCCESS + "=true",
120 | AcceleratorUtils.LAST_UPDATE_TIMESTAMP + "=" + System.currentTimeMillis(),
121 | AcceleratorUtils.LAST_PROCESSED_ID + "=" + offset
122 | );
123 | AcceleratorUtils.INSTANCE.writeToPath(lines, AcceleratorUtils.INSTANCE.acceleratorStatusPath(localRepository));
124 | }
125 | }
126 |
--------------------------------------------------------------------------------
/accelerator-maven-extension/src/main/java/com/hubspot/snapshots/AcceleratorUtils.java:
--------------------------------------------------------------------------------
1 | package com.hubspot.snapshots;
2 |
3 | import java.io.IOException;
4 | import java.io.InputStream;
5 | import java.nio.charset.StandardCharsets;
6 | import java.nio.file.Files;
7 | import java.nio.file.Path;
8 | import java.nio.file.StandardCopyOption;
9 | import java.nio.file.StandardOpenOption;
10 | import java.nio.file.attribute.PosixFilePermission;
11 | import java.util.Arrays;
12 | import java.util.HashSet;
13 | import java.util.Properties;
14 | import java.util.Set;
15 |
16 | import org.eclipse.aether.metadata.Metadata;
17 | import org.eclipse.aether.repository.LocalRepository;
18 | import org.eclipse.aether.repository.RemoteRepository;
19 | import org.slf4j.Logger;
20 | import org.slf4j.LoggerFactory;
21 |
22 | import com.hubspot.snapshots.core.SnapshotVersion;
23 |
24 | public enum AcceleratorUtils {
25 | INSTANCE;
26 |
27 | private static final Logger LOG = LoggerFactory.getLogger(AcceleratorUtils.class);
28 |
29 | private static final Set<PosixFilePermission> PERMISSIONS = new HashSet<>(
30 | Arrays.asList(
31 | PosixFilePermission.OWNER_READ,
32 | PosixFilePermission.OWNER_WRITE,
33 | PosixFilePermission.GROUP_READ,
34 | PosixFilePermission.GROUP_WRITE,
35 | PosixFilePermission.OTHERS_READ,
36 | PosixFilePermission.OTHERS_WRITE
37 | )
38 | );
39 |
40 | static final String ACCELERATOR_STATUS_FILENAME = "accelerator.status";
41 | static final String ACCELERATOR_SNAPSHOT_FILENAME = "accelerator.snapshotInfo";
42 | static final String LAST_UPDATE_SUCCESS = "lastUpdateSuccess";
43 | static final String LAST_UPDATE_TIMESTAMP = "lastUpdateTimestamp";
44 | static final String LAST_PROCESSED_ID = "lastProcessedId";
45 | static final String LATEST_SNAPSHOT_VERSION = "latestSnapshotVersion";
46 | static final String LATEST_SNAPSHOT_TIMESTAMP = "latestSnapshotTimestamp";
47 |
48 | Path acceleratorStatusPath(LocalRepository localRepository) {
49 | return localRepo(localRepository).resolve(AcceleratorUtils.ACCELERATOR_STATUS_FILENAME);
50 | }
51 |
52 | Path snapshotInfoPath(LocalRepository localRepository, SnapshotVersion snapshot) {
53 | return baseDir(localRepository, snapshot.getGroupId(), snapshot.getArtifactId(), snapshot.getBaseVersion()).resolve(ACCELERATOR_SNAPSHOT_FILENAME);
54 | }
55 |
56 | Path snapshotInfoPath(LocalRepository localRepository, Metadata metadata) {
57 | return baseDir(localRepository, metadata.getGroupId(), metadata.getArtifactId(), metadata.getVersion()).resolve(ACCELERATOR_SNAPSHOT_FILENAME);
58 | }
59 |
60 | Path mavenMetadataPath(LocalRepository localRepository, Metadata metadata, RemoteRepository repository) {
61 | String fileName = "maven-metadata-" + repository.getId() + ".xml";
62 | return baseDir(localRepository, metadata.getGroupId(), metadata.getArtifactId(), metadata.getVersion()).resolve(fileName);
63 | }
64 |
65 | private Path baseDir(LocalRepository localRepository, String groupId, String artifactId, String version) {
66 | String[] groupParts = groupId.split("\\.");
67 |
68 | Path snapshotInfoPath = localRepo(localRepository);
69 | for (String groupPart : groupParts) {
70 | snapshotInfoPath = snapshotInfoPath.resolve(groupPart);
71 | }
72 | return snapshotInfoPath
73 | .resolve(artifactId)
74 | .resolve(version);
75 | }
76 |
77 | Properties readProperties(Path path) {
78 | try (InputStream inputStream = Files.newInputStream(path, StandardOpenOption.READ)) {
79 | Properties properties = new Properties();
80 | properties.load(inputStream);
81 | return properties;
82 | } catch (IOException e) {
83 | LOG.debug("Error trying to read properties from " + path, e);
84 | return null;
85 | }
86 | }
87 |
88 | void writeToPath(Iterable<? extends CharSequence> lines, Path path) {
89 | Path temp = null;
90 | try {
91 | temp = Files.createTempFile(path.getParent(), "accelerator-", ".tmp");
92 | Files.setPosixFilePermissions(temp, PERMISSIONS);
93 | Files.write(temp, lines, StandardCharsets.UTF_8);
94 | Files.move(temp, path, StandardCopyOption.ATOMIC_MOVE);
95 | } catch (IOException e) {
96 | throw new RuntimeException("Error writing accelerator data to path " + path, e);
97 | } finally {
98 | if (temp != null) {
99 | try {
100 | Files.deleteIfExists(temp);
101 | } catch (IOException ignored) {}
102 | }
103 | }
104 | }
105 |
106 | private static Path localRepo(LocalRepository localRepository) {
107 | return localRepository.getBasedir().toPath();
108 | }
109 | }
110 |
--------------------------------------------------------------------------------
/accelerator-maven-extension/src/main/java/org/eclipse/aether/RepositorySystemSessionHelper.java:
--------------------------------------------------------------------------------
1 | package org.eclipse.aether;
2 |
3 | public class RepositorySystemSessionHelper {
4 |
5 | public static RepositorySystemSession getSession(AbstractForwardingRepositorySystemSession session) {
6 | return session.getSession();
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/accelerator-maven-extension/src/main/java/org/eclipse/aether/internal/impl/DefaultUpdateCheckManager.java:
--------------------------------------------------------------------------------
1 | package org.eclipse.aether.internal.impl;
2 |
3 | /*
4 | * Licensed to the Apache Software Foundation (ASF) under one
5 | * or more contributor license agreements. See the NOTICE file
6 | * distributed with this work for additional information
7 | * regarding copyright ownership. The ASF licenses this file
8 | * to you under the Apache License, Version 2.0 (the
9 | * "License"); you may not use this file except in compliance
10 | * with the License. You may obtain a copy of the License at
11 | *
12 | * http://www.apache.org/licenses/LICENSE-2.0
13 | *
14 | * Unless required by applicable law or agreed to in writing,
15 | * software distributed under the License is distributed on an
16 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
17 | * KIND, either express or implied. See the License for the
18 | * specific language governing permissions and limitations
19 | * under the License.
20 | */
21 |
22 | import static java.util.Objects.requireNonNull;
23 |
24 | import java.io.File;
25 | import java.lang.reflect.Field;
26 | import java.util.Collections;
27 | import java.util.HashMap;
28 | import java.util.Map;
29 | import java.util.Properties;
30 | import java.util.Set;
31 | import java.util.TreeSet;
32 | import java.util.concurrent.ConcurrentHashMap;
33 |
34 | import javax.inject.Inject;
35 | import javax.inject.Named;
36 |
37 | import org.eclipse.aether.AbstractForwardingRepositorySystemSession;
38 | import org.eclipse.aether.DefaultRepositorySystemSession;
39 | import org.eclipse.aether.RepositorySystemSession;
40 | import org.eclipse.aether.RepositorySystemSessionHelper;
41 | import org.eclipse.aether.SessionData;
42 | import org.eclipse.aether.artifact.Artifact;
43 | import org.eclipse.aether.impl.UpdateCheck;
44 | import org.eclipse.aether.impl.UpdateCheckManager;
45 | import org.eclipse.aether.impl.UpdatePolicyAnalyzer;
46 | import org.eclipse.aether.metadata.Metadata;
47 | import org.eclipse.aether.repository.AuthenticationDigest;
48 | import org.eclipse.aether.repository.Proxy;
49 | import org.eclipse.aether.repository.RemoteRepository;
50 | import org.eclipse.aether.resolution.ResolutionErrorPolicy;
51 | import org.eclipse.aether.spi.locator.Service;
52 | import org.eclipse.aether.spi.locator.ServiceLocator;
53 | import org.eclipse.aether.spi.log.Logger;
54 | import org.eclipse.aether.spi.log.LoggerFactory;
55 | import org.eclipse.aether.spi.log.NullLoggerFactory;
56 | import org.eclipse.aether.transfer.ArtifactNotFoundException;
57 | import org.eclipse.aether.transfer.ArtifactTransferException;
58 | import org.eclipse.aether.transfer.MetadataNotFoundException;
59 | import org.eclipse.aether.transfer.MetadataTransferException;
60 | import org.eclipse.aether.util.ConfigUtils;
61 |
62 | import com.hubspot.snapshots.AcceleratorHelper;
63 |
64 | /** Copy of Aether's DefaultUpdateCheckManager, patched to hook accelerator snapshot metadata into the update checks (see {@link com.hubspot.snapshots.AcceleratorHelper}).
65 |  */
66 | @Named
67 | public class DefaultUpdateCheckManager
68 | implements UpdateCheckManager, Service
69 | {
70 |
71 | private Logger logger = NullLoggerFactory.LOGGER;
72 |
73 | private UpdatePolicyAnalyzer updatePolicyAnalyzer;
74 |
75 | private static final String UPDATED_KEY_SUFFIX = ".lastUpdated";
76 |
77 | private static final String ERROR_KEY_SUFFIX = ".error";
78 |
79 | private static final String NOT_FOUND = "";
80 |
81 | private static final String SESSION_CHECKS = "updateCheckManager.checks";
82 |
83 | static final String CONFIG_PROP_SESSION_STATE = "aether.updateCheckManager.sessionState";
84 |
85 | private static final int STATE_ENABLED = 0;
86 |
87 | private static final int STATE_BYPASS = 1;
88 |
89 | private static final int STATE_DISABLED = 2;
90 |
91 | public DefaultUpdateCheckManager()
92 | {
93 | // enables default constructor
94 | }
95 |
96 | @Inject
97 | DefaultUpdateCheckManager( UpdatePolicyAnalyzer updatePolicyAnalyzer, LoggerFactory loggerFactory )
98 | {
99 | setUpdatePolicyAnalyzer( updatePolicyAnalyzer );
100 | setLoggerFactory( loggerFactory );
101 | }
102 |
103 | public void initService( ServiceLocator locator )
104 | {
105 | setLoggerFactory( locator.getService( LoggerFactory.class ) );
106 | setUpdatePolicyAnalyzer( locator.getService( UpdatePolicyAnalyzer.class ) );
107 | }
108 |
109 | public DefaultUpdateCheckManager setLoggerFactory( LoggerFactory loggerFactory )
110 | {
111 | this.logger = NullLoggerFactory.getSafeLogger( loggerFactory, getClass() );
112 | return this;
113 | }
114 |
115 | public DefaultUpdateCheckManager setUpdatePolicyAnalyzer( UpdatePolicyAnalyzer updatePolicyAnalyzer )
116 | {
117 | this.updatePolicyAnalyzer = requireNonNull( updatePolicyAnalyzer, "update policy analyzer cannot be null" );
118 | return this;
119 | }
120 |
121 | public void checkArtifact( RepositorySystemSession session, UpdateCheck<Artifact, ArtifactTransferException> check )
122 | {
123 | disableSnapshotNormalization(session);
124 |
125 | if ( check.getLocalLastUpdated() != 0
126 | && !isArtifactUpdatedRequired( session, check.getLocalLastUpdated(), check ) )
127 | {
128 | if ( logger.isDebugEnabled() )
129 | {
130 | logger.debug( "Skipped remote request for " + check.getItem()
131 | + ", locally installed artifact up-to-date." );
132 | }
133 |
134 | check.setRequired( false );
135 | return;
136 | }
137 |
138 | Artifact artifact = check.getItem();
139 | RemoteRepository repository = check.getRepository();
140 |
141 | File artifactFile = requireNonNull( check.getFile(), String.format( "The artifact '%s' has no file attached", artifact ) );
142 |
143 | boolean fileExists = check.isFileValid() && artifactFile.exists();
144 |
145 | File touchFile = getTouchFile( artifact, artifactFile );
146 | Properties props = read( touchFile );
147 |
148 | String updateKey = getUpdateKey( session, artifactFile, repository );
149 | String dataKey = getDataKey( artifact, artifactFile, repository );
150 |
151 | String error = getError( props, dataKey );
152 |
153 | long lastUpdated;
154 | if ( error == null )
155 | {
156 | if ( fileExists )
157 | {
158 | // last update was successful
159 | lastUpdated = artifactFile.lastModified();
160 | }
161 | else
162 | {
163 | // this is the first attempt ever
164 | lastUpdated = 0L;
165 | }
166 | }
167 | else if ( error.length() <= 0 )
168 | {
169 | // artifact did not exist
170 | lastUpdated = getLastUpdated( props, dataKey );
171 | }
172 | else
173 | {
174 | // artifact could not be transferred
175 | String transferKey = getTransferKey( session, artifact, artifactFile, repository );
176 | lastUpdated = getLastUpdated( props, transferKey );
177 | }
178 |
179 | if ( lastUpdated == 0L )
180 | {
181 | check.setRequired( true );
182 | }
183 | else if ( isAlreadyUpdated( session, updateKey ) )
184 | {
185 | if ( logger.isDebugEnabled() )
186 | {
187 | logger.debug( "Skipped remote request for " + check.getItem()
188 | + ", already updated during this session." );
189 | }
190 |
191 | check.setRequired( false );
192 | if ( error != null )
193 | {
194 | check.setException( newException( error, artifact, repository ) );
195 | }
196 | }
197 | else if ( isArtifactUpdatedRequired( session, lastUpdated, check ) )
198 | {
199 | check.setRequired( true );
200 | }
201 | else if ( fileExists )
202 | {
203 | if ( logger.isDebugEnabled() )
204 | {
205 | logger.debug( "Skipped remote request for " + check.getItem() + ", locally cached artifact up-to-date." );
206 | }
207 |
208 | check.setRequired( false );
209 | }
210 | else
211 | {
212 | int errorPolicy = Utils.getPolicy( session, artifact, repository );
213 | int cacheFlag = getCacheFlag( error );
214 | if ( ( errorPolicy & cacheFlag ) != 0 )
215 | {
216 | check.setRequired( false );
217 | check.setException( newException( error, artifact, repository ) );
218 | }
219 | else
220 | {
221 | check.setRequired( true );
222 | }
223 | }
224 | }
225 |
226 | private static int getCacheFlag( String error )
227 | {
228 | if ( error == null || error.length() <= 0 )
229 | {
230 | return ResolutionErrorPolicy.CACHE_NOT_FOUND;
231 | }
232 | else
233 | {
234 | return ResolutionErrorPolicy.CACHE_TRANSFER_ERROR;
235 | }
236 | }
237 |
238 | private ArtifactTransferException newException( String error, Artifact artifact, RemoteRepository repository )
239 | {
240 | if ( error == null || error.length() <= 0 )
241 | {
242 | return new ArtifactNotFoundException( artifact, repository, "Failure to find " + artifact + " in "
243 | + repository.getUrl() + " was cached in the local repository, "
244 | + "resolution will not be reattempted until the update interval of " + repository.getId()
245 | + " has elapsed or updates are forced", true );
246 | }
247 | else
248 | {
249 | return new ArtifactTransferException( artifact, repository, "Failure to transfer " + artifact + " from "
250 | + repository.getUrl() + " was cached in the local repository, "
251 | + "resolution will not be reattempted until the update interval of " + repository.getId()
252 | + " has elapsed or updates are forced. Original error: " + error, true );
253 | }
254 | }
255 |
256 | public void checkMetadata( RepositorySystemSession session, UpdateCheck<Metadata, MetadataTransferException> check )
257 | {
258 | disableSnapshotNormalization(session);
259 |
260 | if ( check.getLocalLastUpdated() != 0
261 | && !isMetadataUpdatedRequired( session, check.getLocalLastUpdated(), check ) )
262 | {
263 | if ( logger.isDebugEnabled() )
264 | {
265 | logger.debug( "Skipped remote request for " + check.getItem()
266 | + ", locally installed metadata up-to-date." );
267 | }
268 |
269 | check.setRequired( false );
270 | return;
271 | }
272 |
273 | Metadata metadata = check.getItem();
274 | RemoteRepository repository = check.getRepository();
275 |
276 | File metadataFile = requireNonNull( check.getFile(), String.format( "The metadata '%s' has no file attached", metadata ) );
277 |
278 | boolean fileExists = check.isFileValid() && metadataFile.exists();
279 |
280 | File touchFile = getTouchFile( metadata, metadataFile );
281 | Properties props = read( touchFile );
282 |
283 | String updateKey = getUpdateKey( session, metadataFile, repository );
284 | String dataKey = getDataKey( metadata, metadataFile, check.getAuthoritativeRepository() );
285 |
286 | String error = getError( props, dataKey );
287 |
288 | long lastUpdated;
289 | if ( error == null )
290 | {
291 | if ( fileExists )
292 | {
293 | // last update was successful
294 | lastUpdated = getLastUpdated( props, dataKey );
295 | }
296 | else
297 | {
298 | // this is the first attempt ever
299 | lastUpdated = 0L;
300 | }
301 | }
302 | else if ( error.length() <= 0 )
303 | {
304 | // metadata did not exist
305 | lastUpdated = getLastUpdated( props, dataKey );
306 | }
307 | else
308 | {
309 | // metadata could not be transferred
310 | String transferKey = getTransferKey( session, metadata, metadataFile, repository );
311 | lastUpdated = getLastUpdated( props, transferKey );
312 | }
313 |
314 | if ( lastUpdated == 0L )
315 | {
316 | check.setRequired( true );
317 | }
318 | else if ( isAlreadyUpdated( session, updateKey ) )
319 | {
320 | if ( logger.isDebugEnabled() )
321 | {
322 | logger.debug( "Skipped remote request for " + check.getItem()
323 | + ", already updated during this session." );
324 | }
325 |
326 | check.setRequired( false );
327 | if ( error != null )
328 | {
329 | check.setException( newException( error, metadata, repository ) );
330 | }
331 | }
332 | else if ( isMetadataUpdatedRequired( session, lastUpdated, check ) )
333 | {
334 | check.setRequired( true );
335 | }
336 | else if ( fileExists )
337 | {
338 | if ( logger.isDebugEnabled() )
339 | {
340 | logger.debug( "Skipped remote request for " + check.getItem() + ", locally cached metadata up-to-date." );
341 | }
342 |
343 | check.setRequired( false );
344 | }
345 | else
346 | {
347 | int errorPolicy = Utils.getPolicy( session, metadata, repository );
348 | int cacheFlag = getCacheFlag( error );
349 | if ( ( errorPolicy & cacheFlag ) != 0 )
350 | {
351 | check.setRequired( false );
352 | check.setException( newException( error, metadata, repository ) );
353 | }
354 | else
355 | {
356 | check.setRequired( true );
357 | }
358 | }
359 | }
360 |
361 | private MetadataTransferException newException( String error, Metadata metadata, RemoteRepository repository )
362 | {
363 | if ( error == null || error.length() <= 0 )
364 | {
365 | return new MetadataNotFoundException( metadata, repository, "Failure to find " + metadata + " in "
366 | + repository.getUrl() + " was cached in the local repository, "
367 | + "resolution will not be reattempted until the update interval of " + repository.getId()
368 | + " has elapsed or updates are forced", true );
369 | }
370 | else
371 | {
372 | return new MetadataTransferException( metadata, repository, "Failure to transfer " + metadata + " from "
373 | + repository.getUrl() + " was cached in the local repository, "
374 | + "resolution will not be reattempted until the update interval of " + repository.getId()
375 | + " has elapsed or updates are forced. Original error: " + error, true );
376 | }
377 | }
378 |
379 | private long getLastUpdated( Properties props, String key )
380 | {
381 | String value = props.getProperty( key + UPDATED_KEY_SUFFIX, "" );
382 | try
383 | {
384 | return ( value.length() > 0 ) ? Long.parseLong( value ) : 1;
385 | }
386 | catch ( NumberFormatException e )
387 | {
388 | logger.debug( "Cannot parse lastUpdated date: \'" + value + "\'. Ignoring.", e );
389 | return 1;
390 | }
391 | }
392 |
393 | private String getError( Properties props, String key )
394 | {
395 | return props.getProperty( key + ERROR_KEY_SUFFIX );
396 | }
397 |
398 | private File getTouchFile( Artifact artifact, File artifactFile )
399 | {
400 | return new File( artifactFile.getPath() + ".lastUpdated" );
401 | }
402 |
403 | private File getTouchFile( Metadata metadata, File metadataFile )
404 | {
405 | return new File( metadataFile.getParent(), "resolver-status.properties" );
406 | }
407 |
408 | private String getDataKey( Artifact artifact, File artifactFile, RemoteRepository repository )
409 | {
410 | Set<String> mirroredUrls = Collections.emptySet();
411 | if ( repository.isRepositoryManager() )
412 | {
413 | mirroredUrls = new TreeSet<String>();
414 | for ( RemoteRepository mirroredRepository : repository.getMirroredRepositories() )
415 | {
416 | mirroredUrls.add( normalizeRepoUrl( mirroredRepository.getUrl() ) );
417 | }
418 | }
419 |
420 | StringBuilder buffer = new StringBuilder( 1024 );
421 |
422 | buffer.append( normalizeRepoUrl( repository.getUrl() ) );
423 | for ( String mirroredUrl : mirroredUrls )
424 | {
425 | buffer.append( '+' ).append( mirroredUrl );
426 | }
427 |
428 | return buffer.toString();
429 | }
430 |
431 | private String getTransferKey( RepositorySystemSession session, Artifact artifact, File artifactFile,
432 | RemoteRepository repository )
433 | {
434 | return getRepoKey( session, repository );
435 | }
436 |
437 | private String getDataKey( Metadata metadata, File metadataFile, RemoteRepository repository )
438 | {
439 | return metadataFile.getName();
440 | }
441 |
442 | private String getTransferKey( RepositorySystemSession session, Metadata metadata, File metadataFile,
443 | RemoteRepository repository )
444 | {
445 | return metadataFile.getName() + '/' + getRepoKey( session, repository );
446 | }
447 |
448 | private String getRepoKey( RepositorySystemSession session, RemoteRepository repository )
449 | {
450 | StringBuilder buffer = new StringBuilder( 128 );
451 |
452 | Proxy proxy = repository.getProxy();
453 | if ( proxy != null )
454 | {
455 | buffer.append( AuthenticationDigest.forProxy( session, repository ) ).append( '@' );
456 | buffer.append( proxy.getHost() ).append( ':' ).append( proxy.getPort() ).append( '>' );
457 | }
458 |
459 | buffer.append( AuthenticationDigest.forRepository( session, repository ) ).append( '@' );
460 |
461 | buffer.append( repository.getContentType() ).append( '-' );
462 | buffer.append( repository.getId() ).append( '-' );
463 | buffer.append( normalizeRepoUrl( repository.getUrl() ) );
464 |
465 | return buffer.toString();
466 | }
467 |
468 | private String normalizeRepoUrl( String url )
469 | {
470 | String result = url;
471 | if ( url != null && url.length() > 0 && !url.endsWith( "/" ) )
472 | {
473 | result = url + '/';
474 | }
475 | return result;
476 | }
477 |
478 | private String getUpdateKey( RepositorySystemSession session, File file, RemoteRepository repository )
479 | {
480 | return file.getAbsolutePath() + '|' + getRepoKey( session, repository );
481 | }
482 |
483 | private int getSessionState( RepositorySystemSession session )
484 | {
485 | String mode = ConfigUtils.getString( session, "true", CONFIG_PROP_SESSION_STATE );
486 | if ( Boolean.parseBoolean( mode ) )
487 | {
488 | // perform update check at most once per session, regardless of update policy
489 | return STATE_ENABLED;
490 | }
491 | else if ( "bypass".equalsIgnoreCase( mode ) )
492 | {
493 | // evaluate update policy but record update in session to prevent potential future checks
494 | return STATE_BYPASS;
495 | }
496 | else
497 | {
498 | // no session state at all, always evaluate update policy
499 | return STATE_DISABLED;
500 | }
501 | }
502 |
503 | private boolean isAlreadyUpdated( RepositorySystemSession session, Object updateKey )
504 | {
505 | if ( getSessionState( session ) >= STATE_BYPASS )
506 | {
507 | return false;
508 | }
509 | SessionData data = session.getData();
510 | Object checkedFiles = data.get( SESSION_CHECKS );
511 | if ( !( checkedFiles instanceof Map ) )
512 | {
513 | return false;
514 | }
515 | return ( (Map<?, ?>) checkedFiles ).containsKey( updateKey );
516 | }
517 |
518 | @SuppressWarnings( "unchecked" )
519 | private void setUpdated( RepositorySystemSession session, Object updateKey )
520 | {
521 | if ( getSessionState( session ) >= STATE_DISABLED )
522 | {
523 | return;
524 | }
525 | SessionData data = session.getData();
526 | Object checkedFiles = data.get( SESSION_CHECKS );
527 | while ( !( checkedFiles instanceof Map ) )
528 | {
529 | Object old = checkedFiles;
530 | checkedFiles = new ConcurrentHashMap