├── .gitignore
├── COPYING
├── README.md
├── default.nix
├── generate_programs_index
│   ├── COPYING
│   ├── default.nix
│   ├── file-cache.hh
│   └── generate-programs-index.cc
├── nixipfs
│   ├── create_channel_release
│   ├── create_nixipfs
│   ├── default.nix
│   ├── garbage_collect
│   ├── mirror_tarballs
│   ├── release_nixos
│   ├── setup.py
│   ├── src
│   │   ├── __init__.py
│   │   ├── create_channel_release.py
│   │   ├── create_nixipfs.py
│   │   ├── defaults.py
│   │   ├── download_helpers.py
│   │   ├── garbage_collect.py
│   │   ├── hydra_helpers.py
│   │   ├── karkinos.py
│   │   ├── mirror_tarballs.py
│   │   ├── nix_helpers.py
│   │   ├── update_binary_cache.py
│   │   └── utils.py
│   └── update_binary_cache
├── nixos_release.json
├── pkgs.nix
└── progress
    └── default.nix
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | __pycache__
3 | result
4 |
--------------------------------------------------------------------------------
/COPYING:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 |
635 | Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program.  If not, see <http://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <http://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <http://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | NixIPFS Release Scripts
2 | =======================
3 |
4 |
5 | This is a collection of scripts that fetch jobsets from a Hydra, create releases, and
6 | publish them to IPFS.
7 | It is a working solution for NAR distribution, but it should be rewritten completely for
8 | an IPLD-based approach.
9 |
10 | Usage
11 | -----
12 |
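
All tools can be built with Nix from the top-level `default.nix`. A minimal sketch,
assuming a working Nix installation (the `nixipfsEnv` attribute is defined in
`default.nix`):

```
nix-build -A nixipfsEnv
```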
13 | Start IPFS on your host, or use an IPFS API endpoint that is close to you (low latency):
14 |
15 | ```
16 | release_nixos --dir /data/nixipfs --tmpdir /data/tmp --ipfsapi 127.0.0.1 5001 --config nixos_release.json
17 | ```
18 |
19 | This downloads the latest release builds of NixOS and all `.narinfo` + `.nar` files
20 | that belong to the runtime closure (if all store paths can be resolved) to `--dir`.
21 | `--tmpdir` is used for `.nar`/`.tar` extraction, since `/tmp` is often too small.
22 |
23 | * `--print_only` will not add anything to IPFS and will not download the `.nar`
24 | files locally.
25 | Instead, the paths are printed and can be piped to a file so you can fetch them
26 | using another tool or on another host (see the example below).
27 | * `--gc` enables the built-in garbage collector, which purges the global binary
28 | cache of all files that are not used by any release.
29 | * `--no_ipfs` will not add anything to IPFS.
30 | * `--config` points to a JSON file that contains most of the parameters (see `nixos_release.json` for an example).
31 |
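For example, to collect the store paths for fetching on another host, you might run
something like this (a sketch; the output file name is arbitrary):

```
release_nixos --dir /data/nixipfs --tmpdir /data/tmp --print_only --config nixos_release.json > paths.txt
```
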
32 | The modules used by `release_nixos` have their own scripts that can be used from the
33 | command line (see the sketch after this list):
34 |
35 | * `create_channel_release` fetches the latest tested build of a single jobset in
36 | a project and creates a channel
37 | * `update_binary_cache` updates a global binary cache with the runtime closure of
38 | a release
39 | * `garbage_collect` deletes all unreferenced files from a global binary cache
40 | * `create_nixipfs` creates an IPFS directory from a local directory
41 |
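For instance, a channel release followed by a garbage-collection run might look
roughly like this (the jobset/job names and paths are placeholders; see the argument
parsers in `/nixipfs/*` for all flags and defaults):

```
create_channel_release --channel nixos-unstable --project nixos --jobset trunk-combined \
    --job tested --outdir /data/nixipfs --tmpdir /data/tmp
garbage_collect --cache /data/nixipfs/binary_cache --releases /data/nixipfs/releases/*
```
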
42 | Caching
43 | -------
44 |
45 | In order to reduce the number of requests to the IPFS API, the hash of each directory is stored on disk.
46 | If you want to re-add a directory with changed content (e.g. a changed `binary-cache-url`), you need to delete
47 | the file called `ipfs_hash` in the same directory.
48 |
49 | Run this in the `releases` path to add all releases again:
50 | ```
51 | find . -iname ipfs_hash | xargs rm
52 | ```
53 |
54 | License
55 | -------
56 |
57 | * `/nixipfs/*` is released under the GPLv3; see COPYING.
58 | * `/generate_programs_index/*` has no license yet (copyright by Eelco Dolstra, LGPL assumed).
59 |
--------------------------------------------------------------------------------
/default.nix:
--------------------------------------------------------------------------------
1 | { pkgs ? (import ./pkgs.nix){} }:
2 |
3 | rec {
4 |   nixpkgs.config.packageOverrides = pkgs: {
5 |     # fix nix release for generate_programs_index
6 |     # nixpkgs @ e0a848fb16244109f700f28bfb1b32c13be3d465
7 |     nixUnstable = pkgs.nixUnstable;
8 |     #.overrideDerivation (oldAttrs: {
9 |     #  suffix = "pre5511_c94f3d55";
10 |     #  src = pkgs.fetchFromGitHub {
11 |     #    owner = "NixOS";
12 |     #    repo = "nix";
13 |     #    rev = "c94f3d5575d7af5403274d1e9e2f3c9d72989751";
14 |     #    sha256 = "1akfzzm4f07wj6l7za916xv5rnh71pk3vl8dphgradjfqb37bv18";
15 |     #  };
16 |     #});
17 |   };
18 |   pythonPackages = pkgs.python36Packages;
19 |   progress = pkgs.callPackage ./progress/default.nix { pythonPackages = pythonPackages; };
20 |   generate_programs_index = import ./generate_programs_index/default.nix { inherit pkgs; };
21 |   nixipfs = import ./nixipfs/default.nix { inherit pkgs generate_programs_index progress pythonPackages; };
22 |   nixipfsEnv = pkgs.stdenv.mkDerivation rec {
23 |     name = "nixipfs-env";
24 |     version = "0.0.0.1";
25 |     src = ./.;
26 |     buildInputs = [ nixipfs ];
27 |   };
28 | }
29 |
--------------------------------------------------------------------------------
/generate_programs_index/COPYING:
--------------------------------------------------------------------------------
1 | generate-programs-index.cc has been copied from
2 | https://github.com/NixOS/nixos-channel-scripts
3 | Original Author: Eelco Dolstra
4 | License: LGPL (assumption based on NixOS/nix)
5 |
--------------------------------------------------------------------------------
/generate_programs_index/default.nix:
--------------------------------------------------------------------------------
1 | { pkgs ? (import ./../pkgs.nix), nix ? pkgs.nixUnstable }:
2 | with pkgs;
3 | stdenv.mkDerivation {
4 |   name = "generate-programs-index";
5 |   buildInputs = [ pkgconfig sqlite nlohmann_json nix ];
6 |   buildCommand = ''
7 |     mkdir -p $out/bin
8 |     cp ${./file-cache.hh} file-cache.hh
9 |     g++ -g ${./generate-programs-index.cc} -I . -Wall -std=c++14 -o $out/bin/generate-programs-index \
10 |       $(pkg-config --cflags nix-main) \
11 |       $(pkg-config --libs nix-main) \
12 |       $(pkg-config --libs nix-expr) \
13 |       $(pkg-config --libs nix-store) \
14 |       -lsqlite3 -lgc
15 |   '';
16 | }
17 |
--------------------------------------------------------------------------------
/generate_programs_index/file-cache.hh:
--------------------------------------------------------------------------------
1 | /* A local disk cache for fast lookups of NAR index files in a binary
2 |    cache. */
3 |
4 | #include "binary-cache-store.hh"
5 | #include "fs-accessor.hh"
6 | #include "sqlite.hh"
7 | #include "sync.hh"
8 |
9 | #include <nlohmann/json.hpp>
10 | #include <sqlite3.h>
11 |
12 | using namespace nix;
13 |
14 | MakeError(BadJSON, Error);
15 |
16 | class FileCache
17 | {
18 |     struct State
19 |     {
20 |         SQLite db;
21 |         SQLiteStmt queryPath, insertPath, queryFiles, insertFile;
22 |     };
23 |
24 |     Sync<State> state_;
25 |
26 |     struct Stat : FSAccessor::Stat
27 |     {
28 |         std::string target;
29 |     };
30 |
31 | public:
32 |
33 |     FileCache(const Path & path)
34 |     {
35 |         auto state(state_.lock());
36 |
37 |         static std::string cacheSchema = R"sql(
38 |
39 |             create table if not exists StorePaths (
40 |                 id   integer primary key autoincrement not null,
41 |                 path text unique not null
42 |             );
43 |
44 |             create table if not exists StorePathContents (
45 |                 storePath    integer not null,
46 |                 subPath      text not null,
47 |                 type         integer not null,
48 |                 fileSize     integer,
49 |                 isExecutable integer,
50 |                 target       text,
51 |                 primary key (storePath, subPath),
52 |                 foreign key (storePath) references StorePaths(id) on delete cascade
53 |             );
54 |
55 |         )sql";
56 |
57 |         state->db = SQLite(path);
58 |         state->db.exec("pragma foreign_keys = 1");
59 |         state->db.exec(cacheSchema);
60 |
61 |         if (sqlite3_busy_timeout(state->db, 60 * 60 * 1000) != SQLITE_OK)
62 |             throwSQLiteError(state->db, "setting timeout");
63 |
64 |         state->queryPath.create(state->db,
65 |             "select id from StorePaths where path = ?");
66 |         state->insertPath.create(state->db,
67 |             "insert or ignore into StorePaths(path) values (?)");
68 |         state->queryFiles.create(state->db,
69 |             "select subPath, type, fileSize, isExecutable, target from StorePathContents where storePath = ?");
70 |         state->insertFile.create(state->db,
71 |             "insert into StorePathContents(storePath, subPath, type, fileSize, isExecutable, target) values (?, ?, ?, ?, ?, ?)");
72 |     }
73 |
74 |     /* Return the files in a store path, using a SQLite database to
75 |        cache the results. */
76 |     std::map<std::string, Stat>
77 |     getFiles(ref<BinaryCacheStore> binaryCache, const Path & storePath)
78 |     {
79 |         std::map<std::string, Stat> files;
80 |
81 |         /* Look up the path in the SQLite cache. */
82 |         {
83 |             auto state(state_.lock());
84 |             auto useQueryPath(state->queryPath.use()(storePath));
85 |             if (useQueryPath.next()) {
86 |                 auto id = useQueryPath.getInt(0);
87 |                 auto useQueryFiles(state->queryFiles.use()(id));
88 |                 while (useQueryFiles.next()) {
89 |                     Stat st;
90 |                     st.type = (FSAccessor::Type) useQueryFiles.getInt(1);
91 |                     st.fileSize = (uint64_t) useQueryFiles.getInt(2);
92 |                     st.isExecutable = useQueryFiles.getInt(3) != 0;
93 |                     if (!useQueryFiles.isNull(4))
94 |                         st.target = useQueryFiles.getStr(4);
95 |                     files.emplace(useQueryFiles.getStr(0), st);
96 |                 }
97 |                 return files;
98 |             }
99 |         }
100 |
101 |         using json = nlohmann::json;
102 |
103 |         std::function<void(const std::string &, json &)> recurse;
104 |
105 |         recurse = [&](const std::string & relPath, json & v) {
106 |             Stat st;
107 |
108 |             std::string type = v["type"];
109 |
110 |             if (type == "directory") {
111 |                 st.type = FSAccessor::Type::tDirectory;
112 |                 for (auto i = v["entries"].begin(); i != v["entries"].end(); ++i) {
113 |                     std::string name = i.key();
114 |                     recurse(relPath.empty() ? name : relPath + "/" + name, i.value());
115 |                 }
116 |             } else if (type == "regular") {
117 |                 st.type = FSAccessor::Type::tRegular;
118 |                 st.fileSize = v["size"];
119 |                 st.isExecutable = v.value("executable", false);
120 |             } else if (type == "symlink") {
121 |                 st.type = FSAccessor::Type::tSymlink;
122 |                 st.target = v.value("target", "");
123 |             } else return;
124 |
125 |             files[relPath] = st;
126 |         };
127 |
128 |         /* It's not in the cache, so get the .ls file (which
129 |            contains a JSON serialisation of the listing of the NAR
130 |            contents) from the binary cache. */
131 |         auto now1 = std::chrono::steady_clock::now();
132 |         auto s = binaryCache->getFile(storePathToHash(storePath) + ".ls");
133 |         if (!s)
134 |             printInfo("warning: no listing of %s in binary cache", storePath);
135 |         else {
136 |             try {
137 |                 json ls = json::parse(*s);
138 |
139 |                 if (ls.value("version", 0) != 1)
140 |                     throw Error("NAR index for ‘%s’ has an unsupported version", storePath);
141 |
142 |                 recurse("", ls.at("root"));
143 |             } catch (std::invalid_argument & e) {
144 |                 // FIXME: some filenames have non-UTF8 characters in them,
145 |                 // which is not supported by nlohmann::json. So we have to
146 |                 // skip the entire package.
147 |                 throw BadJSON(e.what());
148 |             }
149 |         }
150 |
151 |         /* Insert the store path into the database. */
152 |         {
153 |             auto state(state_.lock());
154 |             SQLiteTxn txn(state->db);
155 |
156 |             if (state->queryPath.use()(storePath).next()) return files;
157 |             state->insertPath.use()(storePath).exec();
158 |             uint64_t id = sqlite3_last_insert_rowid(state->db);
159 |
160 |             for (auto & x : files) {
161 |                 state->insertFile.use()
162 |                     (id)
163 |                     (x.first)
164 |                     (x.second.type)
165 |                     (x.second.fileSize, x.second.type == FSAccessor::Type::tRegular)
166 |                     (x.second.isExecutable, x.second.type == FSAccessor::Type::tRegular)
167 |                     (x.second.target, x.second.type == FSAccessor::Type::tSymlink)
168 |                     .exec();
169 |             }
170 |
171 |             txn.commit();
172 |         }
173 |
174 |         auto now2 = std::chrono::steady_clock::now();
175 |         printInfo("processed %s in %d ms", storePath,
176 |             std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count());
177 |
178 |         return files;
179 |     }
180 | };
181 |
182 |
--------------------------------------------------------------------------------
/generate_programs_index/generate-programs-index.cc:
--------------------------------------------------------------------------------
1 | #include <nix/config.h>
2 |
3 | #include <chrono>
4 | #include <regex>
5 |
6 | #include "shared.hh"
7 | #include "globals.hh"
8 | #include "eval.hh"
9 | #include "store-api.hh"
10 | #include "get-drvs.hh"
11 | #include "thread-pool.hh"
12 | #include "sqlite.hh"
13 | #include "download.hh"
14 | #include "binary-cache-store.hh"
15 |
16 | #include "file-cache.hh"
17 |
18 | using namespace nix;
19 |
20 | static const char * programsSchema = R"sql(
21 |
22 |     create table if not exists Programs (
23 |         name        text not null,
24 |         system      text not null,
25 |         package     text not null,
26 |         primary key (name, system, package)
27 |     );
28 |
29 | )sql";
30 |
31 | void mainWrapped(int argc, char * * argv)
32 | {
33 |     initNix();
34 |     initGC();
35 |
36 |     if (argc != 6) throw Error("usage: generate-programs-index CACHE-DB PROGRAMS-DB BINARY-CACHE-URI STORE-PATHS NIXPKGS-PATH");
37 |
38 |     Path cacheDbPath = argv[1];
39 |     Path programsDbPath = argv[2];
40 |     Path storePathsFile = argv[4];
41 |     Path nixpkgsPath = argv[5];
42 |
43 |     settings.readOnlyMode = true;
44 |     settings.showTrace = true;
45 |
46 |     auto localStore = openStore();
47 |     std::string binaryCacheUri = argv[3];
48 |     if (hasSuffix(binaryCacheUri, "/")) binaryCacheUri.pop_back();
49 |     auto binaryCache = openStore(binaryCacheUri).cast<BinaryCacheStore>();
50 |
51 |     /* Get the allowed store paths to be included in the database. */
52 |     auto allowedPaths = tokenizeString<PathSet>(readFile(storePathsFile, true));
53 |
54 |     PathSet allowedPathsClosure;
55 |     binaryCache->computeFSClosure(allowedPaths, allowedPathsClosure);
56 |
57 |     printMsg(lvlInfo, format("%d top-level paths, %d paths in closure")
58 |         % allowedPaths.size() % allowedPathsClosure.size());
59 |
60 |     FileCache fileCache(cacheDbPath);
61 |
62 |     /* Initialise the programs database. */
63 |     struct ProgramsState
64 |     {
65 |         SQLite db;
66 |         SQLiteStmt insertProgram;
67 |     };
68 |
69 |     Sync<ProgramsState> programsState_;
70 |
71 |     unlink(programsDbPath.c_str());
72 |
73 |     {
74 |         auto programsState(programsState_.lock());
75 |
76 |         programsState->db = SQLite(programsDbPath);
77 |         programsState->db.exec("pragma synchronous = off");
78 |         programsState->db.exec("pragma main.journal_mode = truncate");
79 |         programsState->db.exec(programsSchema);
80 |
81 |         programsState->insertProgram.create(programsState->db,
82 |             "insert or replace into Programs(name, system, package) values (?, ?, ?)");
83 |     }
84 |
85 |     EvalState state({}, localStore);
86 |
87 |     Value vRoot;
88 |     state.eval(state.parseExprFromFile(resolveExprPath(absPath(nixpkgsPath))), vRoot);
89 |
90 |     /* Get all derivations. */
91 |     DrvInfos packages;
92 |
93 |     for (auto system : std::set<std::string>{"x86_64-linux", "i686-linux"}) {
94 |         auto args = state.allocBindings(2);
95 |         Value * vConfig = state.allocValue();
96 |         state.mkAttrs(*vConfig, 0);
97 |         args->push_back(Attr(state.symbols.create("config"), vConfig));
98 |         Value * vSystem = state.allocValue();
99 |         mkString(*vSystem, system);
100 |         args->push_back(Attr(state.symbols.create("system"), vSystem));
101 |         args->sort();
102 |         getDerivations(state, vRoot, "", *args, packages, true);
103 |     }
104 |
105 |     /* For each store path, figure out the package with the shortest
106 |        attribute name. E.g. "nix" is preferred over "nixStable". */
107 |     std::map<Path, DrvInfo *> packagesByPath;
108 |
109 |     for (auto & package : packages)
110 |         try {
111 |             auto outputs = package.queryOutputs(true);
112 |
113 |             for (auto & output : outputs) {
114 |                 if (!allowedPathsClosure.count(output.second)) continue;
115 |                 auto i = packagesByPath.find(output.second);
116 |                 if (i != packagesByPath.end() &&
117 |                     (i->second->attrPath.size() < package.attrPath.size() ||
118 |                      (i->second->attrPath.size() == package.attrPath.size() && i->second->attrPath < package.attrPath)))
119 |                     continue;
120 |                 packagesByPath[output.second] = &package;
121 |             }
122 |         } catch (AssertionError & e) {
123 |         } catch (Error & e) {
124 |             e.addPrefix(format("in package ‘%s’: ") % package.attrPath);
125 |             throw;
126 |         }
127 |
128 |     /* Note: we don't index hidden files. */
129 |     std::regex isProgram("bin/([^.][^/]*)");
130 |
131 |     /* Process each store path. */
132 |     auto doPath = [&](const Path & storePath, DrvInfo * package) {
133 |         try {
134 |             auto files = fileCache.getFiles(binaryCache, storePath);
135 |             if (files.empty()) return;
136 |
137 |             std::set<std::string> programs;
138 |
139 |             for (auto file : files) {
140 |
141 |                 std::smatch match;
142 |                 if (!std::regex_match(file.first, match, isProgram)) continue;
143 |
144 |                 auto curPath = file.first;
145 |                 auto stat = file.second;
146 |
147 |                 while (stat.type == FSAccessor::Type::tSymlink) {
148 |
149 |                     auto target = canonPath(
150 |                         hasPrefix(stat.target, "/")
151 |                         ? stat.target
152 |                         : dirOf(storePath + "/" + curPath) + "/" + stat.target);
153 |                     // FIXME: resolve symlinks in components of stat.target.
154 |
155 |                     if (!hasPrefix(target, "/nix/store/")) break;
156 |
157 |                     /* Assume that symlinks to other store paths point
158 |                        to executables. But check symlinks within the
159 |                        same store path. */
160 |                     if (target.compare(0, storePath.size(), storePath) != 0) {
161 |                         stat.type = FSAccessor::Type::tRegular;
162 |                         stat.isExecutable = true;
163 |                         break;
164 |                     }
165 |
166 |                     std::string sub(target, storePath.size() + 1);
167 |
168 |                     auto file2 = files.find(sub);
169 |                     if (file2 == files.end()) {
170 |                         printError("symlink ‘%s’ has non-existent target ‘%s’",
171 |                             storePath + "/" + file.first, stat.target);
172 |                         break;
173 |                     }
174 |
175 |                     curPath = sub;
176 |                     stat = file2->second;
177 |                 }
178 |
179 |                 if (stat.type == FSAccessor::Type::tDirectory
180 |                     || stat.type == FSAccessor::Type::tSymlink
181 |                     || (stat.type == FSAccessor::Type::tRegular && !stat.isExecutable))
182 |                     continue;
183 |
184 |                 programs.insert(match[1]);
185 |             }
186 |
187 |             if (programs.empty()) return;
188 |
189 |             {
190 |                 auto programsState(programsState_.lock());
191 |                 SQLiteTxn txn(programsState->db);
192 |                 for (auto & program : programs)
193 |                     programsState->insertProgram.use()(program)(package->querySystem())(package->attrPath).exec();
194 |                 txn.commit();
195 |             }
196 |
197 |         } catch (BadJSON & e) {
198 |             printError("error: in %s (%s): %s", package->attrPath, storePath, e.what());
199 |         }
200 |     };
201 |
202 |     /* Enqueue work items for each package. */
203 |     ThreadPool threadPool(16);
204 |
205 |     for (auto & i : packagesByPath)
206 |         threadPool.enqueue(std::bind(doPath, i.first, i.second));
207 |
208 |     threadPool.process();
209 |
210 |     /* Vacuum programs.sqlite to make it smaller. */
211 |     {
212 |         auto programsState(programsState_.lock());
213 |         programsState->db.exec("vacuum");
214 |     }
215 | }
216 |
217 | int main(int argc, char * * argv)
218 | {
219 |     return handleExceptions(argv[0], [&]() {
220 |         mainWrapped(argc, argv);
221 |     });
222 | }
223 |
--------------------------------------------------------------------------------
/nixipfs/create_channel_release:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | from nixipfs.create_channel_release import create_channel_release
4 | from nixipfs.defaults import *
5 |
6 | if __name__ == "__main__":
7 |     parser = argparse.ArgumentParser(description='Create a channel release from a Hydra jobset')
8 | parser.add_argument('--channel', required=True, type=str)
9 | parser.add_argument('--hydra', default=DEFAULT_HYDRA, type=str)
10 | parser.add_argument('--project', required=True, type=str)
11 | parser.add_argument('--jobset', required=True, type=str)
12 | parser.add_argument('--job', required=True, type=str)
13 | parser.add_argument('--cache', default=DEFAULT_BINARY_CACHE_URL, type=str)
14 | parser.add_argument('--target_cache', default=DEFAULT_BINARY_CACHE_URL, type=str)
15 | parser.add_argument('--outdir', required=True, type=str)
16 | parser.add_argument('--tmpdir', default='.', type=str)
17 | args = parser.parse_args()
18 | ret = create_channel_release(channel=args.channel,
19 | hydra=args.hydra,
20 | project=args.project,
21 | jobset=args.jobset,
22 | job=args.job,
23 | cache=args.cache,
24 | outdir=args.outdir,
25 | tmpdir=args.tmpdir,
26 | target_cache=args.target_cache)
27 | print(ret)
28 |
--------------------------------------------------------------------------------
/nixipfs/create_nixipfs:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 | from nixipfs.create_nixipfs import create_nixipfs
4 |
5 | if __name__ == "__main__":
6 | parser = argparse.ArgumentParser(description='Creates a NixFS v0 from a local path')
7 | parser.add_argument('--dir', required=True, type=str)
8 |     parser.add_argument('--ipfsapi', nargs=2, default=('127.0.0.1', 5001), metavar=('IP', 'PORT'))
9 |
10 | args = parser.parse_args()
11 | create_nixipfs(args.dir, args.ipfsapi)
12 |
--------------------------------------------------------------------------------
/nixipfs/default.nix:
--------------------------------------------------------------------------------
1 | { pkgs ? (import ./../pkgs.nix),
2 | generate_programs_index ? (import ./../generate_programs_index {}),
3 | pythonPackages,
4 | progress
5 | }:
6 | with pkgs;
7 |
8 | pythonPackages.buildPythonPackage rec {
9 | name = "nixipfs-${version}";
10 | version = "0.4.0";
11 | src = ./.;
12 | propagatedBuildInputs = with pythonPackages; [
13 | python
14 | ipfsapi
15 | jsonschema
16 | nixUnstable
17 | generate_programs_index
18 | progress
19 | pygit2
20 | ];
21 | }
22 |
--------------------------------------------------------------------------------
/nixipfs/garbage_collect:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 |
4 | from nixipfs.garbage_collect import garbage_collect
5 |
6 | if __name__ == "__main__":
7 | parser = argparse.ArgumentParser(description='Garbage collects all files not linked from any release')
8 |
9 | parser.add_argument('--cache', required=True, type=str)
10 | parser.add_argument('--releases', required=True, type=str, nargs='*')
11 |
12 | args = parser.parse_args()
13 | garbage_collect(cache=args.cache, releases=args.releases)
14 |
--------------------------------------------------------------------------------
/nixipfs/mirror_tarballs:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 |
4 | from nixipfs.mirror_tarballs import mirror_tarballs
5 | from nixipfs.defaults import *
6 |
7 | if __name__ == "__main__":
8 | parser = argparse.ArgumentParser(description='Mirror all tarballs in a git revision')
9 | parser.add_argument('--revision', required=True, type=str)
10 | parser.add_argument('--dir', required=True, type=str)
11 | parser.add_argument('--tmp_dir', required=True, type=str)
12 | parser.add_argument('--repo', required=True, type=str)
13 | parser.add_argument('--concurrent', default=DEFAULT_CONCURRENT_DOWNLOADS, type=int)
14 | args = parser.parse_args()
15 | ret = mirror_tarballs(target_dir=args.dir, tmp_dir=args.tmp_dir, git_repo=args.repo, git_revision=args.revision, concurrent=args.concurrent)
16 | print(ret)
--------------------------------------------------------------------------------
/nixipfs/release_nixos:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import sys
3 | import argparse
4 | import os
5 | import shutil
6 | import json
7 | import jsonschema
8 | import time
9 | import datetime
10 | from nixipfs.create_channel_release import create_channel_release
11 | from nixipfs.create_nixipfs import create_nixipfs
12 | from nixipfs.garbage_collect import garbage_collect
13 | from nixipfs.update_binary_cache import update_binary_cache
14 | from nixipfs.mirror_tarballs import mirror_tarballs
15 | from nixipfs.nix_helpers import NarInfo
16 | from glob import glob
17 |
18 | config_schema = {
19 | "$schema": "http://json-schema.org/schema#",
20 | "type" : "object",
21 | "required" : ["hydra", "cache", "target_cache", "releases", "repo" ],
22 | "properties": {
23 | "hydra": {"type": "string", "format": "uri"},
24 | "cache": {"type": "string", "format": "uri"},
25 | "target_cache": {"type": "string"},
26 | "repo": {"type": "string"},
27 | "max_threads": {"type": "integer"},
28 | "max_ipfs_threads": {"type": "integer"},
29 | "releases": {"type": "array",
30 | "items": {
31 | "type": "object",
32 | "required" : [ "channel", "project", "jobset", "mirror" ],
33 | "properties" : {
34 | "channel": {"type": "string"},
35 | "project": {"type": "string"},
36 | "jobset" : {"type": "string"},
37 | "job" : {"type": "string"},
38 | "keep" : {"type": "integer", "minimum": 1},
39 | "mirror" : {"type": "boolean"}
40 | }
41 | }
42 | }
43 | }
44 | }
45 |
46 | def release_nixos(outdir, tmpdir, ipfsapi, print_only, no_ipfs, gc, config):
47 | releases = config["releases"]
48 | hydra = config["hydra"]
49 | cache = config["cache"]
50 | target_cache = config["target_cache"]
51 | max_threads = config.get("max_threads", 7)
52 |
53 | cache_info = {'StoreDir' : '/nix/store', 'WantMassQuery' : '1', 'Priority' : '40' }
54 |
55 | paths = []
56 |
57 | print("Using up to {} threads".format(max_threads))
58 | binary_cache_dir = os.path.join(outdir, 'binary_cache')
59 | channel_dir = os.path.join(outdir, 'channels')
60 | releases_dir = os.path.join(outdir, 'releases')
61 | mirror_dir = os.path.join(outdir, 'tarballs')
62 | lastsync_files = [ os.path.join(outdir, 'lastsync'),
63 | os.path.join(binary_cache_dir, 'lastsync'),
64 | os.path.join(channel_dir, 'lastsync'),
65 | os.path.join(mirror_dir, 'lastsync'),
66 | os.path.join(releases_dir, 'lastsync') ]
67 | os.makedirs(channel_dir, exist_ok=True)
68 | os.makedirs(releases_dir, exist_ok=True)
69 |
70 |
71 | for release in releases:
72 | print("Mirroring {}".format(release))
73 | path = create_channel_release(channel = release['channel'],
74 | hydra = hydra,
75 | project = release['project'],
76 | jobset = release['jobset'],
77 | job = release['job'],
78 | cache = cache,
79 | outdir = releases_dir,
80 | tmpdir = tmpdir,
81 | target_cache = target_cache)
82 |         if not path:
83 |             print("Could not release {}".format(release))
84 |             sys.exit(1)
85 | else:
86 | paths.append(path)
87 | update_binary_cache(cache, path, outdir, max_threads, print_only, cache_info)
88 | channel_link = os.path.join(channel_dir, release['channel'])
89 | if os.path.islink(channel_link):
90 | os.unlink(channel_link)
91 | os.symlink(os.path.join("../releases", release['channel'], os.path.basename(path)), channel_link)
92 |         if release['mirror']:
93 | with open(os.path.join(path, "git-revision"), 'r') as f:
94 | revision = f.read().strip()
95 | mirror_tarballs(mirror_dir, tmpdir, config["repo"], revision, max_threads)
96 |
97 | if not os.path.isfile(os.path.join(binary_cache_dir, 'nix-cache-info')):
98 | nci = NarInfo()
99 | nci.d = cache_info
100 | with open(os.path.join(binary_cache_dir, 'nix-cache-info'), 'w') as f:
101 | f.write(nci.to_string())
102 |
103 | if gc:
104 | for release in releases:
105 | if "keep" in release:
106 | release_dirs = [ e.rstrip('/') for e in glob(os.path.join(releases_dir, release['channel']) + '/*/')]
107 | release_dirs.sort(key=lambda x: os.stat(x).st_ctime)
108 | # keep the newest release(s):
109 | for x in release_dirs[:-release["keep"]]:
110 | print("Deleting {}".format(x))
111 | shutil.rmtree(x)
112 |
113 | release_dirs = []
114 | for release_name in [ e.rstrip('/') for e in glob(releases_dir + '/*/')]:
115 | for release_dir in [ e.rstrip('/') for e in glob(release_name + '/*/')]:
116 | release_dirs.append(release_dir)
117 | garbage_collect(binary_cache_dir, release_dirs)
118 |
119 | if not (print_only or no_ipfs):
120 | create_nixipfs(outdir, ipfsapi)
121 |
122 | current_time = time.time()
123 | for lastsync_file in lastsync_files:
124 | with open(lastsync_file, 'w') as f:
125 | f.write("{}".format(int(current_time)))
126 |
127 | if __name__ == "__main__":
128 | parser = argparse.ArgumentParser(description='Release all the things! (NixOS)')
129 |     parser.add_argument('--ipfsapi', default=('127.0.0.1', 5001), nargs=2, metavar=('IP', 'PORT'))
130 | parser.add_argument('--dir', required=True)
131 | parser.add_argument('--tmpdir', required=True)
132 | parser.add_argument('--print_only', action='store_true')
133 | parser.add_argument('--gc', action='store_true')
134 | parser.add_argument('--no_ipfs', action='store_true')
135 | parser.add_argument('--config', required=True)
136 | args = parser.parse_args()
137 |
138 | # Check schema first
139 | jsonschema.Draft4Validator.check_schema(config_schema)
140 |
141 | with open(args.config, "r") as f:
142 | config = json.load(f)
143 | jsonschema.Draft4Validator(config_schema).validate(config)
144 | release_nixos(outdir=args.dir, tmpdir=args.tmpdir, ipfsapi=args.ipfsapi, print_only=args.print_only, no_ipfs=args.no_ipfs, gc=args.gc, config=config)
145 |
--------------------------------------------------------------------------------
/nixipfs/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from distutils.core import setup
4 |
5 | setup(name='nixipfs',
6 | description='modular NixOS release scripts',
7 | author='Maximilian Güntner',
8 | author_email='code@sourcediver.org',
9 | url='https://github.com/NixIPFS/nixipfs-scripts',
10 | scripts=['create_channel_release','create_nixipfs','release_nixos','update_binary_cache', 'garbage_collect', 'mirror_tarballs'],
11 | packages=['nixipfs'],
12 | package_dir={'nixipfs': 'src'},
13 | )
14 |
--------------------------------------------------------------------------------
/nixipfs/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/NixIPFS/nixipfs-scripts/4afc0a703e732b4afb31866f01775de7b9639b8d/nixipfs/src/__init__.py
--------------------------------------------------------------------------------
/nixipfs/src/create_channel_release.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import subprocess
4 | import tempfile
5 | import tarfile
6 | import lzma
7 | import sys
8 | import traceback
9 |
10 | from nixipfs.karkinos import *
11 | from nixipfs.hydra_helpers import *
12 | from nixipfs.download_helpers import *
13 | from nixipfs.utils import ccd
14 |
15 | # This is very close to what the NixOS release script does.
16 | # A general approach to releasing an arbitrary jobset is still missing, but it should be
17 | # easier to extend now with the Karkinos class and the helper functions.
18 | def create_channel_release(channel, hydra, project, jobset, job, cache, outdir, tmpdir, target_cache=None):
19 | release_info = ReleaseInfo(fetch_release_info(hydra, project, jobset, job))
20 | k = Karkinos(hydra, release_info.eval_id)
21 | eval_info = EvalInfo(k.fetch_eval_info())
22 | store_paths = k.fetch_store_paths()
23 | files_cache = os.path.join(outdir, "nixos-files.sqlite")
24 |
25 | out_dir = os.path.abspath(os.path.join(outdir, channel, release_info.name))
26 | tmp_dir = os.path.abspath(tmpdir)
27 | assert(os.path.isdir(tmp_dir))
28 |
29 | if os.path.isfile(os.path.join(out_dir, 'git-revision')):
30 | return out_dir
31 |
32 | os.makedirs(out_dir, exist_ok=True)
33 | with open(os.path.join(out_dir, "src-url"), "w") as f:
34 | f.write(k.eval_url)
35 |
36 |     if target_cache is None:
37 | with open(os.path.join(out_dir, "binary-cache-url"), "w") as f:
38 | f.write(cache)
39 | else:
40 | with open(os.path.join(out_dir, "binary-cache-url"), "w") as f:
41 | f.write(target_cache)
42 |
43 |     with open(os.path.join(out_dir, 'store-paths'), 'w') as f:
44 |         f.write("\n".join(sorted(set(store_paths))))
45 |
46 |     with lzma.open(os.path.join(out_dir, 'store-paths.xz'), 'w') as f:
47 |         f.write("\n".join(sorted(set(store_paths))).encode('utf-8'))
48 |
49 | if channel.startswith('nixos'):
50 | k.download_file('nixos.channel', out_dir, 'nixexprs.tar.xz', tmp_dir=tmp_dir)
51 | k.download_file('nixos.iso_minimal.x86_64-linux', out_dir, tmp_dir=tmp_dir)
52 | if not channel.endswith('-small'):
53 | k.download_file('nixos.iso_minimal.i686-linux', out_dir, tmp_dir=tmp_dir)
54 | k.download_file('nixos.iso_graphical.x86_64-linux', out_dir, tmp_dir=tmp_dir)
55 | k.download_file('nixos.ova.x86_64-linux', out_dir, tmp_dir=tmp_dir)
56 | else:
57 | k.download_file('tarball', out_dir, 'nixexprs.tar.gz', tmp_dir=tmp_dir)
58 |
59 | if channel.startswith('nixos'):
60 | nixexpr_tar = os.path.join(out_dir, 'nixexprs.tar.xz')
61 | with tarfile.open(nixexpr_tar, "r:xz") as nixexpr:
62 | if any([s for s in nixexpr.getnames() if 'programs.sqlite' in s]):
63 | contains_programs = True
64 | else:
65 | contains_programs = False
66 |
67 | if not contains_programs:
68 | with tempfile.TemporaryDirectory() as temp_dir:
69 | nixexpr = tarfile.open(nixexpr_tar, 'r:xz')
70 | nixexpr.extractall(temp_dir)
71 | nixexpr.close()
72 |
73 | expr_dir = os.path.join(temp_dir, os.listdir(temp_dir)[0])
74 |
75 | try:
76 | subprocess.check_call('generate-programs-index {} {} {} {} {}'.format(
77 | files_cache,
78 | os.path.join(expr_dir, 'programs.sqlite'),
79 | cache,
80 | os.path.join(out_dir, 'store-paths'),
81 | os.path.join(expr_dir,'nixpkgs')),
82 | shell=True)
83 | os.remove(os.path.join(expr_dir, 'programs.sqlite-journal'))
84 | os.remove(nixexpr_tar)
85 | nixexpr = tarfile.open(nixexpr_tar, 'w:xz')
86 | with ccd(temp_dir):
87 | nixexpr.add(os.listdir()[0])
88 | nixexpr.close()
89 |             except subprocess.CalledProcessError:
90 | print("Could not execute {}".format("generate-programs-index"))
91 |
92 | with open(os.path.join(out_dir, "git-revision"), "w") as f:
93 | f.write(eval_info.git_rev)
94 | return out_dir
95 |
--------------------------------------------------------------------------------
/nixipfs/src/create_nixipfs.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import ipfsapi
4 | import contextlib
5 | import hashlib
6 | import time
7 | from glob import glob
8 |
9 | from nixipfs.nix_helpers import *
10 | from nixipfs.utils import LJustBar
11 |
12 | RELEASE_VALID_PATHS=['binary-cache-url', 'git-revision', 'nixexprs.tar.xz', '.iso', 'src-url', 'store-paths.xz']
13 | ADD_OPTIONS={'pin':'false', 'raw-leaves': 'true'}
14 | FILES_OPTIONS={'flush': 'false'}
15 |
16 | # TODO: upstream this to ipfsapi
17 | def files_flush(api, path, **kwargs):
18 | args = (path,)
19 | return api._client.request('/files/flush', args=args, **kwargs)
20 |
21 | def add_binary_cache(api, local_dir, mfs_dir, hash_cache):
22 | binary_cache_dir = os.path.join(local_dir, 'binary_cache')
23 | nar_dir = os.path.join(binary_cache_dir, 'nar')
24 |
25 | mfs_binary_cache_dir = os.path.join(mfs_dir, 'binary_cache')
26 | mfs_nar_dir = os.path.join(mfs_binary_cache_dir, 'nar')
27 |
28 | api.files_mkdir(mfs_binary_cache_dir)
29 | api.files_mkdir(mfs_nar_dir)
30 |
31 | nar_files = [ e for e in os.listdir(nar_dir) if '.nar' in e ]
32 | bar = LJustBar('Adding .nar', max=len(nar_files))
33 | for idx, nar in enumerate(nar_files):
34 | bar.next()
35 | if hash_cache.get(nar) is None:
36 | nar_path = os.path.join(nar_dir, nar)
37 | hash_cache.update({ nar : api.add(nar_path, recursive=False, opts=ADD_OPTIONS)['Hash']})
38 | bar.finish()
39 |
40 | bar = LJustBar('Copying .nar', max=len(nar_files))
41 | for idx, nar in enumerate(sorted(nar_files)):
42 | bar.next()
43 | api.files_cp("/ipfs/" + hash_cache[nar], os.path.join(mfs_nar_dir, nar), opts=FILES_OPTIONS)
44 | bar.finish()
45 |
46 | if os.path.isfile(os.path.join(binary_cache_dir, 'nix-cache-info')):
47 | api.files_cp("/ipfs/" + api.add(os.path.join(binary_cache_dir, 'nix-cache-info'), opts=ADD_OPTIONS)['Hash'],
48 | os.path.join(mfs_binary_cache_dir, 'nix-cache-info'), opts=FILES_OPTIONS)
49 |
50 | narinfo_files = [ e for e in os.listdir(binary_cache_dir) if e.endswith('.narinfo') ]
51 | bar = LJustBar('Adding .narinfo', max=len(narinfo_files))
52 | for idx, nip in enumerate(narinfo_files):
53 | bar.next()
54 | ni_hash = hash_cache.get(nip)
55 | if ni_hash is None:
56 | with open(os.path.join(binary_cache_dir, nip), 'r') as f:
57 | ni = NarInfo(f.read())
58 | ni.d['IPFSHash'] = hash_cache[ni.d['URL'].split('/')[1]]
59 | with open(os.path.join(binary_cache_dir, nip), 'w') as f:
60 | f.write("\n".join(ni.dump()+['']))
61 | ni_hash = api.add(os.path.join(binary_cache_dir, nip), recursive=False, opts=ADD_OPTIONS)['Hash']
62 | hash_cache.update({nip : ni_hash})
63 | bar.finish()
64 |
65 | bar = LJustBar('Copying .narinfo', max=len(narinfo_files))
66 | for idx, nip in enumerate(sorted(narinfo_files)):
67 | bar.next()
68 | api.files_cp("/ipfs/" + hash_cache[nip], os.path.join(mfs_binary_cache_dir, nip), opts=FILES_OPTIONS)
69 | bar.finish()
70 | files_flush(api, mfs_binary_cache_dir)
71 | return api.files_stat(mfs_binary_cache_dir)['Hash']
72 |
73 | def add_nixos_release(api, local_dir, mfs_dir, hash_cache):
74 | # if the directory has been added to IPFS once, reuse that hash
75 | hash_file = os.path.join(local_dir, "ipfs_hash")
76 | if os.path.isfile(hash_file):
77 | api.files_mkdir(os.path.dirname(mfs_dir), parents=True)
78 | with open(hash_file, 'r') as f:
79 | api.files_cp("/ipfs/" + f.read().strip(), mfs_dir, opts=FILES_OPTIONS)
80 | else:
81 | api.files_mkdir(mfs_dir, parents=True)
82 | file_hashes = {}
83 |         files = [ x for x in os.listdir(local_dir) if any(y in x for y in RELEASE_VALID_PATHS) ]
84 | bar = LJustBar('Adding file', max=len(files))
85 | for f in files:
86 | bar.next()
87 | file_path = os.path.join(local_dir, f)
88 | if hash_cache.get(f) is not None:
89 | file_hashes.update({ f : hash_cache[f] })
90 | else:
91 | h = api.add(file_path, recursive=False, opts=ADD_OPTIONS)['Hash']
92 | file_hashes.update({ f : h})
93 | if f.endswith(".iso") or f.endswith(".ova"):
94 | hash_cache.update({f : h})
95 | bar.finish()
96 |
97 | bar = LJustBar('Adding file', max=len(file_hashes))
98 | for name, obj in file_hashes.items():
99 | bar.next()
100 | api.files_cp("/ipfs/" + obj, os.path.join(mfs_dir, name), opts=FILES_OPTIONS)
101 | bar.finish()
102 | add_binary_cache(api, local_dir, mfs_dir, hash_cache)
103 | with open(hash_file, 'w') as f:
104 | f.write(api.files_stat(mfs_dir)['Hash'].strip())
105 | files_flush(api, mfs_dir)
106 |
107 | def create_nixipfs(local_dir, ipfs_api):
108 | api = ipfsapi.connect(ipfs_api[0], ipfs_api[1])
109 | hash_cache = {}
110 | hash_cache_file = os.path.join(local_dir, 'ipfs_hashes')
111 | nixfs_dir = '{}_{}'.format('/nixfs', int(time.time()))
112 | channels_dir = os.path.join(local_dir, 'channels')
113 | releases_dir = os.path.join(local_dir, 'releases')
114 |
115 | if os.path.isfile(hash_cache_file):
116 | with open(hash_cache_file, 'r') as f:
117 | hash_cache.update(dict([ [e.split(':')[0].strip(),
118 | e.split(':')[1].strip() ] for e in f.readlines() ]))
119 | api.files_mkdir(nixfs_dir)
120 |
121 | # Add global binary cache
122 | print('adding global cache...')
123 | add_binary_cache(api, local_dir, nixfs_dir, hash_cache)
124 |
125 | # Add all releases
126 | for release_name in [ e.rstrip('/') for e in glob(releases_dir + '/*/')]:
127 | for release_dir in [ e.rstrip('/') for e in glob(release_name + '/*/')]:
128 | print('adding release: {}'.format(os.path.basename(release_dir)))
129 | add_nixos_release(api, release_dir, os.path.join(nixfs_dir, 'releases', os.path.basename(release_name), os.path.basename(release_dir)), hash_cache)
130 | # Add all channels
131 | for channel_dir in [ e.rstrip('/') for e in glob(channels_dir + '/*/')]:
132 | print('adding channel: {}'.format(os.path.basename(channel_dir)))
133 | add_nixos_release(api, channel_dir, os.path.join(nixfs_dir, 'channels', os.path.basename(channel_dir)), hash_cache)
134 |
135 | nixfs_hash = api.files_stat(nixfs_dir)['Hash']
136 | print('flushing...')
137 | files_flush(api, nixfs_dir)
138 | print('nixfs_hash: ' + nixfs_hash)
139 | print('pinning...')
140 | api.pin_add(nixfs_hash)
141 | ret = api.name_publish('/ipfs/' + nixfs_hash, lifetime="2h")
142 | print('published {} to /ipns/{}'.format(ret['Value'], ret['Name']))
143 | with open(hash_cache_file, 'w') as f:
144 | f.write("\n".join([ "{}:{}".format(k,v) for k,v in hash_cache.items() ]))
145 |
--------------------------------------------------------------------------------
/nixipfs/src/defaults.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | DEFAULT_CONCURRENT_DOWNLOADS=8
4 | DEFAULT_DOWNLOAD_TRIES=3
5 | DEFAULT_HTTP_ERROR_SLEEP=5
6 | DEFAULT_BINARY_CACHE_URL="https://cache.nixos.org"
7 | DEFAULT_HYDRA="https://hydra.nixos.org"
8 |
--------------------------------------------------------------------------------
/nixipfs/src/download_helpers.py:
--------------------------------------------------------------------------------
1 | import json
2 | import urllib.request
3 | import tempfile
4 | import os
5 | import tarfile
6 | import lzma
7 | import bz2
8 | import subprocess
9 | import queue
10 | import threading
11 | import time
12 | from shutil import copyfile
13 |
14 | from nixipfs.nix_helpers import nar_info_from_path, NarInfo
15 | from nixipfs.utils import ccd
16 | from nixipfs.defaults import *
17 |
18 | class DownloadFailed(Exception):
19 | pass
20 |
21 | def fetch_json(url):
22 | req = urllib.request.Request(url, headers = { "Content-Type" : "application/json",
23 | "Accept" : "application/json" })
24 | return json.loads(urllib.request.urlopen(req).read().decode('utf8'))
25 |
26 | def fetch_file_from_cache(path, binary_cache = DEFAULT_BINARY_CACHE_URL, local_cache = None, force = False, tries = DEFAULT_DOWNLOAD_TRIES):
27 | res = ""
28 |     if local_cache is not None and not force:
29 | local_path = os.path.join(local_cache, path)
30 | if os.path.isfile(local_path):
31 | with open(local_path, "r") as f:
32 | res = f.read()
33 | if not len(res):
34 | url = "{}/{}".format(binary_cache, path)
35 | for x in range(0, tries):
36 | try:
37 | req = urllib.request.Request(url)
38 |                 res = urllib.request.urlopen(req).read().decode('utf8')
39 | if len(res):
40 | break
41 | except (urllib.error.ContentTooShortError, urllib.error.HTTPError, urllib.error.URLError):
42 | time.sleep(DEFAULT_HTTP_ERROR_SLEEP)
43 | return res
44 |
45 | def download_file_from_cache(path, dest, binary_cache = DEFAULT_BINARY_CACHE_URL, tries = DEFAULT_DOWNLOAD_TRIES):
46 | url = "{}/{}".format(binary_cache, path)
47 |
48 | for x in range(0, tries):
49 | holdoff = DEFAULT_HTTP_ERROR_SLEEP*x
50 | try:
51 | urllib.request.urlretrieve(url, dest)
52 | return
53 | except (urllib.error.ContentTooShortError, urllib.error.HTTPError, urllib.error.URLError):
54 | time.sleep(holdoff)
55 | # Only reached if download failed
56 | raise DownloadFailed("Failed to download {}".format(path))
57 |
58 | def fetch_release_info(hydra_url, project, jobset, job):
59 | url = "{}/job/{}/{}/{}/latest-finished".format(hydra_url, project, jobset, job)
60 | return fetch_json(url)
61 |
62 | def fetch_store_path(path, dest_file, binary_cache = DEFAULT_BINARY_CACHE_URL, tmp_dir=os.getcwd()):
63 | if not path.startswith("/nix/store/"):
64 | raise Exception("path not valid")
65 | ni = NarInfo(fetch_file_from_cache(nar_info_from_path(path)))
66 |
67 | with tempfile.TemporaryDirectory(dir=tmp_dir) as temp_dir:
68 | with ccd(temp_dir):
69 | nar_location = os.path.join(temp_dir, os.path.basename(ni.d['URL']))
70 | download_file_from_cache(ni.d['URL'], nar_location, binary_cache)
71 | assert(os.path.isfile(nar_location))
72 | if ni.d['Compression'] == 'xz' and nar_location.endswith("xz"):
73 | nar_extract_location = ".".join(nar_location.split(".")[:-1])
74 | with lzma.open(nar_location) as n:
75 | with open(nar_extract_location, "wb") as ne:
76 | ne.write(n.read())
77 | elif ni.d['Compression'] == 'bzip2' and nar_location.endswith("bz2"):
78 | nar_extract_location = ".".join(nar_location.split(".")[:-1])
79 | with bz2.open(nar_location) as n:
80 | with open(nar_extract_location, "wb") as ne:
81 | ne.write(n.read())
82 | else:
83 | nar_extract_location = nar_location
84 | path_in_nar = '/'.join([''] + path.split('/')[4:])
85 | subprocess.run("nix cat-nar {} {} > {}".format(nar_extract_location, path_in_nar, dest_file), shell=True)
86 | assert(os.path.isfile(dest_file))
87 |
88 | class NarInfoCollector:
89 | def __init__(self):
90 | self.queue = queue.Queue()
91 | self.work = set()
92 | self.work_done = set()
93 | self.lock = threading.Lock()
94 | self.collection = []
95 |
96 | def start(self, store_paths):
97 | for path in store_paths:
98 | self.add_work(nar_info_from_path(path))
99 |
100 | def get_work(self):
101 | return self.queue.get()
102 |
103 | def add_work(self, work):
104 | if not (work in self.work or
105 | work in self.work_done):
106 | self.work.add(work)
107 | self.queue.put(work)
108 |
109 | def turn_in(self, name, nar_info):
110 | n = NarInfo(nar_info)
111 | self.collection.append([name, n])
112 |         nar_infos = [ nar_info_from_path(path) for path in n.d.get('References', '').split(' ') ]
113 |
114 | with self.lock:
115 | self.work_done.add(name)
116 | self.work.remove(name)
117 |             for ref in nar_infos:
118 |                 if len(ref):
119 |                     self.add_work(ref)
120 | self.queue.task_done()
121 |
--------------------------------------------------------------------------------
/nixipfs/src/garbage_collect.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 |
4 | def find_garbage(cache, releases, keep=[]):
5 | links = set()
6 | for release in releases:
7 | linked_cache_path = os.path.join(release, 'binary_cache')
8 | links.update(set(
9 | [ e for e in os.listdir(linked_cache_path) if '.narinfo' in e ]))
10 | links.update(set(
11 | [ os.path.join('nar', e) for e in os.listdir(os.path.join(linked_cache_path, 'nar')) if '.nar' in e ]))
12 |
13 | files = set()
14 | files.update(set(
15 | [ e for e in os.listdir(cache) if '.narinfo' in e ]))
16 | files.update(set(
17 | [ os.path.join('nar', e) for e in os.listdir(os.path.join(cache, 'nar')) if '.nar' in e ]))
18 | return files.difference(links)
19 |
20 | def garbage_collect(cache, releases, keep=[]):
21 | garbage = find_garbage(cache, releases, keep)
22 | for g in garbage:
23 | if os.path.isfile(os.path.join(cache, g)):
24 | print("Deleting {}".format(g))
25 | os.unlink(os.path.join(cache, g))
26 |
--------------------------------------------------------------------------------
/nixipfs/src/hydra_helpers.py:
--------------------------------------------------------------------------------
1 | class BuildInfo:
2 | def __init__(self, info):
3 | self.info = info
4 |
5 | @property
6 | def path(self):
7 | return self.info['buildproducts']['1']['path']
8 |
9 | @property
10 | def sha256(self):
11 | return self.info['buildproducts']['1']['sha256hash']
12 |
13 | class ReleaseInfo:
14 | def __init__(self, info):
15 | self.info = info
16 |
17 | @property
18 | def id(self):
19 | return self.info['id']
20 |
21 | @property
22 | def name(self):
23 | return self.info['nixname']
24 |
25 | @property
26 | def eval_id(self):
27 | return self.info['jobsetevals'][0]
28 |
29 | class EvalInfo:
30 | def __init__(self, info):
31 | self.info = info
32 |
33 | @property
34 | def git_rev(self):
35 | return self.info['jobsetevalinputs']['nixpkgs']['revision']
36 |
--------------------------------------------------------------------------------
/nixipfs/src/karkinos.py:
--------------------------------------------------------------------------------
1 | import urllib.request
2 | import json
3 | import os
4 |
5 | from nixipfs.download_helpers import fetch_json, fetch_store_path
6 | from nixipfs.hydra_helpers import *
7 | from nixipfs.defaults import *
8 |
9 | class KarkinosURLopener(urllib.request.FancyURLopener):
10 | version = "Karkinos/11.11"
11 |
12 | class Karkinos:
13 | def __init__(self, hydra_url, eval_id, binary_cache = DEFAULT_BINARY_CACHE_URL):
14 |         urllib.request._urlopener = KarkinosURLopener()  # intended to set a custom User-Agent
15 | self.hydra_url = hydra_url
16 | self.binary_cache = binary_cache
17 | self.eval_id = eval_id
18 |
19 | @property
20 | def eval_url(self):
21 | return "{}/eval/{}".format(self.hydra_url, self.eval_id)
22 |
23 | @property
24 | def store_path_url(self):
25 | return "{}/store-paths".format(self.eval_url)
26 |
27 | def build_info_url(self, jobname):
28 | return "{}/job/{}".format(self.eval_url, jobname)
29 |
30 | def fetch_eval_info(self):
31 | return fetch_json(self.eval_url)
32 |
33 | def fetch_store_paths(self):
34 | return fetch_json(self.store_path_url)
35 |
36 | def fetch_build_info(self, jobname):
37 | return fetch_json(self.build_info_url(jobname))
38 |
39 | def download_file(self, jobname, dest_dir, dest_name='', tmp_dir=os.getcwd()):
40 | build_info = BuildInfo(self.fetch_build_info(jobname))
41 | store_path = "/".join(build_info.path.split("/")[:4])
42 |
43 | if len(dest_name) == 0:
44 | dest_name = os.path.basename(build_info.path)
45 | dest_file = os.path.join(dest_dir, dest_name)
46 | if not os.path.isfile(dest_file):
47 | fetch_store_path(build_info.path, dest_file, self.binary_cache, tmp_dir)
48 |
--------------------------------------------------------------------------------
/nixipfs/src/mirror_tarballs.py:
--------------------------------------------------------------------------------
1 | import json
2 | import urllib.request
3 | import os
4 | import queue
5 | import tempfile
6 | import subprocess
7 | import threading
8 | import shlex
9 | import hashlib
10 | import time
11 | from pygit2 import clone_repository, GIT_RESET_HARD, Repository
12 | from shutil import copyfile
13 |
14 | from nixipfs.download_helpers import DownloadFailed
15 | from nixipfs.nix_helpers import nix_hash
16 | from nixipfs.utils import ccd
17 | from nixipfs.defaults import *
18 |
19 | # For testing purposes:
20 | NIX_EXPRS = [ 'builtins.removeAttrs ((import pkgs/top-level/release.nix { scrubJobs = false; supportedSystems = [ "x86_64-linux" "x86_64-darwin" ]; })) ["unstable" "tarball" "darwin-unstable" ]', '(import <nixpkgs> {}).hello' ]
21 |
22 | MAIN_ALGO = "sha512"
23 | MAIN_BASE = "base16"
24 |
25 | VALID_URL_SCHEMES = [ "http:", "https:", "ftp:", "mirror:" ]
26 |
27 | failed_entries_l = threading.Lock()
28 | failed_entries = []
29 |
30 | def nix_instantiate_cmd(expr):
31 | return "nix-instantiate --eval --json --strict maintainers/scripts/find-tarballs.nix --arg expr '{}'".format(expr)
32 |
33 | def create_mirror_dirs(target_dir, revision):
34 | md5_path = os.path.join(target_dir, "md5")
35 | sha1_path = os.path.join(target_dir, "sha1")
36 | sha256_path = os.path.join(target_dir, "sha256")
37 | sha512_path = os.path.join(target_dir, "sha512")
38 | name_path = os.path.join(target_dir, "by-name")
39 | revision_path = os.path.join(target_dir, "revisions", revision)
40 | os.makedirs(md5_path, exist_ok=True)
41 | os.makedirs(sha1_path, exist_ok=True)
42 | os.makedirs(sha256_path, exist_ok=True)
43 | os.makedirs(sha512_path, exist_ok=True)
44 | os.makedirs(name_path, exist_ok=True)
45 | os.makedirs(revision_path, exist_ok=True)
46 |
47 | def check_presence(target_dir, value):
48 | paths = [
49 | os.path.join(target_dir, "md5", value),
50 | os.path.join(target_dir, "sha1", value),
51 | os.path.join(target_dir, "sha256", value),
52 | os.path.join(target_dir, "sha512", value),
53 | # TODO: glob this
54 | os.path.join(target_dir, "by-name", value)
55 | ]
56 | return [ path for path in paths if os.path.exists(path) ]
57 |
58 | def mirror_file(target_dir, path, name, revision):
59 | make_path = lambda x: os.path.join(target_dir, x)
60 |
61 | md5_16 = nix_hash(path, hash_type="md5", base="base16")
62 | sha1_16 = nix_hash(path, hash_type="sha1", base="base16")
63 | sha256_16 = nix_hash(path, hash_type="sha256", base="base16")
64 | sha256_32 = nix_hash(path, hash_type="sha256", base="base32")
65 | sha512_16 = nix_hash(path, hash_type="sha512", base="base16")
66 | sha512_32 = nix_hash(path, hash_type="sha512", base="base32")
67 |
68 | main_file = make_path("sha512/{}".format(sha512_16))
69 |
70 | copyfile(path, main_file)
71 | md5_dir = os.path.join(target_dir, "md5")
72 | if not os.path.exists(os.path.join(md5_dir, md5_16)):
73 | os.symlink(os.path.relpath(main_file, start=md5_dir), os.path.join(md5_dir, md5_16))
74 |
75 | sha1_dir = os.path.join(target_dir, "sha1")
76 | if not os.path.exists(os.path.join(sha1_dir, sha1_16)):
77 | os.symlink(os.path.relpath(main_file, start=sha1_dir), os.path.join(sha1_dir, sha1_16))
78 |
79 | sha256_dir = os.path.join(target_dir, "sha256")
80 | if not os.path.exists(os.path.join(sha256_dir, sha256_16)):
81 | os.symlink(os.path.relpath(main_file, start=sha256_dir), os.path.join(sha256_dir, sha256_16))
82 | if not os.path.exists(os.path.join(sha256_dir, sha256_32)):
83 | os.symlink(os.path.relpath(main_file, start=sha256_dir), os.path.join(sha256_dir, sha256_32))
84 |
85 | sha512_dir = os.path.join(target_dir, "sha512")
86 | if not os.path.exists(os.path.join(sha512_dir, sha512_32)):
87 | os.symlink(os.path.relpath(main_file, start=sha512_dir), os.path.join(sha512_dir, sha512_32))
88 |
89 |     # prefix with the revision and a timestamp to make the name semi-random and avoid collisions
90 | name_prefix = "{}_{}".format(revision, int(time.time()))
91 | by_name_dir = os.path.join(target_dir, "by-name", name_prefix)
92 | os.makedirs(by_name_dir, exist_ok=True)
93 | if not os.path.exists(os.path.join(by_name_dir, name)):
94 | os.symlink(os.path.relpath(main_file, start=by_name_dir), os.path.join(by_name_dir, name))
95 |
96 | revision_dir = os.path.join(target_dir, "revisions", revision)
97 | if not os.path.exists(os.path.join(revision_dir, sha512_16)):
98 | os.symlink(os.path.relpath(main_file, start=revision_dir), os.path.join(revision_dir, sha512_16))
99 |
100 | def download_worker(target_dir, revision, git_workdir):
101 | global download_queue
102 | count=0
103 | paths=[]
104 | while True:
105 | work = download_queue.get()
106 | if work is None:
107 | break
108 | try:
109 | res = nix_prefetch_url(work['url'], work['hash'], git_workdir, work['type'])
110 | mirror_file(target_dir, res['path'], work['name'], revision)
111 | paths.append(res['path'])
112 | count+=1
113 |             if count % 42 == 0:  # periodically delete the fetched paths from the store
114 | for path in paths:
115 | nix_store_delete(path)
116 | count=0
117 | paths = []
118 | except DownloadFailed:
119 | append_failed_entry(work)
120 | download_queue.task_done()
121 | for path in paths:
122 | nix_store_delete(path)
123 |
124 | def append_failed_entry(entry):
125 | failed_entries_l.acquire()
126 | failed_entries.append(entry)
127 | failed_entries_l.release()
128 |
129 | def nix_prefetch_url(url, hashv, git_workdir, hash_type="sha256"):
130 | assert(hash_type in [ "md5", "sha1", "sha256", "sha512" ])
131 |     # For some reason nix-prefetch-url sometimes stalls; the timeout kills the
132 |     # process after 15 minutes, which should be enough for any single download.
133 | try:
134 | env = os.environ.copy()
135 | env["NIX_PATH"] = "nixpkgs={}".format(git_workdir)
136 | escaped_url = shlex.quote(url)
137 | res = subprocess.run("nix-prefetch-url --print-path --type {} {} {}".format(hash_type, escaped_url, hashv), shell=True, stdout=subprocess.PIPE, timeout=900, env=env)
138 | except subprocess.TimeoutExpired:
139 | raise DownloadFailed
140 | if res.returncode != 0:
141 | raise DownloadFailed
142 | lines = res.stdout.decode('utf-8').split('\n')
143 | r = {}
144 | r['hash'] = lines[0].strip()
145 | r['path'] = lines[1].strip()
146 | return r
147 |
148 | def nix_store_delete(path):
149 | res = subprocess.run("nix-store --delete {}".format(path), shell=True, stdout=subprocess.PIPE)
150 | return res.returncode
151 |
152 | def mirror_tarballs(target_dir, tmp_dir, git_repo, git_revision, concurrent=DEFAULT_CONCURRENT_DOWNLOADS):
153 | global failed_entries
154 | global download_queue
155 | create_mirror_dirs(target_dir, git_revision)
156 | download_queue = queue.Queue()
157 | threads = []
158 | repo_path = os.path.join(tmp_dir, "nixpkgs")
159 | os.makedirs(repo_path, exist_ok=True)
160 | with ccd(repo_path):
161 | exists = False
162 | try:
163 | repo = Repository(os.path.join(repo_path, ".git"))
164 | repo.remotes["origin"].fetch()
165 | exists = True
166 |         except Exception:
167 | pass
168 | if not exists:
169 | repo = clone_repository(git_repo, repo_path)
170 | repo.reset(git_revision, GIT_RESET_HARD)
171 | with ccd(repo.workdir):
172 | success = False
173 | env = os.environ.copy()
174 | env["NIX_PATH"] = "nixpkgs={}".format(repo.workdir)
175 | for expr in NIX_EXPRS:
176 | res = subprocess.run(nix_instantiate_cmd(expr), shell=True, stdout=subprocess.PIPE, env=env)
177 | if res.returncode != 0:
178 | print("nix instantiate failed!")
179 | else:
180 | success = True
181 | break
182 | if success is False:
183 | return "fatal: all nix instantiate processes failed!"
184 | output = json.loads(res.stdout.decode('utf-8').strip())
185 | # with open(os.path.join(target_dir, "tars.json"), "w") as f:
186 | # f.write(json.dumps(output))
187 | #with open(os.path.join(target_dir, "tars.json"), "r") as f:
188 | # output = json.loads(f.read())
189 | for idx, entry in enumerate(output):
190 |         if not any(entry['url'].startswith(x) for x in VALID_URL_SCHEMES):
191 |             append_failed_entry(entry)
192 |             print("url {} does not use a supported url scheme.".format(entry['url']))
193 | continue
194 | elif (len(check_presence(target_dir, entry['hash'])) or
195 | len(check_presence(target_dir, entry['name']))):
196 | print("url {} already mirrored".format(entry['url']))
197 | continue
198 | else:
199 | download_queue.put(entry)
200 | for i in range(concurrent):
201 | t = threading.Thread(target=download_worker, args=(target_dir, git_revision, repo.workdir, ))
202 | threads.append(t)
203 | t.start()
204 | download_queue.join()
205 | for i in range(concurrent):
206 | download_queue.put(None)
207 | for t in threads:
208 | t.join()
209 | log = "########################\n"
210 | log += "SUMMARY OF FAILED FILES:\n"
211 | log += "########################\n"
212 | for entry in failed_entries:
213 | log += "url:{}, name:{}\n".format(entry['url'], entry['name'])
214 | with open(os.path.join(target_dir, "revisions", git_revision, "log"), "w") as f:
215 | f.write(log)
216 | return log
217 |
--------------------------------------------------------------------------------
/nixipfs/src/nix_helpers.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import os
3 |
4 | def hash_part_in_path(path):
5 | if path.count('/') >= 3:
6 | # Path in the format /nix/store/hash-name
7 | return path.split('/')[3][:32]
8 | elif path.count('/') == 0:
9 | # without /nix/store/
10 | return path[:32]
11 | else:
12 | raise Exception("malformed path")
13 |
14 | def nar_info_from_path(path):
15 | h = hash_part_in_path(path)
16 | if len(h):
17 | return h + ".narinfo"
18 | else:
19 | return ""
20 |
21 | # TODO implement in pure python
22 | def nix_hash(path, hash_type="sha256", base="base32"):
23 | assert(os.path.isfile(path))
24 | assert(hash_type in [ "md5", "sha1", "sha256", "sha512" ])
25 | assert(base in [ "base16", "base32", "base64" ])
26 | h_res = subprocess.run("nix hash-file --{} --type {} {}".format(base, hash_type, path), shell=True, stdout=subprocess.PIPE)
27 | return h_res.stdout.decode('utf-8').strip()
28 |
29 | class NarInfo:
30 | def __init__(self, text = ""):
31 | self.d = {}
32 | if len(text):
33 | self.load(text)
34 |
35 | def load(self, text):
36 | for line in text.split('\n'):
37 | if line.count(':'):
38 | t = line.split(':', 1)
39 | self.d[t[0].strip()] = t[1].strip()
40 |
41 | def dump(self):
42 | res = []
43 | for k,v in self.d.items():
44 | res.append("{}: {}".format(k,v))
45 |         # make it deterministic for hashing
46 | return sorted(res)
47 |
48 | def to_string(self):
49 | return '\n'.join(self.dump() + [''])
50 |
--------------------------------------------------------------------------------
/nixipfs/src/update_binary_cache.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import os
3 | import sys
4 | import subprocess
5 | import time
6 | import queue
7 | import threading
8 | import urllib
9 |
10 | from nixipfs.nix_helpers import *
11 | from nixipfs.download_helpers import *
12 | from nixipfs.defaults import *
13 |
14 | def download_worker(binary_cache):
15 | global nar_queue
16 | while True:
17 | work = nar_queue.get()
18 | if work is None:
19 | break
20 | # try a few times to correct a corrupt download
21 | for x in range(0, DEFAULT_DOWNLOAD_TRIES):
22 | try:
23 | download_file_from_cache(work[0], work[1], binary_cache)
24 | except DownloadFailed:
25 | break
26 | if os.path.isfile(work[1]):
27 | if nix_hash(work[1], work[2].split(':')[0], "base32") == work[2].split(':')[1].strip():
28 | break
29 | else:
30 | print("Hash verification for {} failed. Retrying.".format(work[1]))
31 | os.unlink(work[1])
32 | if not os.path.isfile(work[1]):
33 | print("Could not download {}".format(work[0]))
34 | nar_queue.task_done()
35 |
36 | def narinfo_worker(cache, local_cache):
37 | global nic
38 | while True:
39 | work = nic.get_work()
40 | if work is None:
41 | break
42 | narinfo = fetch_file_from_cache(work, cache, local_cache)
43 | nic.turn_in(work, narinfo)
44 |
45 | def update_binary_cache(cache, release, outdir, concurrent=DEFAULT_CONCURRENT_DOWNLOADS, print_only=False, cache_info=None):
46 | global nar_queue
47 | global nic
48 | binary_cache_path = os.path.join(outdir, 'binary_cache')
49 | linked_cache_path = os.path.join(release, 'binary_cache')
50 | assert(os.path.isdir(release))
51 | os.makedirs(os.path.join(binary_cache_path, 'nar'), exist_ok=True)
52 | os.makedirs(os.path.join(linked_cache_path, 'nar'), exist_ok=True)
53 |
54 | with open(os.path.join(release, 'store-paths'), 'r') as f:
55 | store_paths = f.read()
56 |
57 | threads = []
58 | nic = NarInfoCollector()
59 | nic.start(store_paths.split('\n'))
60 | for i in range(concurrent):
61 | t = threading.Thread(target=narinfo_worker, args=(cache, binary_cache_path))
62 | threads.append(t)
63 | t.start()
64 | nic.queue.join()
65 | for i in range(concurrent):
66 | nic.queue.put(None)
67 | for t in threads:
68 | t.join()
69 | threads = []
70 |
71 | # Write NarInfo files
72 | for ni in nic.collection:
73 | with open(os.path.join(binary_cache_path, ni[0]), 'w') as f:
74 | f.write(ni[1].to_string())
75 | # Figure out all nars and the fileHash that we want to fetch
76 | nars = { ni[1].d['URL'] : ni[1].d['FileHash'] for ni in nic.collection }
77 |
78 | if print_only:
79 | for nar, filehash in nars.items():
80 | print("{},{}".format(nar, filehash))
81 | else:
82 | nar_queue = queue.Queue()
83 | for i in range(concurrent):
84 | t = threading.Thread(target=download_worker, args=(cache, ))
85 | threads.append(t)
86 | t.start()
87 | for url, file_hash in nars.items():
88 | nar_location_disk = os.path.join(binary_cache_path, url)
89 | if not os.path.isfile(nar_location_disk):
90 | nar_queue.put([url, nar_location_disk, file_hash])
91 | nar_queue.join()
92 | for i in range(concurrent):
93 | nar_queue.put(None)
94 | for t in threads:
95 | t.join()
96 | # All nars/narinfos have been written, link to them
97 | with ccd(linked_cache_path):
98 | for ni in nic.collection:
99 | # Produces xyz.narinfo -> ../../binary_cache/xyz.narinfo
100 | target = os.path.join(binary_cache_path, ni[0])
101 | assert(os.path.isfile(target))
102 | if not os.path.isfile(os.path.basename(ni[0])):
103 | os.symlink(os.path.relpath(target), ni[0])
104 | with ccd(os.path.join(linked_cache_path, 'nar')):
105 | for nar, file_hash in nars.items():
106 | target = os.path.join(binary_cache_path, 'nar', os.path.basename(nar))
107 | assert(os.path.isfile(target))
108 | if not os.path.isfile(os.path.basename(nar)):
109 | os.symlink(os.path.relpath(target), os.path.basename(nar))
110 | if cache_info is not None:
111 | nci = NarInfo()
112 | nci.d = cache_info
113 | with open(os.path.join(linked_cache_path, 'nix-cache-info'), 'w') as f:
114 | f.write(nci.to_string())
115 |
--------------------------------------------------------------------------------
/nixipfs/src/utils.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import os
3 | from progress.bar import Bar
4 |
5 | class LJustBar(Bar):
6 | def __init__(self, message=None, width=16, **kwargs):
7 |         super(LJustBar, self).__init__(message.ljust(max(width, len(message))), **kwargs)
8 |
9 | @contextlib.contextmanager
10 | def ccd(path):
11 |     cur = os.getcwd()
12 |     os.chdir(path)
13 |     try:
14 |         yield
15 |     finally:
16 |         os.chdir(cur)
17 |
--------------------------------------------------------------------------------
/nixipfs/update_binary_cache:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import argparse
3 |
4 | from nixipfs.update_binary_cache import update_binary_cache
5 |
6 | from nixipfs.defaults import *
7 |
8 | if __name__ == "__main__":
9 | parser = argparse.ArgumentParser(description='Fetches all .narinfo and .nar files for a release path')
10 |
11 | parser.add_argument('--cache', default=DEFAULT_BINARY_CACHE_URL, type=str)
12 | parser.add_argument('--release', required=True, type=str)
13 | parser.add_argument('--outdir', required=True, type=str)
14 | parser.add_argument('--concurrent', default=DEFAULT_CONCURRENT_DOWNLOADS, type=int)
15 |     parser.add_argument('--print_only', action='store_true')
16 |
17 | args = parser.parse_args()
18 | update_binary_cache(cache=args.cache, release=args.release, outdir=args.outdir, concurrent=args.concurrent, print_only=args.print_only)
19 |
--------------------------------------------------------------------------------
/nixos_release.json:
--------------------------------------------------------------------------------
1 | {
2 | "cache": "https://cache.nixos.org",
3 | "hydra": "https://hydra.nixos.org",
4 | "max_threads": 69,
5 | "releases": [
6 | {
7 | "channel": "nixos-17.03-small",
8 | "job": "tested",
9 | "jobset": "release-17.03-small",
10 | "keep": 7,
11 | "project": "nixos"
12 | },
13 | {
14 | "channel": "nixos-17.03",
15 | "job": "tested",
16 | "jobset": "release-17.03",
17 | "keep": 7,
18 | "project": "nixos"
19 | },
20 | {
21 | "channel": "nixos-17.09-small",
22 | "job": "tested",
23 | "jobset": "release-17.09-small",
24 | "keep": 7,
25 | "project": "nixos"
26 | },
27 | {
28 | "channel": "nixos-17.09",
29 | "job": "tested",
30 | "jobset": "release-17.09",
31 | "keep": 7,
32 | "project": "nixos"
33 | },
34 | {
35 | "channel": "nixos-unstable-small",
36 | "job": "tested",
37 | "jobset": "unstable-small",
38 | "keep": 7,
39 | "project": "nixos"
40 | },
41 | {
42 | "channel": "nixos-unstable",
43 | "job": "tested",
44 | "jobset": "trunk-combined",
45 | "keep": 7,
46 | "project": "nixos"
47 | },
48 | {
49 | "channel": "nixpkgs-unstable",
50 | "job": "unstable",
51 | "jobset": "trunk",
52 | "keep": 7,
53 | "project": "nixpkgs"
54 | }
55 | ],
56 | "target_cache": "https://cache.rrza.de"
57 | }
58 |
--------------------------------------------------------------------------------
/pkgs.nix:
--------------------------------------------------------------------------------
1 | import <nixpkgs> {}
2 |
--------------------------------------------------------------------------------
/progress/default.nix:
--------------------------------------------------------------------------------
1 | { stdenv, pkgs, fetchurl, pythonPackages }:
2 |
3 | pythonPackages.buildPythonPackage rec {
4 | name = "progress-${version}";
5 | version = "1.3";
6 | src = fetchurl {
7 | url = "mirror://pypi/p/progress/${name}.tar.gz";
8 | sha256 = "02pnlh96ixf53mzxr5lgp451qg6b7ff4sl5mp2h1cryh7gp8k3f8";
9 | };
10 |
11 | }
12 |
--------------------------------------------------------------------------------