├── .gitignore ├── .gitmodules ├── COPYRIGHT.txt ├── LICENSE.txt ├── bin ├── distfile-spider ├── fastpull-daemon ├── genkit ├── indy-kit-metadata-update ├── inject-file ├── kit-regenerate ├── merge-all-kits ├── merge-gentoo-staging ├── qa │ ├── bad-dns │ └── kit-set-mismatch └── trickle ├── modules ├── bug_utils.py └── merge │ ├── __init__.py │ ├── async_engine.py │ ├── async_portage.py │ ├── config.py │ ├── db_core.py │ ├── extensions │ └── xproto.py │ └── merge_utils.py ├── tests ├── extra_packages.py └── kit-fixups │ └── foo-kit │ ├── 1.0-prime │ └── sys-apps │ │ ├── foobar │ │ └── foobar-1.5.ebuild │ │ └── foobartronic │ │ └── foobartronic-1.0.ebuild │ └── 1.1-prime │ └── sys-apps │ ├── foobar │ └── foobar-1.6.ebuild │ └── funapp │ └── funapp-2.0.ebuild ├── utils ├── google_upload_server.py ├── pkglist.py ├── python3_kit_qa_check.py └── spider_common.py └── wip └── upgrade_steps.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | *.pyc 3 | *.pyo 4 | *~ 5 | #*# 6 | .idea 7 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "submodules/pyservices"] 2 | path = submodules/pyservices 3 | url = ssh://git@code.funtoo.org:7999/~drobbins/pyservices.git 4 | -------------------------------------------------------------------------------- /COPYRIGHT.txt: -------------------------------------------------------------------------------- 1 | Copyright 1999-2012 Gentoo Foundation, 2008-2018 Funtoo Solutions, and 2 | multiple authors. Distributed under the terms of the GNU General Public 3 | License version 2. 4 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | 294 | Copyright (C) 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 331 | 332 | , 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | -------------------------------------------------------------------------------- /bin/distfile-spider: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | import random 6 | import asyncio 7 | import aioftp 8 | import async_timeout 9 | import aiodns 10 | import aiohttp 11 | import logging 12 | from hashlib import sha256, sha512 13 | import socket 14 | from concurrent.futures import ThreadPoolExecutor 15 | # from utils.google_upload_server import google_upload 16 | from datetime import datetime, timedelta 17 | from sqlalchemy.orm import undefer 18 | from sqlalchemy import or_ 19 | 20 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../../modules"))) 21 | from merge.db_core import * 22 | 23 | # TODO: convert to .merge configuration setting: 24 | fastpull_out = "/home/mirror/fastpull" 25 | resolver = aiohttp.AsyncResolver(nameservers=['8.8.8.8', '8.8.4.4'], timeout=5, tries=3) 26 | 27 | thirdp = {} 28 | with open('/var/git/meta-repo/kits/core-kit/profiles/thirdpartymirrors', 'r') as fd: 29 | for line in fd.readlines(): 30 | ls = line.split() 31 | thirdp[ls[0]] = [] 32 | for x in ls[1:]: 33 | if "fastpull" in x: 34 | continue 35 | else: 36 | thirdp[ls[0]].append(x) 37 | 38 | # TODO: only try to download one filename of the same name at a time. 39 | 40 | # maximum number of third-party mirrors to consider for download: 41 | 42 | max_mirrors = 3 43 | mirror_blacklist = [ "gentooexperimental" ] 44 | 45 | def src_uri_process(uri_text, fn): 46 | # converts \n delimited text of all SRC_URIs for file from ebuild into a list containing: 47 | # [ mirror_path, [ mirrors ] -- where mirrors[0] + "/" + mirror_path is a valid dl path 48 | # 49 | # or string, where string is just a single download path. 50 | 51 | global thirdp 52 | uris_to_process = uri_text.split("\n") 53 | uris_to_process = [ "http://distfiles.gentoo.org/distfiles/" + fn ] + uris_to_process 54 | out_uris = [] 55 | for uri in uris_to_process: 56 | if len(uri) == 0: 57 | continue 58 | if uri.startswith("mirror://"): 59 | uri = uri[9:] 60 | mirror_name = uri.split("/")[0] 61 | mirror_path = "/".join(uri.split("/")[1:]) 62 | if mirror_name not in thirdp: 63 | print("!!! Error: no third-party mirror defined for %s" % mirror_name) 64 | continue 65 | out_mirrors = [] 66 | for my_mirror in thirdp[mirror_name]: 67 | skip = False 68 | for bl_entry in mirror_blacklist: 69 | if my_mirror.find(bl_entry) != -1: 70 | skip = True 71 | break 72 | if skip: 73 | continue 74 | out_mirrors.append(my_mirror) 75 | out_uris.append([mirror_path, out_mirrors[:max_mirrors]]) 76 | elif uri.startswith("http://") or uri.startswith("https://") or uri.startswith("ftp://"): 77 | out_uris.append(uri) 78 | return out_uris 79 | 80 | def get_sha512(fn): 81 | with open(fn, "rb") as data: 82 | my_hash = sha512() 83 | my_hash.update(data.read()) 84 | return my_hash.hexdigest() 85 | 86 | async def ftp_fetch(host, path, outfile, digest_func): 87 | client = aioftp.Client() 88 | await client.connect(host) 89 | await client.login("anonymous", "drobbins@funtoo.org") 90 | fd = open(outfile, 'wb') 91 | hash = digest_func() 92 | if not await client.exists(path): 93 | return ("ftp_missing", None) 94 | stream = await client.download_stream(path) 95 | async for block in stream.iter_by_block(chunk_size): 96 | sys.stdout.write(".") 97 | sys.stdout.flush() 98 | fd.write(block) 99 | hash.update(block) 100 | await stream.finish() 101 | cur_digest = hash.hexdigest() 102 | await client.quit() 103 | fd.close() 104 | return (None, cur_digest) 105 | 106 | http_data_timeout = 60 107 | 108 | async def http_fetch(url, outfile, digest_func): 109 | global resolver 110 | connector = aiohttp.TCPConnector(family=socket.AF_INET,resolver=resolver,verify_ssl=False) 111 | headers = {} 112 | fmode = 'wb' 113 | hash = digest_func() 114 | if os.path.exists(outfile): 115 | os.unlink(outfile) 116 | async with aiohttp.ClientSession(connector=connector) as http_session: 117 | async with http_session.get(url, headers=headers, timeout=None) as response: 118 | if response.status != 200: 119 | return ("http_%s" % response.status, None) 120 | with open(outfile, fmode) as fd: 121 | while True: 122 | #with aiohttp.Timeout(http_data_timeout): 123 | try: 124 | chunk = await response.content.read(chunk_size) 125 | if not chunk: 126 | break 127 | else: 128 | sys.stdout.write(".") 129 | sys.stdout.flush() 130 | fd.write(chunk) 131 | hash.update(chunk) 132 | except aiohttp.EofStream: 133 | pass 134 | cur_digest = hash.hexdigest() 135 | return (None, cur_digest) 136 | 137 | 138 | def next_uri(uri_expand): 139 | for src_uri in uri_expand: 140 | if type(src_uri) == list: 141 | for uri in src_uri[1]: 142 | real_uri = uri 143 | if not real_uri.endswith("/"): 144 | real_uri += "/" 145 | real_uri += src_uri[0] 146 | yield real_uri 147 | else: 148 | yield src_uri 149 | 150 | fastpull_count = 0 151 | 152 | def fastpull_index(outfile, distfile_final): 153 | global fastpull_count 154 | # add to fastpull. 155 | d1 = distfile_final.rand_id[0] 156 | d2 = distfile_final.rand_id[1] 157 | outdir = os.path.join(fastpull_out, d1, d2) 158 | if not os.path.exists(outdir): 159 | os.makedirs(outdir) 160 | fastpull_outfile = os.path.join(outdir, distfile_final.rand_id) 161 | if os.path.lexists(fastpull_outfile): 162 | os.unlink(fastpull_outfile) 163 | os.link(outfile, fastpull_outfile) 164 | fastpull_count += 1 165 | return os.path.join(d1, d2, distfile_final.rand_id) 166 | 167 | async def keep_getting_files(db, task_num, q): 168 | timeout = 4800 169 | 170 | while True: 171 | 172 | # continually grab files.... 173 | d_id = await q.get() 174 | 175 | progress_map[d_id] = "selected" 176 | 177 | with db.get_session() as session: 178 | # This will attach to our current session 179 | await asyncio.sleep(0.1) 180 | d = session.query(db.QueuedDistfile).filter(db.QueuedDistfile.id == d_id).first() 181 | if d is None: 182 | print("File %s is none." % d_id) 183 | # no longer exists, no longer in progress, next file... 184 | progress_set.remove(d_id) 185 | continue 186 | 187 | #if not d.mirror: 188 | # print("No mirroring set for %s, deleting from queue." % d.filename) 189 | # session.delete(d) 190 | # session.commit() 191 | # progress_set.remove(d_id) 192 | # continue 193 | 194 | if d.digest_type == "sha256": 195 | digest_func = sha256 196 | else: 197 | digest_func = sha512 198 | 199 | uris = [] 200 | if d.src_uri is not None: 201 | uris = src_uri_process(d.src_uri, d.filename) 202 | if len(uris) == 0: 203 | print("Error: for file %s, no URIs available; skipping." % d.filename) 204 | try: 205 | session.delete(d) 206 | session.commit() 207 | except sqlalchemy.exc.InvalidRequestError: 208 | pass 209 | # already deleted by someone else 210 | progress_set.remove(d_id) 211 | continue 212 | 213 | filename = d.filename 214 | outfile = os.path.join("/home/mirror/distfiles/%s/%s" % (task_num, filename)) 215 | try: 216 | os.makedirs(os.path.dirname(outfile)) 217 | except FileExistsError: 218 | pass 219 | mylist = list(next_uri(uris)) 220 | fail_mode = None 221 | 222 | progress_map[d_id] = "dl_check" 223 | 224 | # if we have a sha512, then we can to a pre-download check to see if the file has been grabbed before. 225 | if d.digest_type == "sha512" and d.digest is not None: 226 | existing = session.query(db.Distfile).filter(db.Distfile.id == d.digest).first() 227 | if existing: 228 | if d.filename == existing.filename: 229 | print("%s already downloaded; skipping." % d.filename) 230 | session.delete(d) 231 | session.commit() 232 | # move to next file.... 233 | progress_set.remove(d_id) 234 | continue 235 | else: 236 | print("Filename %s exists under another SHA1 (%s) -- adding a mapping..." % (d.filename, existing.filename)) 237 | d_final = db.Distfile() 238 | d_final.id = existing.id 239 | d_final.rand_id = existing.rand_id 240 | d_final.filename = d.filename 241 | d_final.digest_type = d.digest_type 242 | if d.digest_type != "sha512": 243 | d_final.alt_digest = digest 244 | d_final.size = d.size 245 | d_final.catpkg = d.catpkg 246 | d_final.kit = d.kit 247 | d_final.src_uri = existing.src_uri 248 | d_final.mirror = d.mirror 249 | d_final.last_fetched_on = existing.last_fetched_on 250 | session.delete(d) 251 | session.add(d_final) 252 | session.commit() 253 | try: 254 | fastpull_file = fastpull_index(outfile, d_final) 255 | # add to queue to upload to google: 256 | # loop = asyncio.get_event_loop() 257 | # loop.run_in_executor(thread_exec, google_upload, fastpull_file) 258 | 259 | except FileNotFoundError: 260 | # something went bad, couldn't find file for indexing. 261 | fail_mode = "notfound" 262 | continue 263 | 264 | session.add(d_final) 265 | session.delete(d) 266 | session.commit() 267 | 268 | 269 | session.expunge_all() 270 | 271 | # force session close before download by exiting "with" 272 | 273 | last_uri = None 274 | 275 | for real_uri in mylist: 276 | 277 | # iterate through each potential URI for downloading a particular distfile. We'll keep trying until 278 | # we find one that works. 279 | 280 | # fail_mode will effectively store the last reason why our download failed. We reset it each iteration, 281 | # which is what we want. If fail_mode is set to something after our big loop exits, we know we have 282 | # truly failed downloading this distfile. 283 | 284 | print("Trying URI", real_uri) 285 | 286 | progress_map[d_id] = real_uri 287 | fail_mode = None 288 | 289 | if real_uri.startswith("ftp://"): 290 | # handle ftp download -- 291 | host_parts = real_uri[6:] 292 | host = host_parts.split("/")[0] 293 | path = "/".join(host_parts.split("/")[1:]) 294 | try: 295 | digest = None 296 | with async_timeout.timeout(timeout): 297 | fail_mode, digest = await ftp_fetch(host, path, outfile, digest_func) 298 | except asyncio.TimeoutError as e: 299 | fail_mode = "timeout" 300 | continue 301 | except socket.gaierror as e: 302 | fail_mode = "dnsfail" 303 | continue 304 | except OSError: 305 | fail_mode = "refused" 306 | continue 307 | except aioftp.errors.StatusCodeError: 308 | fail_mode = "ftp_code" 309 | continue 310 | except Exception as e: 311 | fail_mode = str(e) 312 | raise 313 | print("Download failure:", fail_mode) 314 | continue 315 | else: 316 | # handle http/https download -- 317 | try: 318 | digest = None 319 | with async_timeout.timeout(timeout): 320 | fail_mode, digest = await http_fetch(real_uri, outfile, digest_func) 321 | except asyncio.TimeoutError as e: 322 | fail_mode = "timeout" 323 | continue 324 | except aiodns.error.DNSError as e: 325 | fail_mode = "dnsfail" 326 | continue 327 | except ValueError as e: 328 | fail_mode = "bad_url" 329 | continue 330 | except aiohttp.errors.ClientOSError as e: 331 | fail_mode = "refused" 332 | continue 333 | except aiohttp.errors.ServerDisconnectedError as e: 334 | fail_mode = "disconn" 335 | continue 336 | except aiohttp.errors.ClientError: 337 | fail_mode = "aiohttp" 338 | continue 339 | except Exception as e: 340 | fail_mode = str(e) 341 | print("Download failure:", fail_mode) 342 | continue 343 | 344 | del progress_map[d_id] 345 | 346 | if d.digest is None or (digest is not None and digest == d.digest): 347 | # success! we can record our fine ketchup: 348 | 349 | if d.digest_type == "sha512" and digest is not None: 350 | my_id = digest 351 | else: 352 | try: 353 | my_id = get_sha512(outfile) 354 | except FileNotFoundError: 355 | fail_mode = "notfound" 356 | continue 357 | 358 | # create new session after download completes (successfully or not) 359 | with db.get_session() as session: 360 | 361 | existing = session.query(db.Distfile).filter(db.Distfile.id == my_id).first() 362 | 363 | if existing is not None: 364 | if existing.filename == filename: 365 | print("Downloaded %s, but already exists in our db. Skipping." % d.filename) 366 | fail_mode = None 367 | session.delete(d) 368 | session.commit() 369 | os.unlink(outfile) 370 | # done; process next distfile 371 | break 372 | 373 | d_final = db.Distfile() 374 | 375 | d_final.id = my_id 376 | d_final.rand_id = ''.join(random.choice('abcdef0123456789') for _ in range(128)) 377 | d_final.filename = d.filename 378 | d_final.digest_type = d.digest_type 379 | if d.digest_type != "sha512": 380 | d_final.alt_digest = digest 381 | d_final.size = d.size 382 | d_final.catpkg = d.catpkg 383 | d_final.kit = d.kit 384 | d_final.src_uri = d.src_uri 385 | d_final.mirror = d.mirror 386 | d_final.last_fetched_on = datetime.utcnow() 387 | 388 | 389 | try: 390 | fastpull_file = fastpull_index(outfile, d_final) 391 | # add to queue to upload to google: 392 | # loop = asyncio.get_event_loop() 393 | # loop.run_in_executor(thread_exec, google_upload, fastpull_file) 394 | 395 | except FileNotFoundError: 396 | # something went bad, couldn't find file for indexing. 397 | fail_mode = "notfound" 398 | continue 399 | 400 | session.add(d_final) 401 | session.delete(d) 402 | session.commit() 403 | 404 | os.unlink(outfile) 405 | # done; process next distfile 406 | break 407 | else: 408 | fail_mode = "digest" 409 | 410 | if fail_mode: 411 | # If we tried all SRC_URIs, and still failed, we will end up here, with fail_mode set to something. 412 | with db.get_session() as session: 413 | d = session.query(db.QueuedDistfile).filter(db.QueuedDistfile.id == d_id).first() 414 | if d == None: 415 | # object no longer exists, so skip this update: 416 | pass 417 | else: 418 | d.last_failure_on = d.last_attempted_on = datetime.utcnow() 419 | d.failtype = fail_mode 420 | d.failcount += 1 421 | session.add(d) 422 | session.commit() 423 | print() 424 | print("Download failure: %s" % d.filename) 425 | if last_uri: 426 | print(" Last URI:", last_uri) 427 | print(" Failure reason: %s" % fail_mode) 428 | print(" Expected filesize: %s" % d.size) 429 | outfile = os.path.join("/home/mirror/distfiles/", d.filename) 430 | if os.path.exists(outfile): 431 | print(" Partial filesize: %s" % os.path.getsize(outfile)) 432 | print() 433 | else: 434 | # we end up here if we are successful. Do successful output. 435 | sys.stdout.write("^") 436 | sys.stdout.flush() 437 | progress_set.remove(d_id) 438 | 439 | queue_size = 60 440 | query_size = 60 441 | workr_size = 10 442 | 443 | pending_q = asyncio.Queue(maxsize=queue_size) 444 | # set of all QueuedDistfile IDs currently being processed: 445 | progress_set = set() 446 | # dictionary of status info for all QueuedDistfile IDs: 447 | progress_map = {} 448 | 449 | async def qsize(q): 450 | while True: 451 | print() 452 | print("Queue size: %s" % q.qsize()) 453 | print("Added to fastpull: %s" % fastpull_count) 454 | print("In pending queue: %s" % pending_q.qsize()) 455 | print("In progress: %s" % len(list(map(str,progress_set)))) 456 | print("IDs in progress:") 457 | for my_id in sorted(list(progress_set)): 458 | print("{:8s}".format(str(my_id)), end="") 459 | if my_id in progress_map: 460 | print(progress_map[my_id]) 461 | else: 462 | print() 463 | # clean up stale progress_map entries 464 | to_del = [] 465 | for my_id in progress_map.keys(): 466 | if my_id not in progress_set: 467 | to_del.append(my_id) 468 | for my_id in to_del: 469 | del progress_map[my_id] 470 | await asyncio.sleep(15) 471 | 472 | async def get_more_distfiles(db, q): 473 | global now 474 | time_cutoff = datetime.utcnow() - timedelta(hours=24) 475 | #time_cutoff_hr = datetime.utcnow() - timedelta(hours=2) 476 | time_cutoff_hr = datetime.utcnow() 477 | # The asyncio.sleep() calls below not only sleep, they also turn this into a true async function. Otherwise we 478 | # would not allow other coroutines to run. 479 | while True: 480 | with db.get_session() as session: 481 | results = session.query(db.QueuedDistfile) 482 | results = results.options(undefer('last_attempted_on')) 483 | results = results.filter(or_(db.QueuedDistfile.last_attempted_on < time_cutoff, db.QueuedDistfile.last_attempted_on == None)) 484 | #results = results.filter(db.QueuedDistfile.mirror == True) 485 | results = results.order_by(db.QueuedDistfile.last_attempted_on) 486 | results = list(results.limit(query_size)) 487 | session.expunge_all() 488 | # force session to close here 489 | if len(list(results)) == 0: 490 | await asyncio.sleep(5) 491 | else: 492 | added = 0 493 | for d in results: 494 | if d.id not in progress_set: 495 | await q.put(d.id) 496 | # track file ids in progress. 497 | progress_set.add(d.id) 498 | progress_map[d.id] = "queued" 499 | added += 1 500 | if added == 0: 501 | await asyncio.sleep(0.5) 502 | 503 | 504 | #import logging 505 | #logging.basicConfig() 506 | #logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO) 507 | 508 | logging.basicConfig( 509 | level=logging.INFO, 510 | format='PID %(process)5s %(name)18s: %(message)s', 511 | stream=sys.stderr, 512 | ) 513 | 514 | google_server_status = None 515 | 516 | chunk_size = 65536 517 | db = FastPullDatabase() 518 | loop = asyncio.get_event_loop() 519 | now = datetime.utcnow() 520 | thread_exec = ThreadPoolExecutor(max_workers=1) 521 | tasks = [ 522 | asyncio.async(get_more_distfiles(db, pending_q)), 523 | asyncio.async(qsize(pending_q)), 524 | ] 525 | 526 | for x in range(0,workr_size): 527 | tasks.append(asyncio.async(keep_getting_files(db, x, pending_q))) 528 | 529 | loop.run_until_complete(asyncio.gather(*tasks)) 530 | loop.close() 531 | 532 | # vim: ts=4 sw=4 noet 533 | -------------------------------------------------------------------------------- /bin/fastpull-daemon: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | from tornado.httpserver import HTTPServer 6 | import tornado.web 7 | import tornado.gen 8 | from tornado.ioloop import IOLoop 9 | 10 | from tornado.log import enable_pretty_logging 11 | enable_pretty_logging() 12 | import sqlalchemy.exc 13 | 14 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../../modules"))) 15 | from merge.db_core import * 16 | 17 | class UptimeHandler(tornado.web.RequestHandler): 18 | 19 | def get(self): 20 | self.set_status(200) 21 | return 22 | 23 | class RedirectHandler(tornado.web.RequestHandler): 24 | 25 | # send people to google... 26 | # redirect_url = "https://storage.googleapis.com/fastpull-us/%s/%s/%s" 27 | 28 | # or send people to us... 29 | #redirect_url = "/%s/%s/%s" 30 | redirect_url = "https://1153732560.rsc.cdn77.org/%s/%s/%s" 31 | def get(self,fn): 32 | fn = os.path.basename(fn) 33 | success = False 34 | for attempt in range(0,3): 35 | try: 36 | with self.application.db.get_session() as session: 37 | result = session.query(self.application.db.Distfile).filter(self.application.db.Distfile.filename == fn).first() 38 | if not result: 39 | if not fn.endswith("/") and len(fn): 40 | miss = session.query(self.application.db.MissingRequestedFile).filter(self.application.db.MissingRequestedFile.filename == fn).first() 41 | if miss is None: 42 | miss = self.application.db.MissingRequestedFile() 43 | miss.filename = fn 44 | miss.last_failure_on = datetime.utcnow() 45 | if miss.failcount is None: 46 | miss.failcount = 0 47 | miss.failcount += 1 48 | session.add(miss) 49 | session.commit() 50 | else: 51 | rand_id = result.rand_id 52 | success = True 53 | session.close() 54 | break 55 | except sqlalchemy.exc.OperationalError: 56 | pass 57 | except sqlalchemy.exc.SQLAlchemyError: 58 | pass 59 | if success: 60 | url = self.redirect_url % ( rand_id[0], rand_id[1], rand_id ) 61 | self.redirect(url, permanent=False) 62 | else: 63 | self.set_status(404) 64 | 65 | settings = { 66 | "xsrf_cookies": False, 67 | "cache_json" : False, 68 | } 69 | 70 | class Application(tornado.web.Application): 71 | 72 | name = "fastpull alpha service" 73 | handlers = [ 74 | (r"/distfiles/distfiles/(.*)", RedirectHandler), 75 | (r"/distfiles/(.*)", RedirectHandler), 76 | (r"/up", UptimeHandler), 77 | (r"/(.*)", RedirectHandler), 78 | ] 79 | 80 | def __init__(self): 81 | tornado.web.Application.__init__(self, self.handlers, **settings) 82 | 83 | application = Application() 84 | application.db = FastPullDatabase() 85 | http_server = HTTPServer(application, xheaders=True) 86 | http_server.bind(8080, "127.0.0.1") 87 | http_server.start() 88 | 89 | # start ioloop 90 | IOLoop.instance().start() 91 | 92 | # vim: ts=4 sw=4 noet 93 | -------------------------------------------------------------------------------- /bin/genkit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | 6 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../../modules"))) 7 | 8 | -------------------------------------------------------------------------------- /bin/indy-kit-metadata-update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | # This utility is designed to be run on independently-maintained kits, whose paths should be specified as command-line arguments. 4 | # All branches defined in kit-fixups will be updated to contain up-to-date python USE settings as well as metadata cache. This 5 | # used to be done automatically by our auto-generation scripts, but since the kit is independently-maintained, we have to do 6 | # it using this little tool. 7 | # 8 | # Use as follows: 9 | # 10 | # As a regular user with commit/push rights to the kits, set up a ~/.merge config file as follows: 11 | # 12 | # [sources] 13 | # 14 | # flora = git@github.com:funtoo/flora 15 | # kit-fixups = git@github.com:funtoo/kit-fixups 16 | # 17 | # [work] 18 | # 19 | # source = /var/src 20 | # destination = /var/src 21 | # 22 | # This is a basic config file that just indicates that the code should look for core-kit and kit-fixups in /var/src, and 23 | # when it looks at these repos, the source URI matches what it expects. 24 | # 25 | # Next, make sure that your user is in the portage group and has the ability to write to /var/cache/edb: 26 | # 27 | # # chown -R drobbins:portage /var/cache/edb 28 | # 29 | # Next, run the following command, which will generate updated metadata and also add correct python-use settings to the 30 | # independently-maintained kit, and push these to origin: 31 | # 32 | # cd /var/src/merge-scripts 33 | # bin/indy-kit-metadata-update /var/src/xorg-kit /var/src/gnome-kit 34 | # 35 | # That's all there is to it! :) 36 | 37 | import os 38 | import sys 39 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../../modules"))) 40 | import merge.merge_utils as mu 41 | from merge.config import Configuration 42 | import asyncio 43 | from merge.merge_utils import KitStabilityRating, KitType 44 | 45 | async def main_thread(config, args): 46 | 47 | fixup_repo = mu.GitTree("kit-fixups", config.branch("kit-fixups"), config=config, url=config.kit_fixups, root=config.source_trees + "/kit-fixups") 48 | await fixup_repo.initialize() 49 | # once the correct branch is checked out, then we want to do this import: 50 | sys.path.insert(0, fixup_repo.root + "/modules") 51 | from fixups.foundations import KitFoundation 52 | 53 | foundation = KitFoundation(config, KitStabilityRating, KitType) 54 | 55 | # developer specified a path to a git repo on the command-line, now we will look in all defined releases for independently-maintained kits with this 56 | # name and process all defined branches: 57 | 58 | already_processed_kit_keys = set() 59 | 60 | for path in args.kit_paths: 61 | path = os.path.realpath(path) 62 | print('Processing path %s' % path) 63 | if not os.path.isdir(path): 64 | print("Error: %s does not exist or is not a directory. Skipping." % path) 65 | continue 66 | 67 | kit_name = os.path.basename(path) 68 | for release, release_kits in foundation.kit_groups.items(): 69 | for kit_dict in release_kits: 70 | 71 | # Don't process kits that don't match: 72 | kit_branch = kit_dict["branch"] 73 | if "type" not in kit_dict or kit_dict["type"] != KitType.INDEPENDENTLY_MAINTAINED: 74 | continue 75 | if "name" not in kit_dict or kit_dict["name"] != kit_name: 76 | continue 77 | 78 | # Don't repeatedly process the same kit (can be defined in multiple releases) 79 | kit_key = "%s/%s" % (kit_name, kit_branch) 80 | if kit_key in already_processed_kit_keys: 81 | continue 82 | else: 83 | already_processed_kit_keys.add(kit_key) 84 | 85 | # OK, if we got here, we have something to process -- 86 | print("Processing branch %s" % kit_dict["branch"]) 87 | repo_config = mu.RepositoryConfig(kit_name, path, kit_dict["branch"], config) 88 | indy_kit = 'type' in kit_dict and kit_dict['type'] == KitType.INDEPENDENTLY_MAINTAINED 89 | tree = mu.GitTree(name=kit_name, root=path, config=config, url=config.base_url(kit_name) if indy_kit else config.indy_url(kit_name), branch=kit_dict["branch"], create=False) 90 | await tree.gitCheckout(branch=kit_dict["branch"]) 91 | steps = [ mu.GenCache(repo_config) ] + mu.generatePythonUSEUpdateSteps(repo_config, foundation, kit_name) 92 | await tree.run(steps) 93 | await tree.gitCommit(message="python USE setting and metadata cache updates", push=True, mirror=False) 94 | 95 | if __name__ == "__main__": 96 | import argparse 97 | 98 | parser = argparse.ArgumentParser() 99 | parser.add_argument("--config", type=str, default=None, help="Specify config file. Defaults to ~/.merge.") 100 | parser.add_argument("kit_paths", metavar="K", type=str, nargs="+", help="path to independently-maintained kits.") 101 | args = parser.parse_args() 102 | 103 | config = Configuration(args.config) 104 | 105 | loop = asyncio.get_event_loop() 106 | loop.run_until_complete(main_thread(config, args)) 107 | sys.exit(0) 108 | -------------------------------------------------------------------------------- /bin/inject-file: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | from hashlib import sha512 6 | from optparse import OptionParser 7 | 8 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../../modules"))) 9 | from merge.db_core import * 10 | 11 | parser = OptionParser() 12 | parser.add_option("--catpkg", dest="catpkg", help="catpkg of ebuild") 13 | parser.add_option("--src_uri", dest="src_uri", help="download URL") 14 | parser.add_option("--kit", dest="kit", help="kit of ebuild") 15 | parser.add_option("--branch", dest="branch", help="branch of kit") 16 | parser.add_option("--replace", dest="replace", action="store_true", default=False, help="replace existing distfile in fastpull.") 17 | options, args = parser.parse_args() 18 | 19 | if len(args) != 1: 20 | print("Please specify a single file to inject into queued distfiles.") 21 | sys.exit(1) 22 | 23 | def get_sha512(fn): 24 | with open(fn, "rb") as data: 25 | my_hash = sha512() 26 | my_hash.update(data.read()) 27 | return my_hash.hexdigest() 28 | 29 | db = FastPullDatabase() 30 | fn = args[0] 31 | 32 | if not os.path.exists(fn): 33 | print("File %s does not exist. Can't inject." % fn) 34 | sys.exit(1) 35 | 36 | with db.get_session() as session: 37 | existing = session.query(db.Distfile).filter(db.Distfile.filename == os.path.basename(fn)).first() 38 | if existing: 39 | if options.replace is True: 40 | print("Removing distfile entry for existing file.") 41 | session.delete(existing) 42 | session.commit() 43 | else: 44 | print("File already exists in distfiles. Skipping.") 45 | sys.exit(1) 46 | qdsf = db.QueuedDistfile() 47 | qdsf.filename = os.path.basename(fn) 48 | qdsf.catpkg = options.catpkg 49 | qdsf.kit = options.kit 50 | qdsf.branch = options.branch 51 | qdsf.src_uri = options.src_uri 52 | qdsf.size = os.path.getsize(fn) 53 | qdsf.digest_type = "sha512" 54 | qdsf.digest = get_sha512(fn) 55 | with db.get_session() as session: 56 | # get_sha512() can take a long time; session can time out. 57 | session.add(qdsf) 58 | session.commit() 59 | print("Injected file %s into queued distfiles." % fn) 60 | -------------------------------------------------------------------------------- /bin/kit-regenerate: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import asyncio 4 | import os 5 | import sys 6 | 7 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../../modules"))) 8 | import merge.merge_utils as mu 9 | from merge.config import Configuration 10 | from merge.merge_utils import KitType, KitStabilityRating, getKitSourceInstances, GitTree, copyFromSourceRepositoriesSteps 11 | 12 | 13 | # This script is designed to re-copy a fresh set of ebuilds to a kit that is typically independently-maintained. So 14 | # it will "re-generate" the kit using the package-sets, and also apply any fixups that might still exist. It can be 15 | # used to update an independently-maintained kit to a new snapshot. It should be used on a test branch which could 16 | # then be merged into the official branch. 17 | 18 | async def main_thread(config, args): 19 | fixup_repo = mu.GitTree("kit-fixups", config.branch("kit-fixups"), config=config, url=config.kit_fixups, 20 | root=config.source_trees + "/kit-fixups") 21 | await fixup_repo.initialize() 22 | 23 | # once the correct branch is checked out, then we want to do this import: 24 | sys.path.insert(0, fixup_repo.root + "/modules") 25 | from fixups.foundations import KitFoundation 26 | 27 | foundation = KitFoundation(config, kitType=KitType, stabilityRating=KitStabilityRating) 28 | release = args.release 29 | 30 | kit_dict = foundation.kit_groups[release][args.kit] 31 | repos = getKitSourceInstances(foundation, config, kit_dict) 32 | 33 | tree = GitTree(kit_dict['name'], args.branch, config=config, 34 | url=config.base_url(kit_dict['name']), create=False, 35 | root="%s/%s" % (config.dest_trees, kit_dict['name']), 36 | origin_check=False) 37 | 38 | for repo_dict in repos: 39 | steps = await copyFromSourceRepositoriesSteps(repo_dict=repo_dict, kit_dict=kit_dict, source_defs=repos, 40 | release=release, secondary_kit=False, 41 | fixup_repo=fixup_repo, cpm_logger=None, move_maps=dict()) 42 | await tree.run(steps) 43 | 44 | 45 | if __name__ == "__main__": 46 | import argparse 47 | 48 | parser = argparse.ArgumentParser() 49 | parser.add_argument("release", type=str, default=None, required=True, help="Specify release to generate.") 50 | parser.add_argument("kit", type=str, default=None, required=True, help="Specify kit to generate.") 51 | parser.add_argument("branch", type=str, default=None, required=True, help="Specify branch to write to.") 52 | parser.add_argument("--config", type=str, default=None, help="Specify config file. Defaults to ~/.merge.") 53 | args = parser.parse_args() 54 | 55 | config = Configuration(args.config) 56 | loop = asyncio.get_event_loop() 57 | loop.run_until_complete(main_thread(config, args)) 58 | sys.exit(0) 59 | 60 | # vim: ts=4 sw=4 noet tw=140 61 | -------------------------------------------------------------------------------- /bin/merge-all-kits: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import asyncio 4 | import atexit 5 | import json 6 | import os 7 | import sys 8 | from collections import defaultdict, OrderedDict 9 | from datetime import datetime 10 | 11 | submod_path = os.path.normpath(os.path.join(os.path.realpath(__file__), "../../submodules")) 12 | has_submodules = set() 13 | for repo_path in os.listdir(submod_path): 14 | if os.path.exists(os.path.join(submod_path, repo_path, "modules")): 15 | sys.path.insert(0, os.path.join(submod_path, repo_path, "modules")) 16 | has_submodules.add(repo_path) 17 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../../modules"))) 18 | 19 | import merge.merge_utils as mu 20 | from merge.config import Configuration 21 | from merge.merge_utils import AsyncMergeAllKits, updateKit 22 | from merge.merge_utils import KitStabilityRating, KitType, KitRatingString 23 | 24 | if True: 25 | #"pyservices" not in has_submodules: 26 | hub_client = None 27 | 28 | def send_msg(json_dict): 29 | pass 30 | 31 | else: 32 | print("Initializing pyservices.") 33 | from pyservices.zmq.http_server import HubClient, HubClientMode 34 | from pyservices.zmq.zmq_msg_breezyops import BreezyMessage, MessageType 35 | hub_client = HubClient("merge-scripts", mode=HubClientMode.EXTERNAL) 36 | 37 | def send_msg(json_dict): 38 | if "pyservices" not in has_submodules: 39 | return 40 | msg_obj = BreezyMessage( 41 | service="webhook", 42 | action="task-event", 43 | msg_type=MessageType.INFO, 44 | json_dict=json_dict 45 | ) 46 | hub_client.send_nowait(msg_obj) 47 | 48 | async def generate_kit_metadata(foundation, release, meta_repo, output_sha1s): 49 | """ 50 | Generates the metadata in /var/git/meta-repo/metadata/... 51 | :param release: the release string, like "1.3-release". 52 | :param meta_repo: the meta-repo GitTree. 53 | :return: None. 54 | """ 55 | 56 | if not os.path.exists(meta_repo.root + "/metadata"): 57 | os.makedirs(meta_repo.root + "/metadata") 58 | 59 | with open(meta_repo.root + "/metadata/kit-sha1.json", "w") as a: 60 | a.write(json.dumps(output_sha1s, sort_keys=True, indent=4, ensure_ascii=False)) 61 | 62 | outf = meta_repo.root + "/metadata/kit-info.json" 63 | 64 | with open(outf, 'w') as a: 65 | k_info = {} 66 | out = [] 67 | out_settings = defaultdict(lambda: defaultdict(dict)) 68 | for kit_dict in foundation.kit_groups[release]: 69 | kit_name = kit_dict["name"] 70 | if kit_name not in out: 71 | out.append(kit_name) 72 | # specific keywords that can be set for each branch to identify its current quality level 73 | out_settings[kit_name]['stability'][kit_dict["branch"]] = KitRatingString(kit_dict["stability"]) 74 | out_settings[kit_name]['type'] = kit_dict["type"].value if "type" in kit_dict else KitType.AUTOMATICALLY_GENERATED.value 75 | k_info["kit_order"] = out 76 | k_info["kit_settings"] = out_settings 77 | 78 | # auto-generate release-defs. We used to define them manually in foundation: 79 | 80 | kit_name_set = OrderedDict() 81 | for kit_dict in foundation.kit_groups[release]: 82 | kit_name_set[kit_dict["name"]] = True 83 | 84 | rdefs = {} 85 | for kit_name in kit_name_set.keys(): 86 | rdefs[kit_name] = [] 87 | for def_kit in filter(lambda x: x["name"] == kit_name and x["stability"] not in [KitStabilityRating.DEPRECATED], foundation.kit_groups[release]): 88 | rdefs[kit_name].append(def_kit["branch"]) 89 | 90 | if release in ["1.2-release"]: 91 | # metadata format version 1: 92 | k_info["release_defs"] = {"1.2": rdefs } 93 | else: 94 | # newer metadata format version 10: drop release version dict: 95 | k_info["release_defs"] = rdefs 96 | k_info["release_info"] = getattr(foundation, "release_info", None) 97 | a.write(json.dumps(k_info, sort_keys=True, indent=4, ensure_ascii=False)) 98 | 99 | with open(meta_repo.root + "/metadata/version.json", "w") as a: 100 | a.write(json.dumps(foundation.metadata_version_info[release], sort_keys=True, indent=4, ensure_ascii=False)) 101 | 102 | 103 | class KitQualityError(Exception): 104 | 105 | pass 106 | 107 | 108 | async def kit_qa_check(foundation): 109 | 110 | # Make sure we don't redefine the same kit branch -- it's bad. 111 | 112 | all_kit_branches = defaultdict(dict) 113 | independent_kits = defaultdict(set) 114 | for release, kit_list in foundation.kit_groups.items(): 115 | for kit in kit_list: 116 | if "type" in kit and kit["type"] == KitType.INDEPENDENTLY_MAINTAINED: 117 | independent_kits[kit["name"]].add(kit["branch"]) 118 | continue 119 | kit_name = kit["name"] 120 | kit_branch = kit["branch"] 121 | if kit_branch in all_kit_branches[kit_name]: 122 | raise KitQualityError("Kit %s branch %s is defined multiple times. Exiting." % (kit_name, kit_branch)) 123 | if kit_name in independent_kits: 124 | raise KitQualityError("Kit %s is already tagged as independently-maintained but auto-generated entry exists. This is not allowed. Exiting." % kit_name) 125 | all_kit_branches[kit_name][kit_branch] = kit 126 | return True 127 | 128 | 129 | async def main_thread(config, args): 130 | 131 | if hub_client is not None: 132 | asyncio.create_task(hub_client.start()) 133 | # one global timestamp for each run of this tool -- for mysql db 134 | now = datetime.utcnow() 135 | 136 | send_msg({ 137 | "task": "merge-all-kits", 138 | "event": "started", 139 | "arguments": sys.argv, 140 | }) 141 | 142 | fixup_repo = mu.GitTree("kit-fixups", config.branch("kit-fixups"), config=config, url=config.kit_fixups, root=config.source_trees + "/kit-fixups") 143 | await fixup_repo.initialize() 144 | meta_repo = mu.GitTree("meta-repo", config.branch("meta-repo"), config=config, url=config.base_url("meta-repo"), root=config.dest_trees + "/meta-repo", mirror = config.mirror.rstrip("/") + "/meta-repo" if config.mirror else None, origin_check=True, destfix=args.destfix) 145 | await meta_repo.initialize() 146 | 147 | # once the correct branch is checked out, then we want to do this import: 148 | sys.path.insert(0, fixup_repo.root + "/modules") 149 | from fixups.foundations import KitFoundation 150 | 151 | foundation = KitFoundation(config, KitStabilityRating, KitType) 152 | push = not args.nopush 153 | 154 | await kit_qa_check(foundation) 155 | 156 | num_threads = 40 157 | async_engine = None 158 | 159 | if args.db is True: 160 | async_engine = AsyncMergeAllKits(num_threads=num_threads) 161 | async_engine.start_threads(enable_workers=True if num_threads != 0 else False) 162 | atexit.register(async_engine.exit_handler) 163 | 164 | if args.release == "all": 165 | releases = foundation.kit_groups.keys() 166 | else: 167 | if args.release not in foundation.kit_groups.keys(): 168 | print("Error: cannot find release \"%s\"." % args.release) 169 | sys.exit(1) 170 | else: 171 | releases = [args.release] 172 | 173 | for release in releases: 174 | 175 | cpm_logger = mu.CatPkgMatchLogger(log_xml=push) 176 | if not release.endswith("-release"): 177 | continue 178 | 179 | target_branch = "master" if release == "1.2-release" else release 180 | await meta_repo.gitCheckout(target_branch) 181 | 182 | output_sha1s = defaultdict(lambda: defaultdict(dict)) 183 | prev_kit_dict = None 184 | 185 | for kit_dict in foundation.kit_groups[release]: 186 | print("Regenerating kit ", kit_dict) 187 | head = await updateKit(foundation, config, release, async_engine, kit_dict, prev_kit_dict, cpm_logger, create=not push, destfix=args.destfix, push=push, now=now, fixup_repo=fixup_repo, indypush=args.indypush) 188 | kit_name = kit_dict["name"] 189 | output_sha1s[kit_name][kit_dict["branch"]] = head 190 | prev_kit_dict = kit_dict 191 | await generate_kit_metadata(foundation, release, meta_repo, output_sha1s) 192 | await meta_repo.gitCommit(message="kit updates", push=False) 193 | if args.xmlout: 194 | cpm_logger.writeXML(args.xmlout) 195 | 196 | if push is True: 197 | print("Pushing meta-repo...") 198 | await meta_repo.gitMirrorPush() 199 | 200 | elapsed_time = datetime.utcnow() - now 201 | 202 | send_msg({ 203 | "task": "merge-all-kits", 204 | "event": "complete", 205 | "arguments": sys.argv, 206 | "elapsed-time": { 207 | "days": elapsed_time.days, 208 | "seconds": elapsed_time.seconds, 209 | "microseconds": elapsed_time.microseconds 210 | } 211 | }) 212 | 213 | if __name__ == "__main__": 214 | import argparse 215 | 216 | parser = argparse.ArgumentParser() 217 | parser.add_argument("release", type=str, default="all", nargs="?", help="specify release to generate. Defaults to 'all'.") 218 | parser.add_argument("--nopush", action="store_true", help="Don't push changes upstream at all.") 219 | parser.add_argument("--db", action="store_true", help="Connect to fastpull database to update to-be-fetched file list.") 220 | parser.add_argument("--indypush", action="store_true", help="Push up independent kits (good for developer mode.)") 221 | parser.add_argument("--destfix", action="store_true", help="Auto-fix invalid git destinations.)") 222 | parser.add_argument("--config", type=str, default=None, help="Specify config file. Defaults to ~/.merge.") 223 | parser.add_argument("--xmlout", type=str, default=None, help="Specify where to write XML package info (default: don't)") 224 | args = parser.parse_args() 225 | 226 | config = Configuration(args.config) 227 | loop = asyncio.get_event_loop() 228 | loop.run_until_complete(main_thread(config, args)) 229 | sys.exit(0) 230 | 231 | # vim: ts=4 sw=4 noet tw=140 232 | -------------------------------------------------------------------------------- /bin/merge-gentoo-staging: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../../modules"))) 6 | import merge.merge_utils as mu 7 | from merge.config import Configuration 8 | import asyncio 9 | 10 | config = Configuration() 11 | 12 | # This function updates the gentoo-staging tree with all the latest gentoo updates: 13 | 14 | async def gentoo_staging_update(): 15 | gentoo_staging_w = mu.GitTree("gentoo-staging", "master", url=config.gentoo_staging, root=config.dest_trees+"/gentoo-staging", config=config) 16 | await gentoo_staging_w.initialize() 17 | #gentoo_src = mu.GitTree("gentoo-x86", "master", "https://github.com/gentoo/gentoo.git", pull=True) 18 | gentoo_src = mu.GitTree("gentoo-x86", branch="master", url="https://anongit.gentoo.org/git/repo/gentoo.git", config=config) 19 | await gentoo_src.initialize() 20 | #gentoo_src = CvsTree("gentoo-x86",":pserver:anonymous@anoncvs.gentoo.org:/var/cvsroot") 21 | gentoo_glsa = mu.GitTree("gentoo-glsa", branch="master", url="git://anongit.gentoo.org/data/glsa.git", config=config) 22 | await gentoo_glsa.initialize() 23 | # This is the gentoo-staging tree, stored in a different place locally, so we can simultaneously be updating gentoo-staging and reading 24 | # from it without overwriting ourselves: 25 | 26 | all_steps = [ 27 | mu.GitCheckout("master"), 28 | mu.SyncFromTree(gentoo_src, exclude=[".gitignore", "eclass/.gitignore", "metadata/.gitignore", "/metadata/cache/**", "dev-util/metro"]), 29 | # Only include 2012 and up GLSA's: 30 | mu.SyncDir(gentoo_glsa.root, srcdir=None, destdir="metadata/glsa", exclude=["glsa-200*.xml","glsa-2010*.xml", "glsa-2011*.xml"]), 31 | ] 32 | 33 | await gentoo_staging_w.run(all_steps) 34 | await gentoo_staging_w.gitCommit(message="gentoo updates") 35 | 36 | if __name__ == "__main__": 37 | 38 | loop = asyncio.get_event_loop() 39 | loop.run_until_complete(gentoo_staging_update()) 40 | sys.exit(0) 41 | 42 | # vim: ts=4 sw=4 noet 43 | -------------------------------------------------------------------------------- /bin/qa/bad-dns: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | """ 4 | test for bad DNS 5 | """ 6 | 7 | import sys 8 | import os 9 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../../../modules"))) 10 | import merge.merge_utils as mu 11 | import asyncio 12 | from merge.config import Configuration 13 | 14 | async def main_thread(): 15 | mytree = mu.GitTree("blah", branch="master", url="https://babasdasdflkdsjfasdfx.xyz/git/repo/gentoo.git", config=Configuration()) 16 | await mytree.initialize() 17 | print("howdy") 18 | 19 | if __name__ == "__main__": 20 | 21 | loop = asyncio.get_event_loop() 22 | loop.run_until_complete(main_thread()) 23 | sys.exit(0) -------------------------------------------------------------------------------- /bin/qa/kit-set-mismatch: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | """ 4 | Initial stab at a QA check for catpkgs in fixups that are part of another kit's package set, and catpkgs in package 5 | sets that are in another kit's fixups. 6 | """ 7 | 8 | import sys 9 | import os 10 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../../../modules"))) 11 | import merge.merge_utils as mu 12 | from merge.config import Configuration 13 | from collections import defaultdict 14 | config = Configuration 15 | from merge.merge_utils import KitStabilityRating, KitType 16 | 17 | fixup_repo = mu.GitTree("kit-fixups", config.branch("kit-fixups"), url=config.kit_fixups, root=config.source_trees+"/kit-fixups") 18 | meta_repo = mu.GitTree("meta-repo", config.branch("meta-repo"), url=config.base_url("meta-repo"), root=config.dest_trees+"/meta-repo") 19 | 20 | sys.path.insert(0, fixup_repo.root + "/modules") 21 | from fixups.foundations import KitFoundation 22 | 23 | foundation = KitFoundation(config, KitStabilityRating, KitType) 24 | 25 | kit_order = [ 'prime' ] 26 | 27 | fixup_packages = {} 28 | package_sets = defaultdict(set) 29 | 30 | for kit_group in kit_order: 31 | for kit_dict in foundation.kit_groups[kit_group]: 32 | kit_name = kit_dict['name'] 33 | pkgf = "package-sets/%s-packages" % kit_name 34 | pkgf_skip = "package-sets/%s-skip" % kit_name 35 | pkgdir = fixup_repo.root 36 | pkgf = pkgdir + "/" + pkgf 37 | pkgf_skip = pkgdir + "/" + pkgf_skip 38 | 39 | pkg_set = [] 40 | if os.path.exists(pkgf): 41 | pkg_set = mu.get_pkglist(pkgf) 42 | 43 | filtered_pkg_set = [] 44 | for pkg in pkg_set: 45 | parts = pkg.split() 46 | if not len(parts): 47 | continue 48 | first_part = parts[0] 49 | if first_part.split("/") != 2: 50 | continue 51 | parts = first_part.split("/") 52 | if parts[1] == "*": 53 | continue 54 | filtered_pkg_set.append(pkg) 55 | 56 | fixup_set = mu.get_extra_catpkgs_from_kit_fixups(fixup_repo, kit_name) 57 | filtered_pkg_set = set(filtered_pkg_set) 58 | for catpkg in fixup_set: 59 | fixup_packages[catpkg] = kit_name 60 | for catpkg in filtered_pkg_set: 61 | package_sets[catpkg] = kit_name 62 | 63 | for catpkg, kit in package_sets.items(): 64 | if catpkg in fixup_packages and fixup_packages[catpkg] != kit: 65 | print(kit, catpkg) 66 | 67 | for catpkg, kit in fixup_packages.items(): 68 | if catpkg in package_sets and package_sets[catpkg] != kit: 69 | print(kit, catpkg) -------------------------------------------------------------------------------- /bin/trickle: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import sys, os, subprocess 4 | from email.utils import parsedate 5 | from time import mktime 6 | from datetime import datetime 7 | 8 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../../modules"))) 9 | import merge.merge_utils as mu 10 | from merge.config import Configuration 11 | import asyncio 12 | from merge.merge_utils import KitStabilityRating, KitType 13 | 14 | config = Configuration 15 | 16 | repo_name = sys.argv[1] 17 | 18 | def run(command): 19 | s, o = subprocess.getstatusoutput(command) 20 | if s == 0: 21 | return o 22 | else: 23 | return None 24 | 25 | def get_audit_cycle(path): 26 | try: 27 | with open(path+"/.audit-cycle", "r") as auditfile: 28 | content = auditfile.read().strip() 29 | try: 30 | return int(content) 31 | except: 32 | raise IOError 33 | except IOError: 34 | return None 35 | 36 | def default_audit_cycle(kit, branch): 37 | global KitStabilityRating 38 | global foundation 39 | 40 | for release, rdict in foundation.release_defs.items(): 41 | if kit in rdict and branch in rdict[kit]: 42 | return 30 43 | if branch in [ "global", "curated" ]: 44 | return 45 45 | for kit_dict in foundation.kit_groups["prime"]: 46 | if kit_dict["name"] == kit and kit_dict["branch"] == branch: 47 | if "stability" in kit_dict and kit_dict["stability"] == KitStabilityRating.PRIME: 48 | if "default" in kit_dict and kit_dict["default"] is True: 49 | return 60 50 | else: 51 | return 90 52 | elif "stability" in kit_dict and kit_dict["stability"] == KitStabilityRating.DEPRECATED: 53 | return 180 54 | else: 55 | return 60 56 | return 180 57 | 58 | if not os.path.exists(repo_name): 59 | print("path does not exist. Exiting.") 60 | sys.exit(1) 61 | 62 | async def main_thread(): 63 | fixup_repo = mu.GitTree("kit-fixups", config=config, branch=config.branch("kit-fixups"), url=config.kit_fixups, root=config.source_trees + "/kit-fixups") 64 | fixup_repo.initialize() 65 | #meta_repo = mu.GitTree("meta-repo", config.branch("meta-repo"), url=config.base_url("meta-repo"), root=config.dest_trees + "/meta-repo") 66 | #meta_repo.initialize() 67 | 68 | sys.path.insert(0, fixup_repo.root + "/modules") 69 | from fixups.foundations import KitFoundation 70 | globals()["foundation"] = foundation = KitFoundation(config, KitStabilityRating, KitType) 71 | catpkg_list = [] 72 | now = datetime.now() 73 | utcnow = datetime.utcnow() 74 | for kit in os.listdir(repo_name): 75 | if kit == ".git": 76 | continue 77 | kit_path = os.path.join(repo_name, kit) 78 | if not os.path.isdir(kit_path): 79 | continue 80 | if kit == "profiles": 81 | continue 82 | kit_audit_cycle = get_audit_cycle(kit_path) 83 | for branch in os.listdir(kit_path): 84 | branch_path = os.path.join(kit_path, branch) 85 | if not os.path.isdir(branch_path): 86 | continue 87 | audit_cycle = get_audit_cycle(branch_path) or kit_audit_cycle or default_audit_cycle(kit, branch) 88 | print(kit, branch, audit_cycle) 89 | for cat in os.listdir(branch_path): 90 | if "-" not in cat and cat != "virtual": 91 | continue 92 | cat_path = os.path.join(branch_path, cat) 93 | if not os.path.isdir(cat_path): 94 | continue 95 | for pkg in os.listdir(cat_path): 96 | catpkg = cat + "/" + pkg 97 | auditfile = cat + "/" + pkg + "/.audit" 98 | if os.path.exists(auditfile): 99 | datecheckfile = pkg + "/.audit" 100 | else: 101 | datecheckfile = pkg 102 | out = run ("(cd %s; git log -n 1 --oneline -- %s)" % ( cat_path, datecheckfile )) 103 | sha1 = out.split(" ")[0] 104 | out = run ("(cd %s; git show --no-patch --format=%%ce_%%cD %s)" % ( cat_path, sha1)) 105 | email, isodate = out.split("_") 106 | dt = (now - datetime.fromtimestamp(mktime(parsedate(isodate)))) 107 | if dt.days >= audit_cycle: 108 | days_delta = dt.days - audit_cycle 109 | catpkg_list.append((days_delta, dt, kit, branch, catpkg, email)) 110 | #"git log -n 1 --oneline -- ." 111 | #"git show --no-patch --format=%ce,%cI 68dc82e" 112 | print('kit-fixups that need review' 113 | '

Funtoo Linux Stale Packages QA List

' 114 | '
This page lists catpkgs in kit-fixups that are stale. These catpkgs should be reviewed and updated; when they are updated in git, they will no longer be stale and will be removed automatically from this list.
' 115 | '

If you review a catpkg and determine that it does not need an update, it is still possible to remove it from this list. Add a .audit file to the catpkg directory containing a text description of your review and commit it. This will result in the catpkg being \'reviewed\' and it will drop from this list.' 116 | '

By default, catpkgs will be up for review after 60 days. To change this threshold, you can create a .audit-cycle file in the kit or branch directory containing an integer number of days after which catpkgs in the kit or branch should be considered stale.

Visit kit-fixups on GitHub

' 117 | '

Please ensure catpkgs are not stale. Ideally review them every 30 days or less; please do not let catpkgs go without review for 60 days or more. Also note that because Funtoo doesn\'t have official maintainers for packages, the \'last modified by\' column is listed for convenience so you can coordinate with the previous committer if necessary.

' 118 | '

This page was last updated on ' + now.strftime("%Y-%m-%d %H:%M:%S %p %Z local time") + " (" + utcnow.strftime("%y-%m-%d %H:%M") + " UTC)

" 119 | '
' 120 | '') 121 | catpkg_list.sort(key=lambda x: x[0], reverse=True) 122 | for item in catpkg_list: 123 | days_delta = item[0] 124 | if days_delta > 100: 125 | days_delta = '' + str(days_delta) + '' 126 | print("" % ( days_delta, item[1].days, item[2], item[3], item[4], item[5])) 127 | print("
days latedays stalekitbranchcatpkglast modified by
%s%s%s%s%s%s
") 128 | 129 | 130 | if __name__ == "__main__": 131 | loop = asyncio.get_event_loop() 132 | loop.run_until_complete(main_thread()) 133 | sys.exit(0) -------------------------------------------------------------------------------- /modules/bug_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import base64 4 | import json 5 | 6 | import requests 7 | 8 | def gen_base64(username, password): 9 | d_b_encode = '%s:%s' % (username, password) 10 | dEncode = bytes(d_b_encode, "utf-8") 11 | bdEncode = base64.encodebytes(dEncode).decode("utf-8")[:-1] 12 | return bdEncode 13 | 14 | 15 | class JIRA(object): 16 | 17 | def __init__(self, url, user, password): 18 | self.url = url 19 | self.user = user 20 | self.password = password 21 | 22 | def getAuth(self): 23 | base64string = gen_base64(self.user, self.password) 24 | return "Basic %s" % base64string 25 | 26 | def getAllIssues(self, params={}): 27 | # use this to search with params={"jql" : "blah" } 28 | url = self.url + '/search' 29 | r = requests.get(url, params=params) 30 | print(r.url) 31 | if r.status_code == requests.codes.ok: 32 | return r.json() 33 | return None 34 | 35 | def issues_iter(self, jql): 36 | results = 100 37 | startAt = 0 38 | while True: 39 | issues = self.getAllIssues(params={"jql" : jql, "startAt" : startAt}) 40 | startAt += len(issues["issues"]) 41 | for i in issues["issues"]: 42 | yield i 43 | if len(issues["issues"]) == 0: 44 | break 45 | 46 | def createIssue(self, project, title, description, issuetype="Bug", extrafields={}): 47 | url = self.url + '/issue/' 48 | headers = {"Content-type": "application/json", "Accept": "application/json", "Authorization": self.getAuth()} 49 | issue = {"fields": { 50 | 'project': {'key': project}, 51 | 'summary': title, 52 | 'description': description, 53 | 'issuetype': {'name': issuetype} 54 | } 55 | } 56 | issue["fields"].update(extrafields) 57 | print("Posting new bug.") 58 | r = requests.post(url, data=json.dumps(issue), headers=headers) 59 | try: 60 | j = r.json() 61 | except ValueError: 62 | print("createIssue: Error decoding JSON from POST. Possible connection error.") 63 | return None 64 | if 'key' in j: 65 | return j['key'] 66 | return None 67 | 68 | def createSubTask(self, parentkey, project, title, description): 69 | return self.createIssue(project=project, title=title, description=description, issuetype="Sub-task", extrafields={'parent': parentkey}) 70 | 71 | def closeIssue(self, issue, comment=None, resolution='Fixed'): 72 | url = self.url + '/issue/' + issue['key'] + '/transitions' 73 | headers = {"Content-type": "application/json", "Accept": "application/json", "Authorization": self.getAuth()} 74 | data = {'update': 75 | {'comment': 76 | [ 77 | {'add': {'body': comment or 'Closing ' + issue['key']}} 78 | ] 79 | } 80 | } 81 | data['fields'] = {'resolution': {'name': resolution}} 82 | data['transition'] = {'id': 831} 83 | r = requests.post(url, data=json.dumps(data), headers=headers) 84 | if r.status_code == requests.codes.ok: 85 | return True 86 | else: 87 | return False 88 | 89 | def commentOnIssue(self, issue, comment): 90 | url = self.url + '/issue/' + issue['key'] + '/comment' 91 | headers = {"Content-type": "application/json", "Accept": "application/json", "Authorization": self.getAuth()} 92 | data = {'body': comment} 93 | r = requests.post(url, data=json.dumps(data), headers=headers) 94 | if r.status_code == requests.codes.ok: 95 | return True 96 | else: 97 | return False 98 | 99 | def closeDuplicateIssue(self, orig_issue, dup_issue): 100 | url = self.url + '/issue/' + dup_issue['key'] + '/transitions' 101 | headers = {"Content-type": "application/json", "Accept": "application/json", "Authorization": self.getAuth()} 102 | data = {'update': 103 | {'comment': 104 | [ 105 | {'add': {'body': 'Duplicate of %s' % orig_issue['key']}} 106 | ] 107 | } 108 | } 109 | data['fields'] = {'resolution': {'name': 'Duplicate'}} 110 | data['transition'] = {'id': 831} 111 | print(json.dumps(data)) 112 | print(url) 113 | r = requests.post(url, data=json.dumps(data), headers=headers) 114 | print(r.text) 115 | if r.status_code == requests.codes.ok: 116 | return True 117 | else: 118 | return False 119 | 120 | 121 | class GitHub(object): 122 | 123 | def __init__(self, user, password, org=None): 124 | self.url = 'https://api.github.com' 125 | self.user = user 126 | self.password = password 127 | self.org = org 128 | 129 | def getAuth(self): 130 | base64string = gen_base64(self.user, self.password) 131 | return "Basic %s" % base64string 132 | 133 | def getOrgRepositories(self): 134 | url = self.url + '/orgs/%s/repos' % self.org 135 | r = requests.get(url) 136 | if r.status_code == requests.codes.ok: 137 | out = [] 138 | for repo in r.json(): 139 | out.append(repo['full_name']) 140 | return out 141 | return None 142 | 143 | def getShortRepositories(self): 144 | url = self.url + '/orgs/%s/repos' % self.org 145 | r = requests.get(url) 146 | if r.status_code == requests.codes.ok: 147 | out = [] 148 | for repo in r.json(): 149 | out.append(repo['name']) 150 | return out 151 | return None 152 | 153 | def commentOnIssue(self, issue_json, comment): 154 | url = issue_json['comments_url'] 155 | data = {'body': comment} 156 | headers = {"Content-Type": "application/json", 'Authorization': self.getAuth()} 157 | r = requests.post(url, headers=headers, data=json.dumps(data)) 158 | j = r.json() 159 | if 'url' in j: 160 | return j['url'] 161 | else: 162 | return None 163 | 164 | def closeIssue(self, issue_json): 165 | url = issue_json['url'] 166 | data = {'state': 'closed'} 167 | headers = {"Content-Type": "application/json", 'Authorization': self.getAuth()} 168 | r = requests.post(url, headers=headers, data=json.dumps(data)) 169 | if r.status_code == requests.codes.ok: 170 | return r.json() 171 | return None 172 | 173 | 174 | class GitHubRepository(GitHub): 175 | 176 | def __init__(self, repo, user, password, org): 177 | super().__init__(user, password, org) 178 | self.repo = repo 179 | 180 | def getAllPullRequests(self): 181 | url = self.url + '/repos/%s/pulls' % self.repo 182 | headers = {'Authorization': self.getAuth()} 183 | r = requests.get(url, headers=headers) 184 | if r.status_code == requests.codes.ok: 185 | return r.json() 186 | return None 187 | 188 | def getAllIssues(self): 189 | url = self.url + '/repos/%s/issues' % self.repo 190 | headers = {'Authorization': self.getAuth()} 191 | r = requests.get(url, headers=headers, params={'state': 'all'}) 192 | if r.status_code == requests.codes.ok: 193 | return r.json() 194 | return None 195 | 196 | # vim: ts=4 sw=4 noet 197 | 198 | if __name__ == "__main__": 199 | import sys 200 | j = JIRA("https://bugs.funtoo.org/rest/api/2", "drobbins", sys.argv[1]) 201 | for i in j.issues_iter('project="FL" and status != "closed"'): 202 | print(i["key"], i["fields"]["summary"]) 203 | -------------------------------------------------------------------------------- /modules/merge/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/funtoo/merge-scripts/dd76fcaaebc70b5de863f6395239a53e9e9d8dae/modules/merge/__init__.py -------------------------------------------------------------------------------- /modules/merge/async_engine.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import asyncio 4 | from concurrent.futures import ThreadPoolExecutor 5 | from collections import defaultdict 6 | from queue import Queue, Empty 7 | 8 | class AsyncEngine: 9 | 10 | queue_size = 60000 11 | 12 | def __init__(self, num_threads=40): 13 | self.task_q = Queue(maxsize=self.queue_size) 14 | self.num_threads = num_threads 15 | self.thread_exec = ThreadPoolExecutor(max_workers=self.num_threads) 16 | self.workers = [] 17 | self.loop = asyncio.get_event_loop() 18 | self.keep_running = True 19 | 20 | def start_threads(self, enable_workers=True): 21 | if enable_workers is True: 22 | for x in range(0, self.num_threads): 23 | self.loop.run_in_executor(self.thread_exec, self._worker) 24 | print("Started %s workers." % self.num_threads) 25 | 26 | def add_worker(self, w): 27 | self.workers.append(self.thread_exec.submit(w)) 28 | 29 | def enqueue(self, **kwargs): 30 | self.task_q.put(kwargs) 31 | 32 | def _worker(self): 33 | while self.keep_running is True or (self.keep_running is False and self.task_q.qsize() > 0 ): 34 | try: 35 | kwargs = defaultdict(lambda: None, self.task_q.get(timeout=3)) 36 | self.worker_thread(**kwargs) 37 | except Empty: 38 | continue 39 | 40 | async def wait_for_workers_to_finish(self): 41 | self.keep_running = False 42 | await asyncio.gather(*self.workers) 43 | 44 | def exit_handler(self): 45 | """something for atexit.register""" 46 | self.loop.run_until_complete(asyncio.gather( 47 | asyncio.ensure_future(self.wait_for_workers_to_finish()) 48 | )) 49 | 50 | 51 | def worker_thread(self, **kwargs): 52 | print("blarg") 53 | 54 | 55 | # vim: ts=4 sw=4 noet 56 | -------------------------------------------------------------------------------- /modules/merge/async_portage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import portage 4 | import warnings 5 | portage.proxy.lazyimport.lazyimport(globals(), 6 | 'portage.dbapi.dep_expand:dep_expand', 7 | 'portage.dep:match_from_list,_match_slot', 8 | 'portage.util.listdir:listdir', 9 | 'portage.versions:best,_pkg_str', 10 | ) 11 | 12 | async def async_xmatch(self, level, origdep, mydep=None, mykey=None, mylist=None): 13 | "caching match function; very trick stuff" 14 | 15 | if mydep is None: 16 | # this stuff only runs on first call of xmatch() 17 | # create mydep, mykey from origdep 18 | mydep = dep_expand(origdep, mydb=self, settings=self.settings) 19 | mykey = mydep.cp 20 | 21 | # if no updates are being made to the tree, we can consult our xcache... 22 | cache_key = None 23 | if self.frozen: 24 | cache_key = (mydep, mydep.unevaluated_atom) 25 | try: 26 | return self.xcache[level][cache_key][:] 27 | except KeyError: 28 | pass 29 | 30 | myval = None 31 | mytree = None 32 | if mydep.repo is not None: 33 | mytree = self.treemap.get(mydep.repo) 34 | if mytree is None: 35 | if level.startswith("match-"): 36 | myval = [] 37 | else: 38 | myval = "" 39 | 40 | if myval is not None: 41 | # Unknown repo, empty result. 42 | pass 43 | elif level == "match-all-cpv-only": 44 | # match *all* packages, only against the cpv, in order 45 | # to bypass unnecessary cache access for things like IUSE 46 | # and SLOT. 47 | if mydep == mykey: 48 | # Share cache with match-all/cp_list when the result is the 49 | # same. Note that this requires that mydep.repo is None and 50 | # thus mytree is also None. 51 | level = "match-all" 52 | myval = self.cp_list(mykey, mytree=mytree) 53 | else: 54 | myval = match_from_list(mydep, 55 | self.cp_list(mykey, mytree=mytree)) 56 | 57 | elif level in ("bestmatch-visible", "match-all", 58 | "match-visible", "minimum-all", "minimum-all-ignore-profile", 59 | "minimum-visible"): 60 | # Find the minimum matching visible version. This is optimized to 61 | # minimize the number of metadata accesses (improves performance 62 | # especially in cases where metadata needs to be generated). 63 | if mydep == mykey: 64 | mylist = self.cp_list(mykey, mytree=mytree) 65 | else: 66 | mylist = match_from_list(mydep, 67 | self.cp_list(mykey, mytree=mytree)) 68 | 69 | ignore_profile = level in ("minimum-all-ignore-profile",) 70 | visibility_filter = level not in ("match-all", 71 | "minimum-all", "minimum-all-ignore-profile") 72 | single_match = level not in ("match-all", "match-visible") 73 | myval = [] 74 | aux_keys = list(self._aux_cache_keys) 75 | if level == "bestmatch-visible": 76 | iterfunc = reversed 77 | else: 78 | iterfunc = iter 79 | 80 | for cpv in iterfunc(mylist): 81 | try: 82 | metadata = dict(zip(aux_keys, await self.async_aux_get(cpv, aux_keys, myrepo=cpv.repo))) 83 | except KeyError: 84 | # ebuild not in this repo, or masked by corruption 85 | continue 86 | 87 | try: 88 | pkg_str = _pkg_str(cpv, metadata=metadata, 89 | settings=self.settings, db=self) 90 | except InvalidData: 91 | continue 92 | 93 | if visibility_filter and not self._visible(pkg_str, metadata): 94 | continue 95 | 96 | if mydep.slot is not None and \ 97 | not _match_slot(mydep, pkg_str): 98 | continue 99 | 100 | if mydep.unevaluated_atom.use is not None and \ 101 | not self._match_use(mydep, pkg_str, metadata, 102 | ignore_profile=ignore_profile): 103 | continue 104 | 105 | myval.append(pkg_str) 106 | if single_match: 107 | break 108 | 109 | if single_match: 110 | if myval: 111 | myval = myval[0] 112 | else: 113 | myval = "" 114 | 115 | elif level == "bestmatch-list": 116 | # dep match -- find best match but restrict search to sublist 117 | warnings.warn("The 'bestmatch-list' mode of " 118 | "portage.dbapi.porttree.portdbapi.xmatch is deprecated", 119 | DeprecationWarning, stacklevel=2) 120 | myval = best(list(self._iter_match(mydep, mylist))) 121 | elif level == "match-list": 122 | # dep match -- find all matches but restrict search to sublist (used in 2nd half of visible()) 123 | warnings.warn("The 'match-list' mode of " 124 | "portage.dbapi.porttree.portdbapi.xmatch is deprecated", 125 | DeprecationWarning, stacklevel=2) 126 | myval = list(self._iter_match(mydep, mylist)) 127 | else: 128 | raise AssertionError( 129 | "Invalid level argument: '%s'" % level) 130 | 131 | if self.frozen: 132 | xcache_this_level = self.xcache.get(level) 133 | if xcache_this_level is not None: 134 | xcache_this_level[cache_key] = myval 135 | if not isinstance(myval, _pkg_str): 136 | myval = myval[:] 137 | 138 | return myval -------------------------------------------------------------------------------- /modules/merge/config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import sys 5 | from configparser import ConfigParser 6 | 7 | 8 | class Configuration: 9 | 10 | def __init__(self, filename=None): 11 | if filename is None: 12 | home_dir = os.path.expanduser("~") 13 | self.config_path = os.path.join(home_dir, ".merge") 14 | else: 15 | self.config_path = filename 16 | if not os.path.exists(self.config_path): 17 | print(""" 18 | Merge scripts now use a configuration file. Create a ~/.merge file with the following format. Note that 19 | while the config file must exist, it may be empty, in which case, the following settings will be used. 20 | These are the production configuration settings, so you will probably want to override most or all of 21 | these. 22 | 23 | [sources] 24 | 25 | flora = https://github.com/funtoo/flora 26 | kit-fixups = https://github.com/funtoo/kit-fixups 27 | gentoo-staging = repos@git.funtoo.org:ports/gentoo-staging.git 28 | 29 | [destinations] 30 | 31 | base_url = https://github.com/funtoo 32 | 33 | [branches] 34 | 35 | flora = master 36 | kit-fixups = master 37 | meta-repo = master 38 | 39 | [work] 40 | 41 | source = /var/git/source-trees 42 | destination = /var/git/dest-trees 43 | """) 44 | sys.exit(1) 45 | 46 | self.config = ConfigParser() 47 | self.config.read(self.config_path) 48 | 49 | valids = { 50 | "sources": [ "flora", "kit-fixups", "gentoo-staging" ], 51 | "destinations": [ "base_url", "mirror", "indy_url" ], 52 | "branches": [ "flora", "kit-fixups", "meta-repo" ], 53 | "work": [ "source", "destination" ] 54 | } 55 | for section, my_valids in valids.items(): 56 | 57 | if self.config.has_section(section): 58 | if section == "database": 59 | continue 60 | for opt in self.config[section]: 61 | if opt not in my_valids: 62 | print("Error: ~/.merge [%s] option %s is invalid." % (section, opt)) 63 | sys.exit(1) 64 | 65 | def get_option(self, section, key, default=None): 66 | if self.config.has_section(section) and key in self.config[section]: 67 | my_path = self.config[section][key] 68 | else: 69 | my_path = default 70 | return my_path 71 | 72 | def db_connection(self, dbname): 73 | return self.get_option("database", dbname) 74 | 75 | @property 76 | def flora(self): 77 | return self.get_option("sources", "flora", "ssh://git@code.funtoo.org:7999/co/flora.git") 78 | 79 | @property 80 | def kit_fixups(self): 81 | return self.get_option("sources", "kit-fixups", "ssh://git@code.funtoo.org:7999/core/kit-fixups.git") 82 | 83 | @property 84 | def mirror(self): 85 | return self.get_option("destinations", "mirror", None) 86 | 87 | @property 88 | def gentoo_staging(self): 89 | return self.get_option("sources", "gentoo-staging", "ssh://git@code.funtoo.org:7999/auto/gentoo-staging.git") 90 | 91 | def base_url(self, repo): 92 | base = self.get_option("destinations", "base_url", "ssh://git@code.funtoo.org:7999/auto/") 93 | if not base.endswith("/"): 94 | base += "/" 95 | if not repo.endswith(".git"): 96 | repo += ".git" 97 | return base + repo 98 | 99 | def indy_url(self, repo): 100 | base = self.get_option("destinations", "indy_url", "ssh://git@code.funtoo.org:7999/indy/") 101 | if not base.endswith("/"): 102 | base += "/" 103 | if not repo.endswith(".git"): 104 | repo += ".git" 105 | return base + repo 106 | 107 | def branch(self, key): 108 | return self.get_option("branches", key, "master") 109 | 110 | @property 111 | def source_trees(self): 112 | return self.get_option("work", "source", "/var/git/source-trees") 113 | 114 | @property 115 | def dest_trees(self): 116 | return self.get_option("work", "destination", "/var/git/dest-trees") 117 | -------------------------------------------------------------------------------- /modules/merge/db_core.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import sys 4 | from merge.config import Configuration 5 | from contextlib import contextmanager 6 | from sqlalchemy import create_engine, Integer, Boolean, Column, String, BigInteger, DateTime, Text 7 | from sqlalchemy.ext.declarative import declarative_base 8 | from sqlalchemy.orm import sessionmaker, scoped_session 9 | from datetime import datetime 10 | from sqlalchemy.schema import MetaData 11 | 12 | app_config = Configuration() 13 | 14 | class Database(object): 15 | 16 | # Abstract database class with contextmanager pattern. 17 | 18 | @contextmanager 19 | def get_session(self): 20 | session = scoped_session(sessionmaker(bind=self.engine)) 21 | try: 22 | yield session 23 | session.commit() 24 | except: 25 | session.rollback() 26 | raise 27 | finally: 28 | session.close() 29 | 30 | class FastPullDatabase(Database): 31 | 32 | MetaData = MetaData() 33 | 34 | def __init__(self): 35 | 36 | self.Base = declarative_base(self.MetaData) 37 | 38 | class Distfile(self.Base): 39 | 40 | # A distfile represents a single file for download. A distfile has a filename, which is its local filename 41 | # after it is downloaded, as well as one or more SRC_URIs, which define where the file can be downloaded 42 | # from -- and may reference a filename different from the 'filename' field (in the case of '->' used within 43 | # the SRC_URI). In addition, the 'id' field is an ASCII SHA512 checksum from the manifest, and catpkg and 44 | # kit record the catpkg and kit that reference this file, respectively. 45 | 46 | # Note that with the current data model, it is possible that multiple catpkgs and/or kits may reference 47 | # the same file, and they will overwrite each other's entries in the distfile database. So Distfile should 48 | # be used as a complete list of distfiles, but not as a complete mapping of catpkg -> distfile. 49 | 50 | # Distfile records are used for SRC_URI and mirror-related tracking tasks. They record the last time 51 | # a particular filename was seen in the 'updated_on field' and the 'mirror' field indicates whether the 52 | # file should be mirrored (inverse of RESTRICT="mirror") 53 | 54 | __tablename__ = "distfiles" 55 | 56 | 57 | id = Column('id', String(128), primary_key=True) # sha512 in ASCII 58 | rand_id = Column('rand_id', String(128), index=True) # fastpull_id 59 | filename = Column('filename', String(255), primary_key=True) # filename on disk 60 | 61 | # the id/filename is a composite key, because a SHA512 may exist under potentially multiple filenames, and we 62 | # want to be aware of these situations. 63 | 64 | digest_type = Column('digest_type', String(20)) 65 | alt_digest = Column('alt_digest', Text) 66 | size = Column('size', BigInteger) 67 | catpkg = Column('catpkg', String(255), index=True) # catpkg 68 | kit = Column('kit', String(40), index=True) # source kit 69 | src_uri = Column('src_uri', Text) # src_uris -- filename may be different as Portage can rename -- stored in order of appearance, one per line 70 | mirror = Column('mirror', Boolean, default=True) 71 | last_fetched_on = Column('last_fetched_on', DateTime) # set to a datetime the last time we successfully fetched the file # last failure 72 | 73 | # deprecated fields: 74 | 75 | last_attempted_on = Column('last_attempted_on', DateTime) 76 | last_failure_on = Column('last_failure_on', DateTime) 77 | failtype = Column('failtype', Text) 78 | priority = Column('priority', Integer, default=0) 79 | 80 | 81 | class QueuedDistfile(self.Base): 82 | 83 | __tablename__ = 'queued_distfiles' 84 | 85 | id = Column(Integer, primary_key=True) 86 | filename = Column('filename', String(255), index=True) # filename on disk 87 | catpkg = Column('catpkg', String(255), index=True) # catpkg 88 | kit = Column('kit', String(40), index=True) # source kit 89 | branch = Column('branch', String(40), index=True) # source kit 90 | src_uri = Column('src_uri', Text) # src_uris -- filename may be different as Portage can rename -- stored in order of appearance, one per line 91 | size = Column('size', BigInteger) 92 | mirror = Column('mirror', Boolean, default=True) 93 | digest_type = Column('digest_type', String(20)) 94 | digest = Column('digest', Text) 95 | added_on = Column('added_on', DateTime, default=datetime.utcnow) 96 | priority = Column('priority', Integer, default=0) 97 | last_attempted_on = Column('last_attempted_on', DateTime) 98 | last_failure_on = Column('last_failure_on', DateTime) 99 | failcount = Column(Integer, default=0) 100 | failtype = Column('failtype', Text) 101 | 102 | class MissingRequestedFile(self.Base): 103 | 104 | __tablename__ = "missing_requested_files" 105 | 106 | id = Column(Integer, primary_key=True) 107 | filename = Column(String(255), index=True) 108 | failcount = Column(Integer, default=0) 109 | last_failure_on = Column(DateTime, default=None) 110 | 111 | class MissingManifestFailure(self.Base): 112 | 113 | __tablename__ = 'manifest_failures' 114 | 115 | filename = Column('filename', String(255), primary_key=True) # filename on disk 116 | catpkg = Column('catpkg', String(255), primary_key=True) # catpkg 117 | kit = Column('kit', String(40), primary_key=True) # source kit 118 | branch = Column('branch', String(40), primary_key=True) # source kit 119 | src_uri = Column('src_uri', Text) # src_uris -- filename may be different as Portage can rename -- stored in order of appearance, one per line 120 | failtype = Column('failtype', String(8)) # 'missing' 121 | fail_on = Column('fail_on', DateTime) #last failure 122 | 123 | self.Distfile = Distfile 124 | self.QueuedDistfile = QueuedDistfile 125 | self.MissingManifestFailure = MissingManifestFailure 126 | self.MissingRequestedFile = MissingRequestedFile 127 | 128 | self.engine = create_engine(app_config.db_connection("fastpull"), strategy='threadlocal', pool_size=40, max_overflow=80) 129 | self.Base.metadata.create_all(self.engine) 130 | 131 | if __name__ == "__main__": 132 | 133 | # This migration code is designed to migrate old Distfile() records to the new QueuedDistfile() records: 134 | 135 | if len(sys.argv) > 1 and sys.argv[1] == "migrate": 136 | db = FastPullDatabase() 137 | print("migrating...") 138 | with db.get_session() as session: 139 | for d in session.query(db.Distfile).filter(db.Distfile.last_fetched_on == None): 140 | qd = db.QueuedDistfile() 141 | qd.filename = d.filename 142 | qd.catpkg = d.catpkg 143 | qd.kit = d.kit 144 | qd.src_uri = d.src_uri 145 | qd.size = d.size 146 | qd.mirror = d.mirror 147 | qd.digest_type = "sha512" 148 | qd.added_on = datetime.utcnow() 149 | qd.priority = d.priority 150 | qd.last_attempted_on = d.last_attempted_on 151 | qd.last_failure_on = d.last_failure_on 152 | qd.failcount = 1 153 | qd.failtype = d.failtype 154 | session.add(qd) 155 | session.delete(d) 156 | session.commit() 157 | sys.stdout.write(">") 158 | sys.stdout.flush() 159 | print() 160 | elif len(sys.argv) > 1 and sys.argv[1] == "fixup": 161 | # this code should detect and fixup things that need to be re-fetched. 162 | db = FastPullDatabase() 163 | print("fixing up old random ids...") 164 | with db.get_session() as session: 165 | for d in session.query(db.Distfile): 166 | try: 167 | myval = int(d.rand_id, 16) 168 | except ValueError: 169 | # not hexadecimal... need to re-fetch, etc. 170 | print(d.filename) 171 | qd = db.QueuedDistfile() 172 | qd.filename = d.filename 173 | qd.catpkg = d.catpkg 174 | qd.kit = d.kit 175 | qd.src_uri = d.src_uri 176 | qd.size = d.size 177 | qd.mirror = d.mirror 178 | qd.digest_type = "sha512" 179 | qd.added_on = datetime.utcnow() 180 | qd.priority = d.priority 181 | qd.last_attempted_on = None 182 | qd.last_failure_on = None 183 | qd.failcount = 0 184 | qd.failtype = None 185 | session.add(qd) 186 | session.delete(d) 187 | sys.stdout.write(">") 188 | sys.stdout.flush() 189 | session.commit() 190 | 191 | else: 192 | db = FastPullDatabase() 193 | with db.get_session() as session: 194 | for x in session.query(db.QueuedDistfile).filter(db.QueuedDistfile.last_attempted_on == None): 195 | print(x.filename) 196 | 197 | # vim: ts=4 sw=4 noet 198 | -------------------------------------------------------------------------------- /modules/merge/extensions/xproto.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | from merge.merge_utils import runShell, MergeStep, CreateEbuildFromTemplate, get_catpkg_from_ebuild_path 5 | from glob import glob 6 | from collections import defaultdict 7 | import itertools 8 | import asyncio 9 | 10 | class XProtoStepGenerator(MergeStep): 11 | 12 | """ 13 | 14 | This merge step will auto-generate surrogate "stub" ebuilds for a master xproto ebuild. The template 15 | used for the stub ebuilds can be seen below. The run() method takes advantage of threads to process 16 | multiple xproto ebuilds concurrently. 17 | 18 | """ 19 | 20 | def __init__(self, template_text: str): 21 | self.template_text = template_text 22 | 23 | def get_pkgs_from_meson(self, master_cpv, fn, prefix="pcs"): 24 | 25 | """This is a helper method that grabs package names from meson build files in xproto sources. 26 | 27 | It accepts the master_cpv we are processing as an argument, so we can also return it and process the results in a 28 | more pipeline-oriented fashion. We also accept the arguments ``fn`` -- filename of the meson file, and a prefix 29 | parameter used to tweak the specific result sets we want to grab from the meson file. 30 | """ 31 | 32 | capture = False 33 | 34 | with open(fn, "r") as f: 35 | lines = f.readlines() 36 | for line in lines: 37 | ls = line.strip() 38 | if ls.startswith("%s = [" % prefix): 39 | capture = True 40 | elif capture is True: 41 | if ls == "]": 42 | break 43 | else: 44 | ls = ls.lstrip("[").rstrip("],").split(",") 45 | pkg = ls[0].strip().strip("'") 46 | ver = ls[1].strip().strip("'") 47 | yield master_cpv, pkg, ver 48 | 49 | async def worker_async(self, meta_pkg_ebuild_path, tree): 50 | """ 51 | This is a worker method that will extract an xproto ebuild using the ebuild command, then use the get_pkgs_from_meson 52 | helper method to grab all package names from meson, and then will return the results. 53 | :param meta_pkg_ebuild_path: This is the absolute path of the xproto ebuild to process. 54 | :return: A list of entries from the meson files -- each entry in the list is a tuple containing cpv of our xproto ebuild, 55 | the meson package name, and the meson version. 56 | """ 57 | 58 | env = os.environ.copy() 59 | env['PORTAGE_DEPCACHEDIR'] = '/var/cache/edb/%s-%s-meta' % (tree.name, tree.branch) 60 | if tree.name != "core-kit": 61 | env['PORTAGE_REPOSITORIES'] = ''' 62 | [DEFAULT] 63 | main-repo = core-kit 64 | 65 | [core-kit] 66 | location = %s/core-kit 67 | aliases = gentoo 68 | 69 | [%s] 70 | location = %s 71 | ''' % (tree.config.dest_trees, tree.name, tree.root) 72 | else: 73 | env['PORTAGE_REPOSITORIES'] = ''' 74 | [DEFAULT] 75 | main-repo = core-kit 76 | 77 | [core-kit] 78 | location = %s/core-kit 79 | aliases = gentoo 80 | ''' % tree.config.dest_trees 81 | 82 | sdata = meta_pkg_ebuild_path.rstrip(".ebuild").split("/") 83 | master_cpv = sdata[-3] + "/" + sdata[-1] 84 | success = await runShell("(cd %s; ebuild %s clean unpack)" % (os.path.dirname(meta_pkg_ebuild_path), os.path.basename(meta_pkg_ebuild_path)), abort_on_failure=False, env=env) 85 | if not success: 86 | return defaultdict(set) 87 | meson_file = os.path.expanduser("~portage/%s/work/xorg*proto-*/meson.build" % master_cpv) 88 | meson_file = glob(meson_file) 89 | if len(meson_file) != 1 or not os.path.exists(meson_file[0]): 90 | print("File not found:", meson_file) 91 | else: 92 | meson_file = meson_file[0] 93 | meta_mappings = defaultdict(set) 94 | for master_cpv, pkg, ver in itertools.chain(self.get_pkgs_from_meson(master_cpv, meson_file), self.get_pkgs_from_meson(master_cpv, meson_file, "legacy_pcs")): 95 | meta_mappings[(pkg, ver)].add(master_cpv) 96 | await runShell("(cd %s; ebuild %s clean)" % (os.path.dirname(meta_pkg_ebuild_path), os.path.basename(meta_pkg_ebuild_path)), abort_on_failure=False, env=env) 97 | return meta_mappings 98 | 99 | async def run(self, tree): 100 | 101 | """ 102 | This is the main "run" method which will run our main worker methods -- worker_async -- concurrently in a ThreadPoolExecutor. 103 | We then get the results of the meson extractions, and create new MergeSteps for generating the appropriate ebuilds using 104 | templates, and run them. 105 | :return: None 106 | """ 107 | 108 | env = os.environ.copy() 109 | env['PORTAGE_DEPCACHEDIR'] = '/var/cache/edb/%s-%s-meta' % (tree.name, tree.branch) 110 | if tree.name != "core-kit": 111 | env['PORTAGE_REPOSITORIES'] = ''' 112 | [DEFAULT] 113 | main-repo = core-kit 114 | 115 | [core-kit] 116 | location = %s/core-kit 117 | aliases = gentoo 118 | 119 | [%s] 120 | location = %s 121 | ''' % (tree.config.dest_trees, tree.name, tree.root) 122 | else: 123 | env['PORTAGE_REPOSITORIES'] = ''' 124 | [DEFAULT] 125 | main-repo = core-kit 126 | 127 | [core-kit] 128 | location = %s/core-kit 129 | aliases = gentoo 130 | ''' % tree.config.dest_trees 131 | 132 | 133 | all_meta_pkg_ebuilds = list(glob(tree.root + "/x11-base/xorg-proto/xorg-proto-*.ebuild")) 134 | futures =[ 135 | self.loop.run_in_executor(self.cpu_bound_executor, self.run_async_in_executor, self.worker_async, meta_pkg_ebuild_path, tree) 136 | for meta_pkg_ebuild_path in all_meta_pkg_ebuilds 137 | ] 138 | meta_mappings = defaultdict(set) 139 | for future in asyncio.as_completed(futures): 140 | new_meta_mappings = await future 141 | for key, new_set in new_meta_mappings.items(): 142 | meta_mappings[key] |= new_set 143 | 144 | for pv_key, all_meta_atoms in meta_mappings.items(): 145 | pkg, ver = pv_key 146 | all_meta_atoms = sorted(list(all_meta_atoms)) 147 | output_ebuild = tree.root + "/x11-proto/%s/%s-%s.ebuild" % (pkg, pkg, ver) 148 | output_dir = os.path.dirname(output_ebuild) 149 | if not os.path.exists(output_dir): 150 | os.makedirs(output_dir) 151 | step = CreateEbuildFromTemplate( 152 | template_text=self.template_text, 153 | template_params={ "all_meta_atoms" : all_meta_atoms }, 154 | file_subpath = "x11-proto/%s/%s-%s.ebuild" % ( pkg, pkg, ver ) 155 | ) 156 | await step.run(tree) 157 | self.collector.cpm_logger.record(tree.name, get_catpkg_from_ebuild_path(output_ebuild), is_fixup=True) 158 | -------------------------------------------------------------------------------- /modules/merge/merge_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import glob 4 | import itertools 5 | import os 6 | import shutil 7 | import subprocess 8 | import sys 9 | import re 10 | from lxml import etree 11 | import portage 12 | portage._internal_caller = True 13 | from portage.dep import use_reduce, dep_getkey, flatten 14 | from portage.exception import PortageKeyError 15 | import grp 16 | import pwd 17 | import multiprocessing 18 | from collections import defaultdict 19 | from portage.util.futures.iter_completed import async_iter_completed 20 | from merge.async_portage import async_xmatch 21 | import asyncio 22 | from concurrent.futures import ThreadPoolExecutor 23 | from multiprocessing import cpu_count 24 | import jinja2 25 | 26 | debug = False 27 | 28 | from merge.async_engine import AsyncEngine 29 | from enum import Enum 30 | 31 | # These should be kept in-sync with definitions that are in foundations.py. 32 | 33 | class KitStabilityRating(Enum): 34 | PRIME = 0 # Kit is enterprise-quality 35 | NEAR_PRIME = 1 # Kit is approaching enterprise-quality 36 | BETA = 2 # Kit is in beta 37 | ALPHA = 3 # Kit is in alpha 38 | DEV = 4 # Kit is newly created and in active development 39 | CURRENT = 10 # Kit follows Gentoo currrent 40 | DEPRECATED = 11 # Kit is deprecated/retired 41 | 42 | class KitType(Enum): 43 | AUTOMATICALLY_GENERATED = "auto" # auto-generated 44 | INDEPENDENTLY_MAINTAINED = "indy" # independently-maintained 45 | 46 | def KitRatingString(kit_enum): 47 | if kit_enum is KitStabilityRating.PRIME: 48 | return "prime" 49 | elif kit_enum is KitStabilityRating.NEAR_PRIME: 50 | return "near-prime" 51 | elif kit_enum is KitStabilityRating.BETA: 52 | return "beta" 53 | elif kit_enum is KitStabilityRating.ALPHA: 54 | return "alpha" 55 | elif kit_enum is KitStabilityRating.DEV: 56 | return "dev" 57 | elif kit_enum is KitStabilityRating.CURRENT: 58 | return "current" 59 | elif kit_enum is KitStabilityRating.DEPRECATED: 60 | return "deprecated" 61 | 62 | class AsyncMergeAllKits(AsyncEngine): 63 | _db = None 64 | 65 | @property 66 | def db(self): 67 | if self._db is None: 68 | from merge.db_core import FastPullDatabase 69 | self._db = FastPullDatabase() 70 | return self._db 71 | 72 | def worker_thread(self, **kwargs): 73 | db = self.db 74 | with db.get_session() as session: 75 | f = kwargs["file"] 76 | existing = session.query(db.Distfile).filter(db.Distfile.id == kwargs["digest"]).first() 77 | # TODO: maybe it already exists, but under a different filename. If so, we still want to create a distfile entry for it so it can be downloaded... 78 | 79 | if existing: 80 | return 81 | 82 | # Don't create multiple queued downloads for the same distfile: 83 | if session.query(db.QueuedDistfile).filter(db.QueuedDistfile.filename == f).filter( 84 | db.QueuedDistfile.size == kwargs["size"]).first() is not None: 85 | return 86 | 87 | # Queue the distfile for downloading... 88 | 89 | qd = db.QueuedDistfile() 90 | qd.filename = f 91 | qd.catpkg = kwargs["catpkg"] 92 | qd.kit = kwargs["kit_name"] 93 | qd.branch = kwargs["kit_branch"] 94 | qd.src_uri = kwargs["src_uri"] 95 | qd.size = kwargs["size"] 96 | qd.mirror = kwargs["restrict"] 97 | qd.digest_type = kwargs["digest_type"] 98 | qd.digest = kwargs["digest"] 99 | qd.priority = 1 if kwargs["bestmatch"] else 0 100 | session.add(qd) 101 | session.commit() 102 | 103 | class RepositoryStepsCollector: 104 | 105 | """ 106 | This class is designed to "hold" a bunch of repository steps and also provide these repository steps with 107 | access to important information, which is stored in the properties of the collector object. 108 | """ 109 | 110 | def __init__(self, fixup_root, dest_tree, cpm_logger=None): 111 | self.fixup_root = fixup_root 112 | self.dest_tree = dest_tree 113 | self.cpm_logger = cpm_logger 114 | self.steps = [] 115 | 116 | def add_step(self, new_step): 117 | """Add a step to our collection and also link our step to the collector.""" 118 | new_step.collector = self 119 | self.steps.append(new_step) 120 | 121 | async def run_steps_in_tree(self, tree): 122 | for step in self.steps: 123 | await step.run(tree) 124 | 125 | class MergeStep: 126 | 127 | loop = asyncio.get_event_loop() 128 | cpu_bound_executor = ThreadPoolExecutor(max_workers=cpu_count()) 129 | # This is only used for Repository Steps: 130 | collector = None 131 | 132 | def run_async_in_executor(self, corofn, *args): 133 | 134 | """ 135 | 136 | Use this method to run an asynchronous worker within a ThreadPoolExecutor. 137 | Without this special wrapper, this normally doesn't work, and the ThreadPoolExecutor will not allow async calls. 138 | But with this wrapper, our worker and its subsequent calls can be async. 139 | 140 | Use as follows:: 141 | 142 | futures =[ 143 | self.loop.run_in_executor(self.cpu_bound_executor, self.run_async_in_executor, self.worker_async, worker_async_arg1, ...) 144 | for meta_pkg_ebuild_path in all_meta_pkg_ebuilds 145 | ] 146 | for future in asyncio.as_completed(futures): 147 | ... 148 | 149 | """ 150 | loop = asyncio.new_event_loop() 151 | try: 152 | future = corofn(*args) 153 | asyncio.set_event_loop(loop) 154 | return loop.run_until_complete(future) 155 | finally: 156 | loop.close() 157 | 158 | async def run(self, tree): 159 | pass 160 | 161 | def get_catpkg_from_ebuild_path(path): 162 | """Simple method to take an ebuild path and extract the Portage catpkg atom from it.""" 163 | 164 | spl = path.rstrip(".ebuild").split("/") 165 | return spl[-3] + "/" + spl[-1] 166 | 167 | 168 | class RunRepositoryStepsIfAvailable(MergeStep): 169 | 170 | def __init__(self, fixup_root, cpm_logger): 171 | self.fixup_root = fixup_root # the "master" repo root 172 | self.cpm_logger = cpm_logger 173 | 174 | async def run(self, tree): 175 | 176 | # We look in various directories for the generate.py file -- and use standard kit-fixups approach to overriding: 177 | # branch > curated > global: 178 | 179 | root = os.path.join(self.fixup_root, tree.name) 180 | global_root = os.path.join(root, "global", "generate.py") 181 | curated_root = os.path.join(root, "curated", "generate.py") 182 | branch_root = os.path.join(root, tree.branch, "generate.py") 183 | if os.path.exists(branch_root): 184 | p = branch_root 185 | elif os.path.exists(curated_root): 186 | p = curated_root 187 | elif os.path.exists(global_root): 188 | p = global_root 189 | else: 190 | return 191 | 192 | import importlib.util 193 | spec = importlib.util.spec_from_file_location('repository_steps', p) 194 | mod = importlib.util.module_from_spec(spec) 195 | spec.loader.exec_module(mod) 196 | 197 | # When actually running generate.py, we will lock the fixup_root passed to the RepositoryStepsCollector to 198 | # wherever we found the generate.py file: 199 | 200 | repo_steps_collector = RepositoryStepsCollector(fixup_root=p, dest_tree=tree.root, cpm_logger=self.cpm_logger) 201 | mod.add_steps(repo_steps_collector) 202 | await repo_steps_collector.run_steps_in_tree(tree) 203 | 204 | 205 | class CreateEbuildFromTemplate(MergeStep): 206 | 207 | def __init__(self, file_subpath, template_text, template_params=None): 208 | self.file_subpath = file_subpath 209 | self.template_text = template_text 210 | if template_params is None: 211 | self.template_params = {} 212 | else: 213 | self.template_params = template_params 214 | 215 | async def run(self, tree): 216 | outfile = os.path.join(tree.root, self.file_subpath) 217 | outdir = os.path.dirname(outfile) 218 | if not os.path.exists(outdir): 219 | os.makedirs(outdir) 220 | with open(outfile, "w") as f: 221 | print('Generating %s...' % outfile) 222 | template = jinja2.Template(self.template_text) 223 | f.write(template.render(**self.template_params)) 224 | 225 | 226 | class Tree: 227 | 228 | def head(self): 229 | return "None" 230 | 231 | 232 | class GitTreeError(Exception): 233 | pass 234 | 235 | 236 | class GitTree(Tree): 237 | "A Tree (git) that we can use as a source for work jobs, and/or a target for running jobs." 238 | 239 | def __init__(self, name: str, branch: str = "master", config=None, url: str = None, commit_sha1: str = None, 240 | root: str = None, 241 | create: bool = False, 242 | reponame: str = None, 243 | mirror: str = None, 244 | origin_check: bool = True, 245 | destfix: bool = False, 246 | reclone: bool = False): 247 | 248 | # note that if create=True, we are in a special 'local create' mode which is good for testing. We create the repo locally from 249 | # scratch if it doesn't exist, as well as any branches. And we don't push. 250 | self.config = config 251 | self.name = name 252 | self.root = root 253 | self.url = url 254 | self.merged = [] 255 | self.pull = True 256 | self.reponame = reponame 257 | self.create = create 258 | self.has_cleaned = False 259 | self.initialized = False 260 | self.initial_future = self.initialize_tree(branch, commit_sha1) 261 | self.mirror = mirror 262 | self.origin_check = origin_check 263 | self.destfix = destfix 264 | self.reclone = reclone 265 | 266 | # if we don't specify root destination tree, assume we are source only: 267 | 268 | async def initialize_tree(self, branch, commit_sha1=None): 269 | self.branch = branch 270 | self.commit_sha1 = commit_sha1 271 | 272 | if self.root is None: 273 | base = self.config.source_trees 274 | self.root = "%s/%s" % (base, self.name) 275 | 276 | if os.path.isdir("%s/.git" % self.root) and self.reclone: 277 | await runShell("rm -rf %s" % self.root) 278 | 279 | if not os.path.isdir("%s/.git" % self.root): 280 | # repo does not exist? - needs to be cloned or created 281 | if os.path.exists(self.root): 282 | raise GitTreeError("%s exists but does not appear to be a valid git repository." % self.root) 283 | 284 | base = os.path.dirname(self.root) 285 | if self.create: 286 | # we have been told to create this repo. This works even if we have a remote clone URL specified 287 | os.makedirs(self.root) 288 | await runShell("( cd %s && git init )" % self.root) 289 | await runShell("echo 'created by merge.py' > %s/README" % self.root) 290 | await runShell("( cd %s && git add README; git commit -a -m 'initial commit by merge.py' )" % self.root) 291 | await runShell("( cd %s && git remote add origin %s )" % (self.root, self.url)) 292 | elif self.url: 293 | if not os.path.exists(base): 294 | os.makedirs(base) 295 | # we aren't supposed to create it from scratch -- can we clone it? 296 | await runShell("(cd %s && git clone %s %s)" % (base, self.url, os.path.basename(self.root))) 297 | 298 | else: 299 | # we've run out of options 300 | print("Error: tree %s does not exist, but no clone URL specified. Exiting." % self.root) 301 | sys.exit(1) 302 | 303 | # create local tracking branches for all remote branches. - we want to do this for every initialization. 304 | s, o = subprocess.getstatusoutput("(cd %s && git branch -r | grep -v /HEAD)" % self.root) 305 | if s != 0: 306 | # if repo is totally uninitialized (like gitolite wildrepo) -- initialize it with a first commit. 307 | print("Attempting to initialize git repository for first use...") 308 | await runShell("(cd %s && touch README && git add README && git commit -a -m 'first commit' && git push)" % self.root) 309 | s, o = subprocess.getstatusoutput("(cd %s && git branch -r | grep -v /HEAD)" % self.root) 310 | if s != 0: 311 | print("Error listing local branches.") 312 | sys.exit(1) 313 | for branch in o.split(): 314 | branch = branch.split("/")[-1] 315 | if not self.localBranchExists(branch): 316 | await runShell("( cd %s && git checkout %s)" % (self.root, branch)) 317 | 318 | # if we've gotten here, we can assume that the repo exists at self.root. 319 | if self.url is not None and self.origin_check: 320 | retval, out = subprocess.getstatusoutput("(cd %s && git remote get-url origin)" % self.root) 321 | my_url = self.url 322 | if my_url.endswith(".git"): 323 | my_url = my_url[:-4] 324 | if out.endswith(".git"): 325 | out = out[:-4] 326 | if out != my_url: 327 | if self.destfix: 328 | print("WARNING: fixing remote URL for origin to point to %s" % my_url) 329 | self.setRemoteURL('origin', my_url) 330 | else: 331 | print() 332 | print("Error: remote url for origin at %s is:" % self.root) 333 | print() 334 | print(" existing:", out) 335 | print(" expected:", self.url) 336 | print() 337 | print("Please fix or delete any repos that are cloned from the wrong origin.") 338 | print("To do this automatically, use the --destfix option with merge-all-kits.") 339 | raise GitTreeError("%s: Git origin mismatch." % self.root) 340 | # first, we will clean up any messes: 341 | if not self.has_cleaned: 342 | await runShell("(cd %s && git reset --hard && git clean -fd )" % self.root) 343 | self.has_cleaned = True 344 | 345 | # git fetch will run as part of this: 346 | await self.gitCheckout(self.branch, from_init=True) 347 | 348 | # point to specified sha1: 349 | 350 | if self.commit_sha1: 351 | await runShell("(cd %s && git checkout %s )" % (self.root, self.commit_sha1)) 352 | if self.head() != self.commit_sha1: 353 | raise GitTreeError("%s: Was not able to check out specified SHA1: %s." % (self.root, self.commit_sha1)) 354 | elif self.pull: 355 | # we are on the right branch, but we want to make sure we have the latest updates 356 | await runShell("(cd %s && git pull -f --all || true)" % self.root) 357 | 358 | self.initialized = True 359 | 360 | async def initialize(self): 361 | if not self.initialized: 362 | await self.initial_future 363 | 364 | @property 365 | def currentLocalBranch(self): 366 | s, branch = subprocess.getstatusoutput("( cd %s && git symbolic-ref --short -q HEAD )" % self.root) 367 | if s: 368 | return None 369 | else: 370 | return branch 371 | 372 | def localBranchExists(self, branch): 373 | s, branch = subprocess.getstatusoutput("( cd %s && git show-ref --verify --quiet refs/heads/%s )" % (self.root, branch)) 374 | if s: 375 | return False 376 | else: 377 | return True 378 | 379 | def getRemoteURL(self, remote): 380 | s, o = subprocess.getstatusoutput("( cd %s && git remote get-url %s )" % (self.root, remote)) 381 | if s: 382 | return None 383 | else: 384 | return o.strip() 385 | 386 | def setRemoteURL(self, mirror_name, url): 387 | s, o = subprocess.getstatusoutput("( cd %s && git remote add %s %s )" % (self.root, mirror_name, url)) 388 | if s: 389 | return False 390 | else: 391 | return True 392 | 393 | def remoteBranchExists(self, branch): 394 | s, o = subprocess.getstatusoutput("( cd %s && git show-branch remotes/origin/%s )" % (self.root, branch)) 395 | if s: 396 | return False 397 | else: 398 | return True 399 | 400 | def getDepthOfCommit(self, sha1): 401 | s, depth = subprocess.getstatusoutput("( cd %s && git rev-list HEAD ^%s --count)" % (self.root, sha1)) 402 | return int(depth) + 1 403 | 404 | def getAllCatPkgs(self): 405 | with open(self.root + "/profiles/categories", "r") as a: 406 | cats = set(a.read().split()) 407 | for item in glob.glob(self.root + "/*-*"): 408 | if os.path.isdir(item): 409 | cat = os.path.basename(item) 410 | if cat not in cats: 411 | print("!!! WARNING: category %s not in categories... should be added to profiles/categories!" % item) 412 | cats.add(cat) 413 | cats = sorted(list(cats)) 414 | catpkgs = {} 415 | 416 | for cat in cats: 417 | if not os.path.exists(self.root + "/" + cat): 418 | continue 419 | pkgs = os.listdir(self.root + "/" + cat) 420 | for pkg in pkgs: 421 | if not os.path.isdir(self.root + "/" + cat + "/" + pkg): 422 | continue 423 | catpkgs[cat + "/" + pkg] = self.name 424 | return catpkgs 425 | 426 | def catpkg_exists(self, catpkg): 427 | return os.path.exists(self.root + "/" + catpkg) 428 | 429 | async def gitCheckout(self, branch="master", from_init=False): 430 | if not from_init: 431 | await self.initialize() 432 | await runShell("(cd %s && git fetch --verbose)" % self.root) 433 | if self.localBranchExists(branch): 434 | await runShell("(cd %s && git checkout %s && git pull -f --all || true)" % (self.root, branch)) 435 | elif self.remoteBranchExists(branch): 436 | await runShell("(cd %s && git checkout -b %s --track origin/%s)" % (self.root, branch, branch)) 437 | else: 438 | await runShell("(cd %s && git checkout -b %s)" % (self.root, branch)) 439 | if self.currentLocalBranch != branch: 440 | raise GitTreeError("%s: On branch %s. not able to check out branch %s." % (self.root, self.currentLocalBranch, branch)) 441 | 442 | async def mirrorLocalBranches(self, mirror=None): 443 | if mirror is None: 444 | mirror = self.url 445 | # This is a special push command that will push local tags and branches *only* 446 | await runShell("(cd %s && git push %s +refs/heads/* +refs/tags/*)" % (self.root, mirror)) 447 | 448 | async def mirrorUpstreamRepository(self, mirror): 449 | # This is a special push command that will push all the stuff from origin (branches and tags) *only* 450 | # It will skip local branches. 451 | await runShell("(cd %s && git fetch --prune)" % self.root) 452 | await runShell("(cd %s && git push --prune %s +refs/remotes/origin/*:refs/heads/* +refs/tags/*:refs/tags/*)" % (self.root, mirror)) 453 | 454 | async def gitMirrorPush(self): 455 | await runShell( 456 | "(cd %s && ( git rev-parse --abbrev-ref --symbolic-full-name @{u} || git branch --set-upstream-to origin/%s))" % ( 457 | self.root, self.branch)) 458 | await self.mirrorLocalBranches() 459 | if self.mirror: 460 | await self.mirrorUpstreamRepository(self.mirror) 461 | 462 | async def gitCommit(self, message="", push=True): 463 | await runShell("( cd %s && git add . )" % self.root) 464 | cmd = "( cd %s && [ -n \"$(git status --porcelain)\" ] && git commit -a -F - << EOF\n" % self.root 465 | if message != "": 466 | cmd += "%s\n\n" % message 467 | names = [] 468 | if len(self.merged): 469 | cmd += "merged: \n\n" 470 | for name, sha1 in self.merged: 471 | if name in names: 472 | # don't print dups 473 | continue 474 | names.append(name) 475 | if sha1 is not None: 476 | cmd += " %s: %s\n" % (name, sha1) 477 | cmd += "EOF\n" 478 | cmd += ")\n" 479 | print("running: %s" % cmd) 480 | # we use os.system because this multi-line command breaks runShell() - really, breaks commands.getstatusoutput(). 481 | myenv = os.environ.copy() 482 | if os.geteuid() == 0: 483 | # make sure HOME is set if we are root (maybe we entered to a minimal environment -- this will mess git up.) 484 | # In particular, a new tmux window will have HOME set to /root but NOT exported. Which will mess git up. (It won't know where to find ~/.gitconfig.) 485 | myenv["HOME"] = "/root" 486 | cp = subprocess.run(cmd, shell=True, env=myenv) 487 | retval = cp.returncode 488 | if retval not in [ 0, 1 ]: # can return 1 489 | print("retval is: %s" % retval) 490 | print(cp) 491 | print("Commit failed.") 492 | sys.exit(1) 493 | if push is True and self.create is False: 494 | await self.mirrorLocalBranches() 495 | if self.mirror: 496 | await self.mirrorUpstreamRepository(mirror=self.mirror) 497 | else: 498 | print("Pushing disabled.") 499 | 500 | async def run(self, steps): 501 | for step in steps: 502 | if step is not None: 503 | print("Running step", step.__class__.__name__, step) 504 | await step.run(self) 505 | 506 | def head(self): 507 | return headSHA1(self.root) 508 | 509 | def logTree(self, srctree): 510 | # record name and SHA of src tree in dest tree, used for git commit message/auditing: 511 | if srctree.name is None: 512 | # this tree doesn't have a name, so just copy any existing history from that tree 513 | self.merged.extend(srctree.merged) 514 | else: 515 | # this tree has a name, so record the name of the tree and its SHA1 for reference 516 | if hasattr(srctree, "origroot"): 517 | self.merged.append([srctree.name, headSHA1(srctree.origroot)]) 518 | return 519 | self.merged.append([srctree.name, srctree.head()]) 520 | 521 | 522 | class RsyncTree(Tree): 523 | def __init__(self, name, config=None, url="rsync://rsync.us.gentoo.org/gentoo-portage/"): 524 | self.name = name 525 | self.url = url 526 | self.config = config 527 | base = self.config.source_trees 528 | self.root = "%s/%s" % (base, self.name) 529 | if not os.path.exists(base): 530 | os.makedirs(base) 531 | runShell( 532 | "rsync --recursive --delete-excluded --links --safe-links --perms --times --compress --force --whole-file --delete --timeout=180 --exclude=/.git --exclude=/metadata/cache/ --exclude=/metadata/glsa/glsa-200*.xml --exclude=/metadata/glsa/glsa-2010*.xml --exclude=/metadata/glsa/glsa-2011*.xml --exclude=/metadata/md5-cache/ --exclude=/distfiles --exclude=/local --exclude=/packages %s %s/" % ( 533 | self.url, self.root)) 534 | 535 | 536 | class SvnTree(Tree): 537 | def __init__(self, name, config=None, url=None): 538 | self.name = name 539 | self.url = url 540 | self.config = config 541 | base = self.config.source_trees 542 | self.root = "%s/%s" % (base, self.name) 543 | if not os.path.exists(base): 544 | os.makedirs(base) 545 | if os.path.exists(self.root): 546 | runShell("(cd %s && svn up)" % self.root, abort_on_failure=False) 547 | else: 548 | runShell("(cd %s && svn co %s %s)" % (base, self.url, self.name)) 549 | 550 | 551 | class CvsTree(Tree): 552 | def __init__(self, name, config=None, url=None, path=None): 553 | self.name = name 554 | self.url = url 555 | if path is None: 556 | path = self.name 557 | self.config = config 558 | base = self.config.source_trees 559 | self.root = "%s/%s" % (base, path) 560 | if not os.path.exists(base): 561 | os.makedirs(base) 562 | if os.path.exists(self.root): 563 | runShell("(cd %s && cvs update -dP)" % self.root, abort_on_failure=False) 564 | else: 565 | runShell("(cd %s && cvs -d %s co %s)" % (base, self.url, path)) 566 | 567 | def get_move_maps(move_map_path, kit_name): 568 | """Grabs a move map list, returning a dictionary""" 569 | move_maps = {} 570 | for kit in ["global", kit_name]: 571 | fname = move_map_path + "/" + kit 572 | if os.path.exists(fname): 573 | with open(fname, "r") as move_file: 574 | for line in move_file: 575 | line = line.strip() 576 | if line.startswith("#"): 577 | continue 578 | elif len(line) == 0: 579 | continue 580 | move_split = line.split("->") 581 | if len(move_split) != 2: 582 | print("WARNING: invalid package move line in %s: %s" % ( fname, line)) 583 | continue 584 | else: 585 | pkg1 = move_split[0].strip() 586 | pkg2 = move_split[1].strip() 587 | move_maps[pkg1] = pkg2 588 | return move_maps 589 | 590 | 591 | def get_pkglist(fname): 592 | 593 | """Grabs a package set list, returning a list of lines.""" 594 | if fname[0] == "/": 595 | cpkg_fn = fname 596 | else: 597 | cpkg_fn = os.path.dirname(os.path.abspath(__file__)) + "/" + fname 598 | if not os.path.isdir(cpkg_fn): 599 | # single file specified 600 | files = [ cpkg_fn ] 601 | else: 602 | # directory specifed -- we will grab the file contents of the dir: 603 | fn_list = os.listdir(cpkg_fn) 604 | fn_list.sort() 605 | files = [] 606 | for fn in fn_list: 607 | files.append(cpkg_fn + "/" + fn) 608 | patterns = [] 609 | for cpkg_fn in files: 610 | with open(cpkg_fn,"r") as cpkg: 611 | for line in cpkg: 612 | line = line.strip() 613 | if line == "": 614 | continue 615 | ls = line.split("#") 616 | if len(ls) >=2: 617 | line = ls[0] 618 | patterns.append(line) 619 | else: 620 | return patterns 621 | 622 | 623 | def get_package_set_and_skips_for_kit(fixup_root, release, kit_name): 624 | 625 | pkgf = "package-sets/%s/%s-packages" 626 | pkgf_skip = "package-sets/%s/%s-skip" 627 | 628 | specific_pkgf = os.path.join(fixup_root, pkgf % (release, kit_name)) 629 | if os.path.exists(specific_pkgf): 630 | specific_skips = os.path.join(fixup_root, pkgf_skip % (release, kit_name)) 631 | if os.path.exists(specific_skips): 632 | return get_pkglist(specific_pkgf), get_pkglist(specific_skips) 633 | else: 634 | return get_pkglist(specific_pkgf), [] 635 | else: 636 | global_pkgf = os.path.join(fixup_root, pkgf % ("global", kit_name)) 637 | global_skips = os.path.join(fixup_root, pkgf_skip % ("global", kit_name)) 638 | if os.path.exists(global_skips): 639 | return get_pkglist(global_pkgf), get_pkglist(global_skips) 640 | else: 641 | return get_pkglist(global_pkgf), [] 642 | 643 | 644 | def filterInCategory(pkgset, fil): 645 | match = set() 646 | nomatch = set() 647 | for pkg in list(pkgset): 648 | if pkg.startswith(fil): 649 | match.add(pkg) 650 | else: 651 | nomatch.add(pkg) 652 | return match, nomatch 653 | 654 | 655 | def do_package_use_line(pkg, def_python, bk_python, imps): 656 | if def_python not in imps: 657 | if bk_python in imps: 658 | return "%s python_single_target_%s" % (pkg, bk_python) 659 | else: 660 | return "%s python_single_target_%s python_targets_%s" % (pkg, imps[0], imps[0]) 661 | return None 662 | 663 | 664 | class GenPythonUse(MergeStep): 665 | 666 | def __init__(self, py_settings, out_subpath, release): 667 | self.def_python = py_settings["primary"] 668 | self.bk_python = py_settings["alternate"] 669 | self.mask = py_settings["mask"] 670 | self.out_subpath = out_subpath 671 | self.release = release 672 | 673 | async def run(self, cur_overlay): 674 | cur_tree = cur_overlay.root 675 | try: 676 | with open(os.path.join(cur_tree, 'profiles/repo_name')) as f: 677 | cur_name = f.readline().strip() 678 | except FileNotFoundError: 679 | cur_name = cur_overlay.name 680 | env = os.environ.copy() 681 | env['PORTAGE_DEPCACHEDIR'] = '/var/cache/edb/%s-%s-%s-meta' % ( self.release, cur_overlay.name, cur_overlay.branch ) 682 | if cur_name != "core-kit": 683 | env['PORTAGE_REPOSITORIES'] = ''' 684 | [DEFAULT] 685 | main-repo = core-kit 686 | 687 | [core-kit] 688 | location = %s/core-kit 689 | aliases = gentoo 690 | 691 | [%s] 692 | location = %s 693 | ''' % (cur_overlay.config.dest_trees, cur_name, cur_tree) 694 | else: 695 | env['PORTAGE_REPOSITORIES'] = ''' 696 | [DEFAULT] 697 | main-repo = core-kit 698 | 699 | [core-kit] 700 | location = %s/core-kit 701 | aliases = gentoo 702 | ''' % cur_overlay.config.dest_trees 703 | p = portage.portdbapi(mysettings=portage.config(env=env,config_profile_path='')) 704 | 705 | pkg_use = [] 706 | 707 | for pkg in p.cp_all(): 708 | 709 | cp = portage.catsplit(pkg) 710 | if not os.path.exists(cur_tree + "/" + pkg): 711 | # catpkg is from core-kit, but we are not processing core kit, so skip: 712 | continue 713 | ebs = {} 714 | for a in await async_xmatch(p, "match-all", pkg): 715 | if len(a) == 0: 716 | continue 717 | aux = await p.async_aux_get(a, ["INHERITED"]) 718 | eclasses=aux[0].split() 719 | if "python-single-r1" not in eclasses: 720 | continue 721 | else: 722 | px = portage.catsplit(a) 723 | cmd = '( eval $(cat %s/%s/%s/%s.ebuild | grep ^PYTHON_COMPAT); echo "${PYTHON_COMPAT[@]}" )' % ( cur_tree, cp[0], cp[1], px[1] ) 724 | outp = await getcommandoutput(cmd) 725 | imps = outp[1].decode("ascii").split() 726 | if len(imps) == 0: 727 | print("!!! WARNING: ebuild %s in %s has blank or undefined PYTHON_COMPAT; this should be fixed!" % (a, cur_overlay.name)) 728 | continue 729 | ebs[a] = imps 730 | if len(ebs.keys()) == 0: 731 | continue 732 | 733 | # ebs now is a dict containing catpkg -> PYTHON_COMPAT settings for each ebuild in the catpkg. We want to see if they are identical 734 | 735 | oldval = None 736 | 737 | # if split == False, then we will do one global setting for the catpkg. If split == True, we will do individual settings for each version 738 | # of the catpkg, since there are differences. This saves space in our python-use file while keeping everything correct. 739 | 740 | split = False 741 | for key, val in ebs.items(): 742 | if oldval is None: 743 | oldval = val 744 | else: 745 | if oldval != val: 746 | split = True 747 | break 748 | 749 | if not split: 750 | pkg_use += [ do_package_use_line(pkg, self.def_python, self.bk_python, oldval) ] 751 | else: 752 | for key,val in ebs.items(): 753 | pkg_use += [ do_package_use_line("=%s" % key, self.def_python, self.bk_python, val) ] 754 | outpath = cur_tree + '/profiles/' + self.out_subpath + '/package.use' 755 | if not os.path.exists(outpath): 756 | os.makedirs(outpath) 757 | with open(outpath + "/python-use", "w") as f: 758 | for l in sorted(x for x in pkg_use if x is not None): 759 | f.write(l + "\n") 760 | # for core-kit, set good defaults as well. 761 | if cur_name == "core-kit": 762 | outpath = cur_tree + '/profiles/' + self.out_subpath + '/make.defaults' 763 | a = open(outpath, "w") 764 | a.write('PYTHON_TARGETS="%s %s"\n' % ( self.def_python, self.bk_python )) 765 | a.write('PYTHON_SINGLE_TARGET="%s"\n' % self.def_python) 766 | a.close() 767 | if self.mask: 768 | outpath = cur_tree + '/profiles/' + self.out_subpath + '/package.mask/funtoo-kit-python' 769 | if not os.path.exists(os.path.dirname(outpath)): 770 | os.makedirs(os.path.dirname(outpath)) 771 | a = open(outpath, "w") 772 | a.write(self.mask + "\n") 773 | a.close() 774 | 775 | async def getDependencies(cur_overlay, catpkgs, levels=0, cur_level=0): 776 | cur_tree = cur_overlay.root 777 | try: 778 | with open(os.path.join(cur_tree, 'profiles/repo_name')) as f: 779 | cur_name = f.readline().strip() 780 | except FileNotFoundError: 781 | cur_name = cur_overlay.name 782 | env = os.environ.copy() 783 | if cur_overlay.name != "core-kit": 784 | env['PORTAGE_REPOSITORIES'] = ''' 785 | [DEFAULT] 786 | main-repo = core-kit 787 | 788 | [core-kit] 789 | location = %s/core-kit 790 | aliases = gentoo 791 | 792 | [%s] 793 | location = %s 794 | ''' % (cur_overlay.config.dest_trees, cur_name, cur_tree) 795 | else: 796 | env['PORTAGE_REPOSITORIES'] = ''' 797 | [DEFAULT] 798 | main-repo = core-kit 799 | 800 | [core-kit] 801 | location = %s/core-kit 802 | aliases = gentoo 803 | ''' % cur_overlay.config.dest_trees 804 | p = portage.portdbapi(mysettings=portage.config(env=env,config_profile_path='')) 805 | mypkgs = set() 806 | 807 | future_aux = {} 808 | 809 | def future_generator(): 810 | for catpkg in list(catpkgs): 811 | for my_cpv in p.cp_list(catpkg): 812 | if my_cpv == '': 813 | print("No match for %s" % catpkg) 814 | continue 815 | my_future = p.async_aux_get(my_cpv, [ "DEPEND", "RDEPEND"]) 816 | future_aux[id(my_future)] = my_cpv 817 | yield my_future 818 | 819 | for fu_fu in async_iter_completed(future_generator()): 820 | future_set = await fu_fu 821 | for future in future_set: 822 | cpv = future_aux.pop(id(future)) 823 | try: 824 | result = future.result() 825 | except KeyError as e: 826 | print("aux_get fail", cpv, e) 827 | else: 828 | for dep in flatten(use_reduce(result[0]+" "+result[1], matchall=True)): 829 | if len(dep) and dep[0] == "!": 830 | continue 831 | try: 832 | mypkg = dep_getkey(dep) 833 | except portage.exception.InvalidAtom: 834 | continue 835 | if mypkg not in mypkgs: 836 | mypkgs.add(mypkg) 837 | if levels != cur_level: 838 | mypkgs = mypkgs.union(await getDependencies(cur_overlay, mypkg, levels=levels, cur_level=cur_level+1)) 839 | return mypkgs 840 | 841 | def getPackagesInCatWithMaintainer(cur_overlay, my_cat, my_email): 842 | cat_root = os.path.join(cur_overlay.root, my_cat) 843 | if os.path.exists(cat_root): 844 | for pkgdir in os.listdir(cat_root): 845 | metafile = os.path.join(cat_root, pkgdir, "metadata.xml") 846 | if not os.path.exists(metafile): 847 | continue 848 | tree = etree.parse(metafile) 849 | for email in tree.xpath('.//maintainer/email/text()'): 850 | if my_email == str(email): 851 | yield my_cat + "/" + pkgdir 852 | 853 | def getPackagesMatchingGlob(cur_overlay, my_glob, exclusions=None): 854 | insert_list = [] 855 | if exclusions is None: 856 | exclusions = [] 857 | for candidate in glob.glob(cur_overlay.root + "/" + my_glob): 858 | if not os.path.isdir(candidate): 859 | continue 860 | strip_len = len(cur_overlay.root)+1 861 | candy_strip = candidate[strip_len:] 862 | if candy_strip not in exclusions: 863 | insert_list.append(candy_strip) 864 | return insert_list 865 | 866 | def getPackagesMatchingRegex(cur_overlay, my_regex): 867 | insert_list = [] 868 | for candidate in glob.glob(cur_overlay.root + "/*/*"): 869 | if not os.path.isdir(candidate): 870 | continue 871 | strip_len = len(cur_overlay.root)+1 872 | candy_strip = candidate[strip_len:] 873 | if my_regex.match(candy_strip): 874 | insert_list.append(candy_strip) 875 | return insert_list 876 | 877 | async def getPackagesWithEclass(cur_overlay, eclass): 878 | cur_tree = cur_overlay.root 879 | try: 880 | with open(os.path.join(cur_tree, 'profiles/repo_name')) as f: 881 | cur_name = f.readline().strip() 882 | except FileNotFoundError: 883 | cur_name = cur_overlay.name 884 | env = os.environ.copy() 885 | if cur_name != "core-kit": 886 | env['PORTAGE_REPOSITORIES'] = ''' 887 | [DEFAULT] 888 | main-repo = core-kit 889 | 890 | [core-kit] 891 | location = %s/core-kit 892 | aliases = gentoo 893 | 894 | [%s] 895 | location = %s 896 | ''' % (cur_overlay.config.dest_trees, cur_name, cur_tree) 897 | else: 898 | env['PORTAGE_REPOSITORIES'] = ''' 899 | [DEFAULT] 900 | main-repo = core-kit 901 | 902 | [core-kit] 903 | location = /%s/core-kit 904 | aliases = gentoo 905 | ''' % cur_overlay.config.dest_trees 906 | p = portage.portdbapi(mysettings=portage.config(env=env, config_profile_path='')) 907 | mypkgs = set() 908 | 909 | future_aux = {} 910 | cpv_map = {} 911 | 912 | def future_generator(): 913 | for catpkg in p.cp_all(): 914 | for my_cpv in p.cp_list(catpkg): 915 | if my_cpv == '': 916 | print("No match for %s" % catpkg) 917 | continue 918 | cpv_map[my_cpv] = catpkg 919 | my_future = p.async_aux_get(my_cpv, [ "INHERITED"]) 920 | future_aux[id(my_future)] = my_cpv 921 | yield my_future 922 | 923 | for fu_fu in async_iter_completed(future_generator()): 924 | future_set = await fu_fu 925 | for future in future_set: 926 | cpv = future_aux.pop(id(future)) 927 | try: 928 | result = future.result() 929 | except KeyError as e: 930 | print("aux_get fail", cpv, e) 931 | else: 932 | if eclass in result[0].split(): 933 | cp = cpv_map[cpv] 934 | if cp not in mypkgs: 935 | mypkgs.add(cp) 936 | return mypkgs 937 | 938 | async def getPackagesInCatWithEclass(cur_overlay, cat, eclass): 939 | cur_tree = cur_overlay.root 940 | try: 941 | with open(os.path.join(cur_tree, 'profiles/repo_name')) as f: 942 | cur_name = f.readline().strip() 943 | except FileNotFoundError: 944 | cur_name = cur_overlay.name 945 | env = os.environ.copy() 946 | if cur_name != "core-kit": 947 | env['PORTAGE_REPOSITORIES'] = ''' 948 | [DEFAULT] 949 | main-repo = core-kit 950 | 951 | [core-kit] 952 | location = %s/core-kit 953 | aliases = gentoo 954 | 955 | [%s] 956 | location = %s 957 | ''' % (cur_overlay.config.dest_trees, cur_name, cur_tree) 958 | else: 959 | env['PORTAGE_REPOSITORIES'] = ''' 960 | [DEFAULT] 961 | main-repo = core-kit 962 | 963 | [core-kit] 964 | location = %s/core-kit 965 | aliases = gentoo 966 | ''' % cur_overlay.config.dest_trees 967 | p = portage.portdbapi(mysettings=portage.config(env=env, config_profile_path='')) 968 | mypkgs = set() 969 | 970 | future_aux = {} 971 | cpv_map = {} 972 | 973 | def future_generator(): 974 | for catpkg in p.cp_all(categories=[cat]): 975 | for my_cpv in p.cp_list(catpkg): 976 | if my_cpv == '': 977 | print("No match for %s" % catpkg) 978 | continue 979 | cpv_map[my_cpv] = catpkg 980 | my_future = p.async_aux_get(my_cpv, [ "INHERITED"]) 981 | future_aux[id(my_future)] = my_cpv 982 | yield my_future 983 | 984 | for fu_fu in async_iter_completed(future_generator()): 985 | future_set = await fu_fu 986 | for future in future_set: 987 | cpv = future_aux.pop(id(future)) 988 | try: 989 | result = future.result() 990 | except KeyError as e: 991 | print("aux_get fail", cpv, e) 992 | else: 993 | if eclass in result[0].split(): 994 | cp = cpv_map[cpv] 995 | if cp not in mypkgs: 996 | mypkgs.add(cp) 997 | return mypkgs 998 | 999 | def extract_uris(src_uri): 1000 | 1001 | fn_urls = defaultdict(list) 1002 | 1003 | def record_fn_url(my_fn, p_blob): 1004 | if p_blob not in fn_urls[my_fn]: 1005 | new_files.append(my_fn) 1006 | fn_urls[my_fn].append(p_blob) 1007 | 1008 | blobs = src_uri.split() 1009 | prev_blob = None 1010 | pos = 0 1011 | new_files = [] 1012 | 1013 | while pos <= len(blobs): 1014 | if pos < len(blobs): 1015 | blob = blobs[pos] 1016 | else: 1017 | blob = "" 1018 | if blob in [")", "(", "||"] or blob.endswith("?"): 1019 | pos += 1 1020 | continue 1021 | if blob == "->": 1022 | # We found a http://foo -> bar situation. Handle it: 1023 | fn = blobs[pos + 1] 1024 | if fn is not None: 1025 | record_fn_url(fn, prev_blob) 1026 | prev_blob = None 1027 | pos += 2 1028 | else: 1029 | # Process previous item: 1030 | if prev_blob: 1031 | fn = prev_blob.split("/")[-1] 1032 | record_fn_url(fn, prev_blob) 1033 | prev_blob = blob 1034 | pos += 1 1035 | 1036 | return fn_urls, new_files 1037 | 1038 | 1039 | class FastPullScan(MergeStep): 1040 | 1041 | def __init__(self, now, engine: AsyncEngine = None): 1042 | self.now = now 1043 | self.engine = engine 1044 | 1045 | async def run(self, cur_overlay: GitTree): 1046 | if self.engine is None: 1047 | return 1048 | cur_tree = cur_overlay.root 1049 | try: 1050 | with open(os.path.join(cur_tree, 'profiles/repo_name')) as f: 1051 | cur_name = f.readline().strip() 1052 | except FileNotFoundError: 1053 | cur_name = cur_overlay.name 1054 | env = os.environ.copy() 1055 | if cur_name != "core-kit": 1056 | env['PORTAGE_REPOSITORIES'] = ''' 1057 | [DEFAULT] 1058 | main-repo = core-kit 1059 | 1060 | [core-kit] 1061 | location = %s/core-kit 1062 | aliases = gentoo 1063 | 1064 | [%s] 1065 | location = %s 1066 | ''' % (cur_overlay.config.dest_trees, cur_name, cur_tree) 1067 | else: 1068 | env['PORTAGE_REPOSITORIES'] = ''' 1069 | [DEFAULT] 1070 | main-repo = core-kit 1071 | 1072 | [core-kit] 1073 | location = %s/core-kit 1074 | aliases = gentoo 1075 | ''' % cur_overlay.config.dest_trees 1076 | env['ACCEPT_KEYWORDS'] = "~amd64 amd64" 1077 | p = portage.portdbapi(mysettings=portage.config(env=env, config_profile_path='')) 1078 | 1079 | for pkg in p.cp_all(trees=[cur_overlay.root]): 1080 | 1081 | # src_uri now has the following format: 1082 | 1083 | # src_uri["foo.tar.gz"] = [ "https://url1", "https//url2" ... ] 1084 | # entries in SRC_URI from fetch-restricted ebuilds will have SRC_URI prefixed by "NOMIRROR:" 1085 | 1086 | # We are scanning SRC_URI in all ebuilds in the catpkg, as well as Manifest. 1087 | # This will give us a complete list of all archives used in the catpkg. 1088 | 1089 | # We want to prioritize SRC_URI for bestmatch-visible ebuilds. We will use bm 1090 | # and prio to tag files that are in bestmatch-visible ebuilds. 1091 | 1092 | bm = await async_xmatch(p, "bestmatch-visible", pkg) 1093 | 1094 | fn_urls = defaultdict(list) 1095 | fn_meta = defaultdict(dict) 1096 | 1097 | for cpv in await async_xmatch(p, "match-all", pkg): 1098 | if len(cpv) == 0: 1099 | continue 1100 | try: 1101 | aux_info = await p.async_aux_get(cpv, ["SRC_URI", "RESTRICT" ], mytree=cur_overlay.root) 1102 | restrict = aux_info[1].split() 1103 | mirror_restrict = False 1104 | for r in restrict: 1105 | if r == "mirror": 1106 | mirror_restrict = True 1107 | break 1108 | except portage.exception.PortageKeyError: 1109 | print("!!! PortageKeyError on %s" % cpv) 1110 | continue 1111 | 1112 | # record our own metadata about each file... 1113 | new_fn_urls, new_files = extract_uris(aux_info[0]) 1114 | fn_urls.update(new_fn_urls) 1115 | for fn in new_files: 1116 | fn_meta[fn]["restrict"] = mirror_restrict 1117 | fn_meta[fn]["bestmatch"] = cpv == bm 1118 | 1119 | man_info = {} 1120 | man_file = cur_tree + "/" + pkg + "/Manifest" 1121 | if os.path.exists(man_file): 1122 | man_f = open(man_file, "r") 1123 | for line in man_f.readlines(): 1124 | ls = line.split() 1125 | if len(ls) <= 3 or ls[0] != "DIST": 1126 | continue 1127 | try: 1128 | digest_index = ls.index("SHA512") + 1 1129 | digest_type = "sha512" 1130 | except ValueError: 1131 | try: 1132 | digest_index = ls.index("SHA256") + 1 1133 | digest_type = "sha256" 1134 | except ValueError: 1135 | print("Error: Manifest file %s has invalid format: " % man_file) 1136 | print(" ", line) 1137 | continue 1138 | man_info[ls[1]] = { "size" : ls[2], "digest" : ls[digest_index], "digest_type" : digest_type } 1139 | man_f.close() 1140 | 1141 | # for each catpkg: 1142 | 1143 | for f, uris in fn_urls.items(): 1144 | 1145 | if f not in man_info: 1146 | print("Error: %s/%s: %s Manifest file contains nothing for %s, skipping..." % (cur_overlay.name, cur_overlay.branch, pkg, f)) 1147 | continue 1148 | 1149 | s_out = "" 1150 | for u in uris: 1151 | s_out += u + "\n" 1152 | 1153 | # If we have already grabbed this distfile, then let's not queue it for fetching... 1154 | 1155 | if man_info[f]["digest_type"] == "sha512": 1156 | # enqueue this distfile to potentially be added to distfile-spider. This is done asynchronously. 1157 | self.engine.enqueue( 1158 | file=f, 1159 | digest=man_info[f]["digest"], 1160 | size=man_info[f]["size"], 1161 | restrict=fn_meta[f]["restrict"], 1162 | catpkg=pkg, 1163 | src_uri=s_out, 1164 | kit_name=cur_overlay.name, 1165 | kit_branch=cur_overlay.branch, 1166 | digest_type=man_info[f]["digest_type"], 1167 | bestmatch= fn_meta[f]["bestmatch"] 1168 | 1169 | ) 1170 | 1171 | def repoName(cur_overlay): 1172 | cur_tree = cur_overlay.root 1173 | try: 1174 | with open(os.path.join(cur_tree, 'profiles/repo_name')) as f: 1175 | cur_name = f.readline().strip() 1176 | except FileNotFoundError: 1177 | cur_name = cur_overlay.name 1178 | return cur_name 1179 | 1180 | # getAllEclasses() and getAllLicenses() uses the function getAllMeta() below to do all heavy lifting. What getAllMeta() returns 1181 | # is a list of eclasses that are used by our kit, but this list doesn't indicate what repository holds the eclasses. 1182 | 1183 | # So we don't know if the eclass is in the dest_kit or in the parent_repo and still needs to be copied over. as an eclass 1184 | # 'fixup'. getAllEclasses() is designed to locate the actual eclass that we care about so we know what repo it lives in and what 1185 | # steps need to be taken, if any. 1186 | 1187 | # First, we will look in our dest-kit repository. If it exists there, then it was already copied into place by a kit-fixup and 1188 | # we do not want to overwrite it with another eclass! Then we will look in the parent_repo (which is designed to be 'gentoo'), 1189 | # and see if the eclass is there. We expect to find it there. If we don't, it is a MISSING eclass (or license). 1190 | 1191 | # getAllEclasses and getAllLicenses return a dictionary with the following keys, and with a list of files relative to the 1192 | # repo root as the dictionary value: 1193 | # 1194 | # 'parent_repo' : list of all eclasses that should be copied from parent repo 1195 | # 'dest_kit' : list of all eclasses that were found in our kit and don't need to be copied (they are already in place) 1196 | # None : list of all eclasses that were NOT found. This is an error and indicates we need some kit-fixups or 1197 | # overlay-specific eclasses. 1198 | 1199 | async def _getAllDriver(metadata, path_prefix, dest_kit, release): 1200 | # these may be eclasses or licenses -- we use the term 'eclass' here: 1201 | eclasses = await getAllMeta(metadata, dest_kit, release) 1202 | out = { None: [], "dest_kit" : [] } 1203 | for eclass in eclasses: 1204 | ep = os.path.join(dest_kit.root, path_prefix, eclass) 1205 | if os.path.exists(ep): 1206 | out["dest_kit"].append(eclass) 1207 | continue 1208 | out[None].append(eclass) 1209 | return out 1210 | 1211 | def simpleGetAllLicenses(dest_kit, parent_repo): 1212 | out = [] 1213 | for my_license in os.listdir(parent_repo.root + "/licenses"): 1214 | if os.path.exists(dest_kit.root + "/licenses/" + my_license): 1215 | continue 1216 | out.append(my_license) 1217 | return out 1218 | 1219 | def simpleGetAllEclasses(dest_kit, parent_repo): 1220 | """ 1221 | A simpler method to get all eclasses copied into a kit. If the eclass exists in parent repo, but not in dest_kit, 1222 | return it in a list. 1223 | 1224 | :param dest_kit: 1225 | :param parent_repo: 1226 | :return: 1227 | """ 1228 | out = [] 1229 | for eclass in os.listdir(parent_repo.root + "/eclass"): 1230 | if not eclass.endswith(".eclass"): 1231 | continue 1232 | if os.path.exists(dest_kit.root + "/eclass/" + eclass): 1233 | continue 1234 | out.append(eclass) 1235 | return out 1236 | 1237 | 1238 | async def getAllEclasses(dest_kit, release): 1239 | return await _getAllDriver("INHERITED", "eclass", dest_kit, release) 1240 | 1241 | async def getAllLicenses(dest_kit, release): 1242 | return await _getAllDriver("LICENSE", "licenses", dest_kit, release) 1243 | 1244 | # getAllMeta uses the Portage API to query metadata out of a set of repositories. It is designed to be used to figure 1245 | # out what licenses or eclasses to copy from a parent repository to the current kit so that the current kit contains a 1246 | # set of all eclasses (and licenses) it needs within itself, without any external dependencies on other repositories 1247 | # for these items -- this is a key design feature of kits to improve stability. 1248 | 1249 | # It supports being called this way: 1250 | # 1251 | # (parent_repo) -- all eclasses/licenses here 1252 | # | 1253 | # | 1254 | # \-------------------------(dest_kit) -- no eclasses/licenses here yet 1255 | # (though some may exist due to being copied by fixups) 1256 | # 1257 | # getAllMeta() returns a set of actual files (without directories) that are used, so [ 'foo.eclass', 'bar.eclass'] 1258 | # or [ 'GPL-2', 'bleh' ]. 1259 | # 1260 | 1261 | async def getAllMeta(metadata, dest_kit, release): 1262 | metadict = { "LICENSE" : 0, "INHERITED" : 1 } 1263 | metapos = metadict[metadata] 1264 | 1265 | env = os.environ.copy() 1266 | env['PORTAGE_DEPCACHEDIR'] = '/var/cache/edb/%s-%s-%s-meta' % ( release, dest_kit.name, dest_kit.branch ) 1267 | if dest_kit.name != "core-kit": 1268 | env['PORTAGE_REPOSITORIES'] = ''' 1269 | [DEFAULT] 1270 | main-repo = core-kit 1271 | 1272 | [core-kit] 1273 | location = %s/core-kit 1274 | aliases = gentoo 1275 | 1276 | [%s] 1277 | location = %s 1278 | ''' % ( dest_kit.config.dest_trees, dest_kit.name, dest_kit.root) 1279 | else: 1280 | # we are testing a stand-alone kit that should have everything it needs included 1281 | env['PORTAGE_REPOSITORIES'] = ''' 1282 | [DEFAULT] 1283 | main-repo = core-kit 1284 | 1285 | [%s] 1286 | location = %s 1287 | aliases = gentoo 1288 | ''' % ( dest_kit.name, dest_kit.root ) 1289 | 1290 | p = portage.portdbapi(mysettings=portage.config(env=env, config_profile_path='')) 1291 | mymeta = set() 1292 | 1293 | future_aux = {} 1294 | cpv_map = {} 1295 | 1296 | def future_generator(): 1297 | for catpkg in p.cp_all(trees=[dest_kit.root]): 1298 | for cpv in p.cp_list(catpkg, mytree=dest_kit.root): 1299 | if cpv == '': 1300 | print("No match for %s" % catpkg) 1301 | continue 1302 | cpv_map[cpv] = catpkg 1303 | my_future = p.async_aux_get(cpv, [ "LICENSE", "INHERITED" ], mytree=dest_kit.root) 1304 | future_aux[id(my_future)] = cpv 1305 | yield my_future 1306 | 1307 | for fu_fu in async_iter_completed(future_generator()): 1308 | future_set = await fu_fu 1309 | for future in future_set: 1310 | cpv = future_aux.pop(id(future)) 1311 | try: 1312 | result = future.result() 1313 | except KeyError as e: 1314 | print("aux_get fail", cpv, e) 1315 | else: 1316 | if metadata == "INHERITED": 1317 | for eclass in result[metapos].split(): 1318 | key = eclass + ".eclass" 1319 | if key not in mymeta: 1320 | mymeta.add(key) 1321 | elif metadata == "LICENSE": 1322 | for lic in result[metapos].split(): 1323 | if lic in [")", "(", "||"] or lic.endswith("?"): 1324 | continue 1325 | if lic not in mymeta: 1326 | mymeta.add(lic) 1327 | return mymeta 1328 | 1329 | 1330 | async def generateKitSteps(release, kit_name, from_tree, select_only="all", fixup_repo=None, cpm_logger=None, filter_repos=None, filter_cats=None, move_maps=None, force=None, secondary_kit=False): 1331 | if force is None: 1332 | force = set() 1333 | else: 1334 | force = set(force) 1335 | literals = [] 1336 | steps = [] 1337 | pkglist = [] 1338 | pkgf = "package-sets/%s-packages" % kit_name 1339 | pkgf_skip = "package-sets/%s-skip" % kit_name 1340 | pkgdir = fixup_repo.root 1341 | pkgf = pkgdir + "/" + pkgf 1342 | pkgf_skip = pkgdir + "/" + pkgf_skip 1343 | skip = [] 1344 | if move_maps is None: 1345 | move_maps = {} 1346 | else: 1347 | move_maps = move_maps 1348 | master_pkglist, skip = get_package_set_and_skips_for_kit(fixup_repo.root, release, kit_name) 1349 | for pattern in master_pkglist: 1350 | if pattern.startswith("@regex@:"): 1351 | pkglist += getPackagesMatchingRegex( from_tree, re.compile(pattern[8:])) 1352 | elif pattern.startswith("@depsincat@:"): 1353 | patsplit = pattern.split(":") 1354 | catpkg = patsplit[1] 1355 | dep_pkglist = await getDependencies( from_tree, [ catpkg ] ) 1356 | if len(patsplit) == 3: 1357 | dep_pkglist, dep_pkglist_nomatch = filterInCategory(dep_pkglist, patsplit[2]) 1358 | pkglist += list(dep_pkglist) 1359 | elif pattern.startswith("@maintainer@:"): 1360 | spiff, my_cat, my_email = pattern.split(":") 1361 | pkglist += list(getPackagesInCatWithMaintainer( from_tree, my_cat, my_email)) 1362 | elif pattern.startswith("@has_eclass@:"): 1363 | patsplit = pattern.split(":") 1364 | eclass = patsplit[1] 1365 | eclass_pkglist = await getPackagesWithEclass( from_tree, eclass ) 1366 | pkglist += list(eclass_pkglist) 1367 | elif pattern.startswith("@cat_has_eclass@:"): 1368 | patsplit = pattern.split(":") 1369 | cat, eclass = patsplit[1:] 1370 | cat_pkglist = await getPackagesInCatWithEclass( from_tree, cat, eclass ) 1371 | pkglist += list(cat_pkglist) 1372 | else: 1373 | linesplit = pattern.split() 1374 | if len(linesplit) and linesplit[0].endswith("/*"): 1375 | # we want to support exclusions, starting with "-": 1376 | exclusions = [] 1377 | for exclusion in linesplit[1:]: 1378 | if exclusion.startswith("-"): 1379 | exclusions.append(exclusion[1:]) 1380 | else: 1381 | print("Invalid exclusion: %s" % pattern) 1382 | pkglist += getPackagesMatchingGlob( from_tree, linesplit[0], exclusions=exclusions ) 1383 | else: 1384 | move_pkg = pattern.split("->") 1385 | if len(move_pkg) == 2: 1386 | # we have something in the form sys-apps/foo -> sys-apps/bar -- we will add foo to the merge list... 1387 | pkglist.append(move_pkg[0].strip()) 1388 | # but create move_map so we have info that we want to to move to the new location if we find it. 1389 | move_maps[move_pkg[0].strip()] = move_pkg[1].strip() 1390 | else: 1391 | pkglist.append(pattern) 1392 | literals.append(pattern) 1393 | 1394 | to_insert = set(pkglist) 1395 | 1396 | if secondary_kit is True: 1397 | # add in any catpkgs from previous scans of this same kit that might be missing from this scan: 1398 | to_insert = cpm_logger.update_cached_kit_catpkg_set(to_insert) 1399 | else: 1400 | cpm_logger.update_cached_kit_catpkg_set(to_insert) 1401 | 1402 | # filter out anything that was not in the select_only argument list, if it was provided: 1403 | if select_only != "all": 1404 | p_set = set(select_only) 1405 | to_insert = to_insert & p_set 1406 | 1407 | # filter out any catpkgs that exist in any of the filter_repos: 1408 | new_set = set() 1409 | if filter_cats is None: 1410 | filter_cats = set() 1411 | else: 1412 | filter_cats = set(filter_cats) 1413 | for catpkg in to_insert: 1414 | 1415 | # filter unwanted categories first 1416 | cat = catpkg.split("/")[0] 1417 | if cat in filter_cats: 1418 | continue 1419 | 1420 | # filter unwanted catpkgs: 1421 | do_skip = False 1422 | for filter_repo in filter_repos: 1423 | if filter_repo.catpkg_exists(catpkg): 1424 | if catpkg not in force: 1425 | do_skip = True 1426 | break 1427 | if do_skip: 1428 | continue 1429 | else: 1430 | new_set.add(catpkg) 1431 | to_insert = new_set 1432 | 1433 | insert_kwargs = {"select": sorted(list(to_insert))} 1434 | 1435 | if pkglist: 1436 | steps += [ InsertEbuilds(from_tree, skip=skip, replace=False, literals=literals, cpm_logger=cpm_logger, move_maps=move_maps, **insert_kwargs) ] 1437 | return steps 1438 | 1439 | def get_extra_catpkgs_from_kit_fixups(fixup_repo, kit): 1440 | 1441 | """ 1442 | This function will scan the specified kit directory in kit-fixups and look for catpkgs that are specified in some 1443 | but not all non-global directories. This list of catpkgs should be added to the kit's package set. Otherwise, the 1444 | catpkg will exist in some branches (the one with the fixup) but will not exist in the branches without the fixup. 1445 | If we use this function, then we don't need to manually add these catpkgs to the package-set for the kit manually, 1446 | which makes things less error prone for us. 1447 | 1448 | For example: 1449 | 1450 | kit-fixups/foo-kit/1.0-prime/foo/bar exists 1451 | kit-fixups/foo-kit/1.1-prime/foo/bar does not exist. 1452 | 1453 | Without using this function to augment the package-set automatically, and without manually adding foo/bar to the 1454 | package-set list ourselves, foo/bar will exist in 1.0-prime but will not exist in 1.1-prime. But if we scan our 1455 | kit-fixups with this method, we will get a list back [ "foo/bar" ] and can add this to our package-set for foo-kit, 1456 | which will cause both kits to get a copy of foo/bar. 1.0-prime will get the fixup and 1.1-prime will get a copy 1457 | from its source repos. 1458 | 1459 | :param fixup_repo: 1460 | :param kit: 1461 | :return: 1462 | """ 1463 | 1464 | root = fixup_repo.root 1465 | 1466 | def get_catpkg_list(repo_root): 1467 | if not os.path.exists(repo_root) or not os.path.isdir(repo_root): 1468 | return 1469 | for cat in os.listdir(repo_root): 1470 | if cat in [ "profiles", "eclass", "licenses"]: 1471 | continue 1472 | if not os.path.isdir(repo_root + "/" + cat): 1473 | continue 1474 | for pkg in os.listdir(repo_root + "/" + cat): 1475 | yield cat+"/"+pkg 1476 | 1477 | global_set = set(get_catpkg_list(root+"/"+kit+"/"+"global")) 1478 | out = [] 1479 | 1480 | try: 1481 | non_global_kit_dirs = set(os.listdir(root+"/"+kit)) 1482 | except FileNotFoundError: 1483 | return out 1484 | 1485 | if "global" in non_global_kit_dirs: 1486 | non_global_kit_dirs.remove("global") 1487 | non_global_count = len(list(non_global_kit_dirs)) 1488 | 1489 | non_global_matches = defaultdict(int) 1490 | 1491 | for non_global_branch in non_global_kit_dirs: 1492 | for catpkg in get_catpkg_list(root+"/"+kit+"/"+non_global_branch): 1493 | non_global_matches[catpkg] += 1 1494 | 1495 | for catpkg, count in non_global_matches.items(): 1496 | if count < non_global_count and catpkg not in global_set: 1497 | out.append(catpkg) 1498 | 1499 | return out 1500 | 1501 | # CatPkgMatchLogger is an object that is used to keep a running record of catpkgs that were copied to kits via package-set rules. 1502 | # As catpkgs are called, a CatPkgMatchLogger() object is called as follows: 1503 | # 1504 | # logger.record("sys-foo/bar") # catpkg foo/bar was merged. 1505 | # logger.record(regex("sys-bar/*")) # a "sys-bar/*" was specified in the package set. 1506 | # 1507 | # Then, prior to copying a catpkg to a kit, we can check to see if maybe this catpkg was already copied to another kit. If so, we 1508 | # should not copy it to a new kit which would cause a duplicate catpkg to exist between two kits. The "should we copy this catpkg" 1509 | # question is answered by calling the match() method, as follows: 1510 | # 1511 | # logger.match("sys-foo/bar") : True -- this matches a previously copied catpkg atom, so don't copy it to the kit. 1512 | # logger.match("sys-foo/oni") : False -- we have no record of this catpkg being copied, so it's safe to copy. 1513 | # logger.match("sys-bar/bleh") : True -- this catpkg matches a wildcard regex that was used previously, so don't copy. 1514 | # 1515 | # The support for regex matches fixes a kit problem called "kit overflow". Here's an example of kit overflow. Let's say 1516 | # we have a snapshot of our python-kit, but since our snapshot, many dev-python catpkgs have been added. Without regex support 1517 | # in CatPkgMatchLogger, these new catpkgs will "overflow" to nokit. When we eventually bump our python-kit to a newer snapshot 1518 | # and these newer catpkgs start to appear in python-kit instead of our unsnapshotted nokit, this will result in dev-python 1519 | # downgrades. 1520 | # 1521 | # To work around this, when we encounter a pattern or regex like "dev-python/*", we record a regex in CatPkgMatchLogger. If the 1522 | # catpkg we are considering copying WOULD have matched a previously-used pattern, we can know that it should NOT be copied to 1523 | # nokit. If we were to just track literal catpkgs and not regexes, then the overflow to nokit would occur. 1524 | 1525 | class CatPkgMatchLogger(object): 1526 | 1527 | def __init__(self, log_xml=False): 1528 | self._copycount = 0 1529 | self._matchcount = 0 1530 | # for string matches 1531 | self._matchdict = {} 1532 | self._current_kit_set = set() 1533 | # map catpkg to kit that matched it. 1534 | self._match_map = {} 1535 | 1536 | # for fixups from a non-global directory, we want the match to only apply for a particular branch. This way 1537 | # If xorg-kit/1.17-prime/foo/bar gets copied, we don't also need to have an xorg-kit/1.19-prime/foo/bar -- 1538 | # the code will be smart and know that for the 1.19-prime branch, we still want to copy over foo/bar when we 1539 | # encounter it. 1540 | 1541 | # format: 'catpkg-match' : { 'kit' : [ 'branch1', 'branch2' ] } 1542 | # 1543 | # ^^^ This means that 'catpkg-match' was copied into branch1 and branch2 of kit 'kit'. So we want to ALLOW 1544 | # a copy into branch3 of kit, but NOT ALLOW a copy into any successive kit (since it was already copied.) 1545 | 1546 | self._fixup_matchdict = defaultdict(dict) 1547 | self._matchdict_curkit = {} 1548 | # for regex matches 1549 | self._regexdict = {} 1550 | self._regexdict_curkit = {} 1551 | 1552 | if log_xml: 1553 | self.xml_recorder = XMLRecorder() 1554 | else: 1555 | self.xml_recorder = None 1556 | 1557 | # IMPORTANT: 1558 | 1559 | # We don't want to match regexes against catpkgs in the CURRENT KIT. Otherwise we will only copy the first match 1560 | # of a regex! Here is why -- the first ebuild that matches the regex will get copied, and we will record the regex. 1561 | # Then the second and successive catpkg matches will also match the regex, so .match() will return True and we will 1562 | # skip them, thinking that they are already copied. 1563 | 1564 | # We work around this by caching the regexes and only start applying them after the caller calls .nextKit(). Then they 1565 | # become active. 1566 | 1567 | # NOTE: Since a kit pulls from multiple repos, this does raise the possibility of repo b replacing a catpkg that was 1568 | # already copied. We work around this by always using replace=False with InsertEbuilds -- so that if the catpkg is already 1569 | # on disk, then it isn't copied, even if it matches a regex. 1570 | 1571 | # NOTE that we now also cache non-regex matches too. This allows us to process two xorg-kits or python-kits in a row. 1572 | # matches will accumulate but not take effect until .nextKit() is called. 1573 | 1574 | # Another feature of the CatPkgMatchLoggger is that it records how many catpkgs actually were copied -- 1 for each catpkg 1575 | # literal, and a caller-specified number of matches for regexes. This tally is used by merge-all-kits.py to determine the 1576 | # total number of catpkgs copied to each kit. 1577 | 1578 | def writeXML(self, fn): 1579 | if self.xml_recorder: 1580 | self.xml_recorder.write(fn) 1581 | 1582 | def recordCopyToXML(self, srctree, kit, catpkg): 1583 | if self.xml_recorder: 1584 | self.xml_recorder.xml_record(srctree, kit, catpkg) 1585 | 1586 | @property 1587 | def copycount(self): 1588 | return self._copycount 1589 | 1590 | @property 1591 | def matchcount(self): 1592 | return self._matchcount 1593 | 1594 | def match(self, catpkg): 1595 | """ 1596 | This method tells us whether we should copy over a catpkg to a particular kit. 1597 | :param catpkg: the catpkg in question. 1598 | :return: Boolean, True if we have already copied and should not copy again, and False if we have not seen and 1599 | should copy.. 1600 | """ 1601 | 1602 | if catpkg in self._matchdict: 1603 | # Yes, we've seen it, just as a regular package copied before (non-fixup), so don't copy 1604 | return True 1605 | 1606 | for pat, regex in self._regexdict.items(): 1607 | if regex.match(catpkg): 1608 | # Seen and likely copied before, don't copy 1609 | return True 1610 | # We've passed all tests -- copy this sucker! 1611 | return False 1612 | 1613 | def update_cached_kit_catpkg_set(self, myset): 1614 | # this is used by the intra-kit logic that identifies catpkgs selected from prior runs of the same kit that 1615 | # don't exist in the current kit selection. We want to grab these stragglers. 1616 | 1617 | self._current_kit_set |= myset 1618 | return self._current_kit_set 1619 | 1620 | def get_other_kit(self, catpkg): 1621 | return self._match_map[catpkg] if catpkg in self._match_map else "(unknown)" 1622 | 1623 | def record(self, kit, catpkg, regex_matched=None, is_fixup=False): 1624 | """ 1625 | This method records catpkgs that we are copying over, so we can determine whether or not the catpkg should be 1626 | copied again into later kits. In general, we only want to copy a catpkg once -- but there are exceptions, like 1627 | if we have different branches of the same kit, or if we have fixups. So the logic is nuanced. 1628 | 1629 | :param catpkg: Either a catpkg string or regex match. 1630 | :param is_fixup: True if we are applying a fixup; else False. 1631 | :return: None 1632 | """ 1633 | if regex_matched is not None: 1634 | if is_fixup: 1635 | raise IndexError("Can't use regex with fixup") 1636 | self._regexdict_curkit[regex_matched.pattern] = catpkg 1637 | self._match_map[catpkg] = kit 1638 | else: 1639 | # otherwise, record in our regular matchdict 1640 | self._matchdict_curkit[catpkg] = True 1641 | self._match_map[catpkg] = kit 1642 | self._copycount += 1 1643 | 1644 | def nextKit(self): 1645 | self._regexdict.update(self._regexdict_curkit) 1646 | self._regexdict_curkit = {} 1647 | self._matchdict.update(self._matchdict_curkit) 1648 | self._matchdict_curkit = {} 1649 | self._current_kit_set = set() 1650 | 1651 | def headSHA1(tree): 1652 | retval, out = subprocess.getstatusoutput("(cd %s && git rev-parse HEAD)" % tree) 1653 | if retval == 0: 1654 | return out.strip() 1655 | return None 1656 | 1657 | 1658 | async def getcommandoutput(args, env=None): 1659 | # Slight modification of the function getstatusoutput present in: 1660 | # https://docs.python.org/3/library/asyncio-subprocess.html#example 1661 | if isinstance(args, str): 1662 | proc = await asyncio.create_subprocess_shell(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) 1663 | else: 1664 | proc = await asyncio.create_subprocess_exec(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) 1665 | try: 1666 | stdout, stderr = await proc.communicate() 1667 | except: 1668 | proc.kill() 1669 | await proc.wait() 1670 | raise 1671 | 1672 | exitcode = await proc.wait() 1673 | return exitcode, stdout, stderr 1674 | 1675 | 1676 | async def runShell(cmd_list, abort_on_failure=True, env=None): 1677 | if debug: 1678 | print("running: %r" % cmd_list) 1679 | out = await getcommandoutput(cmd_list, env=env) 1680 | if out[0] != 0: 1681 | print("Error executing %r" % cmd_list) 1682 | print() 1683 | print("output:") 1684 | print(out[1].decode("utf-8")) 1685 | if abort_on_failure: 1686 | sys.exit(1) 1687 | else: 1688 | return False 1689 | return True 1690 | 1691 | 1692 | class AutoGlobMask(MergeStep): 1693 | 1694 | """ 1695 | AutoGlobMask will automatically create a package.mask file that matches particular 1696 | ebuilds that it finds in the tree. 1697 | 1698 | catpkg: The catpkg to process. AutoGlobMask will look into the destination tree in 1699 | this catpkg directory. 1700 | 1701 | glob: the wildcard pattern of an ebuild files to match in the catpkg directory. 1702 | 1703 | maskdest: The filename of the mask file to create in profiles/packages.mask. 1704 | 1705 | All ebuilds matching glob in the catpkg dir will have mask entries created and 1706 | written to profiles/package.mask/maskdest. 1707 | 1708 | """ 1709 | 1710 | def __init__(self, catpkg, my_glob, maskdest): 1711 | self.glob = my_glob 1712 | self.catpkg = catpkg 1713 | self.maskdest = maskdest 1714 | 1715 | async def run(self,tree): 1716 | if not os.path.exists(tree.root + "/profiles/package.mask"): 1717 | os.makedirs(tree.root + "/profiles/package.mask") 1718 | f = open(os.path.join(tree.root,"profiles/package.mask", self.maskdest), "w") 1719 | #os.chdir(os.path.join(tree.root,self.catpkg)) 1720 | cat = self.catpkg.split("/")[0] 1721 | for item in glob.glob(os.path.join(tree.root,self.catpkg) + "/" + self.glob+".ebuild"): 1722 | s_split = item.split("/") 1723 | f.write("=%s/%s\n" % (cat,"/".join(s_split[-2:])[:-7])) 1724 | f.close() 1725 | 1726 | class ThirdPartyMirrors(MergeStep): 1727 | "Add funtoo's distfiles mirror, and add funtoo's mirrors as gentoo back-ups." 1728 | 1729 | async def run(self,tree): 1730 | orig = "%s/profiles/thirdpartymirrors" % tree.root 1731 | new = "%s/profiles/thirdpartymirrors.new" % tree.root 1732 | mirrors = "https://fastpull-us.funtoo.org/distfiles" 1733 | a = open(orig, "r") 1734 | b = open(new, "w") 1735 | for line in a: 1736 | ls = line.split() 1737 | if len(ls) and ls[0] == "gentoo": 1738 | b.write("gentoo\t"+ls[1]+" "+mirrors+" "+" ".join(ls[2:])+"\n") 1739 | else: 1740 | b.write(line) 1741 | b.write("funtoo %s\n" % mirrors) 1742 | a.close() 1743 | b.close() 1744 | os.unlink(orig) 1745 | os.link(new, orig) 1746 | os.unlink(new) 1747 | 1748 | class ApplyPatchSeries(MergeStep): 1749 | def __init__(self,path): 1750 | self.path = path 1751 | 1752 | async def run(self, tree): 1753 | a = open(os.path.join(self.path,"series"),"r") 1754 | for line in a: 1755 | if line[0:1] == "#": 1756 | continue 1757 | if line[0:4] == "EXEC": 1758 | ls = line.split() 1759 | runShell( "( cd %s && %s/%s )" % ( tree.root, self.path, ls[1] )) 1760 | else: 1761 | runShell( "( cd %s && git apply %s/%s )" % ( tree.root, self.path, line[:-1] )) 1762 | 1763 | class GenerateRepoMetadata(MergeStep): 1764 | def __init__(self, name, masters=None, aliases=None, priority=None): 1765 | self.name = name 1766 | self.aliases = aliases if aliases is not None else [] 1767 | self.masters = masters if masters is not None else [] 1768 | self.priority = priority 1769 | 1770 | async def run(self, tree): 1771 | meta_path = os.path.join(tree.root, "metadata") 1772 | if not os.path.exists(meta_path): 1773 | os.makedirs(meta_path) 1774 | a = open(meta_path + '/layout.conf','w') 1775 | out = '''repo-name = %s 1776 | thin-manifests = true 1777 | sign-manifests = false 1778 | profile-formats = portage-2 1779 | cache-formats = md5-dict 1780 | ''' % self.name 1781 | if self.aliases: 1782 | out += "aliases = %s\n" % " ".join(self.aliases) 1783 | if self.masters: 1784 | out += "masters = %s\n" % " ".join(self.masters) 1785 | a.write(out) 1786 | a.close() 1787 | rn_path = os.path.join(tree.root, "profiles") 1788 | if not os.path.exists(rn_path): 1789 | os.makedirs(rn_path) 1790 | a = open(rn_path + '/repo_name', 'w') 1791 | a.write(self.name + "\n") 1792 | a.close() 1793 | 1794 | class RemoveFiles(MergeStep): 1795 | def __init__(self,globs=None): 1796 | if globs is None: 1797 | globs = [] 1798 | self.globs = globs 1799 | 1800 | async def run(self, tree): 1801 | for glob in self.globs: 1802 | cmd = "rm -rf %s/%s" % ( tree.root, glob ) 1803 | runShell(cmd) 1804 | 1805 | class SyncDir(MergeStep): 1806 | def __init__(self,srcroot,srcdir=None,destdir=None,exclude=None,delete=False): 1807 | self.srcroot = srcroot 1808 | self.srcdir = srcdir 1809 | self.destdir = destdir 1810 | self.exclude = exclude if exclude is not None else [] 1811 | self.delete = delete 1812 | 1813 | async def run(self, tree): 1814 | if self.srcdir: 1815 | src = os.path.join(self.srcroot,self.srcdir)+"/" 1816 | else: 1817 | src = os.path.normpath(self.srcroot)+"/" 1818 | if self.destdir: 1819 | dest = os.path.join(tree.root,self.destdir)+"/" 1820 | else: 1821 | if self.srcdir: 1822 | dest = os.path.join(tree.root,self.srcdir)+"/" 1823 | else: 1824 | dest = os.path.normpath(tree.root)+"/" 1825 | if not os.path.exists(dest): 1826 | os.makedirs(dest) 1827 | cmd = "rsync -a --exclude CVS --exclude .svn --filter=\"hide /.git\" --filter=\"protect /.git\" " 1828 | for e in self.exclude: 1829 | cmd += "--exclude %s " % e 1830 | if self.delete: 1831 | cmd += "--delete --delete-excluded " 1832 | cmd += "%s %s" % ( src, dest ) 1833 | await runShell(cmd) 1834 | 1835 | class CopyAndRename(MergeStep): 1836 | def __init__(self, src, dest, ren_fun): 1837 | self.src = src 1838 | self.dest = dest 1839 | #renaming function ... accepts source file path, and returns destination filename 1840 | self.ren_fun = ren_fun 1841 | 1842 | async def run(self, tree): 1843 | srcpath = os.path.join(tree.root,self.src) 1844 | for f in os.listdir(srcpath): 1845 | destfile = os.path.join(tree.root,self.dest) 1846 | destfile = os.path.join(destfile,self.ren_fun(f)) 1847 | await runShell("( cp -a %s/%s %s )" % ( srcpath, f, destfile )) 1848 | 1849 | class SyncFiles(MergeStep): 1850 | def __init__(self, srcroot, files): 1851 | self.srcroot = srcroot 1852 | self.files = files 1853 | if not isinstance(files, dict): 1854 | raise TypeError("'files' argument should be a dict of source:destination items") 1855 | 1856 | async def run(self, tree): 1857 | for src, dest in self.files.items(): 1858 | if dest is not None: 1859 | dest = os.path.join(tree.root, dest) 1860 | else: 1861 | dest = os.path.join(tree.root, src) 1862 | src = os.path.join(self.srcroot, src) 1863 | if os.path.exists(dest): 1864 | print("%s exists, attempting to unlink..." % dest) 1865 | try: 1866 | os.unlink(dest) 1867 | except (IOError, PermissionError) as e: 1868 | print("Unlinking failed: %s" % str(e)) 1869 | pass 1870 | dest_dir = os.path.dirname(dest) 1871 | if os.path.exists(dest_dir) and os.path.isfile(dest_dir): 1872 | os.unlink(dest_dir) 1873 | if not os.path.exists(dest_dir): 1874 | os.makedirs(dest_dir) 1875 | print("copying %s to final location %s" % (src, dest)) 1876 | shutil.copyfile(src, dest) 1877 | 1878 | class ELTSymlinkWorkaround(MergeStep): 1879 | 1880 | async def run(self, tree): 1881 | dest = os.path.join(tree.root + "/eclass/ELT-patches") 1882 | if not os.path.lexists(dest): 1883 | os.makedirs(dest) 1884 | 1885 | class MergeUpdates(MergeStep): 1886 | def __init__(self, srcroot): 1887 | self.srcroot = srcroot 1888 | 1889 | async def run(self, tree): 1890 | for src in sorted(glob.glob(os.path.join(self.srcroot, "profiles/updates/?Q-????")), key=lambda x: (x[-4:], x[-7])): 1891 | dest = os.path.join(tree.root, "profiles/updates", src[-7:]) 1892 | if os.path.exists(dest): 1893 | src_file = open(src) 1894 | dest_file = open(dest) 1895 | src_lines = src_file.readlines() 1896 | dest_lines = dest_file.readlines() 1897 | src_file.close() 1898 | dest_file.close() 1899 | dest_lines.extend(src_lines) 1900 | dest_file = open(dest, "w") 1901 | dest_file.writelines(dest_lines) 1902 | dest_file.close() 1903 | else: 1904 | shutil.copyfile(src, dest) 1905 | 1906 | class CleanTree(MergeStep): 1907 | # remove all files from tree, except dotfiles/dirs. 1908 | 1909 | def __init__(self,exclude=None): 1910 | if exclude is None: 1911 | exclude = [] 1912 | self.exclude = exclude 1913 | 1914 | async def run(self,tree): 1915 | for fn in os.listdir(tree.root): 1916 | if fn[:1] == ".": 1917 | continue 1918 | if fn in self.exclude: 1919 | continue 1920 | await runShell("rm -rf %s/%s" % (tree.root, fn)) 1921 | 1922 | class SyncFromTree(SyncDir): 1923 | # sync a full portage tree, deleting any excess files in the target dir: 1924 | def __init__(self,srctree,exclude=None): 1925 | if exclude is None: 1926 | exclude=[] 1927 | self.srctree = srctree 1928 | SyncDir.__init__(self,srctree.root,srcdir=None,destdir=None,exclude=exclude,delete=True) 1929 | 1930 | async def run(self, desttree): 1931 | await SyncDir.run(self,desttree) 1932 | desttree.logTree(self.srctree) 1933 | 1934 | 1935 | class XMLRecorder(object): 1936 | 1937 | def __init__(self): 1938 | self.xml_out = etree.Element("packages") 1939 | 1940 | def write(self, fn): 1941 | if os.path.exists(os.path.dirname(fn)): 1942 | a = open(fn, "wb") 1943 | etree.ElementTree(self.xml_out).write(a, encoding='utf-8', xml_declaration=True, pretty_print=True) 1944 | a.close() 1945 | 1946 | def xml_record(self, repo, kit, catpkg): 1947 | cat, pkg = catpkg.split("/") 1948 | exp = "category[@name='%s']" % cat 1949 | catxml = self.xml_out.find(exp) 1950 | if catxml is None: 1951 | catxml = etree.Element("category", name=cat) 1952 | self.xml_out.append(catxml) 1953 | pkgxml = self.xml_out.find("category[@name='%s']/package/[@name='%s']" % (cat, pkg)) 1954 | 1955 | # remove existing 1956 | if pkgxml is not None: 1957 | pkgxml.getparent().remove(pkgxml) 1958 | pkgxml = etree.Element("package", name=pkg, repository=repo.name, kit=kit.name) 1959 | doMeta = True 1960 | try: 1961 | tpkgmeta = open("%s/%s/metadata.xml" % (repo.root, catpkg), 'rb') 1962 | try: 1963 | metatree = etree.parse(tpkgmeta) 1964 | except UnicodeDecodeError: 1965 | doMeta = False 1966 | tpkgmeta.close() 1967 | if doMeta: 1968 | use_vars = [] 1969 | usexml = etree.Element("use") 1970 | for el in metatree.iterfind('.//flag'): 1971 | name = el.get("name") 1972 | if name is not None: 1973 | flag = etree.Element("flag") 1974 | flag.attrib["name"] = name 1975 | flag.text = etree.tostring(el, encoding='unicode', method="text").strip() 1976 | usexml.append(flag) 1977 | pkgxml.attrib["use"] = ",".join(use_vars) 1978 | pkgxml.append(usexml) 1979 | except IOError: 1980 | pass 1981 | catxml.append(pkgxml) 1982 | 1983 | 1984 | regextype = type(re.compile('hello, world')) 1985 | 1986 | class InsertFilesFromSubdir(MergeStep): 1987 | 1988 | def __init__(self,srctree,subdir,suffixfilter=None,select="all",skip=None, src_offset=None): 1989 | self.subdir = subdir 1990 | self.suffixfilter = suffixfilter 1991 | self.select = select 1992 | self.srctree = srctree 1993 | self.skip = skip 1994 | self.src_offset = src_offset 1995 | 1996 | async def run(self,desttree): 1997 | desttree.logTree(self.srctree) 1998 | src = self.srctree.root 1999 | if self.src_offset: 2000 | src = os.path.join(src, self.src_offset) 2001 | if self.subdir: 2002 | src = os.path.join(src, self.subdir) 2003 | if not os.path.exists(src): 2004 | return 2005 | dst = desttree.root 2006 | if self.subdir: 2007 | dst = os.path.join(dst, self.subdir) 2008 | if not os.path.exists(dst): 2009 | os.makedirs(dst) 2010 | for e in os.listdir(src): 2011 | if self.suffixfilter and not e.endswith(self.suffixfilter): 2012 | continue 2013 | if isinstance(self.select, list): 2014 | if e not in self.select: 2015 | continue 2016 | elif isinstance(self.select, regextype): 2017 | if not self.select.match(e): 2018 | continue 2019 | if isinstance(self.skip, list): 2020 | if e in self.skip: 2021 | continue 2022 | elif isinstance(self.skip, regextype): 2023 | if self.skip.match(e): 2024 | continue 2025 | real_dst = os.path.basename(os.path.join(dst, e)) 2026 | await runShell("cp -a %s/%s %s" % ( src, e, dst)) 2027 | 2028 | class InsertEclasses(InsertFilesFromSubdir): 2029 | 2030 | def __init__(self,srctree,select="all",skip=None): 2031 | InsertFilesFromSubdir.__init__(self,srctree,"eclass",".eclass",select=select,skip=skip) 2032 | 2033 | class InsertLicenses(InsertFilesFromSubdir): 2034 | 2035 | def __init__(self,srctree,select="all",skip=None): 2036 | InsertFilesFromSubdir.__init__(self,srctree,"licenses",select=select,skip=skip) 2037 | 2038 | class CreateCategories(MergeStep): 2039 | 2040 | def __init__(self,srctree): 2041 | self.srctree = srctree 2042 | 2043 | async def run(self,desttree): 2044 | catset = set() 2045 | with open(self.srctree.root + "/profiles/categories", "r") as f: 2046 | cats = f.read().split() 2047 | for cat in cats: 2048 | if os.path.isdir(desttree.root + "/" + cat): 2049 | catset.add(cat) 2050 | if not os.path.exists(desttree.root + "/profiles"): 2051 | os.makedirs(desttree.root + "/profiles") 2052 | with open(desttree.root + "/profiles/categories", "w") as g: 2053 | for cat in sorted(list(catset)): 2054 | g.write(cat+"\n") 2055 | 2056 | class ZapMatchingEbuilds(MergeStep): 2057 | def __init__(self,srctree,select="all",branch=None): 2058 | self.select = select 2059 | self.srctree = srctree 2060 | self.branch = branch 2061 | 2062 | 2063 | async def run(self, desttree): 2064 | if self.branch is not None: 2065 | # Allow dynamic switching to different branches/commits to grab things we want: 2066 | await self.srctree.gitCheckout(self.branch) 2067 | # Figure out what categories to process: 2068 | dest_cat_path = os.path.join(desttree.root, "profiles/categories") 2069 | if os.path.exists(dest_cat_path): 2070 | with open(dest_cat_path, "r") as f: 2071 | dest_cat_set = set(f.read().splitlines()) 2072 | else: 2073 | dest_cat_set = set() 2074 | 2075 | # Our main loop: 2076 | print( "# Zapping builds from %s" % desttree.root ) 2077 | for cat in os.listdir(desttree.root): 2078 | if cat not in dest_cat_set: 2079 | continue 2080 | src_catdir = os.path.join(self.srctree.root,cat) 2081 | if not os.path.isdir(src_catdir): 2082 | continue 2083 | for src_pkg in os.listdir(src_catdir): 2084 | dest_pkgdir = os.path.join(desttree.root,cat,src_pkg) 2085 | if not os.path.exists(dest_pkgdir): 2086 | # don't need to zap as it doesn't exist 2087 | continue 2088 | await runShell("rm -rf %s" % dest_pkgdir) 2089 | 2090 | 2091 | class RecordAllCatPkgs(MergeStep): 2092 | 2093 | """ 2094 | This is used for non-auto-generated kits where we should record the catpkgs as belonging to a particular kit 2095 | but perform no other action. A kit generation NO-OP, comparted to InsertEbuilds 2096 | """ 2097 | 2098 | def __init__(self, srctree: GitTree, cpm_logger: CatPkgMatchLogger=None): 2099 | self.cpm_logger = cpm_logger 2100 | self.srctree = srctree 2101 | 2102 | async def run(self, desttree=None): 2103 | for catpkg in self.srctree.getAllCatPkgs(): 2104 | self.cpm_logger.record(self.srctree.name, catpkg, is_fixup=False) 2105 | 2106 | 2107 | class InsertEbuilds(MergeStep): 2108 | 2109 | """ 2110 | Insert ebuilds in source tre into destination tree. 2111 | 2112 | select: Ebuilds to copy over. 2113 | By default, all ebuilds will be selected. This can be modified by setting select to a 2114 | list of ebuilds to merge (specify by catpkg, as in "x11-apps/foo"). It is also possible 2115 | to specify "x11-apps/*" to refer to all source ebuilds in a particular category. 2116 | 2117 | skip: Ebuilds to skip. 2118 | By default, no ebuilds will be skipped. If you want to skip copying certain ebuilds, 2119 | you can specify a list of ebuilds to skip. Skipping will remove additional ebuilds from 2120 | the set of selected ebuilds. Specify ebuilds to skip using catpkg syntax, ie. 2121 | "x11-apps/foo". It is also possible to specify "x11-apps/*" to skip all ebuilds in 2122 | a particular category. 2123 | 2124 | replace: Ebuilds to replace. 2125 | By default, if an catpkg dir already exists in the destination tree, it will not be overwritten. 2126 | However, it is possible to change this behavior by setting replace to True, which means that 2127 | all catpkgs should be overwritten. It is also possible to set replace to a list containing 2128 | catpkgs that should be overwritten. Wildcards such as "x11-libs/*" will be respected as well. 2129 | 2130 | categories: Categories to process. 2131 | categories to process for inserting ebuilds. Defaults to all categories in tree, using 2132 | profiles/categories and all dirs with "-" in them and "virtuals" as sources. 2133 | 2134 | 2135 | """ 2136 | def __init__(self, srctree: GitTree, select="all", select_only="all", skip=None, replace=False, categories=None, 2137 | ebuildloc=None, branch=None, cpm_logger: CatPkgMatchLogger=None, literals: list=None, move_maps: dict=None, is_fixup=False): 2138 | self.select = select 2139 | self.skip = skip 2140 | self.srctree = srctree 2141 | self.replace = replace 2142 | self.categories = categories 2143 | self.cpm_logger = cpm_logger 2144 | self.is_fixup = is_fixup 2145 | # literals is a list of catpkgs specified directly in the package set, in sys-foo/bar format. We want to 2146 | # print a warning if one of these manually specified in the package list is not copied because it was already 2147 | # included in another kit. This can indicate an issue. 2148 | if literals is None: 2149 | self.literals = [] 2150 | else: 2151 | self.literals = literals 2152 | if move_maps is None: 2153 | self.move_maps = {} 2154 | else: 2155 | self.move_maps = move_maps 2156 | if select_only is None: 2157 | self.select_only = [] 2158 | else: 2159 | self.select_only = select_only 2160 | self.branch = branch 2161 | 2162 | 2163 | self.ebuildloc = ebuildloc 2164 | 2165 | def __repr__(self): 2166 | return "" % self.srctree.root 2167 | 2168 | async def run(self,desttree): 2169 | if self.branch is not None: 2170 | # Allow dynamic switching to different branches/commits to grab things we want: 2171 | await self.srctree.gitCheckout(self.branch) 2172 | # Just for clarification, I'm breaking these out to separate variables: 2173 | branch = desttree.branch 2174 | kit = desttree.name 2175 | 2176 | if self.ebuildloc: 2177 | srctree_root = self.srctree.root + "/" + self.ebuildloc 2178 | else: 2179 | srctree_root = self.srctree.root 2180 | desttree.logTree(self.srctree) 2181 | # Figure out what categories to process: 2182 | src_cat_path = os.path.join(srctree_root, "profiles/categories") 2183 | dest_cat_path = os.path.join(desttree.root, "profiles/categories") 2184 | if self.categories is not None: 2185 | # categories specified in __init__: 2186 | src_cat_set = set(self.categories) 2187 | else: 2188 | src_cat_set = set() 2189 | if os.path.exists(src_cat_path): 2190 | # categories defined in profile: 2191 | with open(src_cat_path, "r") as f: 2192 | src_cat_set.update(f.read().splitlines()) 2193 | # auto-detect additional categories: 2194 | cats = os.listdir(srctree_root) 2195 | for cat in cats: 2196 | # All categories have a "-" in them and are directories: 2197 | if os.path.isdir(os.path.join(srctree_root,cat)): 2198 | if "-" in cat or cat == "virtual": 2199 | src_cat_set.add(cat) 2200 | if os.path.exists(dest_cat_path): 2201 | with open(dest_cat_path, "r") as f: 2202 | dest_cat_set = set(f.read().splitlines()) 2203 | else: 2204 | dest_cat_set = set() 2205 | # Our main loop: 2206 | print( "# Merging in ebuilds from %s" % srctree_root ) 2207 | for cat in src_cat_set: 2208 | catdir = os.path.join(srctree_root, cat) 2209 | if not os.path.isdir(catdir): 2210 | # not a valid category in source overlay, so skip it 2211 | continue 2212 | #runShell("install -d %s" % catdir) 2213 | for pkg in os.listdir(catdir): 2214 | catpkg = "%s/%s" % (cat,pkg) 2215 | pkgdir = os.path.join(catdir, pkg) 2216 | if self.cpm_logger and self.cpm_logger.match(catpkg): 2217 | if catpkg in self.literals: 2218 | print("!!! WARNING: catpkg '%s' specified in package set was already included in kit %s. This should be fixed." % ( catpkg, self.cpm_logger.get_other_kit(catpkg))) 2219 | #already copied 2220 | continue 2221 | if self.select_only != "all" and catpkg not in self.select_only: 2222 | # we don't want this catpkg 2223 | continue 2224 | if not os.path.isdir(pkgdir): 2225 | # not a valid package dir in source overlay, so skip it 2226 | continue 2227 | if isinstance(self.select, list): 2228 | if catpkg not in self.select: 2229 | # we have a list of pkgs to merge, and this isn't on the list, so skip: 2230 | continue 2231 | elif isinstance(self.select, regextype): 2232 | if not self.select.match(catpkg): 2233 | # no regex match: 2234 | continue 2235 | if isinstance(self.skip, list): 2236 | if catpkg in self.skip: 2237 | # we have a list of pkgs to skip, and this catpkg is on the list, so skip: 2238 | continue 2239 | elif isinstance(self.skip, regextype): 2240 | if self.select.match(catpkg): 2241 | # regex skip match, continue 2242 | continue 2243 | dest_cat_set.add(cat) 2244 | tpkgdir = None 2245 | tcatpkg = None 2246 | if catpkg in self.move_maps: 2247 | if os.path.exists(pkgdir): 2248 | # old package exists, so we'll want to rename. 2249 | tcatpkg = self.move_maps[catpkg] 2250 | tpkgdir = os.path.join(desttree.root, tcatpkg) 2251 | else: 2252 | tcatpkg = self.move_maps[catpkg] 2253 | # old package doesn't exist, so we'll want to use the "new" pkgname as the source, hope it's there... 2254 | pkgdir = os.path.join(srctree_root, tcatpkg) 2255 | # and use new package name as destination... 2256 | tpkgdir = os.path.join(desttree.root, tcatpkg) 2257 | else: 2258 | tpkgdir = os.path.join(desttree.root, catpkg) 2259 | tcatdir = os.path.dirname(tpkgdir) 2260 | copied = False 2261 | if self.replace is True or (isinstance(self.replace, list) and (catpkg in self.replace)): 2262 | if not os.path.exists(tcatdir): 2263 | os.makedirs(tcatdir) 2264 | await runShell("rm -rf %s; cp -a %s %s" % (tpkgdir, pkgdir, tpkgdir )) 2265 | copied = True 2266 | else: 2267 | if not os.path.exists(tpkgdir): 2268 | copied = True 2269 | if not os.path.exists(tcatdir): 2270 | os.makedirs(tcatdir) 2271 | if not os.path.exists(tpkgdir): 2272 | await runShell("cp -a %s %s" % (pkgdir, tpkgdir)) 2273 | if copied: 2274 | # log XML here. 2275 | if self.cpm_logger: 2276 | self.cpm_logger.recordCopyToXML(self.srctree, desttree, catpkg) 2277 | if isinstance(self.select, regextype): 2278 | # If a regex was used to match the copied catpkg, record the regex. 2279 | self.cpm_logger.record(desttree.name, catpkg, regex_matched=self.select, is_fixup=self.is_fixup) 2280 | else: 2281 | # otherwise, record the literal catpkg matched. 2282 | self.cpm_logger.record(desttree.name, catpkg, is_fixup=self.is_fixup) 2283 | if tcatpkg is not None: 2284 | # This means we did a package move. Record the "new name" of the package, too. So both 2285 | # old name and new name get marked as being part of this kit. 2286 | self.cpm_logger.record(desttree.name, tcatpkg, is_fixup=self.is_fixup) 2287 | if os.path.isdir(os.path.dirname(dest_cat_path)): 2288 | with open(dest_cat_path, "w") as f: 2289 | f.write("\n".join(sorted(dest_cat_set))) 2290 | 2291 | class ProfileDepFix(MergeStep): 2292 | 2293 | "ProfileDepFix undeprecates profiles marked as deprecated." 2294 | 2295 | async def run(self,tree): 2296 | fpath = os.path.join(tree.root,"profiles/profiles.desc") 2297 | if os.path.exists(fpath): 2298 | a = open(fpath,"r") 2299 | for line in a: 2300 | if line[0:1] == "#": 2301 | continue 2302 | sp = line.split() 2303 | if len(sp) >= 2: 2304 | prof_path = sp[1] 2305 | await runShell("rm -f %s/profiles/%s/deprecated" % ( tree.root, prof_path )) 2306 | 2307 | class RunSed(MergeStep): 2308 | 2309 | """ 2310 | Run sed commands on specified files. 2311 | 2312 | files: List of files. 2313 | 2314 | commands: List of commands. 2315 | """ 2316 | 2317 | def __init__(self, files, commands): 2318 | self.files = files 2319 | self.commands = commands 2320 | 2321 | async def run(self, tree): 2322 | commands = list(itertools.chain.from_iterable(("-e", command) for command in self.commands)) 2323 | files = [os.path.join(tree.root, file) for file in self.files] 2324 | await runShell(["sed"] + commands + ["-i"] + files) 2325 | 2326 | class GenCache(MergeStep): 2327 | 2328 | def __init__(self,cache_dir=None, release=None): 2329 | self.cache_dir = cache_dir 2330 | self.release = release 2331 | "GenCache runs egencache --update to update metadata." 2332 | 2333 | async def run(self,tree): 2334 | 2335 | if tree.name != "core-kit": 2336 | repos_conf = "[DEFAULT]\nmain-repo = core-kit\n\n[core-kit]\nlocation = %s/core-kit\n\n[%s]\nlocation = %s\n" % (tree.config.dest_trees, tree.reponame if tree.reponame else tree.name, tree.root) 2337 | 2338 | # Perform QA check to ensure all eclasses are in place prior to performing egencache, as not having this can 2339 | # cause egencache to hang. 2340 | 2341 | result = await getAllEclasses(tree, self.release) 2342 | if None in result and len(result[None]): 2343 | missing_eclasses = [] 2344 | for ec in result[None]: 2345 | # if a missing eclass is not in core-kit, then we'll be concerned: 2346 | if not os.path.exists("%s/core-kit/eclass/%s" % (tree.config.dest_trees, ec)): 2347 | missing_eclasses.append(ec) 2348 | if len(missing_eclasses): 2349 | print("!!! Error: QA check on kit %s failed -- missing eclasses:" % tree.name) 2350 | print("!!! : " + " ".join(missing_eclasses)) 2351 | print( 2352 | "!!! : Please be sure to use kit-fixups or the overlay's eclass list to copy these necessary eclasses into place.") 2353 | sys.exit(1) 2354 | else: 2355 | repos_conf = "[DEFAULT]\nmain-repo = core-kit\n\n[core-kit]\nlocation = %s/core-kit\n" % tree.config.dest_trees 2356 | cmd = ["egencache", "--update", "--tolerant", "--repo", tree.reponame if tree.reponame else tree.name, 2357 | "--repositories-configuration" , repos_conf , 2358 | "--config-root=/tmp", 2359 | "--jobs", repr(multiprocessing.cpu_count()+1)] 2360 | if self.cache_dir: 2361 | cmd += [ "--cache-dir", self.cache_dir ] 2362 | if not os.path.exists(self.cache_dir): 2363 | os.makedirs(self.cache_dir) 2364 | os.chown(self.cache_dir, pwd.getpwnam('portage').pw_uid, grp.getgrnam('portage').gr_gid) 2365 | attempts = 10 2366 | attempt = 1 2367 | while attempt <= attempts: 2368 | if attempt != 1: 2369 | print("Restarting egencache -- sometimes it dies... this is expected.") 2370 | success = await runShell(cmd, abort_on_failure=False) 2371 | if success: 2372 | break 2373 | attempt += 1 2374 | if attempt > attempts: 2375 | print("Couldn't get egencache to finish. Exiting.") 2376 | sys.exit(1) 2377 | 2378 | class GenUseLocalDesc(MergeStep): 2379 | 2380 | "GenUseLocalDesc runs egencache to update use.local.desc" 2381 | 2382 | async def run(self,tree): 2383 | if tree.name != "core-kit": 2384 | repos_conf = "[DEFAULT]\nmain-repo = core-kit\n\n[core-kit]\nlocation = %s/core-kit\n\n[%s]\nlocation = %s\n" % (tree.config.dest_trees, tree.reponame if tree.reponame else tree.name, tree.root) 2385 | else: 2386 | repos_conf = "[DEFAULT]\nmain-repo = core-kit\n\n[core-kit]\nlocation = %s/core-kit\n" % tree.config.dest_trees 2387 | await runShell(["egencache", "--update-use-local-desc", "--tolerant", "--config-root=/tmp", "--repo", tree.reponame if tree.reponame else tree.name, "--repositories-configuration" , repos_conf ], abort_on_failure=False) 2388 | 2389 | class GitCheckout(MergeStep): 2390 | 2391 | def __init__(self,branch): 2392 | self.branch = branch 2393 | 2394 | async def run(self,tree): 2395 | await runShell("(cd %s && git checkout %s || git checkout -b %s --track origin/%s || git checkout -b %s)" % ( tree.root, self.branch, self.branch, self.branch, self.branch )) 2396 | 2397 | class CreateBranch(MergeStep): 2398 | 2399 | def __init__(self,branch): 2400 | self.branch = branch 2401 | 2402 | async def run(self,tree): 2403 | await runShell("( cd %s && git checkout -b %s --track origin/%s )" % ( tree.root, self.branch, self.branch )) 2404 | 2405 | 2406 | class Minify(MergeStep): 2407 | 2408 | "Minify removes ChangeLogs and shrinks Manifests." 2409 | 2410 | async def run(self,tree): 2411 | await runShell("( cd %s && find -iname ChangeLog | xargs rm -f )" % tree.root, abort_on_failure=False ) 2412 | await runShell("( cd %s && find -iname Manifest | xargs -i@ sed -ni '/^DIST/p' @ )" % tree.root ) 2413 | 2414 | 2415 | # We want to reset 'kitted_catpkgs' at certain points. The 'kit_order' variable below is used to control this, and we 2416 | # normally don't need to touch it. 'kitted_order' above tells the code to generate 'prime', then 'shared' (without 2417 | # resetting kitted_catpkgs to empty), then the None tells the code to reset kitted_catpkgs, so when 'current' kits are 2418 | # generated, they can include from all possible catpkgs. This is done because prime+shared is designed to be our 2419 | # primary enterprise-set of Funtoo kits. current+shared is also supported as a more bleeding edge option. 2420 | 2421 | # KIT PREP STEPS. To rebuild kits from scratch, we need to perform some initial actions to initialize an empty git 2422 | # repository, as well as some final actions. In the kit_steps dictionary below, indexed by kit, 'pre' dict lists the 2423 | # initial actions, and 'post' lists the final actions for the kit. There is also a special top-level key called 2424 | # 'regular-kits'. These actions are appended to any kit that is not core-kit or nokit. In addition to 'pre' and 'post' 2425 | # steps, there is also a 'copy' step that is not currently used (but is supported by getKitPrepSteps()). 2426 | 2427 | def getKitPrepSteps(release, repos, kit_dict, gentoo_staging, fixup_repo): 2428 | kit_steps = { 2429 | 'core-kit': {'pre': [ 2430 | GenerateRepoMetadata("core-kit", aliases=["gentoo"], priority=1000), 2431 | # core-kit has special logic for eclasses -- we want all of them, so that third-party overlays can reference the full set. 2432 | # All other kits use alternate logic (not in kit_steps) to only grab the eclasses they actually use. 2433 | SyncDir(gentoo_staging.root, "eclass"), 2434 | ], 2435 | 'post': [ 2436 | # news items are not included here anymore 2437 | SyncDir(fixup_repo.root, "metadata", exclude=["cache", "md5-cache", "layout.conf"]), 2438 | # add funtoo stuff to thirdpartymirrors 2439 | ThirdPartyMirrors(), 2440 | RunSed(["profiles/base/make.defaults"], ["/^PYTHON_TARGETS=/d", "/^PYTHON_SINGLE_TARGET=/d"]), 2441 | ] 2442 | }, 2443 | # masters of core-kit for regular kits and nokit ensure that masking settings set in core-kit for catpkgs in other kits are applied 2444 | # to the other kits. Without this, mask settings in core-kit apply to core-kit only. 2445 | 'regular-kits': {'pre': [ 2446 | GenerateRepoMetadata(kit_dict['name'], masters=["core-kit"], priority=500), 2447 | ] 2448 | }, 2449 | 'all-kits': {'pre': [ 2450 | SyncFiles(fixup_repo.root, { 2451 | "COPYRIGHT.txt": "COPYRIGHT.txt", 2452 | "LICENSE.txt": "LICENSE.txt", 2453 | }), 2454 | ] 2455 | }, 2456 | 'nokit': {'pre': [ 2457 | GenerateRepoMetadata("nokit", masters=["core-kit"], priority=-2000), 2458 | ] 2459 | } 2460 | } 2461 | 2462 | # pure64 support for legacy releases: 2463 | if release in ["1.2-release", "funtoo-current"]: 2464 | kit_steps['core-kit']['post'] += [ 2465 | CopyAndRename("profiles/funtoo/1.0/linux-gnu/arch/x86-64bit/subarch", 2466 | "profiles/funtoo/1.0/linux-gnu/arch/pure64/subarch", 2467 | lambda x: os.path.basename(x) + "-pure64") 2468 | ] 2469 | 2470 | out_pre_steps = [] 2471 | out_copy_steps = [] 2472 | out_post_steps = [] 2473 | 2474 | kd = kit_dict['name'] 2475 | if kd in kit_steps: 2476 | if 'pre' in kit_steps[kd]: 2477 | out_pre_steps += kit_steps[kd]['pre'] 2478 | if 'post' in kit_steps[kd]: 2479 | out_post_steps += kit_steps[kd]['post'] 2480 | if 'copy' in kit_steps[kd]: 2481 | out_copy_steps += kit_steps[kd]['copy'] 2482 | 2483 | # a 'regular kit' is not core-kit or nokit -- if we have pre or post steps for them, append these steps: 2484 | if kit_dict['name'] not in ['core-kit', 'nokit'] and 'regular-kits' in kit_steps: 2485 | if 'pre' in kit_steps['regular-kits']: 2486 | out_pre_steps += kit_steps['regular-kits']['pre'] 2487 | if 'post' in kit_steps['regular-kits']: 2488 | out_post_steps += kit_steps['regular-kits']['post'] 2489 | 2490 | if 'all-kits' in kit_steps: 2491 | if 'pre' in kit_steps['all-kits']: 2492 | out_pre_steps += kit_steps['all-kits']['pre'] 2493 | if 'post' in kit_steps['all-kits']: 2494 | out_post_steps += kit_steps['all-kits']['post'] 2495 | 2496 | return out_pre_steps, out_copy_steps, out_post_steps 2497 | 2498 | 2499 | # GET KIT SOURCE INSTANCE. This function returns a list of GitTree objects for each of repositories specified for 2500 | # a particular kit's kit_source, in the order that they should be processed (in the order they are defined in 2501 | # kit_source_defs, in other words.) 2502 | 2503 | prev_source_defs = None 2504 | prev_repos = None 2505 | 2506 | async def getKitSourceInstances(foundation, config, kit_dict): 2507 | 2508 | # We use a 'cache last repos' scheme to avoid unnecessarily reinitializing repos when they have already been 2509 | # initialized properly for us. This should help speed up the merge scripts. 2510 | 2511 | global prev_source_defs 2512 | global prev_repos 2513 | 2514 | source_name = kit_dict['source'] 2515 | 2516 | repos = [] 2517 | 2518 | source_defs = foundation.kit_source_defs[source_name] 2519 | 2520 | if source_defs == prev_source_defs: 2521 | return prev_repos 2522 | 2523 | for source_def in source_defs: 2524 | 2525 | repo_name = source_def['repo'] 2526 | repo_branch = source_def['branch'] if "branch" in source_def else "master" 2527 | repo_sha1 = source_def["src_sha1"] if "src_sha1" in source_def else None 2528 | repo_obj = GitTree 2529 | repo_url = foundation.overlays[repo_name]["url"] 2530 | if "dirname" in foundation.overlays[repo_name]: 2531 | path = foundation.overlays[repo_name]["dirname"] 2532 | else: 2533 | path = repo_name 2534 | repo = repo_obj(repo_name, url=repo_url, config=config, root="%s/%s" % (config.source_trees, path), 2535 | branch=repo_branch, commit_sha1=repo_sha1, origin_check=False, 2536 | reclone=foundation.overlays[repo_name]["reclone"] if "reclone" in foundation.overlays[ 2537 | repo_name] else False) 2538 | await repo.initialize() 2539 | repos.append( 2540 | {"name": repo_name, "repo": repo, "is_fixup": source_def['is_fixup'] if 'is_fixup' in source_def else False, 2541 | "overlay_def": foundation.overlays[repo_name]}) 2542 | 2543 | prev_source_defs = source_defs 2544 | prev_repos = repos 2545 | return repos 2546 | 2547 | async def copyFromSourceRepositoriesSteps(repo_dict=None, release=None, source_defs=None, kit_dict=None, secondary_kit=False, fixup_repo=None, cpm_logger=None, move_maps=None): 2548 | 2549 | # Phase 2: copy core set of ebuilds 2550 | 2551 | # Here we generate our main set of ebuild copy steps, based on the contents of the package-set file for the kit. The logic works as 2552 | # follows. We apply our package-set logic to each repo in succession. If copy ebuilds were actually copied (we detect this by 2553 | # looking for changed catpkg count in our dest_kit,) then we also run additional steps: "copyfiles" and "eclasses". "copyfiles" 2554 | # specifies files like masks to copy over to the dest_kit, and "eclasses" specifies eclasses from the overlay that we need to 2555 | # copy over to the dest_kit. We don't need to specify eclasses that we need from gentoo_staging -- these are automatically detected 2556 | # and copied, but if there are any special eclasses from the overlay then we want to copy these over initially. 2557 | 2558 | steps = [] 2559 | select_clause = "all" 2560 | overlay_def = repo_dict["overlay_def"] 2561 | 2562 | if "select" in overlay_def: 2563 | select_clause = overlay_def["select"] 2564 | 2565 | # If the repo has a "filter" : [ "foo", "bar", "oni" ], then construct a list of repos with those names and put 2566 | # them in filter_repos. We will pass this list of repo objects to InsertEbuilds inside generateKitSteps, and if 2567 | # a catpkg exists in any of these repos, then it will NOT be copied if it is scheduled to be copied for this 2568 | # repo. This is a way we can lock down overlays to not insert any catpkgs that are already defined in gentoo -- 2569 | # just add: filter : [ "gentoo-staging" ] and if the catpkg exists in gentoo-staging, it won't get copied. This 2570 | # way we can more safely choose to include all ebuilds from 'potpurri' overlays like faustoo without exposing 2571 | # ourself to too much risk from messing stuff up. 2572 | 2573 | filter_repos = [] 2574 | if "filter" in overlay_def: 2575 | for filter_repo_name in overlay_def["filter"]: 2576 | for x in source_defs: 2577 | if x["name"] == filter_repo_name: 2578 | filter_repos.append(x["repo"]) 2579 | 2580 | if "filter-categories" in overlay_def: 2581 | filter_cats = overlay_def["filter-categories"] 2582 | else: 2583 | filter_cats = [] 2584 | 2585 | if kit_dict["name"] == "nokit" or ("is_fixup" in repo_dict and repo_dict["is_fixup"] is True): 2586 | # grab all remaining ebuilds 2587 | steps += [InsertEbuilds(repo_dict["repo"], select_only=select_clause, move_maps=move_maps, skip=None, 2588 | replace=False, cpm_logger=cpm_logger)] 2589 | else: 2590 | steps += await generateKitSteps(release, kit_dict['name'], repo_dict["repo"], fixup_repo=fixup_repo, 2591 | select_only=select_clause, 2592 | filter_repos=filter_repos, 2593 | filter_cats=filter_cats, 2594 | force=overlay_def["force"] if "force" in overlay_def else None, 2595 | cpm_logger=cpm_logger, move_maps=move_maps, secondary_kit=secondary_kit) 2596 | return steps 2597 | 2598 | 2599 | def copyFromFixupsSteps(release=None, fixup_repo=None, branch=None, kit_dict=None, cpm_logger=None): 2600 | 2601 | # Phase 3: copy eclasses, licenses, profile info, and ebuild/eclass fixups from the kit-fixups repository. 2602 | 2603 | # First, we are going to process the kit-fixups repository and look for ebuilds and eclasses to replace. Eclasses can be 2604 | # overridden by using the following paths inside kit-fixups: 2605 | 2606 | # kit-fixups/eclass/1.2-release <--------- global eclasses, get installed to all kits unconditionally for release (overrides those above) 2607 | # kit-fixups//global/eclass <-------- global eclasses for a particular kit, goes in all branches (overrides those above) 2608 | # kit-fixups//global/profiles <------ global profile info for a particular kit, goes in all branches (overrides those above) 2609 | # kit-fixups///eclass <------ eclasses to install in just a specific branch of a specific kit (overrides those above) 2610 | # kit-fixups///profiles <---- profile info to install in just a specific branch of a specific kit (overrides those above) 2611 | 2612 | # Note that profile repo_name and categories files are excluded from any copying. 2613 | 2614 | # Ebuilds can be installed to kits by putting them in the following location(s): 2615 | 2616 | # kit-fixups//global/cat/pkg <------- install cat/pkg into all branches of a particular kit 2617 | # kit-fixups///cat/pkg <----- install cat/pkg into a particular branch of a kit 2618 | 2619 | # Remember that at this point, we may be missing a lot of eclasses and licenses from Gentoo. We will then perform a final sweep 2620 | # of all catpkgs in the dest_kit and auto-detect missing eclasses from Gentoo and copy them to our dest_kit. Remember that if you 2621 | # need a custom eclass from a third-party overlay, you will need to specify it in the overlay's overlays["ov_name"]["eclasses"] 2622 | # list. Or alternatively you can copy the eclasses you need to kit-fixups and maintain them there :) 2623 | 2624 | steps = [] 2625 | # Here is the core logic that copies all the fix-ups from kit-fixups (eclasses and ebuilds) into place: 2626 | eclass_release_path = "eclass/%s" % release 2627 | if os.path.exists(os.path.join(fixup_repo.root, eclass_release_path)): 2628 | steps += [SyncDir(fixup_repo.root, eclass_release_path, "eclass")] 2629 | if branch == "master": 2630 | # if a branch has "master" as its branch, we will look for a fixup directory of its *release* (like "1.2-release") just so it's clear 2631 | # for maintainers ("master" would be ambiguous in kit-fixups.) 2632 | fixup_dirs = ["global", "curated", release] 2633 | else: 2634 | fixup_dirs = ["global", "curated", branch] 2635 | for fixup_dir in fixup_dirs: 2636 | fixup_path = kit_dict['name'] + "/" + fixup_dir 2637 | if os.path.exists(fixup_repo.root + "/" + fixup_path): 2638 | if os.path.exists(fixup_repo.root + "/" + fixup_path + "/eclass"): 2639 | steps += [ 2640 | InsertFilesFromSubdir(fixup_repo, "eclass", ".eclass", select="all", skip=None, 2641 | src_offset=fixup_path) 2642 | ] 2643 | if os.path.exists(fixup_repo.root + "/" + fixup_path + "/licenses"): 2644 | steps += [ 2645 | InsertFilesFromSubdir(fixup_repo, "licenses", None, select="all", skip=None, 2646 | src_offset=fixup_path) 2647 | ] 2648 | if os.path.exists(fixup_repo.root + "/" + fixup_path + "/profiles"): 2649 | steps += [ 2650 | InsertFilesFromSubdir(fixup_repo, "profiles", None, select="all", 2651 | skip=["repo_name", "categories"], src_offset=fixup_path) 2652 | ] 2653 | # copy appropriate kit readme into place: 2654 | readme_path = fixup_path + "/README.rst" 2655 | if os.path.exists(fixup_repo.root + "/" + readme_path): 2656 | steps += [ 2657 | SyncFiles(fixup_repo.root, { 2658 | readme_path: "README.rst" 2659 | }) 2660 | ] 2661 | 2662 | # We now add a step to insert the fixups, and we want to record them as being copied so successive kits 2663 | # don't get this particular catpkg. Assume we may not have all these catpkgs listed in our package-set 2664 | # file... 2665 | 2666 | steps += [ 2667 | InsertEbuilds(fixup_repo, ebuildloc=fixup_path, select="all", skip=None, replace=True, 2668 | cpm_logger=cpm_logger, is_fixup=True) 2669 | ] 2670 | return steps 2671 | 2672 | 2673 | # UPDATE KIT. This function does the heavy lifting of taking a kit specification included in a kit_dict, and 2674 | # regenerating it. The kitted_catpkgs argument is a dictionary which is also written to and used to keep track of 2675 | # catpkgs copied between runs of updateKit. 2676 | 2677 | async def updateKit(foundation, config, release, async_engine: AsyncMergeAllKits, kit_dict, prev_kit_dict, 2678 | cpm_logger, create=False, push=False, now=None, fixup_repo=None, branch=None, force=False, indypush=False, destfix=False): 2679 | 2680 | # secondary_kit means: we're the second (or third, etc.) xorg-kit or other kit to be processed. The first kind of 2681 | # each kit processed has secondary_kit = False, and later ones have secondary_kit = True. We need special processing 2682 | # to grab any 'orphan' packages that were selected as part of prior kit scans (and thus will not be included in 2683 | # later kits) but were not picked up in our current kit-scan. For example, let's say @depsincat@:virtual/ttf-fonts: 2684 | # media-fonts picks up a funky font in the first xorg-kit scan, but in the second xorg-kit scan, the deps have 2685 | # changed and thus this font isn't selected. Well without special handling, if we are using the second (or later) 2686 | # xorg-kit, funky-font won't exist. We call these guys 'orphans' and need to ensure we include them. 2687 | 2688 | move_maps = get_move_maps(fixup_repo.root + "/move-maps", kit_dict['name']) 2689 | 2690 | secondary_kit = False 2691 | if prev_kit_dict is not None: 2692 | if kit_dict['name'] != prev_kit_dict['name']: 2693 | 2694 | # We are advancing to the next kit. For example, we just processed an xorg-kit and are now processing a python-kit. So we want to apply all our accumulated matches. 2695 | # If we are processing an xorg-kit again, this won't run, which is what we want. We want to keep accumulating catpkg names/matches. 2696 | 2697 | cpm_logger.nextKit() 2698 | 2699 | else: 2700 | secondary_kit = True 2701 | 2702 | if branch is None: 2703 | branch = kit_dict['branch'] 2704 | 2705 | print("Processing kit %s branch %s, secondary kit is %s" % (kit_dict['name'], branch, repr(secondary_kit))) 2706 | 2707 | # get set of source repos used to grab catpkgs from: 2708 | 2709 | if force is False and "type" in kit_dict and kit_dict["type"] == KitType.INDEPENDENTLY_MAINTAINED: 2710 | # independently-maintained repo. Don't regenerate. Just record all catpkgs in this kit as belonging to this kit so they don't get into other kits: 2711 | kit_dict["tree"] = tree = GitTree(kit_dict["name"], branch, config=config, 2712 | url=config.indy_url(kit_dict["name"]), 2713 | root=config.source_trees + "/" + kit_dict["name"], origin_check=False) 2714 | await tree.initialize() 2715 | await tree.run([ 2716 | RecordAllCatPkgs(tree, cpm_logger), 2717 | FastPullScan(now=now, engine=async_engine) 2718 | ]) 2719 | if indypush: 2720 | # If --indypush is specified, we want to mirror the independent kit to the same destination as the kits we 2721 | # are auto-generating. This does it: 2722 | await tree.mirrorUpstreamRepository(mirror=config.base_url(kit_dict['name'])) 2723 | return tree.head() 2724 | 2725 | if "repo_obj" not in kit_dict: 2726 | kit_dict["repo_obj"] = await getKitSourceInstances(foundation, config, kit_dict) 2727 | repos = kit_dict["repo_obj"] 2728 | 2729 | # get a handy variable reference to gentoo_staging: 2730 | gentoo_staging = None 2731 | for x in repos: 2732 | if x["name"] == "gentoo-staging": 2733 | gentoo_staging = x["repo"] 2734 | break 2735 | 2736 | if gentoo_staging is None: 2737 | print("Couldn't find source gentoo staging repo") 2738 | elif gentoo_staging.name != "gentoo-staging": 2739 | print("Gentoo staging mismatch -- name is %s" % gentoo_staging["name"]) 2740 | 2741 | # If we have gotten here, we are automatically generating a kit... 2742 | kit_dict['tree'] = tree = GitTree(kit_dict['name'], branch, config=config, 2743 | url=config.base_url(kit_dict['name']), create=create, 2744 | root="%s/%s" % (config.dest_trees, kit_dict['name']), 2745 | mirror=config.mirror.rstrip("/") + "/" + kit_dict[ 2746 | "name"] if config.mirror else None, 2747 | origin_check=True, 2748 | destfix=destfix) 2749 | await tree.initialize() 2750 | if "stability" in kit_dict and kit_dict["stability"] == KitStabilityRating.DEPRECATED: 2751 | # no longer update this kit. 2752 | return tree.head() 2753 | 2754 | # Phase 1: prep the kit 2755 | pre_steps = [ 2756 | GitCheckout(branch), 2757 | CleanTree() 2758 | ] 2759 | 2760 | prep_steps = getKitPrepSteps(release, repos, kit_dict, gentoo_staging, fixup_repo) 2761 | pre_steps += prep_steps[0] 2762 | post_steps = prep_steps[2] 2763 | 2764 | for repo_dict in repos: 2765 | # do the eclass and copyfiles steps first, in case they are needed for prior steps. 2766 | if tree.name == "core-kit": 2767 | # execute "copyfiles" and "eclasses" copy logic only for core-kit. 2768 | ov = foundation.overlays[repo_dict["name"]] 2769 | if "copyfiles" in ov and len(ov["copyfiles"]): 2770 | # since we copied over some ebuilds, we also want to make sure we copy over things like masks, etc: 2771 | pre_steps += [SyncFiles(repo_dict["repo"].root, ov["copyfiles"])] 2772 | if "eclasses" in ov: 2773 | # we have eclasses to copy over, too: 2774 | ec_files = {} 2775 | for eclass in ov["eclasses"]: 2776 | ecf = "eclass/" + eclass + ".eclass" 2777 | ec_files[ecf] = ecf 2778 | pre_steps += [SyncFiles(repo_dict["repo"].root, ec_files)] 2779 | 2780 | # This is an improved faster sync of all licenses. We will remove missing ones later: 2781 | 2782 | pre_steps += [ 2783 | SyncDir(gentoo_staging.root, "licenses") 2784 | ] 2785 | 2786 | await tree.run(pre_steps) 2787 | 2788 | for repo_dict in repos: 2789 | steps = await copyFromSourceRepositoriesSteps(repo_dict=repo_dict, kit_dict=kit_dict, source_defs=repos, release=release, secondary_kit=secondary_kit, fixup_repo=fixup_repo, cpm_logger=cpm_logger, move_maps=move_maps) 2790 | await tree.run(steps) 2791 | 2792 | steps = copyFromFixupsSteps(release=release, fixup_repo=fixup_repo, branch=branch, kit_dict=kit_dict, cpm_logger=cpm_logger) 2793 | 2794 | steps += [ 2795 | RunRepositoryStepsIfAvailable(fixup_root=fixup_repo.root, cpm_logger=cpm_logger) 2796 | ] 2797 | 2798 | await tree.run(steps) 2799 | 2800 | # copy all available licenses that have not been copied in fixups from gentoo-staging over to the kit. 2801 | # We will remove any unused licenses below... 2802 | 2803 | #copy_steps = [InsertLicenses(gentoo_staging, select=simpleGetAllLicenses(tree, gentoo_staging))] 2804 | #await tree.run(copy_steps) 2805 | 2806 | # Phase 4: finalize and commit 2807 | 2808 | # remove unused licenses... 2809 | used_licenses = await getAllLicenses(tree, release) 2810 | to_remove = [] 2811 | for license in os.listdir(tree.root + "/licenses"): 2812 | if license not in used_licenses["dest_kit"]: 2813 | to_remove.append(tree.root + "/licenses/" + license) 2814 | for file in to_remove: 2815 | os.unlink(file) 2816 | 2817 | post_steps += [ 2818 | ELTSymlinkWorkaround(), 2819 | CreateCategories(gentoo_staging), 2820 | # multi-plex this and store in different locations so that different selections can be made based on which python-kit is enabled. 2821 | # python-kit itself only needs one set which will be enabled by default. 2822 | ] 2823 | 2824 | 2825 | python_settings = foundation.python_kit_settings[release] 2826 | 2827 | for py_branch, py_settings in python_settings.items(): 2828 | post_steps += [GenPythonUse(py_settings, "funtoo/kits/python-kit/%s" % py_branch, release=release)] 2829 | 2830 | post_steps += [ 2831 | Minify(), 2832 | GenUseLocalDesc(), 2833 | GenCache(cache_dir="/var/cache/edb/%s-%s-%s" % (release, kit_dict['name'], branch), release=release), 2834 | ] 2835 | 2836 | post_steps += [ 2837 | FastPullScan(now=now, engine=async_engine) 2838 | ] 2839 | 2840 | await tree.run(post_steps) 2841 | await tree.gitCommit(message="updates", push=push) 2842 | return tree.head() 2843 | 2844 | # vim: ts=4 sw=4 noet 2845 | -------------------------------------------------------------------------------- /tests/extra_packages.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os, sys 4 | import unittest 5 | sys.path.insert(0, os.path.normpath(os.path.join(os.path.realpath(__file__), "../modules"))) 6 | from merge.merge_utils import get_extra_catpkgs_from_kit_fixups 7 | 8 | class MockTree(): 9 | 10 | def __init__(self, name, root): 11 | self.name = name 12 | self.root = root 13 | 14 | class ExtraPackageTest(unittest.TestCase): 15 | 16 | def setUp(self): 17 | 18 | self.fixups = MockTree("kit-fixups", os.path.join(os.getcwd(), "kit-fixups")) 19 | 20 | def test_basic(self): 21 | 22 | extras = get_extra_catpkgs_from_kit_fixups(self.fixups, "foo-kit") 23 | extras = set(extras) 24 | self.assertEqual(len(extras),2) 25 | self.assertIn('sys-apps/foobartronic', extras) 26 | self.assertIn('sys-apps/funapp', extras) 27 | 28 | if __name__ == "__main__": 29 | unittest.main() 30 | -------------------------------------------------------------------------------- /tests/kit-fixups/foo-kit/1.0-prime/sys-apps/foobar/foobar-1.5.ebuild: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/funtoo/merge-scripts/dd76fcaaebc70b5de863f6395239a53e9e9d8dae/tests/kit-fixups/foo-kit/1.0-prime/sys-apps/foobar/foobar-1.5.ebuild -------------------------------------------------------------------------------- /tests/kit-fixups/foo-kit/1.0-prime/sys-apps/foobartronic/foobartronic-1.0.ebuild: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/funtoo/merge-scripts/dd76fcaaebc70b5de863f6395239a53e9e9d8dae/tests/kit-fixups/foo-kit/1.0-prime/sys-apps/foobartronic/foobartronic-1.0.ebuild -------------------------------------------------------------------------------- /tests/kit-fixups/foo-kit/1.1-prime/sys-apps/foobar/foobar-1.6.ebuild: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/funtoo/merge-scripts/dd76fcaaebc70b5de863f6395239a53e9e9d8dae/tests/kit-fixups/foo-kit/1.1-prime/sys-apps/foobar/foobar-1.6.ebuild -------------------------------------------------------------------------------- /tests/kit-fixups/foo-kit/1.1-prime/sys-apps/funapp/funapp-2.0.ebuild: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/funtoo/merge-scripts/dd76fcaaebc70b5de863f6395239a53e9e9d8dae/tests/kit-fixups/foo-kit/1.1-prime/sys-apps/funapp/funapp-2.0.ebuild -------------------------------------------------------------------------------- /utils/google_upload_server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | from google.cloud import storage 5 | import google.cloud.exceptions 6 | 7 | google_client = storage.Client.from_service_account_json('goog_creds.json') 8 | bucket = google_client.get_bucket("fastpull-us") 9 | def google_upload(filename): 10 | print("starting upload") 11 | disk_path = os.path.join(config.get_path("fastpull_out"), filename) 12 | print("disk path", disk_path) 13 | # should strip non-important directories: 14 | google_blob = bucket.blob(filename) 15 | print("filename", filename) 16 | try: 17 | google_blob.upload_from_filename(disk_path) 18 | except google.cloud.exceptions.GoogleCloudError: 19 | print("upload failed") 20 | return False 21 | else: 22 | print("upload succeeded") 23 | return True 24 | 25 | # vim: ts=4 sw=4 noet 26 | -------------------------------------------------------------------------------- /utils/pkglist.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3.4 2 | 3 | from src.merge_utils import * 4 | 5 | import portage 6 | 7 | cur_name = sys.argv[1] 8 | cur_tree = "/var/git/meta-repo/kits/" + cur_name 9 | cur_overlay = GitTree(cur_name, root=cur_tree) 10 | env = os.environ.copy() 11 | env['PORTAGE_DEPCACHEDIR'] = '/var/cache/edb/%s-%s-meta' % (cur_overlay.name, cur_overlay.branch) 12 | env['PORTAGE_REPOSITORIES'] = ''' 13 | [DEFAULT] 14 | main-repo = %s 15 | 16 | [%s] 17 | location = %s 18 | ''' % (cur_name, cur_name, cur_tree) 19 | 20 | p_global = portage.portdbapi() 21 | #mysettings=portage.config(env=env, config_profile_path='')) 22 | v = portage.vardbapi() 23 | 24 | results = { 25 | "orphaned" : [], 26 | "masked" : [], 27 | "stale" : [] 28 | } 29 | 30 | #for catpkg in v.cp_all(): 31 | # inst_match = v.cp_list(catpkg) 32 | # if len(inst_match): 33 | # matches = p.match(catpkg) 34 | # all_matches = p.xmatch("match-all", catpkg) 35 | # if len(matches): 36 | # for inst in inst_match: 37 | # if inst not in matches: 38 | # inst_split = pkgsplit(inst) 39 | # match_split = pkgsplit(matches[-1]) 40 | # my_cmp = pkgcmp(inst_split,match_split) 41 | # if my_cmp > 0: 42 | # results["masked"].append(inst) 43 | # elif my_cmp < 0: 44 | # results["stale"].append(inst) 45 | # else: 46 | # if len(all_matches): 47 | # results["masked"] += inst_match 48 | # else: 49 | # results["orphaned"] += inst_match 50 | 51 | p = portage.portdbapi(mysettings=portage.config(env=env, config_profile_path='')) 52 | mypkgs = {} 53 | kit_count = {} 54 | cp_all = p.cp_all() 55 | for catpkg in cp_all: 56 | for pkg in p.cp_list(catpkg): 57 | if pkg == '': 58 | print("No match for %s" % catpkg) 59 | continue 60 | try: 61 | aux = p.aux_get(pkg, ["DEPEND", "RDEPEND"]) 62 | except PortageKeyError: 63 | print("Portage key error for %s" % repr(pkg)) 64 | continue 65 | try: 66 | f = flatten(use_reduce(aux[0]+" "+aux[1], matchall=True)) 67 | except portage.exception.InvalidDependString: 68 | print("bad dep string in " + pkg + ": " + aux[0] + " " + aux[1]) 69 | continue 70 | for dep in f: 71 | if len(dep) and dep[0] == "!": 72 | continue 73 | try: 74 | mypkg = dep_getkey(dep) 75 | except portage.exception.InvalidAtom: 76 | continue 77 | try: 78 | kit = p_global.better_cache[mypkg][0].name 79 | except KeyError: 80 | kit = "(none)" 81 | if kit == sys.argv[1]: 82 | continue 83 | if mypkg not in cp_all: 84 | if mypkg not in mypkgs: 85 | mypkgs[mypkg] = [] 86 | if catpkg not in mypkgs[mypkg]: 87 | mypkgs[mypkg].append(catpkg) 88 | if kit not in kit_count: 89 | kit_count[kit] = 0 90 | kit_count[kit] += 1 91 | print("External dependency Packages with dependency") 92 | print("============================= ================================================================") 93 | for pkg in sorted(mypkgs.keys()): 94 | print(pkg.ljust(30), mypkgs[pkg]) 95 | 96 | kit_tot = 0 97 | for key, val in kit_count.items(): 98 | kit_tot += val 99 | print() 100 | print("External Kit Percentage") 101 | print("=================== ================================================================") 102 | 103 | for key in sorted(kit_count.keys()): 104 | print(key.ljust(20), "%4.2f%%" % ((kit_count[key]*100)/kit_tot)) 105 | #print(results) 106 | # vim: ts=4 sw=4 noet 107 | -------------------------------------------------------------------------------- /utils/python3_kit_qa_check.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | # This QA check will scan the meta-repo on the existing system for ebuilds that support an older version 4 | # of python3 but not python3.6. This does not scan python_single_target ebuilds but rather those that can 5 | # be built to support multiple python implementations. 6 | 7 | from merge.merge_utils import * 8 | 9 | import portage 10 | 11 | p = portage.portdbapi() 12 | p.freeze() 13 | 14 | future_aux = {} 15 | 16 | old_python_set = { "python_targets_python3_3", "python_targets_python3_4", "python_targets_python3_5" } 17 | cur_python_set = { "python_targets_python3_6" } 18 | 19 | def future_generator(): 20 | for cp in p.cp_all(): 21 | repos = p.getRepositories(catpkg=cp) 22 | cpv = p.xmatch("bestmatch-visible", cp) 23 | if cpv: 24 | future = p.async_aux_get(cpv, [ "INHERITED", "IUSE" ]) 25 | future_aux[id(future)] = (cpv, repos) 26 | yield future 27 | 28 | for future in iter_completed(future_generator()): 29 | cpv, repo = future_aux.pop(id(future)) 30 | try: 31 | result = future.result() 32 | except KeyError as e: 33 | print("aux_get fail", cpv, e) 34 | eclasses, iuse = result 35 | iuse_set = set(iuse.split()) 36 | 37 | if len(old_python_set & iuse_set) and not len(cur_python_set & iuse_set): 38 | # contains python3.4 or 3.5 compat but not python3.6 compat: 39 | print(cpv, repo, old_python_set & iuse_set) 40 | 41 | -------------------------------------------------------------------------------- /utils/spider_common.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | 4 | -------------------------------------------------------------------------------- /wip/upgrade_steps.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | funtoo_releases = OrderedDict() 4 | 5 | funtoo_releases["1.0"] = { 6 | "kit_compat_group": [ 7 | ("core-kit", "1.0-prime"), 8 | ("security-kit", "1.0-prime"), 9 | ("media-kit", "1.1-prime"), 10 | ("java-kit", "1.0-prime"), 11 | ("ruby-kit", "1.0-prime"), 12 | ("haskell-kit", "1.0-prime"), 13 | ("lisp-scheme-kit", "1.0-prime"), 14 | ("lang-kit", "1.0-prime"), 15 | ("dev-kit", "1.0-prime"), 16 | ("desktop-kit", "1.0-prime"), 17 | ], 18 | "upgrade_from": [] 19 | } 20 | 21 | funtoo_releases["1.2"] = { 22 | "kit_compat_group": [ 23 | ("core-kit", "1.2-prime"), 24 | ("security-kit", "1.2-prime"), 25 | ("media-kit", "1.2-prime"), 26 | ("java-kit", "1.2-prime"), 27 | ("ruby-kit", "1.2-prime"), 28 | ("haskell-kit", "1.2-prime"), 29 | ("lisp-scheme-kit", "1.2-prime"), 30 | ("lang-kit", "1.2-prime"), 31 | ("dev-kit", "1.2-prime"), 32 | ("desktop-kit", "1.2-prime"), 33 | ], 34 | "upgrade_from": ["1.0"], 35 | "release_docs": "RELEASE.mediawiki", # TODO: where is the optimal place to place this file? 36 | "package_prerequisites": [">=app-admin/ego-1.9.0"], 37 | # TODO: these upgrade steps might be better placed in the ego repo.... maybe exported to JSON? 38 | "upgrade_steps": [ 39 | "emerge -1 gcc", 40 | "emerge -1 glibc", 41 | "emerge -uDN @system", 42 | "emerge -uDN @world", 43 | "emerge @preserved-rebuild" 44 | "revdep-rebuild --library 'libstdc++.so.6' -- --exclude sys-devel/gcc" 45 | ] 46 | } 47 | 48 | 49 | class UpgradeHandler: 50 | pass 51 | 52 | class KitHandler(UpgradeHandler): 53 | pass 54 | 55 | class ReleaseHandler(UpgradeHandler): 56 | pass 57 | 58 | class Release12UpgradeHandler(UpgradeHandler): 59 | 60 | _kits = [ 61 | "core-kit" 62 | "security-kit", 63 | "media-kit", 64 | "java-kit", 65 | "ruby-kit", 66 | "haskell-kit", 67 | "lisp-scheme-kit", 68 | "lang-kit", 69 | "dev-kit", 70 | "desktop-kit" 71 | ] 72 | 73 | @classmethod 74 | def available_upgrades(cls): 75 | 76 | reqs = [] 77 | results = [] 78 | 79 | for kit in cls._kits: 80 | if kit == "media-kit": 81 | reqs.append({"kit": kit, "branch": "1.1-prime"}) 82 | reqs.append({ "kit" : kit, "branch" : "1.0-prime" }) 83 | 84 | for kit in cls._kits: 85 | results.append({ "kit": kit, "branch": "1.2-prime" }) 86 | 87 | return [ 88 | { 89 | "target" : { "release" : "1.2" }, 90 | "requirements": reqs, 91 | "results" : results 92 | } 93 | ] 94 | 95 | class PythonKitHandler(KitHandler): 96 | 97 | @classmethod 98 | def available_upgrades(cls): 99 | 100 | return [ 101 | { 102 | "target" : { "kit" : "python-kit", "branch" : "3.6-prime" }, 103 | "requirements" : [ 104 | { "kit" : "python-kit", "branch" : "3.4-prime" } 105 | ] 106 | } 107 | ] 108 | 109 | def get_steps(self, new_branch, old_branch): 110 | new_v, new_rating = new_branch.split("-") # "3.6", "prime" 111 | old_v, old_rating = old_branch.split("-") 112 | new_major = Decimal(new_v[:3]) # 3.6 113 | old_major = Decimal(old_v[:3]) 114 | post_steps = [ "emerge -uDN @world" ] 115 | if new_major != old_major: 116 | post_steps += [ "eselect python set --python3 python%s" % new_major ] 117 | for major in self.settings["remove"]: 118 | post_steps.append("emerge -C =dev-lang/python-%s" % major) 119 | return [], post_steps 120 | 121 | settings = { 122 | # branch / primary python / alternate python / python mask (if any) 123 | 'master': { 124 | "primary": "python3_6", 125 | "alternate": "python2_7", 126 | "mask": None, 127 | "remove" : [ "3.3", "3.4", "3.5" ] 128 | }, 129 | '3.4-prime': { 130 | "primary": "python3_4", 131 | "alternate": "python2_7", 132 | "mask": ">=dev-lang/python-3.5", 133 | "remove": ["3.3", "3.5", "3.6"] 134 | }, 135 | '3.6-prime': { 136 | "primary": "python3_6", 137 | "alternate": "python2_7", 138 | "mask": ">=dev-lang/python-3.7", 139 | "remove": ["3.3", "3.4", "3.5"] 140 | }, 141 | '3.6.3-prime': { 142 | "primary": "python3_6", 143 | "alternate": "python2_7", 144 | "mask": ">=dev-lang/python-3.7", 145 | "remove": ["3.3", "3.4", "3.5"] 146 | } 147 | } 148 | 149 | 150 | 151 | """" 152 | 153 | ego kit upgrade python-kit 3.4-prime 3.6-prime 154 | >> Would you like to upgrade to python-kit 3.6-prime? y/n 155 | >> Starting kit upgrade.... 156 | >> Recording log... 157 | >> changing kit.. 158 | >> syncing... 159 | >> upgrading... 160 | >> Will execute the following steps. Would you like me to execute these for you?: 161 | >> 162 | 1. emerge -auDN @world 163 | 2 164 | 165 | 166 | 167 | 168 | 169 | 170 | """ 171 | 172 | --------------------------------------------------------------------------------