├── CNAME
├── requirements.txt
├── mkrepo
├── Makefile
├── Dockerfile
├── setup.py
├── .gitignore
├── static
├── box.svg
├── template.html
└── skeleton.css
├── mkrepo.py
├── README.md
├── index.html
├── storage.py
├── debrepo.py
├── rpmfile.py
└── rpmrepo.py
/CNAME:
--------------------------------------------------------------------------------
1 | mkrepo.com
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3==1.4.1
2 |
--------------------------------------------------------------------------------
/mkrepo:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import mkrepo
4 | mkrepo.main()
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | index.html : README.md static/skeleton.css static/box.svg static/template.html
2 | pandoc -s README.md -o index.html -c static/skeleton.css --template static/template.html -T mkrepo
3 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM centos:7
2 |
3 | RUN set -x \
4 | && yum -y install \
5 | epel-release \
6 | && yum -y install \
7 | createrepo \
8 | python-pip
9 |
10 | RUN set -x \
11 | && pip install \
12 | boto3
13 |
14 |
15 | COPY *.py /mkrepo/
16 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from distutils.core import setup
3 | setup(
4 | name='mkrepo',
5 | packages=[''],
6 | version='0.1.2',
7 | description='Maintain deb and rpm repos on s3',
8 | author='Konstantin Nazarov',
9 | author_email='mail@kn.am',
10 | url='https://github.com/tarantool/mkrepo',
11 | keywords=['rpm', 'deb'],
12 | classifiers=[],
13 | scripts=['mkrepo']
14 | )
15 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .mkrepo
2 |
3 | ### Python ###
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | env/
15 | build/
16 | develop-eggs/
17 | dist/
18 | downloads/
19 | eggs/
20 | .eggs/
21 | lib/
22 | lib64/
23 | parts/
24 | sdist/
25 | var/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *,cover
49 | .hypothesis/
50 |
51 | # Sphinx documentation
52 | docs/_build/
53 |
54 | # PyBuilder
55 | target/
56 |
57 | # Jupyter Notebook
58 | .ipynb_checkpoints
59 |
60 | # pyenv
61 | .python-version
62 |
63 | # dotenv
64 | .env
65 |
66 | # virtualenv
67 | .venv/
68 | venv/
69 | ENV/
70 |
71 | # Spyder project settings
72 | .spyderproject
73 |
74 | # Rope project settings
75 | .ropeproject
76 |
--------------------------------------------------------------------------------
/static/box.svg:
--------------------------------------------------------------------------------
[SVG markup stripped during extraction; only the "Created with Sketch."
export comment survived.]
--------------------------------------------------------------------------------
/mkrepo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import argparse
4 | import storage
5 | import debrepo
6 | import rpmrepo
7 | import os
8 |
9 |
10 | def is_deb_repo(stor):
11 | result = False
12 | for _ in stor.files("pool/"):
13 | result = True
14 | break
15 |
16 | return result
17 |
18 |
19 | def is_rpm_repo(stor):
20 | result = False
21 | for _ in stor.files("Packages/"):
22 | result = True
23 | break
24 |
25 | return result
26 |
27 |
28 | def update_repo(path, args):
29 | stor = None
30 |
31 | if not os.path.exists(args.temp_dir):
32 | os.mkdir(args.temp_dir)
33 |
34 | if path.startswith('s3://'):
35 | path = path[len('s3://'):]
36 |
37 | if '/' in path:
38 | bucket, prefix = path.split('/', 1)
39 | else:
40 | bucket, prefix = path, '.'
41 |
42 | stor = storage.S3Storage(args.s3_endpoint,
43 | bucket,
44 | prefix,
45 | args.s3_access_key_id,
46 | args.s3_secret_access_key,
47 | args.s3_region)
48 |
49 | else:
50 | stor = storage.FilesystemStorage(path)
51 |
52 | if is_deb_repo(stor):
53 | print "Updating deb repository: %s" % path
54 | debrepo.update_repo(stor, args.sign, args.temp_dir)
55 | elif is_rpm_repo(stor):
56 | print "Updating rpm repository: %s" % path
57 | rpmrepo.update_repo(stor, args.sign, args.temp_dir)
58 | else:
59 | print "Unknown repository: %s" % path
60 |
61 |
62 | def main():
63 | parser = argparse.ArgumentParser()
64 |
65 | parser.add_argument(
66 | '--temp-dir',
67 | default=".mkrepo",
68 | help='directory used to store temporary artifacts')
69 |
70 | parser.add_argument(
71 | '--s3-access-key-id', help='access key for connecting to S3')
72 | parser.add_argument(
73 | '--s3-secret-access-key', help='secret key for connecting to S3')
74 |
75 | parser.add_argument(
76 | '--s3-endpoint',
77 | help='region endpoint for connecting to S3 (default: s3.amazonaws.com)')
78 |
79 | parser.add_argument(
80 | '--s3-region',
81 | help='S3 region name')
82 |
83 | parser.add_argument(
84 | '--sign',
85 | action='store_true',
86 | default=False,
87 | help='sign package metadata')
88 |
89 | parser.add_argument(
90 | 'path', nargs='+',
91 | help='List of paths to scan. Either s3://bucket/prefix or /path/on/local/fs')
92 |
93 | args = parser.parse_args()
94 |
95 | paths = args.path
96 |
97 | for path in paths:
98 | update_repo(path, args)
99 |
100 |
101 | if __name__ == '__main__':
102 | main()
103 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Create RPM and DEB repositories in S3
2 |
3 | `mkrepo` is a repository generator with pluggable backends,
4 | which allows you to maintain an RPM or DEB repository on various
5 | kinds of storage, such as the local filesystem or S3, and periodically regenerate the metadata.
6 |
7 | Use it in tandem with your favourite CI system to produce a better pipeline.
8 | `mkrepo` helps you to get rid of ad-hoc cron jobs.
9 |
10 | As a bonus, `mkrepo` supports on-premises S3 servers like [Minio](http://minio.io).
11 |
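For example, pointing `mkrepo` at a local Minio server is just a matter of overriding the endpoint and credentials (the endpoint URL and keys below are placeholders):

``` bash
./mkrepo.py --s3-endpoint http://localhost:9000 \
            --s3-access-key-id minio_access_key \
            --s3-secret-access-key minio_secret_key \
            s3://builds/rpmrepo
```
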
12 | Works on Linux and OS X. Should also work on BSD and Windows, but I haven't checked.
13 |
14 | ## Quickstart
15 |
16 | Create an S3 bucket named e.g. `builds` and upload a sample package `package.rpm` to `s3://builds/rpmrepo/Packages`.
17 |
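The upload can be done with any native S3 client; for example, with the AWS CLI:

``` bash
aws s3 cp package.rpm s3://builds/rpmrepo/Packages/package.rpm
```

Then run `mkrepo` on the repository prefix:
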
18 | ``` bash
19 | ./mkrepo.py s3://builds/rpmrepo
20 | ```
21 |
22 | After this, you will find all the generated metadata in `s3://builds/rpmrepo/repodata`.
23 |
24 | ## Dependencies
25 |
26 | Python libraries:
27 |
28 | * boto3
29 |
30 | ## Command-line reference
31 |
32 | `mkrepo` parses your `~/.aws/config` and reads the secret key and region settings,
33 | so you may omit them on the command line if you have an AWS config.
34 |
35 | ``` bash
36 | mkrepo.py [-h]
37 | [--temp-dir TEMP_DIR]
38 | [--s3-access-key-id S3_ACCESS_KEY_ID]
39 | [--s3-secret-access-key S3_SECRET_ACCESS_KEY]
40 | [--s3-endpoint S3_ENDPOINT]
41 | [--s3-region S3_REGION]
42 | [--sign]
43 | path [path ...]
44 | ```
45 |
46 | * `--temp-dir` - *(optional)* directory used to store temporary artifacts (default is `.mkrepo`)
47 | * `--s3-access-key-id` - *(optional)* S3 access key ID
48 | * `--s3-secret-access-key` - *(optional)* S3 secret key
49 | * `--s3-endpoint` - *(optional)* S3 server URI
50 | * `--s3-region` - *(optional)* S3 region (default is us-east-1)
51 | * `--sign` - *(optional)* sign package metadata
52 | * `path` - list of paths to scan for repositories
53 |
54 | ## How it works
55 |
56 | `mkrepo` searches the supplied path for either a `Packages` or a `pool` subdirectory.
57 | If it finds `Packages`, it assumes an RPM repo. If it finds `pool`, it assumes a
58 | DEB repo.
59 |
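For example, the two layouts look roughly like this (the package names are placeholders; the `repodata/` and `dists/` trees appear once metadata is generated):

```
s3://builds/rpmrepo/                 s3://builds/debrepo/
├── Packages/                        ├── pool/
│   └── mypkg-1.0-1.x86_64.rpm       │   └── mypkg_1.0.0-1.jessie_amd64.deb
└── repodata/   <- generated         └── dists/      <- generated
```
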
60 | Then it parses the existing metadata files (if any) and compares the timestamps
61 | recorded there with the timestamps of all package files in the repo. Any package
62 | whose timestamp differs, or which is missing from the metadata, is parsed and
63 | added to the metadata.
64 |
65 | Then the new metadata is uploaded to S3, replacing the previous version.
66 |
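A minimal sketch of that freshness check, in terms of the `Storage` API from `storage.py` (the function name and dict argument are illustrative, not the actual `mkrepo` code):

``` python
def packages_to_reindex(storage, recorded_mtimes):
    """Yield package paths whose mtime differs from the recorded one."""
    for path in storage.files('pool'):  # or 'Packages/' for an RPM repo
        mtime = storage.mtime(path)
        # new package, or its timestamp changed since the last metadata run
        if recorded_mtimes.get(path) != mtime:
            yield path
```
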
67 | ## Credits
68 |
69 | Thanks to [Cyril Rohr](https://github.com/crohr) and [Ken Robertson](https://github.com/krobertson), authors of the following awesome tools:
70 |
71 | * [rpm-s3](https://github.com/crohr/rpm-s3)
72 | * [deb-s3](https://github.com/krobertson/deb-s3)
73 |
74 | Unfortunately, we needed a solution that is completely decoupled from the CI
75 | pipeline, and the tools mentioned above only support a package-push mode, where
76 | you have to use the tool itself to push packages to S3 instead of a native S3 client.
77 |
--------------------------------------------------------------------------------
/static/template.html:
--------------------------------------------------------------------------------
[pandoc HTML template for building index.html. The HTML markup was stripped
during extraction; what survives are the pandoc directives: head metadata for
$author-meta$, $date-meta$, and $keywords$; a $title-prefix$ / $pagetitle$
title; $css$ stylesheet links; optional $quotes$, $highlighting-css$, and
$math$ blocks; $header-includes$; and a body rendering $include-before$, an
optional title block, an optional $toc$, $body$, and $include-after$.]
--------------------------------------------------------------------------------
/index.html:
--------------------------------------------------------------------------------
[Generated file: the pandoc-rendered HTML copy of README.md (built by the
Makefile). The HTML markup was stripped during extraction, and the remaining
text duplicates README.md verbatim, so it is omitted here.]
--------------------------------------------------------------------------------
/storage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import errno
5 | import shutil
6 | import socket
7 | import urllib
7 | import boto3
8 | import StringIO
9 | import time
10 |
11 |
12 | class Storage:
13 |
14 | def __init__(self):
15 | pass
16 |
17 | def read_file(self, key):
18 | raise NotImplementedError()
19 |
20 | def write_file(self, key, data):
21 | raise NotImplementedError()
22 |
23 | def download_file(self, key, destination):
24 | raise NotImplementedError()
25 |
26 | def upload_file(self, key, source):
27 | raise NotImplementedError()
28 |
29 | def delete_file(self, key):
30 | raise NotImplementedError()
31 |
32 | def mtime(self, key):
33 | raise NotImplementedError()
34 |
35 | def exists(self, key):
36 | raise NotImplementedError()
37 |
38 | def files(self, subdir=None):
39 | raise NotImplementedError()
40 |
41 |
42 | def _mkdir_recursive(path):
43 | try:
44 | os.makedirs(path)
45 | except OSError as exc: # Python >2.5
46 | if exc.errno == errno.EEXIST and os.path.isdir(path):
47 | pass
48 | else:
49 | raise
50 |
51 |
52 | class FilesystemStorage(Storage):
53 |
54 | def __init__(self, basedir='.'):
55 | self.basedir = basedir
56 |
57 | def read_file(self, key):
58 | fullpath = os.path.join(self.basedir, key)
59 | with open(fullpath) as f:
60 | return f.read()
61 |
62 | def write_file(self, key, data):
63 | fullpath = os.path.join(self.basedir, key)
64 |
65 | if not os.path.exists(self.basedir):
66 | raise RuntimeError("Base directory doesn't exist: '%s'" %
67 | self.basedir)
68 |
69 | dirname = os.path.dirname(fullpath)
70 |
71 | if not os.path.exists(dirname):
72 | _mkdir_recursive(dirname)
73 |
74 | with open(fullpath, 'w+') as f:
75 | f.write(data)
76 |
77 | def download_file(self, key, destination):
78 | fullpath = os.path.join(self.basedir, key)
79 |
80 | shutil.copy(fullpath, destination)
81 |
82 | def upload_file(self, key, source):
83 | fullpath = os.path.join(self.basedir, key)
84 |
85 | if not os.path.exists(self.basedir):
86 | raise RuntimeError("Base directory doesn't exist: '%s'" %
87 | self.basedir)
88 |
89 | dirname = os.path.dirname(fullpath)
90 |
91 | if not os.path.exists(dirname):
92 | _mkdir_recursive(dirname)
93 |
94 | shutil.copy(source, fullpath)
95 |
96 | def delete_file(self, key):
97 | fullpath = os.path.join(self.basedir, key)
98 |
99 | os.remove(fullpath)
100 |
101 | def mtime(self, key):
102 | fullpath = os.path.join(self.basedir, key)
103 |
104 | return os.path.getmtime(fullpath)
105 |
106 | def exists(self, key):
107 | fullpath = os.path.join(self.basedir, key)
108 |
109 | return os.path.exists(fullpath)
110 |
111 | def files(self, subdir=None):
112 | basedir = self.basedir
113 |
114 | if subdir is not None:
115 | basedir = os.path.join(basedir, subdir)
116 |
117 | for dirname, _, files in os.walk(basedir):
118 | for filename in files:
119 | yield os.path.relpath(os.path.join(dirname, filename), self.basedir)
120 |
121 |
122 | class S3Storage(Storage):
123 |
124 | def __init__(self,
125 | endpoint,
126 | bucket,
127 | prefix="",
128 | aws_access_key_id=None,
129 | aws_secret_access_key=None,
130 | aws_region=None):
131 | self.bucket = bucket
132 | self.prefix = prefix
133 |
134 | self.client = boto3.client('s3', endpoint_url=endpoint,
135 | aws_access_key_id=aws_access_key_id,
136 | aws_secret_access_key=aws_secret_access_key,
137 | region_name=aws_region)
138 | self.resource = boto3.resource(
139 | 's3',
140 | endpoint_url=endpoint,
141 | aws_access_key_id=aws_access_key_id,
142 | aws_secret_access_key=aws_secret_access_key,
143 | region_name=aws_region)
144 |
145 | def read_file(self, key):
146 | fullkey = os.path.normpath(os.path.join(self.prefix, key.lstrip('/')))
147 |
148 | s3obj = self.resource.Object(self.bucket, fullkey)
149 |
150 | buf = StringIO.StringIO()
151 | s3obj.download_fileobj(buf)
152 | return buf.getvalue()
153 |
154 | def write_file(self, key, data):
155 | fullkey = os.path.normpath(os.path.join(self.prefix, key.lstrip('/')))
156 |
157 | s3obj = self.resource.Object(self.bucket, fullkey)
158 |
159 | buf = StringIO.StringIO()
160 | buf.write(data)
161 | buf.seek(0)
162 |
163 | s3obj.upload_fileobj(buf)
164 |
165 | def download_file(self, key, destination):
166 | fullkey = os.path.normpath(os.path.join(self.prefix, key.lstrip('/')))
167 |
168 | self.client.download_file(self.bucket, fullkey, destination)
169 |
170 | def upload_file(self, key, source):
171 | fullkey = os.path.normpath(os.path.join(self.prefix, key))
172 |
173 | self.client.upload_file(source, self.bucket, fullkey)
174 |
175 | def delete_file(self, key):
176 | fullkey = os.path.normpath(os.path.join(self.prefix, key.lstrip('/')))
177 |
178 | self.client.delete_object(Bucket=self.bucket, Key=fullkey)
179 |
180 | def mtime(self, key):
181 | fullkey = os.path.normpath(os.path.join(self.prefix, key.lstrip('/')))
182 |
183 | obj = self.resource.Object(self.bucket, fullkey)
184 | mtime = obj.last_modified
185 | mtime_sec = time.mktime(mtime.timetuple())
186 | return mtime_sec
187 |
188 | def exists(self, key):
189 | fullkey = os.path.normpath(
190 | os.path.join(self.prefix, key.lstrip('/')))
191 |
192 | bucket = self.resource.Bucket(self.bucket)
193 |
194 | objs = list(bucket.objects.filter(Prefix=fullkey))
195 |
196 | return len(objs) > 0 and objs[0].key == fullkey
197 |
198 | def files(self, subdir=None):
199 | dirname = self.prefix
200 |
201 | if subdir is not None:
202 | dirname = os.path.join(dirname, subdir.lstrip('/'))
203 |
204 | dirname = os.path.normpath(dirname)
205 |
206 | paginator = self.client.get_paginator('list_objects')
207 | list_parameters = {'Bucket': self.bucket,
208 | 'Prefix': dirname}
209 |
210 | for result in paginator.paginate(**list_parameters):
211 | if result.get('Contents') is not None:
212 | for fileobj in result.get('Contents'):
213 | filepath = os.path.relpath(fileobj.get('Key'), dirname)
214 | yield os.path.normpath(os.path.join(subdir or '/', filepath))
215 |
216 |
217 | class HttpStorage(Storage):
218 |
219 | def __init__(self, baseuri, basedir='.', timeout=10):
220 | """RO Http storage implementation"""
221 | self.basedir = basedir
222 | if not baseuri.startswith('http://') and \
223 | not baseuri.startswith('https://'):
224 | baseuri = "http://%s" % baseuri
225 | self.baseuri = baseuri
226 | self.timeout = timeout
227 |
228 | def read_file(self, key):
229 | fullpath = os.path.join(self.basedir, key)
230 | with open(fullpath) as f:
231 | return f.read()
232 |
233 | def write_file(self, key, data):
234 | fullpath = os.path.join(self.basedir, key)
235 |
236 | if not os.path.exists(self.basedir):
237 | raise RuntimeError("Base directory doesn't exist: '%s'" %
238 | self.basedir)
239 |
240 | dirname = os.path.dirname(fullpath)
241 |
242 | if not os.path.exists(dirname):
243 | _mkdir_recursive(dirname)
244 |
245 | with open(fullpath, 'w+') as f:
246 | f.write(data)
247 |
248 | def download_file(self, key, destination, params=None):
249 | status, data = self.get(key, params)
250 | if status != 200:
251 | return False
252 | self.write_file(destination, data)
253 | return True
254 |
255 | def mtime(self, key):
256 | """All files are fresh"""
257 | return time.time()
258 |
259 | def get(self, path, params=None, deep=0):
260 | args = [self.baseuri, path]
261 | if params is not None:
262 | args.extend(['?', urllib.urlencode(params)])
263 |
264 | # FancyURLopener's first argument is a proxy mapping, not an options
265 | # dict, so the timeout has to be applied at the socket level instead
266 | socket.setdefaulttimeout(self.timeout)
267 | opener = urllib.FancyURLopener()
268 | ret, data = opener.open(''.join(args)), None
269 | if ret.getcode() == 200:
270 | data = ret.read()
271 |
272 | return ret.getcode(), data
270 |
271 | def exists(self, key, params=None):
272 | status, _ = self.get(key, params)
273 | return status == 200
274 |
--------------------------------------------------------------------------------
/static/skeleton.css:
--------------------------------------------------------------------------------
1 | /*
2 | * Skeleton V2.0.4
3 | * Copyright 2014, Dave Gamache
4 | * www.getskeleton.com
5 | * Free to use under the MIT license.
6 | * http://www.opensource.org/licenses/mit-license.php
7 | * 12/29/2014
8 | */
9 |
10 |
11 | /* Table of contents
12 | ––––––––––––––––––––––––––––––––––––––––––––––––––
13 | - Grid
14 | - Base Styles
15 | - Typography
16 | - Links
17 | - Buttons
18 | - Forms
19 | - Lists
20 | - Code
21 | - Tables
22 | - Spacing
23 | - Utilities
24 | - Clearing
25 | - Media Queries
26 | */
27 |
28 |
29 | /* Grid
30 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
31 | .container {
32 | position: relative;
33 | width: 100%;
34 | max-width: 960px;
35 | margin: 0 auto;
36 | padding: 0 20px;
37 | box-sizing: border-box; }
38 | .column,
39 | .columns {
40 | width: 100%;
41 | float: left;
42 | box-sizing: border-box; }
43 |
44 | /* For devices larger than 400px */
45 | @media (min-width: 400px) {
46 | .container {
47 | width: 85%;
48 | padding: 0; }
49 | }
50 |
51 | /* For devices larger than 550px */
52 | @media (min-width: 550px) {
53 | .container {
54 | width: 80%; }
55 | .column,
56 | .columns {
57 | margin-left: 4%; }
58 | .column:first-child,
59 | .columns:first-child {
60 | margin-left: 0; }
61 |
62 | .one.column,
63 | .one.columns { width: 4.66666666667%; }
64 | .two.columns { width: 13.3333333333%; }
65 | .three.columns { width: 22%; }
66 | .four.columns { width: 30.6666666667%; }
67 | .five.columns { width: 39.3333333333%; }
68 | .six.columns { width: 48%; }
69 | .seven.columns { width: 56.6666666667%; }
70 | .eight.columns { width: 65.3333333333%; }
71 | .nine.columns { width: 74.0%; }
72 | .ten.columns { width: 82.6666666667%; }
73 | .eleven.columns { width: 91.3333333333%; }
74 | .twelve.columns { width: 100%; margin-left: 0; }
75 |
76 | .one-third.column { width: 30.6666666667%; }
77 | .two-thirds.column { width: 65.3333333333%; }
78 |
79 | .one-half.column { width: 48%; }
80 |
81 | /* Offsets */
82 | .offset-by-one.column,
83 | .offset-by-one.columns { margin-left: 8.66666666667%; }
84 | .offset-by-two.column,
85 | .offset-by-two.columns { margin-left: 17.3333333333%; }
86 | .offset-by-three.column,
87 | .offset-by-three.columns { margin-left: 26%; }
88 | .offset-by-four.column,
89 | .offset-by-four.columns { margin-left: 34.6666666667%; }
90 | .offset-by-five.column,
91 | .offset-by-five.columns { margin-left: 43.3333333333%; }
92 | .offset-by-six.column,
93 | .offset-by-six.columns { margin-left: 52%; }
94 | .offset-by-seven.column,
95 | .offset-by-seven.columns { margin-left: 60.6666666667%; }
96 | .offset-by-eight.column,
97 | .offset-by-eight.columns { margin-left: 69.3333333333%; }
98 | .offset-by-nine.column,
99 | .offset-by-nine.columns { margin-left: 78.0%; }
100 | .offset-by-ten.column,
101 | .offset-by-ten.columns { margin-left: 86.6666666667%; }
102 | .offset-by-eleven.column,
103 | .offset-by-eleven.columns { margin-left: 95.3333333333%; }
104 |
105 | .offset-by-one-third.column,
106 | .offset-by-one-third.columns { margin-left: 34.6666666667%; }
107 | .offset-by-two-thirds.column,
108 | .offset-by-two-thirds.columns { margin-left: 69.3333333333%; }
109 |
110 | .offset-by-one-half.column,
111 | .offset-by-one-half.columns { margin-left: 52%; }
112 |
113 | }
114 |
115 |
116 | /* Base Styles
117 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
118 | /* NOTE
119 | html is set to 62.5% so that all the REM measurements throughout Skeleton
120 | are based on 10px sizing. So basically 1.5rem = 15px :) */
121 | html {
122 | font-size: 62.5%; }
123 | body {
124 | font-size: 1.5em; /* currently ems cause chrome bug misinterpreting rems on body element */
125 | line-height: 1.6;
126 | font-weight: 400;
127 | font-family: "Raleway", "HelveticaNeue", "Helvetica Neue", Helvetica, Arial, sans-serif;
128 | color: #222; }
129 |
130 |
131 | /* Typography
132 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
133 | h1, h2, h3, h4, h5, h6 {
134 | margin-top: 0;
135 | margin-bottom: 2rem;
136 | font-weight: 300; }
137 | h1 { font-size: 4.0rem; line-height: 1.2; letter-spacing: -.1rem;}
138 | h2 { font-size: 3.6rem; line-height: 1.25; letter-spacing: -.1rem; }
139 | h3 { font-size: 3.0rem; line-height: 1.3; letter-spacing: -.1rem; }
140 | h4 { font-size: 2.4rem; line-height: 1.35; letter-spacing: -.08rem; }
141 | h5 { font-size: 1.8rem; line-height: 1.5; letter-spacing: -.05rem; }
142 | h6 { font-size: 1.5rem; line-height: 1.6; letter-spacing: 0; }
143 |
144 | /* Larger than phablet */
145 | @media (min-width: 550px) {
146 | h1 { font-size: 5.0rem; }
147 | h2 { font-size: 4.2rem; }
148 | h3 { font-size: 3.6rem; }
149 | h4 { font-size: 3.0rem; }
150 | h5 { font-size: 2.4rem; }
151 | h6 { font-size: 1.5rem; }
152 | }
153 |
154 | p {
155 | margin-top: 0; }
156 |
157 |
158 | /* Links
159 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
160 | a {
161 | color: #1EAEDB; }
162 | a:hover {
163 | color: #0FA0CE; }
164 |
165 |
166 | /* Buttons
167 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
168 | .button,
169 | button,
170 | input[type="submit"],
171 | input[type="reset"],
172 | input[type="button"] {
173 | display: inline-block;
174 | height: 38px;
175 | padding: 0 30px;
176 | color: #555;
177 | text-align: center;
178 | font-size: 11px;
179 | font-weight: 600;
180 | line-height: 38px;
181 | letter-spacing: .1rem;
182 | text-transform: uppercase;
183 | text-decoration: none;
184 | white-space: nowrap;
185 | background-color: transparent;
186 | border-radius: 4px;
187 | border: 1px solid #bbb;
188 | cursor: pointer;
189 | box-sizing: border-box; }
190 | .button:hover,
191 | button:hover,
192 | input[type="submit"]:hover,
193 | input[type="reset"]:hover,
194 | input[type="button"]:hover,
195 | .button:focus,
196 | button:focus,
197 | input[type="submit"]:focus,
198 | input[type="reset"]:focus,
199 | input[type="button"]:focus {
200 | color: #333;
201 | border-color: #888;
202 | outline: 0; }
203 | .button.button-primary,
204 | button.button-primary,
205 | input[type="submit"].button-primary,
206 | input[type="reset"].button-primary,
207 | input[type="button"].button-primary {
208 | color: #FFF;
209 | background-color: #33C3F0;
210 | border-color: #33C3F0; }
211 | .button.button-primary:hover,
212 | button.button-primary:hover,
213 | input[type="submit"].button-primary:hover,
214 | input[type="reset"].button-primary:hover,
215 | input[type="button"].button-primary:hover,
216 | .button.button-primary:focus,
217 | button.button-primary:focus,
218 | input[type="submit"].button-primary:focus,
219 | input[type="reset"].button-primary:focus,
220 | input[type="button"].button-primary:focus {
221 | color: #FFF;
222 | background-color: #1EAEDB;
223 | border-color: #1EAEDB; }
224 |
225 |
226 | /* Forms
227 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
228 | input[type="email"],
229 | input[type="number"],
230 | input[type="search"],
231 | input[type="text"],
232 | input[type="tel"],
233 | input[type="url"],
234 | input[type="password"],
235 | textarea,
236 | select {
237 | height: 38px;
238 | padding: 6px 10px; /* The 6px vertically centers text on FF, ignored by Webkit */
239 | background-color: #fff;
240 | border: 1px solid #D1D1D1;
241 | border-radius: 4px;
242 | box-shadow: none;
243 | box-sizing: border-box; }
244 | /* Removes awkward default styles on some inputs for iOS */
245 | input[type="email"],
246 | input[type="number"],
247 | input[type="search"],
248 | input[type="text"],
249 | input[type="tel"],
250 | input[type="url"],
251 | input[type="password"],
252 | textarea {
253 | -webkit-appearance: none;
254 | -moz-appearance: none;
255 | appearance: none; }
256 | textarea {
257 | min-height: 65px;
258 | padding-top: 6px;
259 | padding-bottom: 6px; }
260 | input[type="email"]:focus,
261 | input[type="number"]:focus,
262 | input[type="search"]:focus,
263 | input[type="text"]:focus,
264 | input[type="tel"]:focus,
265 | input[type="url"]:focus,
266 | input[type="password"]:focus,
267 | textarea:focus,
268 | select:focus {
269 | border: 1px solid #33C3F0;
270 | outline: 0; }
271 | label,
272 | legend {
273 | display: block;
274 | margin-bottom: .5rem;
275 | font-weight: 600; }
276 | fieldset {
277 | padding: 0;
278 | border-width: 0; }
279 | input[type="checkbox"],
280 | input[type="radio"] {
281 | display: inline; }
282 | label > .label-body {
283 | display: inline-block;
284 | margin-left: .5rem;
285 | font-weight: normal; }
286 |
287 |
288 | /* Lists
289 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
290 | ul {
291 | list-style: circle inside; }
292 | ol {
293 | list-style: decimal inside; }
294 | ol, ul {
295 | padding-left: 0;
296 | margin-top: 0; }
297 | ul ul,
298 | ul ol,
299 | ol ol,
300 | ol ul {
301 | margin: 1.5rem 0 1.5rem 3rem;
302 | font-size: 90%; }
303 | li {
304 | margin-bottom: 1rem; }
305 |
306 |
307 | /* Code
308 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
309 | code {
310 | padding: .2rem .5rem;
311 | margin: 0 .2rem;
312 | font-size: 90%;
313 | white-space: nowrap;
314 | background: #F1F1F1;
315 | border: 1px solid #E1E1E1;
316 | border-radius: 4px; }
317 | pre > code {
318 | display: block;
319 | padding: 1rem 1.5rem;
320 | white-space: pre; }
321 |
322 |
323 | /* Tables
324 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
325 | th,
326 | td {
327 | padding: 12px 15px;
328 | text-align: left;
329 | border-bottom: 1px solid #E1E1E1; }
330 | th:first-child,
331 | td:first-child {
332 | padding-left: 0; }
333 | th:last-child,
334 | td:last-child {
335 | padding-right: 0; }
336 |
337 |
338 | /* Spacing
339 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
340 | button,
341 | .button {
342 | margin-bottom: 1rem; }
343 | input,
344 | textarea,
345 | select,
346 | fieldset {
347 | margin-bottom: 1.5rem; }
348 | pre,
349 | blockquote,
350 | dl,
351 | figure,
352 | table,
353 | p,
354 | ul,
355 | ol,
356 | form {
357 | margin-bottom: 2.5rem; }
358 |
359 |
360 | /* Utilities
361 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
362 | .u-full-width {
363 | width: 100%;
364 | box-sizing: border-box; }
365 | .u-max-full-width {
366 | max-width: 100%;
367 | box-sizing: border-box; }
368 | .u-pull-right {
369 | float: right; }
370 | .u-pull-left {
371 | float: left; }
372 |
373 |
374 | /* Misc
375 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
376 | hr {
377 | margin-top: 3rem;
378 | margin-bottom: 3.5rem;
379 | border-width: 0;
380 | border-top: 1px solid #E1E1E1; }
381 |
382 |
383 | /* Clearing
384 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
385 |
386 | /* Self Clearing Goodness */
387 | .container:after,
388 | .row:after,
389 | .u-cf {
390 | content: "";
391 | display: table;
392 | clear: both; }
393 |
394 |
395 | /* Media Queries
396 | –––––––––––––––––––––––––––––––––––––––––––––––––– */
397 | /*
398 | Note: The best way to structure the use of media queries is to create the queries
399 | near the relevant code. For example, if you wanted to change the styles for buttons
400 | on small devices, paste the mobile query code up in the buttons section and style it
401 | there.
402 | */
403 |
404 |
405 | /* Larger than mobile */
406 | @media (min-width: 400px) {}
407 |
408 | /* Larger than phablet (also point when grid becomes active) */
409 | @media (min-width: 550px) {}
410 |
411 | /* Larger than tablet */
412 | @media (min-width: 750px) {}
413 |
414 | /* Larger than desktop */
415 | @media (min-width: 1000px) {}
416 |
417 | /* Larger than Desktop HD */
418 | @media (min-width: 1200px) {}
419 |
--------------------------------------------------------------------------------
/debrepo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import collections
4 | import mimetypes
5 | import gzip
6 | import bz2
7 | import json
8 | import tarfile
9 | import subprocess
10 | import re
11 | import os
12 | import tempfile
13 | import StringIO
14 | import hashlib
15 | import time
16 | import datetime
17 | import email
18 |
19 |
20 | def file_checksum(file_name, checksum_type):
21 | h = hashlib.new(checksum_type)
22 | with open(file_name, "rb") as f:
23 | for chunk in iter(lambda: f.read(4096), b""):
24 | h.update(chunk)
25 |
26 | return h.hexdigest()
27 |
28 |
29 | def rfc_2822_now_str():
30 | nowdt = datetime.datetime.now()
31 | nowtuple = nowdt.timetuple()
32 | nowtimestamp = time.mktime(nowtuple)
33 | return email.utils.formatdate(nowtimestamp)
34 |
35 |
36 | def gzip_string(data):
37 | out = StringIO.StringIO()
38 | with gzip.GzipFile(fileobj=out, mode="w") as fobj:
39 | fobj.write(data)
40 | return out.getvalue()
41 |
42 |
43 | def bz2_string(data):
44 | buf = bytearray(data, 'utf-8')
45 | return bz2.compress(buf)
46 |
47 |
48 | def gpg_sign_string(data, keyname=None, inline=False):
49 | cmd = "gpg --armor --digest-algo SHA256"
50 |
51 | if inline:
52 | cmd += " --clearsign"
53 | else:
54 | cmd += " --detach-sign"
55 |
56 | if keyname is not None:
57 | cmd += " --default-key='%s'" % keyname
58 |
59 | proc = subprocess.Popen(cmd,
60 | shell=True,
61 | stdout=subprocess.PIPE,
62 | stdin=subprocess.PIPE,
63 | stderr=subprocess.STDOUT)
64 | stdout = proc.communicate(input=data)[0]
65 |
66 | if proc.returncode != 0:
67 | raise RuntimeError("Failed to sign file: %s" % stdout)
68 |
69 | return stdout
70 |
71 |
72 | class Package(object):
73 |
74 | def __init__(self, component='main', arch='amd64'):
75 | self.component = component
76 | self.arch = arch
77 | self.fields = collections.OrderedDict()
78 |
79 | def parse_deb(self, debfile):
80 | cmd = 'ar -p ' + debfile + ' control.tar.gz |' + \
81 | 'tar -xzf - --to-stdout ./control'
82 |
83 | control = subprocess.check_output(cmd, shell=True)
84 | self.parse_string(control.strip())
85 |
86 | def parse_string(self, data):
87 | key = None
88 | value = None
89 |
90 | result = collections.OrderedDict()
91 | for line in data.strip().split('\n'):
92 | if line.startswith(" "):
93 | if value:
94 | value = '%s\n%s' % (value, line)
95 | else:
96 | value = line
97 | else:
98 | if key:
99 | result[key] = value.strip()
100 | key, value = line.split(':', 1)
101 | if key:
102 | result[key] = value.strip()
103 |
104 | self.fields = result
105 |
106 | def dump_string(self):
107 | result = []
108 | for key in self.fields:
109 | result.append('%s: %s' % (key, self.fields[key]))
110 |
111 | return "\n".join(result)
112 |
113 | def __getitem__(self, key):
114 | return self.fields[key]
115 |
116 | def __setitem__(self, key, value):
117 | self.fields[key] = value
118 |
119 | def __hash__(self):
120 | return hash((self.fields['Package'],
121 | self.fields['Version'],
122 | self.fields['Architecture']))
123 |
124 | def __eq__(self, other):
125 | return ((self.fields['Package'],
126 | self.fields['Version'],
127 | self.fields['Architecture']) ==
128 | (other.fields['Package'],
129 | other.fields['Version'],
130 | other.fields['Architecture']))
131 |
132 | def __ne__(self, other):
133 | return not(self == other)
134 |
135 |
136 | class PackageList(object):
137 |
138 | def __init__(self, component='main', arch='x86_64'):
139 | self.component = component
140 | self.arch = arch
141 | self.packages = set()
142 |
143 | def parse_string(self, data):
144 | packages = set()
145 | for entry in data.strip().split('\n\n'):
146 | if entry.strip() == "":
147 | continue
148 | pkg = Package(component=self.component,
149 | arch=self.arch)
150 | pkg.parse_string(entry)
151 | packages.add(pkg)
152 |
153 | self.packages = packages
154 |
155 | def add_deb_file(self, filename, relative_path):
156 | pass
157 |
158 | def parse_gzip_file(self, filename):
159 | with gzip.open(filename) as f:
160 | self.parse_string(f.read())
161 |
162 | def parse_plain_file(self, filename):
163 | with open(filename) as f:
164 | self.parse_string(f.read())
165 |
166 | def parse_file(self, filename):
167 | filetype = mimetypes.guess_type(filename)
168 | if filetype[1] is None:
169 | self.parse_plain_file(filename)
170 | elif filetype[1] == 'gzip':
171 | self.parse_gzip_file(filename)
172 | else:
173 | raise RuntimeError("Unsupported Packages type: '%s'" % filetype[1])
174 |
175 | def dump_string(self):
176 | result = []
177 |
178 | for pkg in self.packages:
179 | result.append(pkg.dump_string())
180 |
181 | return '\n\n'.join(result) + '\n'
182 |
183 |
184 | class Release(object):
185 |
186 | def __init__(self, codename=None, origin=None, suite=None):
187 | self.fields = collections.OrderedDict()
188 |
189 | if codename:
190 | self['Codename'] = codename
191 | if origin:
192 | self['Origin'] = origin
193 | if suite:
194 | self['Suite'] = suite
195 |
196 | def __getitem__(self, key):
197 | return self.fields[key]
198 |
199 | def __setitem__(self, key, value):
200 | self.fields[key] = value
201 |
202 | def parse_string(self, data):
203 | key = None
204 | value = None
205 |
206 | result = collections.OrderedDict()
207 | for line in data.strip().split('\n'):
208 | if line.startswith(" "):
209 | if value:
210 | value = '%s\n%s' % (value, line)
211 | else:
212 | value = line
213 | else:
214 | if key:
215 | result[key] = value.strip()
216 | key, value = line.split(':', 1)
217 | if key:
218 | result[key] = value.strip()
219 |
220 | self.fields = result
221 |
222 | def parse_plain_file(self, filename):
223 | with open(filename) as f:
224 | self.parse_string(f.read().strip())
225 |
226 | def parse_inplace_file(self, filename):
227 | raise NotImplementedError()
228 |
229 | def parse_file(self, filename):
230 | if filename.lower() == 'inrelease':
231 | self.parse_inplace_file(filename)
232 | else:
233 | self.parse_plain_file(filename)
234 |
235 | def dump_string(self):
236 | result = []
237 | for key in self.fields:
238 | result.append('%s: %s' % (key, self.fields[key]))
239 |
240 | return "\n".join(result) + '\n'
241 |
242 |
243 | def split_pkg_path(pkg_path):
244 |
245 | # We assume that the DEB file name has the following format, where the
246 | # <dist> and <suffix> parts are optional:
247 | # <name>_<version>.<dist>-<suffix>_<arch>.deb
248 | expr = r'^(?P<name>[^_]+)_(?P<version>[0-9]+\.[0-9]+\.[0-9]+\-[0-9]+)(\.(?P<dist>[^\-]+))?([\-]?(?P<suffix>[^_]+))?_(?P<arch>[^\.]+)\.deb$'
249 | match = re.match(expr, pkg_path)
250 |
251 | if not match:
252 | return None
253 |
254 | component = 'main'
255 |
256 | dist = match.group('dist')
257 | if dist is None:
258 | dist = 'all'
259 | arch = match.group('arch')
260 | if arch is None:
261 | arch = 'all'
262 |
263 | return (dist, component, arch)
264 |
265 |
266 | def update_repo(storage, sign, tempdir):
267 | dists = set()
268 | package_lists = collections.defaultdict(PackageList)
269 |
270 | expr = r'^dists/([^/]*)/Release$'
271 | for file_path in storage.files('dists'):
272 | match = re.match(expr, file_path)
273 |
274 | if not match:
275 | continue
276 |
277 | dist = match.group(1)
278 | dists.add(dist)
279 |
280 | release = Release()
281 | release.parse_string(storage.read_file('dists/%s/Release' % dist))
282 |
283 | components = release['Components'].split(' ')
284 | architectures = release['Architectures'].split(' ')
285 |
286 | for component in components:
287 | for arch in architectures:
288 | subdir = 'source' if arch == 'source' else 'binary-%s' % arch
289 |
290 | package_list = PackageList()
291 | package_list.parse_string(
292 | storage.read_file('dists/%s/%s/%s/Packages' %
293 | (dist, component, subdir)))
294 |
295 | package_lists[(dist, component, arch)] = package_list
296 |
297 | mtimes = {}
298 | for package_list in package_lists.itervalues():
299 | for package in package_list.packages:
300 | if 'FileTime' in package.fields:
301 | mtimes[package['Filename'].lstrip(
302 | '/')] = float(package['FileTime'])
303 |
304 | tmpdir = tempfile.mkdtemp('', 'tmp', tempdir)
305 |
306 | expr = r'^.*\.deb$'
307 | for file_path in storage.files('pool'):
308 | file_path = file_path.lstrip('/')
309 |
310 | match = re.match(expr, file_path)
311 |
312 | if not match:
313 | continue
314 |
315 | components = split_pkg_path(file_path)
316 | if not components:
317 | print("Failed to parse file name: '%s'" % file_path)
318 | continue
319 | dist, _, _ = components
320 | dists.add(dist)
321 |
322 | mtime = storage.mtime(file_path)
323 |
324 | if file_path in mtimes:
325 | if mtime == mtimes[file_path]:
326 | print "Skipping: '%s'" % file_path
327 | continue
328 | print "Updating: '%s'" % file_path
329 | else:
330 | print "Adding: '%s'" % file_path
331 |
332 | storage.download_file(file_path, os.path.join(tmpdir, 'package.deb'))
333 |
334 | package = Package()
335 | local_file = os.path.join(tmpdir, 'package.deb')
336 | package.parse_deb(local_file)
337 | package['Filename'] = file_path
338 | package['Size'] = os.path.getsize(local_file)
339 | package['FileTime'] = mtime
340 |
341 | checksum_names = {'md5': 'MD5Sum', 'sha1': 'SHA1', 'sha256': 'SHA256'}
342 | for checksum_type in ['md5', 'sha1', 'sha256']:
343 | checksum = file_checksum(local_file, checksum_type)
344 | checksum_name = checksum_names[checksum_type]
345 | package[checksum_name] = checksum
346 |
347 | packages = package_lists[components].packages
348 |
349 | if package in packages:
350 | packages.remove(package)
351 | packages.add(package)
352 |
353 | checksums = collections.defaultdict(dict)
354 | sizes = collections.defaultdict(dict)
355 | components = collections.defaultdict(set)
356 | architectures = collections.defaultdict(set)
357 |
358 | for key in package_lists.iterkeys():
359 | dist, component, arch = key
360 | subdir = 'source' if arch == 'source' else 'binary-%s' % arch
361 |
362 | components[dist].add(component)
363 | architectures[dist].add(arch)
364 |
365 | package_list = package_lists[key]
366 |
367 | prefix = 'dists/%s/' % dist
368 |
369 | pkg_file_path = '%s/%s/Packages' % (component, subdir)
370 | pkg_file = package_list.dump_string()
371 |
372 | pkg_file_gzip_path = '%s/%s/Packages.gz' % (component, subdir)
373 | pkg_file_gzip = gzip_string(pkg_file)
374 |
375 | pkg_file_bz2_path = '%s/%s/Packages.bz2' % (component, subdir)
376 | pkg_file_bz2 = bz2_string(pkg_file)
377 |
378 | storage.write_file(prefix + pkg_file_path, pkg_file)
379 | storage.write_file(prefix + pkg_file_gzip_path, pkg_file_gzip)
380 | storage.write_file(prefix + pkg_file_bz2_path, pkg_file_bz2)
381 |
382 | for path in [pkg_file_path, pkg_file_gzip_path, pkg_file_bz2_path]:
383 | data = storage.read_file(prefix + path)
384 | sizes[dist][path] = len(data)
385 |
386 | for checksum_type in ['md5', 'sha1', 'sha256']:
387 | h = hashlib.new(checksum_type)
388 | h.update(data)
389 |
390 | checksums[dist][(checksum_type, path)] = h.hexdigest()
391 |
392 | creation_date = rfc_2822_now_str()
393 |
394 | for dist in dists:
395 | release = Release()
396 |
397 | release['Origin'] = 'Repo generator'
398 | release['Label'] = 'Repo generator'
399 | release['Codename'] = dist
400 | release['Date'] = creation_date
401 | release['Architectures'] = ' '.join(architectures[dist])
402 | release['Components'] = ' '.join(components[dist])
403 | release['Description'] = 'Repo generator'
404 |
405 | checksum_lines = collections.defaultdict(list)
406 | checksum_names = {'md5': 'MD5Sum', 'sha1': 'SHA1', 'sha256': 'SHA256'}
407 | for checksum_key, checksum_value in checksums[dist].iteritems():
408 | checksum_type, path = checksum_key
409 |
410 | file_size = sizes[dist][path]
411 | checksum_name = checksum_names[checksum_type]
412 |
413 | line = ' %s %s %s' % (checksum_value, file_size, path)
414 | checksum_lines[checksum_name].append(line)
415 |
416 | for checksum_name in checksum_lines.keys():
417 | release[checksum_name] = \
418 | '\n' + '\n'.join(checksum_lines[checksum_name])
419 |
420 | release_str = release.dump_string()
421 | storage.write_file('dists/%s/Release' % dist, release_str)
422 |
423 | if sign:
424 | release_str_signature = gpg_sign_string(release_str)
425 | release_str_inline = gpg_sign_string(release_str, inline=True)
426 | storage.write_file('dists/%s/Release.gpg' %
427 | dist, release_str_signature)
428 | storage.write_file('dists/%s/InRelease' % dist, release_str_inline)
429 |
--------------------------------------------------------------------------------
/rpmfile.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import struct
4 | import sys
5 |
6 | RPM_MAGIC = 0xedabeedb
7 | RPM_VER_MIN = (3, 0)
8 |
9 | RPM_HEADER_HEADER_MAGIC = 0x8eade8
10 |
11 | OLD_STYLE_HEADER_SIZE = 96
12 |
13 | RPMSENSE_ANY = 0
14 | RPMSENSE_LESS = 1 << 1
15 | RPMSENSE_GREATER = 1 << 2
16 | RPMSENSE_EQUAL = 1 << 3
17 | RPMSENSE_SENSEMASK = 0x0e
18 | RPMSENSE_NOTEQUAL = RPMSENSE_EQUAL ^ RPMSENSE_SENSEMASK
19 |
20 | RPMSENSE_PROVIDES = (1 << 4)
21 | RPMSENSE_CONFLICTS = (1 << 5)
22 | RPMSENSE_OBSOLETES = (1 << 7)
23 | RPMSENSE_INTERP = (1 << 8)
24 | RPMSENSE_SCRIPT_PRE = ((1 << 9) | RPMSENSE_ANY)
25 | RPMSENSE_SCRIPT_POST = ((1 << 10) | RPMSENSE_ANY)
26 | RPMSENSE_SCRIPT_PREUN = ((1 << 11) | RPMSENSE_ANY)
27 | RPMSENSE_SCRIPT_POSTUN = ((1 << 12) | RPMSENSE_ANY)
28 | RPMSENSE_SCRIPT_VERIFY = (1 << 13)
29 | RPMSENSE_FIND_REQUIRES = (1 << 14)
30 | RPMSENSE_FIND_PROVIDES = (1 << 15)
31 | RPMSENSE_TRIGGERIN = (1 << 16)
32 | RPMSENSE_TRIGGERUN = (1 << 17)
33 | RPMSENSE_TRIGGERPOSTUN = (1 << 18)
34 | RPMSENSE_MISSINGOK = (1 << 19)
35 | RPMSENSE_SCRIPT_PREP = (1 << 20)
36 | RPMSENSE_SCRIPT_BUILD = (1 << 21)
37 | RPMSENSE_SCRIPT_INSTALL = (1 << 22)
38 | RPMSENSE_SCRIPT_CLEAN = (1 << 23)
39 | RPMSENSE_RPMLIB = ((1 << 24) | RPMSENSE_ANY)
40 | RPMSENSE_TRIGGERPREIN = (1 << 25)
41 | RPMSENSE_KEYRING = (1 << 26)
42 | RPMSENSE_PATCHES = (1 << 27)
43 | RPMSENSE_CONFIG = (1 << 28)
44 |
45 |
46 | def flags_to_str(flags):
47 | flags = flags & RPMSENSE_SENSEMASK
48 | # exact combinations are checked before the single-bit tests, otherwise LE/GE would be misreported as LT/GT
49 | if flags == 0:
50 | return None
51 | elif flags == RPMSENSE_NOTEQUAL:
52 | return "NE"
53 | elif flags == RPMSENSE_EQUAL:
54 | return "EQ"
55 | elif flags == (RPMSENSE_LESS | RPMSENSE_EQUAL):
56 | return "LE"
57 | elif flags == (RPMSENSE_GREATER | RPMSENSE_EQUAL):
58 | return "GE"
59 | elif flags & RPMSENSE_LESS:
60 | return "LT"
61 | elif flags & RPMSENSE_GREATER:
62 | return "GT"
63 | else:
64 | raise RuntimeError("Unknown flags: %d" % flags)
65 |
66 |
67 | SIGNATURE_TAG_TABLE = {
68 | 1000: "SIG_SIZE",
69 | 1001: "LEMD5_1",
70 | 1002: "PGP",
71 | 1003: "LEMD5_2",
72 | 1004: "MD5",
73 | 1005: "GPG",
74 | 1006: "PGP5",
75 | 1007: "PAYLOADSIZE",
76 | 264: "BADSHA1_1",
77 | 265: "BADSHA1_2",
78 | 269: "SHA1",
79 | 267: "DSA",
80 | 268: "RSA",
81 | 270: "SIG_LONGSIZE",
82 | 271: "SIG_LONGARCHIVESIZE"
83 | }
84 |
85 | HEADER_TAG_TABLE = {
86 | 61: "HEADERIMAGE",
87 | 62: "HEADERSIGNATURES",
88 | 63: "HEADERIMMUTABLE",
89 | 64: "HEADERREGIONS",
90 | 100: "HEADERI18NTABLE",
91 | 256: "SIG_BASE",
92 | 257: "SIGSIZE",
93 | 258: "SIGLEMD5_1",
94 | 259: "SIGPGP",
95 | 260: "SIGLEMD5_2",
96 | 261: "SIGMD5",
97 | 262: "SIGGPG",
98 | 263: "SIGPGP5",
99 | 264: "BADSHA1_1",
100 | 265: "BADSHA1_2",
101 | 266: "PUBKEYS",
102 | 267: "DSAHEADER",
103 | 268: "RSAHEADER",
104 | 269: "SHA1HEADER",
105 | 270: "LONGSIGSIZE",
106 | 271: "LONGARCHIVESIZE",
107 | 1000: "NAME",
108 | 1001: "VERSION",
109 | 1002: "RELEASE",
110 | 1003: "EPOCH",
111 | 1004: "SUMMARY",
112 | 1005: "DESCRIPTION",
113 | 1006: "BUILDTIME",
114 | 1007: "BUILDHOST",
115 | 1008: "INSTALLTIME",
116 | 1009: "SIZE",
117 | 1010: "DISTRIBUTION",
118 | 1011: "VENDOR",
119 | 1012: "GIF",
120 | 1013: "XPM",
121 | 1014: "LICENSE",
122 | 1015: "PACKAGER",
123 | 1016: "GROUP",
124 | 1017: "CHANGELOG",
125 | 1018: "SOURCE",
126 | 1019: "PATCH",
127 | 1020: "URL",
128 | 1021: "OS",
129 | 1022: "ARCH",
130 | 1023: "PREIN",
131 | 1024: "POSTIN",
132 | 1025: "PREUN",
133 | 1026: "POSTUN",
134 | 1027: "OLDFILENAMES",
135 | 1028: "FILESIZES",
136 | 1029: "FILESTATES",
137 | 1030: "FILEMODES",
138 | 1031: "FILEUIDS",
139 | 1032: "FILEGIDS",
140 | 1033: "FILERDEVS",
141 | 1034: "FILEMTIMES",
142 | 1035: "FILEDIGESTS",
143 | 1036: "FILELINKTOS",
144 | 1037: "FILEFLAGS",
145 | 1038: "ROOT",
146 | 1039: "FILEUSERNAME",
147 | 1040: "FILEGROUPNAME",
148 | 1041: "EXCLUDE",
149 | 1042: "EXCLUSIVE",
150 | 1043: "ICON",
151 | 1044: "SOURCERPM",
152 | 1045: "FILEVERIFYFLAGS",
153 | 1046: "ARCHIVESIZE",
154 | 1047: "PROVIDENAME",
155 | 1048: "REQUIREFLAGS",
156 | 1049: "REQUIRENAME",
157 | 1050: "REQUIREVERSION",
158 | 1051: "NOSOURCE",
159 | 1052: "NOPATCH",
160 | 1053: "CONFLICTFLAGS",
161 | 1054: "CONFLICTNAME",
162 | 1055: "CONFLICTVERSION",
163 | 1056: "DEFAULTPREFIX",
164 | 1057: "BUILDROOT",
165 | 1058: "INSTALLPREFIX",
166 | 1059: "EXCLUDEARCH",
167 | 1060: "EXCLUDEOS",
168 | 1061: "EXCLUSIVEARCH",
169 | 1062: "EXCLUSIVEOS",
170 | 1063: "AUTOREQPROV",
171 | 1064: "RPMVERSION",
172 | 1065: "TRIGGERSCRIPTS",
173 | 1066: "TRIGGERNAME",
174 | 1067: "TRIGGERVERSION",
175 | 1068: "TRIGGERFLAGS",
176 | 1069: "TRIGGERINDEX",
177 | 1079: "VERIFYSCRIPT",
178 | 1080: "CHANGELOGTIME",
179 | 1081: "CHANGELOGNAME",
180 | 1082: "CHANGELOGTEXT",
181 | 1083: "BROKENMD5",
182 | 1084: "PREREQ",
183 | 1085: "PREINPROG",
184 | 1086: "POSTINPROG",
185 | 1087: "PREUNPROG",
186 | 1088: "POSTUNPROG",
187 | 1089: "BUILDARCHS",
188 | 1090: "OBSOLETENAME",
189 | 1091: "VERIFYSCRIPTPROG",
190 | 1092: "TRIGGERSCRIPTPROG",
191 | 1093: "DOCDIR",
192 | 1094: "COOKIE",
193 | 1095: "FILEDEVICES",
194 | 1096: "FILEINODES",
195 | 1097: "FILELANGS",
196 | 1098: "PREFIXES",
197 | 1099: "INSTPREFIXES",
198 | 1100: "TRIGGERIN",
199 | 1101: "TRIGGERUN",
200 | 1102: "TRIGGERPOSTUN",
201 | 1103: "AUTOREQ",
202 | 1104: "AUTOPROV",
203 | 1105: "CAPABILITY",
204 | 1106: "SOURCEPACKAGE",
205 | 1107: "OLDORIGFILENAMES",
206 | 1108: "BUILDPREREQ",
207 | 1109: "BUILDREQUIRES",
208 | 1110: "BUILDCONFLICTS",
209 | 1111: "BUILDMACROS",
210 | 1112: "PROVIDEFLAGS",
211 | 1113: "PROVIDEVERSION",
212 | 1114: "OBSOLETEFLAGS",
213 | 1115: "OBSOLETEVERSION",
214 | 1116: "DIRINDEXES",
215 | 1117: "BASENAMES",
216 | 1118: "DIRNAMES",
217 | 1119: "ORIGDIRINDEXES",
218 | 1120: "ORIGBASENAMES",
219 | 1121: "ORIGDIRNAMES",
220 | 1122: "OPTFLAGS",
221 | 1123: "DISTURL",
222 | 1124: "PAYLOADFORMAT",
223 | 1125: "PAYLOADCOMPRESSOR",
224 | 1126: "PAYLOADFLAGS",
225 | 1127: "INSTALLCOLOR",
226 | 1128: "INSTALLTID",
227 | 1129: "REMOVETID",
228 | 1130: "SHA1RHN",
229 | 1131: "RHNPLATFORM",
230 | 1132: "PLATFORM",
231 | 1133: "PATCHESNAME",
232 | 1134: "PATCHESFLAGS",
233 | 1135: "PATCHESVERSION",
234 | 1136: "CACHECTIME",
235 | 1137: "CACHEPKGPATH",
236 | 1138: "CACHEPKGSIZE",
237 | 1139: "CACHEPKGMTIME",
238 | 1140: "FILECOLORS",
239 | 1141: "FILECLASS",
240 | 1142: "CLASSDICT",
241 | 1143: "FILEDEPENDSX",
242 | 1144: "FILEDEPENDSN",
243 | 1145: "DEPENDSDICT",
244 | 1146: "SOURCEPKGID",
245 | 1147: "FILECONTEXTS",
246 | 1148: "FSCONTEXTS",
247 | 1149: "RECONTEXTS",
248 | 1150: "POLICIES",
249 | 1151: "PRETRANS",
250 | 1152: "POSTTRANS",
251 | 1153: "PRETRANSPROG",
252 | 1154: "POSTTRANSPROG",
253 | 1155: "DISTTAG",
254 | 1156: "SUGGESTSNAME",
255 | 1157: "SUGGESTSVERSION",
256 | 1158: "SUGGESTSFLAGS",
257 | 1159: "ENHANCESNAME",
258 | 1160: "ENHANCESVERSION",
259 | 1161: "ENHANCESFLAGS",
260 | 1162: "PRIORITY",
261 | 1163: "CVSID",
262 | 1164: "BLINKPKGID",
263 | 1165: "BLINKHDRID",
264 | 1166: "BLINKNEVRA",
265 | 1167: "FLINKPKGID",
266 | 1168: "FLINKHDRID",
267 | 1169: "FLINKNEVRA",
268 | 1170: "PACKAGEORIGIN",
269 | 1171: "TRIGGERPREIN",
270 | 1172: "BUILDSUGGESTS",
271 | 1173: "BUILDENHANCES",
272 | 1174: "SCRIPTSTATES",
273 | 1175: "SCRIPTMETRICS",
274 | 1176: "BUILDCPUCLOCK",
275 | 1177: "FILEDIGESTALGOS",
276 | 1178: "VARIANTS",
277 | 1179: "XMAJOR",
278 | 1180: "XMINOR",
279 | 1181: "REPOTAG",
280 | 1182: "KEYWORDS",
281 | 1183: "BUILDPLATFORMS",
282 | 1184: "PACKAGECOLOR",
283 | 1185: "PACKAGEPREFCOLOR",
284 | 1186: "XATTRSDICT",
285 | 1187: "FILEXATTRSX",
286 | 1188: "DEPATTRSDICT",
287 | 1189: "CONFLICTATTRSX",
288 | 1190: "OBSOLETEATTRSX",
289 | 1191: "PROVIDEATTRSX",
290 | 1192: "REQUIREATTRSX",
291 | 1193: "BUILDPROVIDES",
292 | 1194: "BUILDOBSOLETES",
293 | 1195: "DBINSTANCE",
294 | 1196: "NVRA",
295 | 5000: "FILENAMES",
296 | 5001: "FILEPROVIDE",
297 | 5002: "FILEREQUIRE",
298 | 5003: "FSNAMES",
299 | 5004: "FSSIZES",
300 | 5005: "TRIGGERCONDS",
301 | 5006: "TRIGGERTYPE",
302 | 5007: "ORIGFILENAMES",
303 | 5008: "LONGFILESIZES",
304 | 5009: "LONGSIZE",
305 | 5010: "FILECAPS",
306 | 5011: "FILEDIGESTALGO",
307 | 5012: "BUGURL",
308 | 5013: "EVR",
309 | 5014: "NVR",
310 | 5015: "NEVR",
311 | 5016: "NEVRA",
312 | 5017: "HEADERCOLOR",
313 | 5018: "VERBOSE",
314 | 5019: "EPOCHNUM",
315 | 5020: "PREINFLAGS",
316 | 5021: "POSTINFLAGS",
317 | 5022: "PREUNFLAGS",
318 | 5023: "POSTUNFLAGS",
319 | 5024: "PRETRANSFLAGS",
320 | 5025: "POSTTRANSFLAGS",
321 | 5026: "VERIFYSCRIPTFLAGS",
322 | 5027: "TRIGGERSCRIPTFLAGS",
323 | 5029: "COLLECTIONS",
324 | 5030: "POLICYNAMES",
325 | 5031: "POLICYTYPES",
326 | 5032: "POLICYTYPESINDEXES",
327 | 5033: "POLICYFLAGS",
328 | 5034: "VCS",
329 | 5035: "ORDERNAME",
330 | 5036: "ORDERVERSION",
331 | 5037: "ORDERFLAGS",
332 | 5038: "MSSFMANIFEST",
333 | 5039: "MSSFDOMAIN",
334 | 5040: "INSTFILENAMES",
335 | 5041: "REQUIRENEVRS",
336 | 5042: "PROVIDENEVRS",
337 | 5043: "OBSOLETENEVRS",
338 | 5044: "CONFLICTNEVRS",
339 | 5045: "FILENLINKS"
340 | }
341 |
342 |
343 | class RpmInfo(object):
344 |
345 | def _read_header_header(self, f):
346 | magic = struct.unpack('>I', '\x00' + f.read(3))[0]
347 | if magic != RPM_HEADER_HEADER_MAGIC:
348 | raise RuntimeError("Wrong header header magic: '%s'" % hex(magic))
349 |
350 | ver, reserved, num_index_entries, num_data_bytes = \
351 | struct.unpack('>BIII', f.read(13))
352 |
353 | return num_index_entries, num_data_bytes
354 |
355 | def _read_index_entry(self, f):
356 | tag, type, offset, count = \
357 | struct.unpack('>IIII', f.read(16))
358 |
359 | return tag, type, offset, count
360 |
361 | def _read_store(self, f, tag_table, index_entries, num_index_bytes):
362 | current_offset = f.tell()
363 |
364 | result = {}
365 |
366 | for entry in index_entries:
367 | tag, type, offset, count = entry
368 | f.seek(current_offset + offset)
369 |
370 | value = None
371 | if type == 0:
372 | pass
373 | elif type == 1:
374 | value = []
375 | for _ in range(count):
376 | value.append(struct.unpack('>c', f.read(1))[0])
377 | if len(value) == 1:
378 | value = value[0]
379 | elif type == 2:
380 | value = []
381 | for _ in range(count):
382 | value.append(struct.unpack('>b', f.read(1))[0])
383 | if len(value) == 1:
384 | value = value[0]
385 | elif type == 3:
386 | value = []
387 | for _ in range(count):
388 | value.append(struct.unpack('>h', f.read(2))[0])
389 | if len(value) == 1:
390 | value = value[0]
391 | elif type == 4:
392 | value = []
393 | for _ in range(count):
394 | value.append(struct.unpack('>I', f.read(4))[0])
395 | if len(value) == 1:
396 | value = value[0]
397 | elif type == 5:
398 | value = []
399 | for _ in range(count):
400 | value.append(struct.unpack('>q', f.read(8))[0])
401 | if len(value) == 1:
402 | value = value[0]
403 | elif type == 6:
404 | char = None
405 | string = ""
406 | while True:
407 | char = f.read(1)
408 | if char == '\x00':
409 | break
410 | string += char
411 | value = string
412 | elif type == 7:
413 | value = struct.unpack('>%ds' % count, f.read(count))[0]
414 | elif type == 8:
415 | stringlist = []
416 | for i in range(count):
417 | char = None
418 | string = ""
419 | while True:
420 | char = f.read(1)
421 | if char == '\x00':
422 | break
423 | string += char
424 | stringlist.append(string)
425 | value = stringlist
426 |
427 | if tag in tag_table:
428 | result[tag_table[tag]] = value
429 |
430 | addr = current_offset + num_index_bytes
431 | # align to 8-byte boundary
432 | addr = (addr + (8 - 1)) & -8
433 | f.seek(addr)
434 | return result
435 |
436 | def parse_header(self, f, tag_table):
437 | num_index_entries, num_index_bytes = self._read_header_header(f)
438 |
439 | index_entries = []
440 | for i in range(num_index_entries):
441 | index_entries.append(
442 | self._read_index_entry(f))
443 |
444 | data = self._read_store(f, tag_table, index_entries, num_index_bytes)
445 |
446 | return data
447 |
448 | def parse_file(self, filename):
449 | with open(filename, 'rb') as f:
450 | magic = struct.unpack('>I', f.read(4))[0]
451 | if magic != RPM_MAGIC:
452 | raise RuntimeError("Not an RPM file: '%s'" % filename)
453 |
454 | ver_major, ver_minor = struct.unpack('>BB', f.read(2))
455 |
456 | if (ver_major, ver_minor) < RPM_VER_MIN:
457 | raise RuntimeError(("RPM file version '%d.%d' is less than " +
458 | "minimum supported version '%d.%d'") %
459 | ((ver_major, ver_minor) + RPM_VER_MIN))
460 |
461 | f.seek(OLD_STYLE_HEADER_SIZE) # size of old-style header
462 |
463 | signature = self.parse_header(f, SIGNATURE_TAG_TABLE)
464 |
465 | self.header_start = f.tell()
466 | header = self.parse_header(f, HEADER_TAG_TABLE)
467 | self.header_end = f.tell()
468 |
469 | header.update(signature)
470 | return header
471 |
472 |
473 | def main():
474 | i = RpmInfo()
475 | data = i.parse_file(sys.argv[1])
476 | for key, value in data.items():
477 | print("%s: %s" % (key, str(value)))
478 |
479 |
480 | if __name__ == '__main__':
481 | main()
482 |
--------------------------------------------------------------------------------
/rpmrepo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import sys
5 | import re
6 |
7 | import subprocess
8 | import tempfile
9 | import shutil
10 | import storage
11 | import gzip
12 | import StringIO
13 | import rpmfile
14 | import hashlib
15 | import json
16 | import itertools
17 |
18 | import datetime
19 | import time
20 |
21 | from xml.sax.saxutils import escape
22 |
23 | try:
24 | import xml.etree.cElementTree as ET
25 | except ImportError:
26 | import xml.etree.ElementTree as ET
27 |
28 |
29 | def gzip_string(data):
30 | out = StringIO.StringIO()
31 | with gzip.GzipFile(fileobj=out, mode="w") as fobj:
32 | fobj.write(data)
33 | return out.getvalue()
34 |
35 |
36 | def gunzip_string(data):
37 | fobj = StringIO.StringIO(data)
38 | decompressed = gzip.GzipFile(fileobj=fobj)
39 |
40 | return decompressed.read()
41 |
42 |
43 | def file_checksum(file_name, checksum_type):
44 | h = hashlib.new(checksum_type)
45 | with open(file_name, "rb") as f:
46 | for chunk in iter(lambda: f.read(4096), b""):
47 | h.update(chunk)
48 | return h.hexdigest()
49 |
50 |
51 | def string_checksum(data, checksum_type):
52 | fobj = StringIO.StringIO(data)
53 | h = hashlib.new(checksum_type)
54 | for chunk in iter(lambda: fobj.read(4096), b""):
55 | h.update(chunk)
56 |
57 | return h.hexdigest()
58 |
59 |
60 | def gpg_sign_string(data, keyname=None, inline=False):
61 | cmd = "gpg --armor --digest-algo SHA256"
62 |
63 | if inline:
64 | cmd += " --clearsign"
65 | else:
66 | cmd += " --detach-sign"
67 |
68 | if keyname is not None:
69 | cmd += " --default-key='%s'" % keyname
70 |
71 | proc = subprocess.Popen(cmd,
72 | shell=True,
73 | stdout=subprocess.PIPE,
74 | stdin=subprocess.PIPE,
75 | stderr=subprocess.STDOUT)
76 | stdout = proc.communicate(input=data)[0]
77 |
78 | if proc.returncode != 0:
79 | raise RuntimeError("Failed to sign file: %s" % stdout)
80 |
81 | return stdout
82 |
83 |
84 | def sign_metadata(repomdfile):
85 |     """Requires a proper ~/.rpmmacros file."""
86 |     cmd = ["gpg", "--detach-sign", "--armor", "--digest-algo", "SHA256", repomdfile]
87 |     try:
88 |         subprocess.check_call(cmd)
89 |         print("Successfully signed repository metadata file")
90 |     except subprocess.CalledProcessError:
91 |         print("Unable to sign repository metadata '%s'" % repomdfile)
92 |         sys.exit(1)
93 |
94 |
95 | def setup_repository(repo, tempdir):
96 | """Make sure a repo is present at repopath"""
97 | if repo._grab.storage.exists("repodata/repomd.xml"):
98 | return
99 |
100 | tmpdir = tempfile.mkdtemp('', 'tmp', tempdir)
101 | cmd = ['createrepo', '--no-database', tmpdir]
102 | subprocess.check_output(cmd)
103 | repo._grab.syncdir(os.path.join(tmpdir, "repodata"), "repodata")
104 | shutil.rmtree(tmpdir)
105 |
106 |
107 | def parse_repomd(data):
108 | root = ET.fromstring(data)
109 | namespaces = {'repo': 'http://linux.duke.edu/metadata/repo'}
110 |
111 | filelists = None
112 | primary = None
113 |
114 | revision = root.find('repo:revision', namespaces).text
115 |
116 | for child in root:
117 | if 'type' not in child.attrib:
118 | continue
119 |
120 | result = {}
121 | for key in ['checksum', 'open-checksum',
122 | 'timestamp', 'size', 'open-size']:
123 | result[key] = child.find('repo:' + key, namespaces).text
124 | result['location'] = child.find(
125 | 'repo:location', namespaces).attrib['href']
126 |
127 | if child.attrib['type'] == 'filelists':
128 | filelists = result
129 | elif child.attrib['type'] == 'primary':
130 | primary = result
131 |
132 | return filelists, primary, revision
133 |
134 |
135 | def parse_filelists(data):
136 | root = ET.fromstring(data)
137 | namespaces = {'filelists': 'http://linux.duke.edu/metadata/filelists'}
138 |
139 | packages = {}
140 |
141 | for child in root:
142 | if not child.tag.endswith('}package'):
143 | continue
144 |
145 | pkgid = child.attrib['pkgid']
146 | name = child.attrib['name']
147 | arch = child.attrib['arch']
148 | version = child.find('filelists:version', namespaces)
149 |
150 | version = {'ver': version.attrib['ver'],
151 | 'rel': version.attrib['rel'],
152 | 'epoch': version.attrib.get('epoch', '0')}
153 |
154 | files = []
155 | for node in child.findall('filelists:file', namespaces):
156 | file_name = node.text
157 | file_type = 'file'
158 |
159 | if 'type' in node.attrib and node.attrib['type'] == 'dir':
160 | file_type = 'dir'
161 | files.append({'type': file_type, 'name': file_name})
162 |
163 | package = {'pkgid': pkgid, 'name': name, 'arch': arch,
164 | 'version': version, 'files': files}
165 | nerv = (name, version['epoch'], version['rel'], version['ver'])
166 | packages[nerv] = package
167 |
168 | return packages
169 |
170 |
171 | def dump_filelists(filelists):
172 | res = ""
173 |
174 | res += '<?xml version="1.0" encoding="UTF-8"?>\n'
175 | res += '<filelists xmlns="http://linux.duke.edu/metadata/filelists" packages="%s">\n' % len(
176 | filelists)
177 |
178 | for package in filelists.values():
179 | res += '<package pkgid="%s" name="%s" arch="%s">\n' % (
180 | package['pkgid'], package['name'], package['arch'])
181 |
182 | ver = package['version']
183 |
184 | components = 'epoch="%s" ver="%s" rel="%s"' % (
185 | ver['epoch'], ver['ver'], ver['rel'])
186 |
187 | res += '  <version %s/>\n' % components
188 |
189 | for fileentry in package['files']:
190 | if fileentry['type'] == 'file':
191 | res += '  <file>%s</file>\n' % fileentry['name']
192 | else:
193 | res += '  <file type="dir">%s</file>\n' % fileentry['name']
194 |
195 | res += '</package>\n'
196 |
197 | res += "</filelists>\n"
198 |
199 | return res
200 |
201 |
202 | def parse_primary(data):
203 | root = ET.fromstring(data)
204 | namespaces = {'primary': 'http://linux.duke.edu/metadata/common',
205 | 'rpm': 'http://linux.duke.edu/metadata/rpm'}
206 |
207 | packages = {}
208 |
209 | for child in root:
210 | if not child.tag.endswith('}package'):
211 | continue
212 |
213 | checksum = child.find('primary:checksum', namespaces).text
214 | name = child.find('primary:name', namespaces).text
215 | arch = child.find('primary:arch', namespaces).text
216 | summary = child.find('primary:summary', namespaces).text
217 | description = child.find('primary:description', namespaces).text
218 | packager = child.find('primary:packager', namespaces).text
219 | url = child.find('primary:url', namespaces).text
220 | time = child.find('primary:time', namespaces)
221 | file_time = time.attrib['file']
222 | build_time = time.attrib['build']
223 | size = child.find('primary:size', namespaces)
224 | package_size = size.attrib['package']
225 | installed_size = size.attrib['installed']
226 | archive_size = size.attrib['archive']
227 | location = child.find('primary:location', namespaces).attrib['href']
228 |
229 | version = child.find('primary:version', namespaces)
230 | version = {'ver': version.attrib['ver'],
231 | 'rel': version.attrib['rel'],
232 | 'epoch': version.attrib.get('epoch', '0')}
233 |
234 | # format
235 | fmt = child.find('primary:format', namespaces)
236 |
237 | format_license = fmt.find('rpm:license', namespaces).text
238 | vendor = fmt.find('rpm:vendor', namespaces)
239 | format_vendor = vendor.text if vendor is not None else ""  # ET elements without children are falsy, so test against None
240 | format_group = fmt.find('rpm:group', namespaces).text
241 | format_buildhost = fmt.find('rpm:buildhost', namespaces).text
242 | format_sourcerpm = fmt.find('rpm:sourcerpm', namespaces).text
243 | header_range = fmt.find('rpm:header-range', namespaces)
244 | format_header_start = header_range.attrib['start']
245 | format_header_end = header_range.attrib['end']
246 |
247 | # provides
248 |
249 | provides = fmt.find('rpm:provides', namespaces)
250 | if provides is None:
251 | provides = []
252 |
253 | provides_dict = {}
254 |
255 | for entry in provides:
256 | provides_name = entry.attrib['name']
257 | provides_epoch = entry.attrib.get('epoch', None)
258 | provides_rel = entry.attrib.get('rel', None)
259 | provides_ver = entry.attrib.get('ver', None)
260 | provides_flags = entry.attrib.get('flags', None)
261 |
262 | nerv = (provides_name, provides_epoch, provides_rel, provides_ver)
263 |
264 | provides_dict[nerv] = {'name': provides_name,
265 | 'epoch': provides_epoch,
266 | 'rel': provides_rel,
267 | 'ver': provides_ver,
268 | 'flags': provides_flags}
269 |
270 | # requires
271 |
272 | requires = fmt.find('rpm:requires', namespaces)
273 | if requires is None:
274 | requires = []
275 |
276 | requires_dict = {}
277 |
278 | for entry in requires:
279 | requires_name = entry.attrib['name']
280 | requires_epoch = entry.attrib.get('epoch', None)
281 | requires_rel = entry.attrib.get('rel', None)
282 | requires_ver = entry.attrib.get('ver', None)
283 | requires_flags = entry.attrib.get('flags', None)
284 | requires_pre = entry.attrib.get('pre', None)
285 |
286 | nerv = (requires_name, requires_epoch, requires_rel, requires_ver)
287 |
288 | requires_dict[nerv] = {'name': requires_name,
289 | 'epoch': requires_epoch,
290 | 'rel': requires_rel,
291 | 'ver': requires_ver,
292 | 'flags': requires_flags,
293 | 'pre': requires_pre}
294 |
295 | # obsoletes
296 |
297 | obsoletes = fmt.find('rpm:obsoletes', namespaces)
298 | if obsoletes is None:
299 | obsoletes = []
300 |
301 | obsoletes_dict = {}
302 |
303 | for entry in obsoletes:
304 | obsoletes_name = entry.attrib['name']
305 | obsoletes_epoch = entry.attrib.get('epoch', None)
306 | obsoletes_rel = entry.attrib.get('rel', None)
307 | obsoletes_ver = entry.attrib.get('ver', None)
308 | obsoletes_flags = entry.attrib.get('flags', None)
309 |
310 | nerv = (obsoletes_name, obsoletes_epoch,
311 | obsoletes_rel, obsoletes_ver)
312 |
313 | obsoletes_dict[nerv] = {'name': obsoletes_name,
314 | 'epoch': obsoletes_epoch,
315 | 'rel': obsoletes_rel,
316 | 'ver': obsoletes_ver,
317 | 'flags': obsoletes_flags}
318 |
319 | # files
320 | files = []
321 | for node in fmt.findall('primary:file', namespaces):
322 | file_name = node.text
323 | file_type = 'file'
324 |
325 | if 'type' in node.attrib and node.attrib['type'] == 'dir':
326 | file_type = 'dir'
327 | files.append({'type': file_type, 'name': file_name})
328 |
329 | # result package
330 | format_dict = {'license': format_license,
331 | 'vendor': format_vendor,
332 | 'group': format_group,
333 | 'buildhost': format_buildhost,
334 | 'sourcerpm': format_sourcerpm,
335 | 'header_start': format_header_start,
336 | 'header_end': format_header_end,
337 | 'provides': provides_dict,
338 | 'requires': requires_dict,
339 | 'obsoletes': obsoletes_dict,
340 | 'files': files}
341 |
342 | package = {
343 | 'checksum': checksum,
344 | 'name': name,
345 | 'arch': arch,
346 | 'version': version,
347 | 'summary': summary,
348 | 'description': description,
349 | 'packager': packager,
350 | 'url': url,
351 | 'file_time': file_time,
352 | 'build_time': build_time,
353 | 'package_size': package_size,
354 | 'installed_size': installed_size,
355 | 'archive_size': archive_size,
356 | 'location': location,
357 | 'format': format_dict}
358 |
359 | nerv = (name, version['epoch'], version['rel'], version['ver'])
360 | packages[nerv] = package
361 | return packages
362 |
363 |
364 | def dump_primary(primary):
365 | res = ""
366 |
367 | res += '<?xml version="1.0" encoding="UTF-8"?>\n'
368 | res += '<metadata xmlns="http://linux.duke.edu/metadata/common" xmlns:rpm="http://linux.duke.edu/metadata/rpm" packages="%s">\n' % len(
369 | primary)
370 |
371 | for package in primary.values():
372 | res += '<package type="rpm">\n'
373 | res += '  <name>%s</name>\n' % package['name']
374 | res += '  <arch>%s</arch>\n' % package['arch']
375 |
376 | ver = package['version']
377 | components = 'epoch="%s" ver="%s" rel="%s"' % (
378 | ver['epoch'], ver['ver'], ver['rel'])
379 |
380 | res += '  <version %s/>\n' % components
381 |
382 | res += '  <checksum type="sha256" pkgid="YES">%s</checksum>\n' % \
383 | package['checksum']
384 |
385 | res += '  <summary>%s</summary>\n' % escape(package['summary'] or '')
386 | res += '  <description>%s</description>\n' % escape(
387 | package['description'] or '')
388 | res += '  <packager>%s</packager>\n' % escape(
389 | package['packager'] or '')
390 |
391 | res += '  <url>%s</url>\n' % (package['url'] or '')
392 | res += '  <time file="%s" build="%s"/>\n' % (package['file_time'],
393 | package['build_time'])
394 | res += '  <size package="%s" installed="%s" archive="%s"/>\n' % (
395 | package['package_size'],
396 | package['installed_size'],
397 | package['archive_size']
398 | )
399 | res += '  <location href="%s"/>\n' % package['location']
400 |
401 | fmt = package['format']
402 |
403 | res += '  <format>\n'
404 |
405 | res += '    <rpm:license>%s</rpm:license>\n' % fmt['license']
406 |
407 | if fmt['vendor']:
408 | res += '    <rpm:vendor>%s</rpm:vendor>\n' % escape(fmt['vendor'])
409 |
410 | res += '    <rpm:group>%s</rpm:group>\n' % (fmt['group'] or '')
411 | res += '    <rpm:buildhost>%s</rpm:buildhost>\n' % fmt['buildhost']
412 | res += '    <rpm:sourcerpm>%s</rpm:sourcerpm>\n' % fmt['sourcerpm']
413 |
414 | res += '    <rpm:header-range start="%s" end="%s"/>\n' % (
415 | fmt['header_start'], fmt['header_end'])
416 |
417 | res += '    <rpm:provides>\n'
418 |
419 | for key in sorted(fmt['provides']):
420 | provides = fmt['provides'][key]
421 | entry = ['name="%s"' % provides['name']]
422 | for component in ['flags', 'epoch', 'ver', 'rel']:
423 | if provides[component] is not None:
424 | entry.append('%s="%s"' % (component, provides[component]))
425 |
426 | res += '      <rpm:entry ' + ' '.join(entry) + '/>\n'
427 |
428 | res += '    </rpm:provides>\n'
429 |
430 | res += '    <rpm:requires>\n'
431 |
432 | for key in sorted(fmt['requires']):
433 | requires = fmt['requires'][key]
434 | entry = ['name="%s"' % requires['name']]
435 | for component in ['flags', 'epoch', 'ver', 'rel', 'pre']:
436 | if requires[component] is not None:
437 | entry.append('%s="%s"' % (component, requires[component]))
438 |
439 | res += '      <rpm:entry ' + ' '.join(entry) + '/>\n'
440 |
441 | res += '    </rpm:requires>\n'
442 |
443 | res += '    <rpm:obsoletes>\n'
444 |
445 | for key in sorted(fmt['obsoletes']):
446 | obsoletes = fmt['obsoletes'][key]
447 | entry = ['name="%s"' % obsoletes['name']]
448 | for component in ['flags', 'epoch', 'ver', 'rel']:
449 | if obsoletes[component] is not None:
450 | entry.append('%s="%s"' % (component, obsoletes[component]))
451 |
452 | res += '      <rpm:entry ' + ' '.join(entry) + '/>\n'
453 |
454 | res += '    </rpm:obsoletes>\n'
455 |
456 | res += '  </format>\n'
457 | res += '</package>\n'
458 |
459 | res += "</metadata>\n"
460 |
461 | return res
462 |
463 |
464 | def parse_ver_str(ver_str):
465 | if not ver_str:
466 | return (None, None, None)
467 |
468 | expr = r'^(\d+:)?([^-]*)(-[^-]*)?$'
469 | match = re.match(expr, ver_str)
470 | if not match:
471 | raise RuntimeError("Can't parse version: '%s'" % ver_str)
472 | epoch = match.group(1)[:-1] if match.group(1) else "0"
473 | ver = match.group(2)
474 | rel = match.group(3)[1:] if match.group(3) else None
475 | return (epoch, ver, rel)
476 |
477 |
478 | def header_to_filelists(header, sha256):
479 | pkgid = sha256
480 | name = header['NAME']
481 | arch = header['ARCH']
482 | epoch = header.get('EPOCH', '0')
483 | rel = header.get('RELEASE', None)
484 | ver = header['VERSION']
485 | version = {'ver': ver, 'rel': rel, 'epoch': epoch}
486 |
487 | dirnames = header.get('DIRNAMES', [])
488 | if not isinstance(dirnames, list):
489 | dirnames = [dirnames]
490 | classdict = header.get('CLASSDICT', [])
491 | if not isinstance(classdict, list):
492 | classdict = [classdict]
493 | basenames = header.get('BASENAMES', [])
494 | if not isinstance(basenames, list):
495 | basenames = [basenames]
496 | dirindexes = header.get('DIRINDEXES', [])
497 | if not isinstance(dirindexes, list):
498 | dirindexes = [dirindexes]
499 | fileclasses = header.get('FILECLASS', [])
500 | if not isinstance(fileclasses, list):
501 | fileclasses = [fileclasses]
502 |
503 | files = []
504 |
505 | for entry in zip(basenames, dirindexes, fileclasses):
506 | filename = entry[0]
507 | dirname = dirnames[entry[1]]
508 |
509 | fileclass = classdict[entry[2]]
510 |
511 | filetype = "file"
512 |
513 | if fileclass == "directory":
514 | filetype = "dir"
515 |
516 | files.append({'name': dirname + filename, 'type': filetype})
517 |
518 | for dirname in dirnames:
519 | files.append({'name': dirname, 'type': 'dir'})
520 |
521 | package = {'pkgid': pkgid, 'name': name, 'arch': arch,
522 | 'version': version, 'files': files}
523 | nerv = (name, version['epoch'], version['rel'], version['ver'])
524 |
525 | return nerv, package
526 |
527 |
528 | def header_to_primary(
529 | header,
530 | sha256,
531 | mtime,
532 | location,
533 | header_start,
534 | header_end,
535 | size):
536 | name = header['NAME']
537 | arch = header.get('ARCH', '')
538 | summary = header.get('SUMMARY', '')
539 | description = header.get('DESCRIPTION', '')
540 | packager = header.get('PACKAGER', None)
541 | build_time = header.get('BUILDTIME', '')
542 | url = header.get('URL', '')
543 | epoch = header.get('EPOCH', '0')
544 | rel = header.get('RELEASE', None)
545 | ver = header.get('VERSION', '')
546 | version = {'ver': ver, 'rel': rel, 'epoch': epoch}
547 |
548 | package_size = size
549 | installed_size = header['SIZE']
550 | archive_size = header['PAYLOADSIZE']
551 |
552 | # format
553 |
554 | format_license = header.get('LICENSE', None)
555 | format_vendor = header.get('VENDOR', None)
556 | format_group = header.get('GROUP', None)
557 | format_buildhost = header.get('BUILDHOST', None)
558 | format_sourcerpm = header.get('SOURCERPM', None)
559 | format_header_start = header_start
560 | format_header_end = header_end
561 |
562 | # provides
563 |
564 | provides_dict = {}
565 | providename = header.get('PROVIDENAME', [])
566 | provideversion = header.get('PROVIDEVERSION', [])
567 | provideflags = header.get('PROVIDEFLAGS', [])
568 |
569 | if not isinstance(provideflags, list):
570 | provideflags = [provideflags]
571 |
572 | for entry in zip(providename, provideversion, provideflags):
573 | provides_name = entry[0]
574 | provides_epoch, provides_ver, provides_rel = \
575 | parse_ver_str(entry[1])
576 | provides_flags = rpmfile.flags_to_str(entry[2])
577 |
578 | nerv = (provides_name, provides_epoch, provides_rel, provides_ver)
579 |
580 | provides_dict[nerv] = {'name': provides_name,
581 | 'epoch': provides_epoch,
582 | 'rel': provides_rel,
583 | 'ver': provides_ver,
584 | 'flags': provides_flags}
585 |
586 | # requires
587 |
588 | requires_dict = {}
589 | requirename = header.get('REQUIRENAME', [])
590 | requireversion = header.get('REQUIREVERSION', [])
591 | requireflags = header.get('REQUIREFLAGS', [])
592 | if not isinstance(requireflags, list): requireflags = [requireflags]  # normalize like provides/obsoletes
593 | for entry in zip(requirename, requireversion, requireflags):
594 | requires_name = entry[0]
595 | requires_epoch, requires_ver, requires_rel = \
596 | parse_ver_str(entry[1])
597 | requires_flags = rpmfile.flags_to_str(entry[2])
598 |
599 | if entry[2] & rpmfile.RPMSENSE_RPMLIB:
600 | continue
601 |
602 | pre = None
603 |
604 | if entry[2] & 4352:  # magic bitmask of scriptlet-dependency flags -> mark as pre="1"
605 | pre = "1"
606 |
607 | nerv = (requires_name, requires_epoch, requires_rel, requires_ver)
608 |
609 | requires_dict[nerv] = {'name': requires_name,
610 | 'epoch': requires_epoch,
611 | 'rel': requires_rel,
612 | 'ver': requires_ver,
613 | 'flags': requires_flags,
614 | "pre": pre}
615 |
616 | # obsoletes
617 |
618 | obsoletes_dict = {}
619 | obsoletename = header.get('OBSOLETENAME', [])
620 | obsoleteversion = header.get('OBSOLETEVERSION', [])
621 | obsoleteflags = header.get('OBSOLETEFLAGS', [])
622 |
623 | if not isinstance(obsoleteflags, list):
624 | obsoleteflags = [obsoleteflags]
625 |
626 | for entry in zip(obsoletename, obsoleteversion, obsoleteflags):
627 | obsoletes_name = entry[0]
628 | obsoletes_epoch, obsoletes_ver, obsoletes_rel = \
629 | parse_ver_str(entry[1])
630 | obsoletes_flags = rpmfile.flags_to_str(entry[2])
631 |
632 | nerv = (obsoletes_name, obsoletes_epoch, obsoletes_rel, obsoletes_ver)
633 |
634 | obsoletes_dict[nerv] = {'name': obsoletes_name,
635 | 'epoch': obsoletes_epoch,
636 | 'rel': obsoletes_rel,
637 | 'ver': obsoletes_ver,
638 | 'flags': obsoletes_flags}
639 |
640 | # files
641 | dirnames = header.get('DIRNAMES', [])
642 | if not isinstance(dirnames, list):
643 | dirnames = [dirnames]
644 | basenames = header.get('BASENAMES', [])
645 | if not isinstance(basenames, list):
646 | basenames = [basenames]
647 | dirindexes = header.get('DIRINDEXES', [])
648 | if not isinstance(dirindexes, list):
649 | dirindexes = [dirindexes]
650 |
651 | files = []
652 | for entry in zip(basenames, dirindexes):
653 | filename = entry[0]
654 | dirname = dirnames[entry[1]]
655 | files.append({'name': dirname + filename, 'type': 'file'})
656 |
657 | for dirname in dirnames:
658 | files.append({'name': dirname, 'type': 'dir'})
659 |
660 | # result package
661 | format_dict = {'license': format_license,
662 | 'vendor': format_vendor,
663 | 'group': format_group,
664 | 'buildhost': format_buildhost,
665 | 'sourcerpm': format_sourcerpm,
666 | 'header_start': format_header_start,
667 | 'header_end': format_header_end,
668 | 'provides': provides_dict,
669 | 'requires': requires_dict,
670 | 'obsoletes': obsoletes_dict,
671 | 'files': files}
672 |
673 | package = {
674 | 'checksum': sha256,
675 | 'name': name,
676 | 'arch': arch,
677 | 'version': version,
678 | 'summary': summary,
679 | 'description': description,
680 | 'packager': packager,
681 | 'url': url,
682 | 'file_time': str(
683 | int(mtime)),
684 | 'build_time': build_time,
685 | 'package_size': package_size,
686 | 'installed_size': installed_size,
687 | 'archive_size': archive_size,
688 | 'location': location,
689 | 'format': format_dict}
690 |
691 | nerv = (name, version['epoch'], version['rel'], version['ver'])
692 |
693 | return nerv, package
694 |
695 |
696 | def generate_repomd(filelists_str, filelists_gz, primary_str, primary_gz, revision):
697 | filelists_str_sha256 = string_checksum(filelists_str, 'sha256')
698 | primary_str_sha256 = string_checksum(primary_str, 'sha256')
699 |
700 | filelists_gz_sha256 = string_checksum(filelists_gz, 'sha256')
701 | primary_gz_sha256 = string_checksum(primary_gz, 'sha256')
702 |
703 | filelists_name = 'repodata/%s-filelists.xml.gz' % filelists_gz_sha256
704 | primary_name = 'repodata/%s-primary.xml.gz' % primary_gz_sha256
705 |
706 | nowdt = datetime.datetime.now()
707 | nowtuple = nowdt.timetuple()
708 | nowtimestamp = time.mktime(nowtuple)
709 |
710 | res = ""
711 |
712 | res += '<?xml version="1.0" encoding="UTF-8"?>\n'
713 | res += '<repomd xmlns="http://linux.duke.edu/metadata/repo" xmlns:rpm="http://linux.duke.edu/metadata/rpm">\n'
714 |
715 | res += '  <revision>%s</revision>\n' % revision
716 |
717 | res += '  <data type="filelists">\n'
718 | res += '    <checksum type="sha256">%s</checksum>\n' % filelists_gz_sha256
719 | res += '    <open-checksum type="sha256">%s</open-checksum>\n' % filelists_str_sha256
720 | res += '    <location href="%s"/>\n' % filelists_name
721 | res += '    <timestamp>%s</timestamp>\n' % int(nowtimestamp)
722 | res += '    <size>%s</size>\n' % len(filelists_gz)
723 | res += '    <open-size>%s</open-size>\n' % len(filelists_str)
724 | res += '  </data>\n'
725 |
726 | res += '  <data type="primary">\n'
727 | res += '    <checksum type="sha256">%s</checksum>\n' % primary_gz_sha256
728 | res += '    <open-checksum type="sha256">%s</open-checksum>\n' % primary_str_sha256
729 | res += '    <location href="%s"/>\n' % primary_name
730 | res += '    <timestamp>%s</timestamp>\n' % int(nowtimestamp)
731 | res += '    <size>%s</size>\n' % len(primary_gz)
732 | res += '    <open-size>%s</open-size>\n' % len(primary_str)
733 | res += '  </data>\n'
734 |
735 | res += '</repomd>\n'
736 |
737 | return res
738 |
739 |
740 | def update_repo(storage, sign, tempdir):
741 | filelists = {}
742 | primary = {}
743 | revision = "0"
744 | initial_filelists = None
745 | initial_primary = None
746 |
747 | if storage.exists('repodata/repomd.xml'):
748 | data = storage.read_file('repodata/repomd.xml')
749 |
750 | filelists, primary, revision = parse_repomd(data)
751 |
752 | initial_filelists = filelists['location']
753 | data = storage.read_file(initial_filelists)
754 | filelists = parse_filelists(gunzip_string(data))
755 |
756 | initial_primary = primary['location']
757 | data = storage.read_file(initial_primary)
758 | primary = parse_primary(gunzip_string(data))
759 |
760 | recorded_files = set()
761 | for package in primary.values():
762 | recorded_files.add((package['location'], float(package['file_time'])))
763 |
764 | existing_files = set()
765 | expr = r'^.*\.rpm$'
766 | for file_path in storage.files('.'):
767 | match = re.match(expr, file_path)
768 |
769 | if not match:
770 | continue
771 |
772 | mtime = storage.mtime(file_path)
773 |
774 | existing_files.add((file_path, mtime))
775 |
776 | files_to_add = existing_files - recorded_files
777 |
778 | for file_to_add in files_to_add:
779 | file_path = file_to_add[0]
780 | mtime = file_to_add[1]
781 | print("Adding: '%s'" % file_path)
782 |
783 | tmpdir = tempfile.mkdtemp('', 'tmp', tempdir)
784 | storage.download_file(file_path, os.path.join(tmpdir, 'package.rpm'))
785 |
786 | rpminfo = rpmfile.RpmInfo()
787 | header = rpminfo.parse_file(os.path.join(tmpdir, 'package.rpm'))
788 | sha256 = file_checksum(os.path.join(tmpdir, 'package.rpm'), "sha256")
789 |
790 | statinfo = os.stat(os.path.join(tmpdir, 'package.rpm'))
791 | size = statinfo.st_size
792 |
793 | shutil.rmtree(tmpdir)
794 |
795 | nerv, prim = header_to_primary(header, sha256, mtime, file_path,
796 | rpminfo.header_start, rpminfo.header_end,
797 | size)
798 | _, flist = header_to_filelists(header, sha256)
799 |
800 | primary[nerv] = prim
801 | filelists[nerv] = flist
802 |
803 | revision = str(int(revision) + 1)
804 |
805 | filelists_str = dump_filelists(filelists)
806 | primary_str = dump_primary(primary)
807 | filelists_gz = gzip_string(filelists_str)
808 | primary_gz = gzip_string(primary_str)
809 |
810 | repomd_str = generate_repomd(filelists_str, filelists_gz,
811 | primary_str, primary_gz, revision)
812 |
813 | filelists_gz_sha256 = string_checksum(filelists_gz, 'sha256')
814 | primary_gz_sha256 = string_checksum(primary_gz, 'sha256')
815 | filelists_name = 'repodata/%s-filelists.xml.gz' % filelists_gz_sha256
816 | primary_name = 'repodata/%s-primary.xml.gz' % primary_gz_sha256
817 |
818 | storage.write_file(filelists_name, filelists_gz)
819 | storage.write_file(primary_name, primary_gz)
820 | storage.write_file('repodata/repomd.xml', repomd_str)
821 |
822 | if initial_filelists:
823 | storage.delete_file(initial_filelists)
824 | if initial_primary:
825 | storage.delete_file(initial_primary)
826 |
827 | if sign:
828 | repomd_str_signed = gpg_sign_string(repomd_str)
829 | storage.write_file('repodata/repomd.xml.asc', repomd_str_signed)
830 |
831 |
832 | def main():
833 | stor = storage.FilesystemStorage(sys.argv[1])
834 |
835 | update_repo(stor, sign=False, tempdir=None)  # match update_repo's signature; no signing by default
836 |
837 |
838 | if __name__ == '__main__':
839 | main()
840 |
--------------------------------------------------------------------------------