├── pdfzip
├── Truepolyglot
├── __init__.py
├── ZipFileTransformer
│ ├── __init__.py
│ ├── zip.py
│ └── zipfile.py
├── PdfFileTransformer
│ ├── __init__.py
│ └── pdf.py
└── PolyglotFile
│ ├── __init__.py
│ ├── polyglotzippdf.py
│ ├── polyglotzipany.py
│ ├── polyglotpdfany.py
│ ├── polyglotpdfzip.py
│ └── polyglotszippdf.py
├── .gitattributes
├── tests
├── samples
│ ├── test1.pdf
│ ├── test1.zip
│ ├── test1_normalized.pdf
│ └── descriptions.txt
├── test_pdf_rebuild.py
├── test_rebuild_zip.py
├── test_pdf_add_data.py
├── test_polyglot_pdfzip.py
├── test_zip.py
├── test_pdf_normalisation.py
└── pdfcat
├── .gitignore
├── setup.py
├── LICENSE
├── version.sh
├── truepolyglot
└── README.md
/pdfzip:
--------------------------------------------------------------------------------
1 | truepolyglot
--------------------------------------------------------------------------------
/Truepolyglot/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | version.sh export-subst
2 |
--------------------------------------------------------------------------------
/tests/samples/test1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansemjo/truepolyglot/HEAD/tests/samples/test1.pdf
--------------------------------------------------------------------------------
/tests/samples/test1.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansemjo/truepolyglot/HEAD/tests/samples/test1.zip
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | # virtualenv for testing
5 | venv/
--------------------------------------------------------------------------------
/Truepolyglot/ZipFileTransformer/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from .zip import Zip
4 | from .zipfile import *
5 |
--------------------------------------------------------------------------------
/tests/samples/test1_normalized.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ansemjo/truepolyglot/HEAD/tests/samples/test1_normalized.pdf
--------------------------------------------------------------------------------
/Truepolyglot/PdfFileTransformer/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from PyPDF2 import PdfFileReader, PdfFileWriter
4 | from .pdf import Pdf
--------------------------------------------------------------------------------
/tests/samples/descriptions.txt:
--------------------------------------------------------------------------------
1 | == Zip files ==
2 |
3 | test1.zip: deux fichiers et commentaire global.
4 |
5 | == Pdf files ==
6 |
7 | test1.pdf: fichier des impots.
--------------------------------------------------------------------------------
/Truepolyglot/PolyglotFile/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from .polyglotpdfzip import PolyglotPdfZip
4 | from .polyglotpdfany import PolyglotPdfAny
5 | from .polyglotzippdf import PolyglotZipPdf
6 | from .polyglotszippdf import PolyglotSZipPdf
7 | from .polyglotzipany import PolyglotZipAny
8 |
9 |
--------------------------------------------------------------------------------
/tests/test_pdf_rebuild.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""Rebuild a sample PDF with PdfFileTransformer and write the result to disk."""

import sys
sys.path.append("../")

from PdfFileTransformer import Pdf
import logging

input_file = "./samples/test1.pdf"
output_file = "./samples/test1_out.pdf"

logging.basicConfig(level=logging.DEBUG)


p = Pdf(input_file)
# Context manager guarantees the handle is closed even if write() raises
# (the original open()/close() pair leaked it on error).
with open(output_file, 'wb') as f:
    f.write(p.get_build_buffer())
--------------------------------------------------------------------------------
/tests/test_rebuild_zip.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""Copy every member of the sample zip into a new archive, prepending a
stored ' ' entry, to exercise ZipFileTransformer's vendored ZipFile."""

import sys
sys.path.append("../")

# Dropped the unused `tempfile` and `Zip` imports from the original script.
from ZipFileTransformer import ZipFile

input_file = "./samples/test1.zip"
output_file = "./samples/test1_out.zip"

zi = ZipFile(input_file, "r")
zo = ZipFile(output_file, "w")
# Third argument 0: the padding entry is written with compress type 0
# (presumably stored/uncompressed, matching the stdlib ZIP_STORED — confirm).
zo.writestr(' ', b'AAAAAAAAAAAAAAAAAAAAAA', 0)
for zipinfo in zi.infolist():
    zo.writestr(zipinfo, zi.read(zipinfo))
zi.close()
zo.close()
--------------------------------------------------------------------------------
/tests/test_pdf_add_data.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""Insert padding object streams at both ends of a sample PDF and save it."""

import sys
sys.path.append("../")

import logging
from PdfFileTransformer import Pdf


input_file = "./samples/test1.pdf"
output_file = "./samples/test1_out.pdf"

logging.basicConfig(level=logging.DEBUG)

p = Pdf(input_file)
p.insert_new_obj_stream_at_start(b'A' * 140)
p.insert_new_obj_stream_at_end(b'B' * 120)
# 'with' guarantees the output handle is closed even if write() raises.
with open(output_file, 'wb') as f:
    f.write(p.get_build_buffer())
--------------------------------------------------------------------------------
/tests/test_polyglot_pdfzip.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""Build a PDF/ZIP polyglot from the sample inputs and write it out."""

import sys
sys.path.append("../")

import logging

from PdfFileTransformer import Pdf
from ZipFileTransformer import Zip
from PolyglotFile import PolyglotPdfZip

input_file_pdf = "./samples/test1.pdf"
input_file_zip = "./samples/test1.zip"
output_file = "./samples/test1_out.pdf"

logging.basicConfig(level=logging.DEBUG)


pdf = Pdf(input_file_pdf)
zip_archive = Zip(input_file_zip)
polyglot = PolyglotPdfZip(pdf, zip_archive)
polyglot.generate()
polyglot.write(output_file)
--------------------------------------------------------------------------------
/tests/test_zip.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""Wrap the sample zip between two payloads via Zip.add_data_to_file."""

import sys
sys.path.append("../")

import os
import tempfile

from ZipFileTransformer import Zip

input_file = "./samples/test1.zip"
# mkstemp() replaces the deprecated, race-prone mktemp(); it returns an
# already-open descriptor that is reused for the output below.
fd, output_file = tempfile.mkstemp()
print("Output: " + output_file)

z = Zip(input_file)
a = bytearray(b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA')
b = bytearray(b'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB')
z.add_data_to_file(a, b, False)
# fdopen + 'with' closes the descriptor even if write() raises.
with os.fdopen(fd, "wb") as g:
    g.write(a + z.get_local_file_data() + b + z.get_data_after_central_directory())
--------------------------------------------------------------------------------
/tests/test_pdf_normalisation.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""Normalise a sample PDF through the vendored PyPDF2 reader/writer."""

import sys
sys.path.append("../")
import logging
from PdfFileTransformer.PyPDF2 import PdfFileReader, PdfFileWriter


input_file = "./samples/test1.pdf"
output_file = "./samples/test1_out.pdf"

logging.basicConfig(level=logging.DEBUG)

# Context managers close both handles even if the rewrite fails midway
# (the original closed them only on the success path).
with open(input_file, "rb") as f_input, open(output_file, "wb") as f_output:
    reader = PdfFileReader(f_input)
    writer = PdfFileWriter()
    writer.appendPagesFromReader(reader)
    writer.setHeader(b"%PDF-1.5")
    writer.write(f_output)
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

"""Packaging script: the version string is derived at build time by version.sh."""

from os import environ
from subprocess import check_output
from setuptools import setup, find_packages

# package metadata
PKG = "truepolyglot"
# NOTE: the misspelled 'SEPERATOR' name is the variable version.sh actually reads.
environ['REVISION_SEPERATOR'] = '.post'

setup(
    name=PKG,
    version=check_output(['sh', './version.sh', 'version']).strip().decode(),
    author="ben",
    author_email="truepolyglot@hackade.org",
    url="https://git.hackade.org/%s.git" % PKG,
    scripts=[PKG, "pdfzip"],
    packages=find_packages(),
    python_requires='>3',
    install_requires=["pypdf2 == 1.28.6"],
)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | This is free and unencumbered software released into the public domain.
2 |
3 | Anyone is free to copy, modify, publish, use, compile, sell, or
4 | distribute this software, either in source code form or as a compiled
5 | binary, for any purpose, commercial or non-commercial, and by any
6 | means.
7 |
8 | In jurisdictions that recognize copyright laws, the author or authors
9 | of this software dedicate any and all copyright interest in the
10 | software to the public domain. We make this dedication for the benefit
11 | of the public at large and to the detriment of our heirs and
12 | successors. We intend this dedication to be an overt act of
13 | relinquishment in perpetuity of all present and future rights to this
14 | software under copyright law.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 | OTHER DEALINGS IN THE SOFTWARE.
23 |
24 | For more information, please refer to
25 |
--------------------------------------------------------------------------------
/Truepolyglot/PolyglotFile/polyglotzippdf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | This is free and unencumbered software released into the public domain.
5 |
6 | Anyone is free to copy, modify, publish, use, compile, sell, or
7 | distribute this software, either in source code form or as a compiled
8 | binary, for any purpose, commercial or non-commercial, and by any
9 | means.
10 |
11 | In jurisdictions that recognize copyright laws, the author or authors
12 | of this software dedicate any and all copyright interest in the
13 | software to the public domain. We make this dedication for the benefit
14 | of the public at large and to the detriment of our heirs and
15 | successors. We intend this dedication to be an overt act of
16 | relinquishment in perpetuity of all present and future rights to this
17 | software under copyright law.
18 |
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 | OTHER DEALINGS IN THE SOFTWARE.
26 |
27 | For more information, please refer to
28 | """
29 |
30 | from .polyglotpdfzip import PolyglotPdfZip
31 |
32 |
33 | '''
34 | |-------------------------------| -
35 | |--------- PDF Header ----------K1 | J1
36 | |-------------------------------| -
37 | |----- PDF OBJ 1 = ZIP Data ----K2 |
38 | |-------------------------------| -
39 | |---- Original PDF Objects -----K3 |
40 | |-------------------------------| |
41 | |---------- Xref Table ---------| |
42 | |-------------------------------K4 | J2
43 | |----------- Trailer -----------| |
44 | |-------------------------------| -
45 | |-------- End Zip Data ---------| |
46 | |-------------------------------| |
47 | '''
48 |
49 |
class PolyglotZipPdf(PolyglotPdfZip):
    """Polyglot variant where the entire zip payload (up to end_of_data)
    rides inside the first PDF object stream and the zip structures wrap
    the rebuilt PDF buffer (see the layout diagram above)."""

    def generate(self):
        """Assemble the polyglot into self.buffer."""
        # K2: every zip byte up to the end of data becomes the first stream.
        zip_data = self.zip.buffer[:self.zip.end_of_data]
        self.pdf.insert_new_obj_stream_at_start(zip_data)

        start = self.pdf.get_first_stream_offset()
        end = start + len(zip_data)
        rebuilt = self.pdf.get_build_buffer()

        # J1 = PDF bytes before the injected stream, J2 = everything after it.
        self.zip.add_data_to_file(rebuilt[:start], rebuilt[end:], True)
        self.buffer = self.zip.buffer
--------------------------------------------------------------------------------
/Truepolyglot/PolyglotFile/polyglotzipany.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | This is free and unencumbered software released into the public domain.
5 |
6 | Anyone is free to copy, modify, publish, use, compile, sell, or
7 | distribute this software, either in source code form or as a compiled
8 | binary, for any purpose, commercial or non-commercial, and by any
9 | means.
10 |
11 | In jurisdictions that recognize copyright laws, the author or authors
12 | of this software dedicate any and all copyright interest in the
13 | software to the public domain. We make this dedication for the benefit
14 | of the public at large and to the detriment of our heirs and
15 | successors. We intend this dedication to be an overt act of
16 | relinquishment in perpetuity of all present and future rights to this
17 | software under copyright law.
18 |
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 | OTHER DEALINGS IN THE SOFTWARE.
26 |
27 | For more information, please refer to
28 | """
29 |
30 | import logging
31 |
32 | '''
33 | |-------------------------------| -
34 | |---------- Payload 1 ----------K1 | J1
35 | |-------------------------------| -
36 | |---- ZIP Local File Header ----K2 |
37 | |-------------------------------| -
38 | |---------- Payload 2-----------K3 | J2
39 | |-------------------------------| -
40 | |---- ZIP Central Directory ----K4 |
41 | |-------------------------------| |
42 | |--- End of Central Directory --K5 |
43 | |-------------------------------| |
44 | '''
45 |
46 |
class PolyglotZipAny():
    """Zip archive wrapped between two arbitrary payloads (J1 before the
    local file headers, J2 before the central directory — see diagram)."""
    from Truepolyglot.ZipFileTransformer import Zip

    def __init__(self, Zip, payload1filename, payload2filename):
        """Keep the Zip transformer and read the optional payload files.

        Either filename may be None, in which case that payload stays empty.
        """
        self.buffer = bytearray()
        self.zip = Zip
        self.payload1 = bytearray()
        if payload1filename is not None:
            with open(payload1filename, "rb") as f:
                self.payload1 = f.read()
        self.payload2 = bytearray()
        if payload2filename is not None:
            with open(payload2filename, "rb") as f:
                self.payload2 = f.read()

    def generate(self):
        """Interleave payload1 and payload2 around the zip data."""
        self.zip.add_data_to_file(self.payload1, self.payload2, True)
        self.buffer = self.zip.buffer

    def write(self, filename):
        """Write the generated polyglot buffer to *filename*."""
        # 'with' replaces the original open()/close() pair so the handle is
        # released even if write() raises.
        with open(filename, "wb") as fd:
            fd.write(self.buffer)
--------------------------------------------------------------------------------
/Truepolyglot/PolyglotFile/polyglotpdfany.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | This is free and unencumbered software released into the public domain.
5 |
6 | Anyone is free to copy, modify, publish, use, compile, sell, or
7 | distribute this software, either in source code form or as a compiled
8 | binary, for any purpose, commercial or non-commercial, and by any
9 | means.
10 |
11 | In jurisdictions that recognize copyright laws, the author or authors
12 | of this software dedicate any and all copyright interest in the
13 | software to the public domain. We make this dedication for the benefit
14 | of the public at large and to the detriment of our heirs and
15 | successors. We intend this dedication to be an overt act of
16 | relinquishment in perpetuity of all present and future rights to this
17 | software under copyright law.
18 |
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 | OTHER DEALINGS IN THE SOFTWARE.
26 |
27 | For more information, please refer to
28 | """
29 |
30 | import logging
31 |
32 | '''
33 | |-------------------------------| -
34 | |--------- PDF Header ----------K1 | J1
35 | |-------------------------------| -
36 | |---- PDF OBJ 1 = Payload 1 ----K2 |
37 | |-------------------------------| -
38 | |---- Original PDF Objects -----K3 | J2
39 | |-------------------------------| -
40 | |-- PDF Last OBJ = Payload 2 ---K4 |
41 | |-------------------------------| |
42 | |---------- Xref Table ---------| |
43 | |-------------------------------K5 |
44 | |----------- Trailer -----------| |
45 | |-------------------------------| |
46 | '''
47 |
48 |
class PolyglotPdfAny():
    """PDF carrying two arbitrary payloads as its first and last object
    streams (see the layout diagram above)."""
    from Truepolyglot.PdfFileTransformer import Pdf

    def __init__(self, Pdf, payload1filename, payload2filename):
        """Keep the Pdf transformer and read the optional payload files.

        Either filename may be None, in which case that payload stays empty.
        """
        self.buffer = bytearray()
        self.pdf = Pdf
        self.payload1 = bytearray()
        if payload1filename is not None:
            with open(payload1filename, "rb") as f:
                self.payload1 = f.read()
        self.payload2 = bytearray()
        if payload2filename is not None:
            with open(payload2filename, "rb") as f:
                self.payload2 = f.read()

    def generate(self):
        """Insert payload1 as the first object stream and payload2 as the
        last one; empty payloads are skipped."""
        k2stream = self.payload1
        if len(k2stream) > 0:
            self.pdf.insert_new_obj_stream_at_start(k2stream)
        k4stream = self.payload2
        if len(k4stream) > 0:
            self.pdf.insert_new_obj_stream_at_end(k4stream)
        self.buffer = self.pdf.get_build_buffer()

    def write(self, filename):
        """Write the generated polyglot buffer to *filename*."""
        # 'with' closes the handle even if write() raises (the original
        # open()/close() pair leaked it on error).
        with open(filename, "wb") as fd:
            fd.write(self.buffer)
--------------------------------------------------------------------------------
/tests/pdfcat:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """
3 | Concatenate pages from pdf files into a single pdf file.
4 |
5 | Page ranges refer to the previously-named file.
6 | A file not followed by a page range means all the pages of the file.
7 |
8 | PAGE RANGES are like Python slices.
9 | {page_range_help}
10 | EXAMPLES
11 | pdfcat -o output.pdf head.pdf content.pdf :6 7: tail.pdf -1
12 | Concatenate all of head.pdf, all but page seven of content.pdf,
13 | and the last page of tail.pdf, producing output.pdf.
14 |
15 | pdfcat chapter*.pdf >book.pdf
16 | You can specify the output file by redirection.
17 |
18 | pdfcat chapter?.pdf chapter10.pdf >book.pdf
19 | In case you don't want chapter 10 before chapter 2.
20 | """
21 | # Copyright (c) 2014, Steve Witham .
22 | # All rights reserved. This software is available under a BSD license;
23 | # see https://github.com/mstamy2/PyPDF2/LICENSE
24 |
25 | from __future__ import print_function
26 |
27 | import sys
28 | sys.path.append("../")
29 |
30 | import argparse
31 | from PdfFileTransformer.PyPDF2.pagerange import PAGE_RANGE_HELP
32 |
33 |
def parse_args():
    """Parse the command line; the mandatory first filename is folded back
    into fn_pgrgs so downstream parsing sees one flat list."""
    cli = argparse.ArgumentParser(
        description=__doc__.format(page_range_help=PAGE_RANGE_HELP),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    cli.add_argument("-o", "--output", metavar="output_file")
    cli.add_argument("-v", "--verbose", action="store_true",
                     help="show page ranges as they are being read")
    cli.add_argument("first_filename", nargs=1,
                     metavar="filename [page range...]")
    # argparse chokes on page ranges like "-2:" unless caught like this:
    cli.add_argument("fn_pgrgs", nargs=argparse.REMAINDER,
                     metavar="filenames and/or page ranges")
    parsed = cli.parse_args()
    parsed.fn_pgrgs.insert(0, parsed.first_filename[0])
    return parsed
50 |
51 |
52 | from sys import stderr, stdout, exit
53 | import os
54 | import traceback
55 | from collections import defaultdict
56 |
57 | from PdfFileTransformer.PyPDF2 import PdfFileMerger, parse_filename_page_ranges
58 |
59 |
if __name__ == "__main__":
    args = parse_args()
    filename_page_ranges = parse_filename_page_ranges(args.fn_pgrgs)
    if args.output:
        output = open(args.output, "wb")
    else:
        # Reopen stdout in binary mode for the PDF bytes.
        stdout.flush()
        output = os.fdopen(stdout.fileno(), "wb")

    merger = PdfFileMerger()
    in_fs = dict()
    try:
        for (filename, page_range) in filename_page_ranges:
            if args.verbose:
                print(filename, page_range, file=stderr)
            if filename not in in_fs:
                in_fs[filename] = open(filename, "rb")
            merger.append(in_fs[filename], pages=page_range)
    except Exception:
        # The original bare 'except:' also trapped SystemExit and
        # KeyboardInterrupt; only genuine errors should be reported here.
        print(traceback.format_exc(), file=stderr)
        print("Error while reading " + filename, file=stderr)
        exit(1)
    merger.write(output)
    # In 3.0, input files must stay open until output is written.
    # Not closing the in_fs because this script exits now.
85 |
--------------------------------------------------------------------------------
/Truepolyglot/PolyglotFile/polyglotpdfzip.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | This is free and unencumbered software released into the public domain.
5 |
6 | Anyone is free to copy, modify, publish, use, compile, sell, or
7 | distribute this software, either in source code form or as a compiled
8 | binary, for any purpose, commercial or non-commercial, and by any
9 | means.
10 |
11 | In jurisdictions that recognize copyright laws, the author or authors
12 | of this software dedicate any and all copyright interest in the
13 | software to the public domain. We make this dedication for the benefit
14 | of the public at large and to the detriment of our heirs and
15 | successors. We intend this dedication to be an overt act of
16 | relinquishment in perpetuity of all present and future rights to this
17 | software under copyright law.
18 |
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 | OTHER DEALINGS IN THE SOFTWARE.
26 |
27 | For more information, please refer to
28 | """
29 |
30 | import logging
31 |
32 | '''
33 | |-------------------------------| -
34 | |--------- PDF Header ----------K1 | J1
35 | |-------------------------------| -
36 | |----- PDF OBJ 1 = ZIP Data ----K2 |
37 | |-------------------------------| -
38 | |---- Original PDF Objects -----K3 | J2
39 | |-------------------------------| -
40 | |--- Last OBJ = End Zip Data ---K4 |
41 | |-------------------------------| |
42 | |---------- Xref Table ---------| |
43 | |-------------------------------K5 |
44 | |----------- Trailer -----------| |
45 | |-------------------------------| |
46 | '''
47 |
48 |
class PolyglotPdfZip():
    """Build a file that is simultaneously a valid PDF and a valid ZIP.

    The zip data up to end_of_data becomes the first PDF object stream (K2)
    and the bytes from the central directory onwards become the last one
    (K4); the zip headers are then rewritten around the PDF bytes (see the
    layout diagram above).
    """
    from Truepolyglot.PdfFileTransformer import Pdf
    from Truepolyglot.ZipFileTransformer import Zip

    def __init__(self, Pdf, Zip):
        """Keep the Pdf and Zip transformers used to build the polyglot."""
        # The original initialised self.buffer twice; once is enough.
        self.pdf = Pdf
        self.zip = Zip
        self.buffer = bytearray()

    def generate(self):
        """Assemble the polyglot into self.buffer."""
        # K2: zip data (up to end_of_data) becomes the first object stream.
        k2_stream = self.zip.buffer[:self.zip.end_of_data]
        size_k2_stream = len(k2_stream)
        self.pdf.insert_new_obj_stream_at_start(k2_stream)
        offset_k2_stream = self.pdf.get_first_stream_offset()

        # K4: central directory onwards becomes the last object stream.
        k4_stream = self.zip.buffer[self.zip.central_dir_file_header:]
        size_k4_stream = len(k4_stream)
        self.pdf.insert_new_obj_stream_at_end(k4_stream)
        offset_k4_stream = self.pdf.get_last_stream_offset()

        pdf_buffer = self.pdf.get_build_buffer()

        # J1 = PDF header before K2, J2 = everything between K2 and K4.
        j1 = pdf_buffer[0:offset_k2_stream]
        j2 = pdf_buffer[offset_k2_stream + size_k2_stream:offset_k4_stream]
        self.zip.add_data_to_file(j1, j2, True)

        # K5: xref table and trailer following the last injected stream.
        k5 = pdf_buffer[offset_k4_stream + size_k4_stream:]
        self.buffer = self.zip.buffer + k5

    def write(self, filename):
        """Write the generated polyglot buffer to *filename*."""
        # 'with' closes the handle even if write() raises (the original
        # open()/close() pair leaked it on error).
        with open(filename, "wb") as fd:
            fd.write(self.buffer)
--------------------------------------------------------------------------------
/version.sh:
--------------------------------------------------------------------------------
#!/bin/sh

# Copyright (c) 2018 Anton Semjonov
# Licensed under the MIT License

# This script prints version information for a project managed with Git when
# executed in a shell. It works from checked-out repositories or downloaded
# archives alike. For more information see https://github.com/ansemjo/version.sh

# -- SYNOPSIS -- Copy this script to your project root and add the line
# 'version.sh export-subst' to your .gitattributes file, creating it if it does
# not exist. Commit both files, try running 'sh ./version.sh' and use annotated
# Git tags to track your versions.

# Ignore certain shellcheck warnings:
# - '$Format..' looks like a variable in single-quotes but this is
#   necessary so it does _not_ expand when interpreted by the shell
# - backslash before a literal newline is a portable way
#   to insert newlines with sed
# shellcheck disable=SC2016,SC1004

# Magic! These strings will be substituted by 'git archive':
COMMIT='021df75abda4aa2de48593d685b63cf5364c2d6d'
REFS='HEAD -> main'

# Fallback values:
FALLBACK_VERSION='commit'
FALLBACK_COMMIT='unknown'

# Revision and commit hash separators in the 'describe' string.
# NOTE: the historic 'SEPERATOR' misspelling in the variable names is part of
# the environment-variable interface (setup.py sets REVISION_SEPERATOR) and
# must not be corrected here.
REVISION_SEPERATOR="${REVISION_SEPERATOR:--}"
COMMIT_SEPERATOR="${COMMIT_SEPERATOR:--g}"

# Check if variables contain substituted values?
subst() { test -n "${COMMIT##\$Format*}" && test -n "${REFS##\$Format*}"; }

# Check if git and repository information is available?
hasgit() {
  command -v git >/dev/null && { test -r .git || git rev-parse 2>/dev/null; };
}

# Parse the %D reflist in $REFS to get a tag or branch name:
refparse() {
  # try to find a tag:
  tag=$(echo "$REFS" | sed -ne 's/.*tag: \([^,]*\).*/\1/p');
  test -n "$tag" && echo "$tag" && return 0;
  # try to find a branch name:
  branch=$(echo "$REFS" | sed -e 's/HEAD -> //' -e 's/, /\
/' | sed -ne '/^[a-z0-9._-]*$/p' | sed -n '1p');
  test -n "$branch" && echo "$branch" && return 0;
  # nothing found, no tags and not a branch tip?
  return 1;
}

# Try to get commit and version information with git:
gitcommit() {
  hasgit && git describe --always --abbrev=0 --match '^$' --dirty;
}
gitversion() {
  hasgit && {
    {
      # try to use 'describe':
      V=$(git describe --tags 2>/dev/null) && \
      echo "$V" | sed 's/-\([0-9]*\)-g.*/'"$REVISION_SEPERATOR"'\1/';
    } || {
      # or count the number of commits otherwise:
      C=$(git rev-list --count HEAD) && \
      printf '0.0.0%s%s' "$REVISION_SEPERATOR" "$C";
    };
  };
}

# Wrappers to return version and commit (substituted -> git info -> fallback):
version() { subst && refparse || gitversion || echo "$FALLBACK_VERSION"; }
commit() { subst && echo "$COMMIT" || gitcommit || echo "$FALLBACK_COMMIT"; }
# Combined "<version><sep><short-commit>" string; %.7s truncates the hash:
describe() { printf '%s%s%.7s\n' "$(version)" "$COMMIT_SEPERATOR" "$(commit)"; }

# Parse commandline argument:
case "$1" in
  version) version ;;
  commit) commit ;;
  describe) describe ;;
  json)
    # machine-readable output of all three values:
    printf '{"version":"%s","commit":"%s","describe":"%s"}\n' \
      "$(version)" "$(commit)" "$(describe)" ;;
  help)
    printf '%s [version|commit|describe|json]\n' "$0" ;;
  *)
    # default: human-readable summary
    printf 'version : %s\ncommit : %s\n' "$(version)" "$(commit)" ;;
esac
91 |
--------------------------------------------------------------------------------
/Truepolyglot/PolyglotFile/polyglotszippdf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | This is free and unencumbered software released into the public domain.
5 |
6 | Anyone is free to copy, modify, publish, use, compile, sell, or
7 | distribute this software, either in source code form or as a compiled
8 | binary, for any purpose, commercial or non-commercial, and by any
9 | means.
10 |
11 | In jurisdictions that recognize copyright laws, the author or authors
12 | of this software dedicate any and all copyright interest in the
13 | software to the public domain. We make this dedication for the benefit
14 | of the public at large and to the detriment of our heirs and
15 | successors. We intend this dedication to be an overt act of
16 | relinquishment in perpetuity of all present and future rights to this
17 | software under copyright law.
18 |
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 | OTHER DEALINGS IN THE SOFTWARE.
26 |
27 | For more information, please refer to
28 | """
29 |
30 | from .polyglotpdfzip import PolyglotPdfZip
31 | import logging
32 | import tempfile
33 | from Truepolyglot.ZipFileTransformer import ZipFile
34 | from Truepolyglot.ZipFileTransformer import Zip
35 | from Truepolyglot.PdfFileTransformer import Pdf
36 |
37 | '''
38 | |-----------------------------------| -
39 | |--------- ZIP Data[0] = -----------| |
40 | |- PDF Header + PDF Obj[0] Header --| |
41 | |-----------------------------------| | K2
42 | |------- PDF Obj[0] stream = ------| |
43 | |--------- ZIP Data LF [1:] --------| |
44 | |-----------------------------------| -
45 | |------ Original PDF Objects -------| |
46 | |-----------------------------------| |
47 | |------------ Xref Table -----------| |
48 | |-----------------------------------| | J2
49 | |------------- Trailer -------------| |
50 | |-----------------------------------| -
51 | |---------- End Zip Data -----------|
52 | |-----------------------------------|
53 | '''
54 |
55 |
class PolyglotSZipPdf(PolyglotPdfZip):
    """Build an "szippdf" polyglot: a file that is strictly a ZIP archive
    while remaining a valid PDF document.

    The PDF header plus the header of the first PDF object are stored as
    an extra ZIP entry (named ' '), so the output begins with a regular
    local file header and the ZIP structures are never offset.
    """

    def __init__(self, Pdf, Zip, acrobat_compatibility):
        """Keep the base-class state and remember whether a leading byte
        must be prepended for Acrobat Reader compatibility."""
        super().__init__(Pdf, Zip)
        self.acrobat_compatibility = acrobat_compatibility

    def get_rebuild_zip_first_part_size(self):
        """Return the size of the local-file-data part (up to end_of_data)
        of the input archive after it has been rewritten entry-by-entry.

        The archive is rewritten because the zipfile layer may normalise
        headers, so this size can differ from the original file's.
        """
        # NamedTemporaryFile creates the (empty) file and closes the
        # handle; the old mkstemp()[1] call leaked the OS-level fd.
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            zo_path = tmp.name
        logging.info("use tmp file zip: " + zo_path)
        # Context managers guarantee both archives are closed even if a
        # read/write fails mid-copy.
        with ZipFile(zo_path, 'a') as zo, \
                ZipFile(self.zip.filename, 'r') as zi:
            for zipinfo in zi.infolist():
                zo.writestr(zipinfo, zi.read(zipinfo))

        rebuild_zip = Zip(zo_path)

        p = rebuild_zip.end_of_data
        k2_stream = rebuild_zip.buffer[:p]

        return len(k2_stream)

    def get_pdf_header(self):
        """Return the raw PDF header bytes of the input document."""
        return self.pdf.get_file_header()

    def generate_zip_with_pdf_part(self, filename, pdf_data):
        """Write a copy of the input archive to *filename*, preceded by a
        stored entry named ' ' whose content is *pdf_data* (the PDF
        header + first object header)."""
        with ZipFile(filename, 'a') as zo, \
                ZipFile(self.zip.filename, 'r') as zi:
            zo.writestr(' ', pdf_data, 0)
            for zipinfo in zi.infolist():
                zo.writestr(zipinfo, zi.read(zipinfo))

    def get_rebuild_pdf(self, zo_path, offset):
        '''
        Generate polyglot with final zip.

        *zo_path* is the archive produced by generate_zip_with_pdf_part
        and *offset* the position of "%PDF" inside it.  Returns the
        final polyglot buffer.
        '''
        new_zip = Zip(zo_path)
        new_pdf = Pdf(self.pdf.filename)

        # K2: everything between the end of the first local file header
        # and the end of the ZIP data becomes the first PDF stream body.
        p1 = new_zip.end_of_first_local_file_header
        p2 = new_zip.end_of_data
        k2_stream = new_zip.buffer[p1:p2]

        size_k2_stream = len(k2_stream)
        new_pdf.insert_new_obj_stream_at_start(k2_stream)
        k2_stream_offset = new_pdf.get_first_stream_offset()

        # Shift the rebuilt PDF's offsets by the position of "%PDF"
        # inside the ZIP (+1 when the Acrobat workaround byte is added).
        new_pdf.file_offset = offset
        if self.acrobat_compatibility:
            new_pdf.file_offset = new_pdf.file_offset + 1
        pdf_buffer = new_pdf.get_build_buffer()
        # J2: PDF tail (remaining objects, xref table, trailer).
        j2 = pdf_buffer[k2_stream_offset + size_k2_stream:]

        if self.acrobat_compatibility:
            new_zip.add_data_to_file(b'\x00', j2, True)
        else:
            new_zip.add_data_to_file(b'', j2, True)

        return new_zip.buffer

    def get_pdf_offset(self, zipfile):
        """Return the offset of the "%PDF" magic inside *zipfile*
        (-1 when absent)."""
        # `with` fixes the file-handle leak of the original open()/read().
        with open(zipfile, "rb") as f:
            return f.read().find(b"%PDF")

    def generate(self):
        """Build the polyglot into self.buffer."""
        zip_stream_size = self.get_rebuild_zip_first_part_size()
        pdf_header = self.get_pdf_header()
        pdf_header = (pdf_header +
                      b'1 0 obj\n<<\n/Filter /FlateDecode\n/Length ' +
                      str(zip_stream_size).encode("utf-8") +
                      b'\n>>\nstream\n')

        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            filename = tmp.name
        logging.info("use tmp file for new zip: " + filename)
        self.generate_zip_with_pdf_part(filename, pdf_header)

        pdf_offset = self.get_pdf_offset(filename)

        self.buffer = self.get_rebuild_pdf(filename, pdf_offset)
145 |
--------------------------------------------------------------------------------
/truepolyglot:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | """
5 | This is free and unencumbered software released into the public domain.
6 |
7 | Anyone is free to copy, modify, publish, use, compile, sell, or
8 | distribute this software, either in source code form or as a compiled
9 | binary, for any purpose, commercial or non-commercial, and by any
10 | means.
11 |
12 | In jurisdictions that recognize copyright laws, the author or authors
13 | of this software dedicate any and all copyright interest in the
14 | software to the public domain. We make this dedication for the benefit
15 | of the public at large and to the detriment of our heirs and
16 | successors. We intend this dedication to be an overt act of
17 | relinquishment in perpetuity of all present and future rights to this
18 | software under copyright law.
19 |
20 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
23 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 | OTHER DEALINGS IN THE SOFTWARE.
27 |
28 | For more information, please refer to
29 | """
30 |
31 | import argparse
32 | import logging
33 | from os import path
34 |
35 | from Truepolyglot.PdfFileTransformer import Pdf
36 | from Truepolyglot.ZipFileTransformer import Zip
37 | from Truepolyglot.PolyglotFile import PolyglotZipPdf
38 | from Truepolyglot.PolyglotFile import PolyglotPdfZip
39 | from Truepolyglot.PolyglotFile import PolyglotSZipPdf
40 | from Truepolyglot.PolyglotFile import PolyglotPdfAny
41 | from Truepolyglot.PolyglotFile import PolyglotZipAny
42 |
43 |
# Version banner shown in the epilog of both CLIs' --help output.
epilog_str = 'TruePolyglot v1.6.2'
45 |
def main():
    """Run the full truepolyglot command line.

    Parses the arguments, validates which input files the chosen format
    requires, configures logging, then builds the requested polyglot and
    writes it to the output path.
    """
    description_str = ('Generate a polyglot file.\n\nFormats availables:\n' +
                       '* pdfzip: Generate a file valid as PDF and ZIP.' +
                       ' The format is closest to PDF.\n' +
                       '* zippdf: Generate a file valid as ZIP and PDF.' +
                       ' The format is closest to ZIP.\n' +
                       '* szippdf: Generate a file valid as ZIP and PDF.' +
                       ' The format is strictly a ZIP.\n' +
                       ' Archive is modified.\n' +
                       '* pdfany: Generate a valid PDF file with payload1' +
                       ' file content as the first object\n' +
                       ' or/and payload2 file' +
                       ' content as the last object.\n' +
                       '* zipany: Generate a valid ZIP file with payload1' +
                       ' file content at the start of the file\n' +
                       ' or/and payload2 file content between' +
                       ' LFH and CD.\n')
    usage_str = '%(prog)s format [options] output-file'
    parser = argparse.ArgumentParser(description=description_str,
                                     epilog=epilog_str,
                                     usage=usage_str,
                                     formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('format', nargs='+', choices=["pdfzip",
                                                      "zippdf",
                                                      "szippdf",
                                                      "pdfany",
                                                      "zipany"],
                        help='Output polyglot format')
    parser.add_argument('--pdffile', dest='pdffile',
                        help='PDF input file')
    parser.add_argument('--zipfile', dest='zipfile',
                        help='ZIP input file')
    parser.add_argument('--payload1file', dest='payload1file',
                        help='Payload 1 input file')
    parser.add_argument('--payload2file', dest='payload2file',
                        help='Payload 2 input file')
    parser.add_argument('--acrobat-compatibility',
                        dest='acrobat_compatibility',
                        help='Add a byte at the start for Acrobat Reader compatibility with the szippdf format',
                        action='store_true')
    parser.add_argument('--verbose', dest='verbose',
                        help='Verbosity level (default: info)',
                        default="info",
                        choices=["none", "error", "info", "debug"])
    parser.add_argument('output_file', nargs='+',
                        help='Output polyglot file path')

    args = parser.parse_args()
    fmt = args.format[0]

    # Cross-option validation: each format requires different inputs.
    if args.acrobat_compatibility and fmt != "szippdf":
        parser.error('--acrobat-compatibility is for szippdf only')
    if "pdf" in fmt and args.pdffile is None:
        parser.error('pdffile is required')
    if "zip" in fmt and args.zipfile is None:
        parser.error('zipfile is required')
    if ("any" in fmt and args.payload1file is None and
            args.payload2file is None):
        parser.error('payload1file or payload2file is required')

    # `choices` above guarantees the key is present.
    log_levels = {"none": logging.CRITICAL,
                  "error": logging.ERROR,
                  "info": logging.INFO,
                  "debug": logging.DEBUG}
    logging.basicConfig(level=log_levels[args.verbose])

    if fmt == "pdfzip":
        a = PolyglotPdfZip(Pdf(args.pdffile), Zip(args.zipfile))
    elif fmt == "zippdf":
        a = PolyglotZipPdf(Pdf(args.pdffile), Zip(args.zipfile))
    elif fmt == "szippdf":
        a = PolyglotSZipPdf(Pdf(args.pdffile), Zip(args.zipfile),
                            args.acrobat_compatibility)
    elif fmt == "pdfany":
        a = PolyglotPdfAny(Pdf(args.pdffile),
                           args.payload1file, args.payload2file)
    else:  # "zipany"
        a = PolyglotZipAny(Zip(args.zipfile),
                           args.payload1file, args.payload2file)

    a.generate()
    a.write(args.output_file[0])
137 |
138 | # reduced argparse to create pdfzip format files
def pdfzip():
    """Reduced CLI that only produces pdfzip-format polyglot files."""

    description = """
    Generate a pdfzip format polyglot file. This file is valid as
    a PDF and as a ZIP and is closest to the PDF format.
    For other output formats use the 'truepolyglot' command.
    """

    argp = argparse.ArgumentParser(
        description=description,
        epilog=epilog_str,
        usage="%(prog)s -p pdf -z zip output",
    )

    argp.add_argument("-p", "--pdffile", metavar="pdf", help="PDF input file", required=True)
    argp.add_argument("-z", "--zipfile", metavar="zip", help="ZIP archive input file", required=True)
    argp.add_argument("output", help="Polyglot output file")
    argp.add_argument('--verbose', help='Verbosity level (default: info)',
                      default="info", choices=["none", "error", "info", "debug"])

    opts = argp.parse_args()

    # The `choices` list guarantees a key hit.
    levels = {"none": logging.CRITICAL,
              "error": logging.ERROR,
              "info": logging.INFO,
              "debug": logging.DEBUG}
    logging.basicConfig(level=levels[opts.verbose])

    polyglot = PolyglotPdfZip(Pdf(opts.pdffile), Zip(opts.zipfile))
    polyglot.generate()
    polyglot.write(opts.output)
175 |
if __name__ == "__main__":
    # BusyBox-style dispatch: when the script is invoked under the name
    # "pdfzip", run the reduced pdfzip-only CLI; otherwise the full CLI.
    name = path.basename(__file__)
    if name == "pdfzip":
        pdfzip()
    else:
        main()
182 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # truepolyglot
2 |
3 | Truepolyglot is a polyglot file generator project. It means the generated file is composed of several file formats. The same file can be opened as a ZIP file and as a PDF file for example. The idea of this project comes from the work of [Ange Albertini](https://github.com/corkami), [International Journal of Proof-of-Concept or Get The Fuck Out](https://www.alchemistowl.org/pocorgtfo/pocorgtfo07.pdf) and [Julia Wolf](https://www.troopers.de/wp-content/uploads/2011/04/TR11_Wolf_OMG_PDF.pdf) that explain how we can build a polyglot file.\
4 | Polyglot file can be boring to build, even more if you want to respect the file format correctly.\
5 | That's why I decided to build a tool to generate them.\
6 | My main motivation was the technical challenge.
7 |
8 | ## :warning: FORKED
9 |
10 | This repository is forked from [truepolyglot.hackade.org][hackade] and includes a few commits to
11 | provide a `setup.py` for `pip`-installations along with a number of [other opinionated changes][history].
12 |
13 | You can install this version from `master` with:
14 |
15 | pip install git+https://github.com/ansemjo/truepolyglot
16 |
17 | Notably, this fork uses PyPDF2's `cloneReaderDocumentRoot`, which may hiccup on malformed PDFs more easily but
18 | copies the entire document including cross-references and section labels. The `setup.py` also installs a command
19 | `pdfzip`, which only creates polyglot files of this particular format since I believe this to be the most useful
20 | output format:
21 |
22 | pdfzip -p document.pdf -z archive.zip polyglot.zip.pdf
23 |
24 | [hackade]: https://truepolyglot.hackade.org/
25 | [history]: https://github.com/ansemjo/truepolyglot/compare/1.6.2...master
26 |
27 | Below you find the rest of the *original* README. Parts of it may be outdated and may not apply to this fork.
28 | For example I did not test compatibility beyond Firefox and Evince.
29 |
30 | ## Features and versions ##
31 |
32 | | Description | Version |
33 | | ----------- | ------- |
34 | | Build a polyglot file valid as PDF and ZIP format and that can be opened with 7Zip and Windows Explorer | POC |
35 | | Add a stream object in the PDF part | POC |
36 | | Polyglot file checked without warning with [pdftocairo](https://poppler.freedesktop.org/) | >= 1.0 |
37 | | Polyglot file checked without warning with [caradoc](https://github.com/ANSSI-FR/caradoc) | >= 1.0 |
38 | | Rebuild the PDF Xref Table | >= 1.0 |
39 | | Stream object with the correct length header value | >= 1.0 |
40 | | Add the format "zippdf", file without offset after the Zip data | >= 1.1 |
41 | | Polyglot file keeps the original PDF version | >= 1.1.1 |
42 | | Add the "szippdf" format without offset before and after the Zip data | >= 1.2 |
43 | | Fix /Length stream object value and the PDF offset for the szippdf format | >= 1.2.1 |
44 | | PDF object numbers reorder after insertion | >= 1.3 |
45 | | Add the format "pdfany" a valid PDF with custom payload content in the first and the last object | >= 1.5.2 |
46 | | Add "acrobat-compatibility" option to allow szippdf to be read with Acrobat Reader (thanks Ange Albertini)| >= 1.5.3 |
47 | | Add the format "zipany" a valid ZIP with custom payload content at the start and between LFH and CD | >= 1.6 |
48 |
49 | ## Polyglot file compatibility ##
50 |
51 | | Software | Formats | status |
52 | | -------- | ------- | ------ |
53 | | Acrobat Reader | pdfzip, zippdf, szippdf, pdfany | OK |
54 | | Sumatra PDF | pdfzip, zippdf, szippdf, pdfany | OK |
55 | | Foxit PDF Reader | pdfzip, zippdf, szippdf, pdfany | OK |
56 | | Edge | pdfzip, zippdf, szippdf, pdfany | OK |
57 | | Firefox | pdfzip, zippdf, szippdf, pdfany | OK |
58 | | 7zip | pdfzip, zippdf, zipany | OK with warning |
59 | | 7zip | szippdf | OK |
60 | | Explorer Windows | pdfzip, zippdf, szippdf, pdfany, zipany | OK |
61 | | Info-ZIP (unzip) | pdfzip, zippdf, szippdf, pdfany, zipany | OK |
62 | | Evince | pdfzip, zippdf, szippdf, pdfany | OK |
63 | | pdftocairo -pdf | pdfzip, zippdf, szippdf, pdfany | OK |
64 | | caradoc stats | pdfzip, pdfany | OK |
65 | | java -jar | szippdf | OK |
66 |
67 | ## Examples ##
68 |
69 | | First input file | Second input file | Format | Polyglot | Comment |
70 | | ---------------- | ----------------- | ------ | -------- | ------- |
71 | | [doc.pdf](https://truepolyglot.hackade.org/samples/pdfzip/poc1/doc.pdf) | [archive.zip](https://truepolyglot.hackade.org/samples/pdfzip/poc1/archive.zip) | pdfzip | [polyglot.pdf](https://truepolyglot.hackade.org/samples/pdfzip/poc1/polyglot.pdf) | PDF/ZIP polyglot - 122 Ko |
72 | | [orwell\_1984.pdf](https://truepolyglot.hackade.org/samples/pdfzip/poc2/orwell_1984.pdf) | [file-FILE5\_32.zip](https://truepolyglot.hackade.org/samples/pdfzip/poc2/file-FILE5_32.zip) | pdfzip | [polyglot.pdf](https://truepolyglot.hackade.org/samples/pdfzip/poc2/polyglot.pdf) | PDF/ZIP polyglot - 1.3 Mo |
73 | | [x86asm.pdf](https://truepolyglot.hackade.org/samples/pdfzip/poc3/x86asm.pdf) | [fasmw17304.zip](https://truepolyglot.hackade.org/samples/pdfzip/poc3/fasmw17304.zip) | pdfzip | [polyglot.pdf](https://truepolyglot.hackade.org/samples/pdfzip/poc3/polyglot.pdf) | PDF/ZIP polyglot - 1.8 Mo |
74 | | [doc.pdf](/samples/zippdf/poc4/doc.pdf) | [archive.zip](/samples/zippdf/poc4/archive.zip) | zippdf | [polyglot.pdf](/samples/zippdf/poc4/polyglot.pdf) | PDF/ZIP polyglot - 112 Ko |
75 | | [electronics.pdf](https://truepolyglot.hackade.org/samples/szippdf/poc5/electronics.pdf) | [hello\_world.jar](https://truepolyglot.hackade.org/samples/szippdf/poc5/hello_world.jar) | szippdf | [polyglot.pdf](https://truepolyglot.hackade.org/samples/szippdf/poc5/polyglot.pdf) | PDF/JAR polyglot - 778 Ko |
76 | | [hexinator.pdf](https://truepolyglot.hackade.org/samples/pdfzip/poc6/hexinator.pdf) | [eicar.zip](https://truepolyglot.hackade.org/samples/pdfzip/poc6/eicar.zip) ([scan virustotal.com](https://www.virustotal.com/#/file/2174e17e6b03bb398666c128e6ab0a27d4ad6f7d7922127fe828e07aa94ab79d/detection)) | pdfzip | [polyglot.pdf](https://truepolyglot.hackade.org/samples/pdfzip/poc6/polyglot.pdf) ([scan virustotal.com](https://www.virustotal.com/#/file/f6fef31e3b03164bb3bdf35af0521f9fc0c518a9e0f1aa9f8b60ac936201591a/detection)) | PDF/ZIP polyglot with the Eicar test in Zip - 2.9 Mo |
77 | | [doc.pdf](https://truepolyglot.hackade.org/samples/pdfany/poc7/doc.pdf) | [page.html](https://truepolyglot.hackade.org/samples/pdfany/poc7/page.html) | pdfany | [polyglot.pdf](https://truepolyglot.hackade.org/samples/pdfany/poc7/polyglot.pdf) | PDF/HTML polyglot - 26 Ko |
78 | | [logo.zip](https://truepolyglot.hackade.org/samples/zipany/poc8/logo.zip) | [nc.exe](https://truepolyglot.hackade.org/samples/zipany/poc8/nc.exe) | zipany | [polyglot.zip](https://truepolyglot.hackade.org/samples/zipany/poc8/polyglot.zip) | ZIP/PE polyglot - 96 Ko |
79 |
80 | ## Usage ##
81 |
82 | ```
83 | usage: truepolyglot format [options] output-file
84 |
85 | Generate a polyglot file.
86 |
87 | Formats availables:
88 | * pdfzip: Generate a file valid as PDF and ZIP. The format is closest to PDF.
89 | * zippdf: Generate a file valid as ZIP and PDF. The format is closest to ZIP.
90 | * szippdf: Generate a file valid as ZIP and PDF. The format is strictly a ZIP. Archive is modified.
91 | * pdfany: Generate a valid PDF file with payload1 file content as the first object or/and payload2 file content as the last object.
92 | * zipany: Generate a valid ZIP file with payload1 file content at the start of the file or/and payload2 file content between LFH and CD.
93 |
94 | positional arguments: {pdfzip,zippdf,szippdf,pdfany,zipany}
95 | Output polyglot format
96 | output_file Output polyglot file path
97 |
98 | optional arguments:
99 | -h, --help show this help message and exit
100 | --pdffile PDFFILE PDF input file
101 | --zipfile ZIPFILE ZIP input file
102 | --payload1file PAYLOAD1FILE Payload 1 input file
103 | --payload2file PAYLOAD2FILE Payload 2 input file
104 | --acrobat-compatibility Add a byte at the start for Acrobat Reader compatibility with the szippdf format
105 | --verbose {none,error,info,debug} Verbosity level (default: info)
106 |
107 | TruePolyglot v1.6.2
108 | ```
109 |
110 | ## Code ##
111 |
112 | ```
113 | git clone https://git.hackade.org/truepolyglot.git/
114 | ```
115 |
116 | or download [truepolyglot-1.6.2.tar.gz](https://git.hackade.org/truepolyglot.git/snapshot/truepolyglot-1.6.2.tar.gz)
117 |
118 | ## How to detect a polyglot file ? ##
119 |
120 | You can use [binwalk](https://github.com/ReFirmLabs/binwalk) on a file to see if composed of multiple files.
121 |
122 | ## Contact ##
123 |
124 | [truepolyglot@hackade.org](mailto:truepolyglot@hackade.org)
125 |
126 | ## Credits ##
127 |
128 | Copyright © 2018-2019 ben@hackade.org
129 |
130 | TruePolyglot is released under [Unlicence](https://unlicense.org/) except for the following libraries:
131 |
132 | * [PyPDF2](https://github.com/mstamy2/PyPDF2/blob/master/LICENSE)
133 | * [zipfile.py (cpython)](https://github.com/python/cpython/blob/master/LICENSE)
134 |
135 |
--------------------------------------------------------------------------------
/Truepolyglot/ZipFileTransformer/zip.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | This is free and unencumbered software released into the public domain.
5 |
6 | Anyone is free to copy, modify, publish, use, compile, sell, or
7 | distribute this software, either in source code form or as a compiled
8 | binary, for any purpose, commercial or non-commercial, and by any
9 | means.
10 |
11 | In jurisdictions that recognize copyright laws, the author or authors
12 | of this software dedicate any and all copyright interest in the
13 | software to the public domain. We make this dedication for the benefit
14 | of the public at large and to the detriment of our heirs and
15 | successors. We intend this dedication to be an overt act of
16 | relinquishment in perpetuity of all present and future rights to this
17 | software under copyright law.
18 |
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 | OTHER DEALINGS IN THE SOFTWARE.
26 |
27 | For more information, please refer to
28 | """
29 |
30 | import logging as Logging
31 | import re
32 |
logging = Logging.getLogger("zip")  # named module logger; alias keeps call sites spelled `logging.<level>(...)`
34 |
class Zip:
    """Byte-level parser for a ZIP archive.

    The whole file is loaded into ``self.buffer`` and the offsets of the
    local file headers (LFH), the central directory (CD) records and the
    end of central directory (EOCD) record are recorded, so that foreign
    data can later be spliced around the ZIP payload with
    ``add_data_to_file``.
    """

    def __init__(self, filename):
        """Read *filename* and parse every structure needed later.

        Raises:
            Exception: if the LFH magic, the EOCD record or the central
                directory cannot be found.
        """
        self.filename = filename
        self.buffer = bytearray()           # raw file content
        self.size = 0                       # len(self.buffer)
        self.end_central_dir = 0            # offset of the EOCD record
        self.first_local_file_header = 0    # offset of the first LFH
        self.offset_local_file = []         # offset just past each local file
        self.offset_central_directory = []  # offset of each CD record
        self.end_of_data = 0                # offset just past the last local file
        self.end_of_first_local_file_header = 0

        # Order matters: each parser below reads offsets produced by the
        # previous ones.
        self.read()
        self.check_header()
        self.call_all_parsers()
        self.check_central_directory()
        self.parse_central_directories()
        self.parse_local_file_headers()

    def call_all_parsers(self):
        """Parse the fixed-size fields of the EOCD record."""
        self.parse_offset_end_central_dir()
        self.parse_nb_of_disk()
        self.parse_start_disk()
        self.parse_nb_of_central_dir()
        self.parse_nb_total_of_central_dir()
        self.parse_size_central_dir()
        self.parse_central_dir_file_header()
        self.parse_comment_length()

    def read(self):
        """Load the archive into ``self.buffer`` and record its size."""
        with open(self.filename, 'rb') as fd:
            self.buffer = bytearray(fd.read())
        self.size = len(self.buffer)
        logging.info("read " + str(self.size) + " bytes from Zip file")

    def check_header(self):
        """Require the file to start with an LFH signature (PK\\x03\\x04)."""
        if self.buffer[0:4] != b"PK\x03\x04":
            raise Exception("Zip header not found")

    def parse_offset_end_central_dir(self):
        """Locate the EOCD record.

        The buffer is scanned in reverse (hence the reversed signature
        b'\\x06\\x05KP'), so the *last* occurrence wins, skipping any
        stray signature bytes that happen to sit inside file data.
        """
        r = re.compile(b'\x06\x05KP')
        s = r.search(self.buffer[::-1])
        if s is None:
            raise Exception("Unable to find end of central directory")
        self.end_central_dir = self.size - s.end()
        logging.info("Offset end of central directory: " +
                     hex(self.end_central_dir))

    def parse_nb_of_disk(self):
        """EOCD offset 4-6: number of this disk."""
        self.nb_of_disk = int.from_bytes(
            self.buffer[self.end_central_dir + 4:self.end_central_dir + 6],
            "little")
        logging.debug("Nb of disk: " + str(self.nb_of_disk))

    def parse_start_disk(self):
        """EOCD offset 6-8: disk where the central directory starts."""
        self.start_disk = int.from_bytes(
            self.buffer[self.end_central_dir + 6:self.end_central_dir + 8],
            "little")
        logging.debug("Start disk: " + str(self.start_disk))

    def parse_nb_of_central_dir(self):
        """EOCD offset 8-10: number of CD records on this disk."""
        self.nb_of_central_dir = int.from_bytes(
            self.buffer[self.end_central_dir + 8:self.end_central_dir + 10],
            "little")
        logging.info("Nb of central directory record: " +
                     str(self.nb_of_central_dir))

    def parse_nb_total_of_central_dir(self):
        """EOCD offset 10-12: total number of CD records."""
        self.nb_total_of_central_dir = int.from_bytes(
            self.buffer[self.end_central_dir + 10:self.end_central_dir + 12],
            "little")
        logging.info("Nb of total central directory record: " +
                     str(self.nb_total_of_central_dir))

    def parse_size_central_dir(self):
        """EOCD offset 12-16: size in bytes of the central directory.

        Bug fix: this is a 4-byte field per the ZIP specification; the
        previous code read only 2 bytes and truncated sizes >= 64 KiB.
        """
        self.size_central_dir = int.from_bytes(
            self.buffer[self.end_central_dir + 12:self.end_central_dir + 16],
            "little")
        logging.info("Size of central directory: " +
                     str(self.size_central_dir))

    def parse_central_dir_file_header(self):
        """EOCD offset 16-20: offset of the first CD record."""
        self.central_dir_file_header = int.from_bytes(
            self.buffer[self.end_central_dir + 16:self.end_central_dir + 20],
            "little")
        logging.info("Central directory file header: " +
                     hex(self.central_dir_file_header))

    def parse_comment_length(self):
        """EOCD offset 20-22: length of the trailing archive comment."""
        self.comment_length = int.from_bytes(
            self.buffer[self.end_central_dir + 20:self.end_central_dir + 22],
            "little")
        logging.info("Comment length: " +
                     str(self.comment_length))

    def check_central_directory(self):
        """Verify that the offset parsed from the EOCD really points at a
        CD record signature."""
        offset = self.central_dir_file_header
        if (self.buffer[offset:offset + 4] !=
                b'PK\x01\x02'):
            raise Exception("Unable to find central directory")
        logging.info("Found central directory")

    def parse_central_directories(self):
        """Walk every CD record, recording each record's offset and the LFH
        offset of the first entry."""
        if (self.buffer[self.central_dir_file_header:
                        self.central_dir_file_header + 4] !=
                b'PK\x01\x02'):
            raise Exception("Unable to find first central directory")
        logging.info("Found first central directory")

        i = 0
        size = 0
        offset = self.central_dir_file_header

        while (self.buffer[size + offset:
                           size + offset + 4] ==
               b'PK\x01\x02'):

            logging.debug("Parse central directory n°" + str(i))
            logging.debug("Offset: " + hex(offset + size))
            self.offset_central_directory.append(offset + size)
            # Variable-length tail of a CD record: file name, extra field
            # and comment (length fields at record offsets 28, 30, 32).
            filename_length = int.from_bytes(
                self.buffer[size + offset + 28:size + offset + 30],
                "little")
            logging.debug("filename length:" + str(filename_length))
            extra_field_length = int.from_bytes(
                self.buffer[size + offset + 30:size + offset + 32],
                "little")
            logging.debug("extra field length:" + str(extra_field_length))
            comment_length = int.from_bytes(
                self.buffer[size + offset + 32:size + offset + 34],
                "little")
            logging.debug("comment length:" + str(comment_length))
            local_file_header = int.from_bytes(
                self.buffer[size + offset + 42:size + offset + 46],
                "little")
            if i == 0:
                self.first_local_file_header = local_file_header
            logging.debug("local file header:" + hex(local_file_header))

            i = i + 1
            size = (size + filename_length +
                    extra_field_length + comment_length + 46)

            logging.debug("parse header at:" + hex(offset + size))

    def parse_local_file_headers(self):
        """Walk the local file entries, recording where each one ends and
        the global end of the ZIP payload data."""
        size = 0
        offset = self.first_local_file_header
        for i in range(self.nb_of_central_dir):
            logging.debug("Parse local file n°" + str(i))
            compressed_data_length = int.from_bytes(
                self.buffer[size + offset + 18:size + offset + 22],
                "little")
            logging.debug("compressed data length:" +
                          str(compressed_data_length))
            filename_length = int.from_bytes(
                self.buffer[size + offset + 26:size + offset + 28],
                "little")
            logging.debug("filename length:" + str(filename_length))
            extra_field_length = int.from_bytes(
                self.buffer[size + offset + 28:size + offset + 30],
                "little")
            logging.debug("extra field length:" + str(extra_field_length))
            # 30 fixed LFH bytes + name + extra + payload.
            local_file_size = (compressed_data_length +
                               filename_length + extra_field_length + 30)
            logging.debug("local file length:" + hex(local_file_size))
            size = size + local_file_size
            logging.debug("parse header at:" + hex(offset + size))
            self.offset_local_file.append(offset + size)
            self.end_of_data = offset + size
            if i == 0:
                self.end_of_first_local_file_header = self.end_of_data

    def add_data_to_file(self, data_before_local, data_after_local,
                         write_buffer=False):
        """Fix up ZIP offsets for data inserted around the archive.

        Every CD record's LFH offset is shifted by len(data_before_local)
        and the EOCD's CD offset by the length of both insertions.  When
        *write_buffer* is true, ``self.buffer`` is rebuilt as:
        data_before_local + local file data + data_after_local + CD..EOF
        (anything originally between the local data and the CD is dropped).
        """
        logging.info("Add data before local length:" +
                     str(len(data_before_local)))
        new_buffer = self.buffer
        for i in self.offset_central_directory:
            logging.debug("parse central directory at: " + hex(i))
            local_file_header = int.from_bytes(
                self.buffer[i + 42:i + 46],
                "little")
            logging.debug("old local file header: " + hex(local_file_header))
            local_file_header = local_file_header + len(data_before_local)
            logging.debug("new local file header: " + hex(local_file_header))
            bytes_local_file_header = local_file_header.to_bytes(4, "little")
            logging.debug("change value at:" + hex(i + 42))
            new_buffer[i + 42:i + 46] = bytes_local_file_header

        logging.info("old central directory header: " +
                     hex(self.central_dir_file_header))
        new_central_dir_file_header = (self.central_dir_file_header +
                                       len(data_after_local) +
                                       len(data_before_local))
        logging.info("new central directory header: " +
                     hex(new_central_dir_file_header))
        bytes_offset = new_central_dir_file_header.to_bytes(4, "little")
        new_buffer[self.end_central_dir + 16:
                   self.end_central_dir + 20] = bytes_offset
        self.buffer = new_buffer

        if write_buffer:
            new_buffer = (data_before_local +
                          new_buffer[:self.end_of_data] +
                          data_after_local +
                          new_buffer[self.central_dir_file_header:])
            self.buffer = new_buffer

    def get_local_file_data(self):
        """Return everything up to the end of the last local file entry."""
        return self.buffer[:self.end_of_data]

    def get_data_after_central_directory(self):
        """Return everything from the first CD record to end of file."""
        return self.buffer[self.central_dir_file_header:]

    def get_first_part_length(self):
        """Length of the local-file part of the archive."""
        return len(self.get_local_file_data())

    def get_second_part_length(self):
        """Length of the CD + EOCD part of the archive."""
        return len(self.get_data_after_central_directory())
256 |
--------------------------------------------------------------------------------
/Truepolyglot/PdfFileTransformer/pdf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | """
4 | This is free and unencumbered software released into the public domain.
5 |
6 | Anyone is free to copy, modify, publish, use, compile, sell, or
7 | distribute this software, either in source code form or as a compiled
8 | binary, for any purpose, commercial or non-commercial, and by any
9 | means.
10 |
11 | In jurisdictions that recognize copyright laws, the author or authors
12 | of this software dedicate any and all copyright interest in the
13 | software to the public domain. We make this dedication for the benefit
14 | of the public at large and to the detriment of our heirs and
15 | successors. We intend this dedication to be an overt act of
16 | relinquishment in perpetuity of all present and future rights to this
17 | software under copyright law.
18 |
19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
20 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
22 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 | OTHER DEALINGS IN THE SOFTWARE.
26 |
27 | For more information, please refer to
28 | """
29 |
30 | import logging as Logging
31 | import re
32 | import tempfile
33 | from PyPDF2 import PdfFileWriter, PdfFileReader
34 | from PyPDF2.utils import PdfReadError
35 |
logging = Logging.getLogger("pdf")  # named module logger; alias keeps call sites spelled `logging.<level>(...)`
37 |
38 | class Pdf:
39 |
    def __init__(self, filename):
        """Normalise *filename* through PyPDF2, then parse the result.

        The parse_* calls run in dependency order: parse_xref_table
        reads the offset found by parse_xref_offset.
        """
        self.filename = filename
        self.buffer = bytearray()  # normalised PDF bytes produced by clean_and_read_pdf
        self.objects = []  # [(7,0,b"data"), (8,0,b"data2"), ..]
        self.trailer = {}  # {Root: (7, 0), Info: (5, 0)}
        self.translation_table = {}  # {(6,0):7, (5,0): 8}, ..]
        self.original_xref_offset = 0  # offset of the xref section in self.buffer
        self.original_first_obj_offset = 0  # offset of the first "N M obj" marker
        self.file_offset = 0  # extra shift applied when the PDF is embedded in a host file

        self.clean_and_read_pdf()
        self.check_pdf_header()
        self.parse_xref_offset()
        self.parse_xref_table()
        self.parse_objects()
        self.parse_trailer()
56 |
57 | def clean_and_read_pdf(self):
58 | f_input = open(self.filename, "rb")
59 | pdf_header = f_input.read(8)
60 | f_input.seek(0)
61 | f_output = tempfile.TemporaryFile()
62 | writer = PdfFileWriter()
63 | reader = PdfFileReader(f_input)
64 | info = reader.getDocumentInfo()
65 | logging.info("Document info:" + str(info))
66 | writer.addMetadata(info)
67 | if info.producer is None:
68 | writer.addMetadata({u'/Producer': u'TruePolyglot'})
69 | elif info.creator is None:
70 | writer.addMetadata({u'/Creator': u'TruePolyglot'})
71 | try:
72 | writer.cloneReaderDocumentRoot(reader)
73 | writer.write(f_output)
74 | except PdfReadError as e:
75 | logging.error("The PDF appears to be malformed. Try running it through qpdf.")
76 | raise e
77 | f_input.close()
78 | f_output.seek(0)
79 | self.buffer = bytearray(f_output.read())
80 | self.size = len(self.buffer)
81 | f_output.close()
82 |
83 | def check_pdf_header(self):
84 | if self.buffer[0:5] == b"%PDF-":
85 | pdf_version = self.buffer[5:8].decode("utf-8")
86 | logging.info("PDF Header found: " + pdf_version)
87 | else:
88 | raise Exception("PDF Header not found")
89 |
90 | def parse_xref_offset(self):
91 | r = re.compile(b'startxref\n([0-9]+)')
92 | m = r.search(self.buffer)
93 | if m is None:
94 | raise Exception('Unable to find xref offset')
95 | self.original_xref_offset = int(m.group(1))
96 | logging.info("Xref offset found at: " + hex(self.original_xref_offset))
97 |
98 | def parse_xref_table(self):
99 | xref_table = []
100 | r = re.compile(b'xref\n([0-9]+) ([0-9]+)')
101 | offset = self.original_xref_offset
102 | s = r.search(self.buffer[offset:offset + 32])
103 | nb_xtable_object = int(s.group(2))
104 | logging.info("Nb objects in Xref table: " + str(nb_xtable_object))
105 | xref_header_size = s.end()
106 | r = re.compile(b'([0-9]+) ([0-9]+) ([f|n])')
107 | x = 0
108 | for i in range(nb_xtable_object):
109 | s = r.search(
110 | self.buffer[self.original_xref_offset + xref_header_size + x:])
111 | if s is not None:
112 | x = x + s.end()
113 | xref_table.append((int(s.group(1)),
114 | int(s.group(2)),
115 | s.group(3)))
116 | logging.debug("Xref table:")
117 | for i in xref_table:
118 | logging.debug(str(i[0]) + " " +
119 | str(i[1]) + " " +
120 | i[2].decode("utf-8"))
121 |
122 | def parse_objects(self):
123 | r_begin = re.compile(b'([0-9]+) ([0-9]+) obj\n')
124 | r_end = re.compile(b'\nendobj\n')
125 |
126 | offset_buffer = 0
127 | obj = ()
128 | while offset_buffer < self.size:
129 | m_begin = r_begin.match(
130 | self.buffer[offset_buffer:offset_buffer + 32])
131 | obj_nb_index = 0
132 | obj_nb_offset = 0
133 | obj_offset_start = 0
134 | obj_offset_end = 0
135 | if m_begin is not None:
136 | if self.original_first_obj_offset == 0:
137 | self.original_first_obj_offset = (offset_buffer +
138 | m_begin.start())
139 | obj_nb_index = int(m_begin.group(1))
140 | obj_nb_offset = int(m_begin.group(2))
141 | obj_data_start = m_begin.end()
142 | obj_offset_start = offset_buffer + m_begin.start()
143 | while offset_buffer < self.size:
144 | m_end = r_end.match(
145 | self.buffer[offset_buffer:offset_buffer + 8])
146 | if m_end is not None:
147 | obj_offset_end = offset_buffer + m_end.end() - 2
148 | break
149 | else:
150 | offset_buffer = offset_buffer + 1
151 | else:
152 | offset_buffer = offset_buffer + 1
153 |
154 | if (obj_offset_start != 0 and
155 | obj_offset_end != 0):
156 | a = obj_offset_start + obj_data_start
157 | b = obj_offset_end - 6
158 | obj = (obj_nb_index, obj_nb_offset,
159 | self.buffer[a:b])
160 | logging.debug("Objects: (" + str(obj_nb_index) +
161 | ", " + str(obj_nb_offset) +
162 | ", " + hex(obj_offset_start) +
163 | ", " + hex(obj_offset_end))
164 | self.objects.append(obj)
165 |
166 | def parse_trailer(self):
167 | r_begin = re.compile(b'trailer\n')
168 | s_begin = r_begin.search(self.buffer[self.original_xref_offset:])
169 | start = self.original_xref_offset + s_begin.start()
170 | logging.info("Trailer found at:" + hex(start))
171 |
172 | r_root = re.compile(b'/Root ([0-9]+) ([0-9]+) R')
173 | s_root = r_root.search(self.buffer[self.original_xref_offset:])
174 | if s_root is None:
175 | raise Exception('Root not found')
176 | else:
177 | self.trailer["Root"] = (int(s_root.group(1)), int(s_root.group(2)))
178 |
179 | r_info = re.compile(b'/Info ([0-9]+) ([0-9]+) R')
180 | s_info = r_info.search(self.buffer[self.original_xref_offset:])
181 | if s_info is not None:
182 | self.trailer["Info"] = (int(s_info.group(1)), int(s_info.group(2)))
183 |
184 | def get_file_header(self):
185 | return self.buffer[:self.original_first_obj_offset]
186 |
187 | def get_xref_table(self):
188 | offset_xref = 0
189 | buf = (b'xref\n' +
190 | str(offset_xref).encode('utf-8') + b' ' +
191 | str(len(self.objects) + 1).encode('utf-8') + b'\n' +
192 | str(0).zfill(10).encode('utf-8') + b' ' +
193 | str(65535).zfill(5).encode('utf-8') + b' f \n')
194 |
195 | for i in range(len(self.objects)):
196 | obj_start = self.get_object_offset(i)
197 | logging.debug("Obj %d at %d" % (self.objects[i][0], obj_start))
198 | buf = (buf +
199 | (str(obj_start).zfill(10)).encode('utf-8') + b' ' +
200 | str(0).zfill(5).encode('utf-8') + b' ' +
201 | b'n' + b' \n')
202 | return buf
203 |
204 | def get_trailer(self):
205 | trailer_data = (b"trailer\n<<\n/Size " +
206 | str(len(self.objects) + 1).encode("utf-8") +
207 | b"\n/Root " +
208 | str(self.trailer["Root"][0]).encode("utf-8") +
209 | b" " +
210 | str(self.trailer["Root"][1]).encode("utf-8") +
211 | b" R\n")
212 | if "Info" in self.trailer:
213 | trailer_data = (trailer_data +
214 | b"/Info " +
215 | str(self.trailer["Info"][0]).encode("utf-8") +
216 | b" " +
217 | str(self.trailer["Info"][1]).encode("utf-8") +
218 | b" R\n")
219 | trailer_data = trailer_data + b">>"
220 | return trailer_data
221 |
222 | def get_xref_offset(self):
223 | return self.get_end_of_last_object() + 1
224 |
225 | def get_eof(self):
226 | s = (b'startxref\n' +
227 | str(self.get_xref_offset()).encode("utf-8") +
228 | b'\n%%EOF\n')
229 | return s
230 |
231 | def build_object(self, obj):
232 | buf = (str(obj[0]).encode("utf-8") +
233 | b' ' +
234 | str(obj[1]).encode("utf-8") +
235 | b' obj\n' +
236 | obj[2] +
237 | b'\nendobj')
238 | return buf
239 |
240 | def get_build_buffer(self):
241 | b_buffer = bytearray()
242 | b_buffer = b_buffer + self.get_file_header()
243 | for obj in self.objects:
244 | b_buffer = b_buffer + self.build_object(obj) + b'\n'
245 | b_buffer = b_buffer + self.get_xref_table()
246 | b_buffer = b_buffer + self.get_trailer() + b'\n'
247 | b_buffer = b_buffer + self.get_eof()
248 | return b_buffer
249 |
250 | def get_obj(self, nb):
251 | for obj in self.objects:
252 | if obj[0] == nb:
253 | return obj
254 |
255 | def get_end_of_last_object(self):
256 | offset = self.get_last_object_offset()
257 | offset = offset + len(self.build_object(self.objects[-1]))
258 | return offset
259 |
260 | def generate_stream_obj_data(self, data):
261 | buf = (b'<<\n/Filter /FlateDecode\n/Length ' +
262 | str(len(data)).encode("utf-8") +
263 | b'\n>>\nstream\n' +
264 | data +
265 | b'\nendstream')
266 | return buf
267 |
268 | def insert_new_obj_stream_at(self, position, stream_data):
269 | '''
270 | Return offset of stream data
271 | '''
272 | logging.info("Insert obj at %d" % position)
273 | obj_nb = position
274 | obj_off = 0
275 | data = self.generate_stream_obj_data(stream_data)
276 | obj = (obj_nb, obj_off, data)
277 |
278 | obj_data = self.build_object(obj)
279 | full_obj_size = len(obj_data)
280 | logging.info("New object full size is: " + str(full_obj_size))
281 |
282 | obj = (obj_nb, obj_off, data)
283 | self.objects.insert(position, obj)
284 |
285 | self.reorder_objects()
286 | self.fix_trailer_ref()
287 |
288 | def get_first_stream_offset(self):
289 | offset = self.file_offset + len(self.get_file_header())
290 | r = re.compile(b'stream\n')
291 | m = r.search(self.objects[0][2])
292 | offset = offset + len(b"1 0 obj\n") + m.end()
293 | return offset
294 |
295 | def get_last_stream_offset(self):
296 | offset = self.file_offset + self.get_last_object_offset()
297 | r = re.compile(b'stream\n')
298 | m = r.search(self.build_object(self.objects[-1]))
299 | return offset + m.end()
300 |
301 | def get_object_offset(self, index):
302 | offset = self.file_offset + len(self.get_file_header())
303 | for obj in self.objects[:index]:
304 | offset = offset + len(self.build_object(obj)) + 1
305 | return offset
306 |
307 | def get_last_object_offset(self):
308 | offset = self.get_object_offset(len(self.objects) - 1)
309 | return offset
310 |
311 | def insert_new_obj_stream_at_start(self, data):
312 | return self.insert_new_obj_stream_at(0, data)
313 |
314 | def insert_new_obj_stream_at_end(self, data):
315 | return self.insert_new_obj_stream_at(len(self.objects) + 1,
316 | data)
317 |
318 | def generate_translation_table(self):
319 | for i in range(len(self.objects)):
320 | self.translation_table[(self.objects[i][0],
321 | self.objects[i][1])] = i + 1
322 | logging.debug(self.translation_table)
323 |
324 | def replace_ref(self, ibuffer):
325 | '''
326 | Exemple:
327 | in: AZERTY 6 0 R -- BGT 88 0 R HYT
328 | out: AZERTY 77 0 R -- BGT 9 0 R HYT
329 | '''
330 | index = 0
331 | obuffer = bytearray()
332 | while True:
333 | r = re.compile(b'([0-9]+) ([0-9]+) R')
334 | s = r.search(ibuffer[index:])
335 | if s is None:
336 | obuffer = obuffer + ibuffer[index:]
337 | break
338 | o_old = int(s.group(1))
339 | p_old = int(s.group(2))
340 | o_new = self.translation_table[(o_old, p_old)]
341 | p_new = p_old
342 |
343 | newref = (str(o_new).encode("utf-8") +
344 | b" " +
345 | str(p_new).encode("utf-8") +
346 | b" R")
347 |
348 | nbuffer = ibuffer[index:index + s.start()] + newref
349 | obuffer = obuffer + nbuffer
350 | index = index + s.end()
351 | return obuffer
352 |
353 | def reorder_objects(self):
354 | self.generate_translation_table()
355 | offset_obj = len(self.get_file_header())
356 | for i in range(len(self.objects)):
357 | buf = self.objects[i][2]
358 | new_buf = self.replace_ref(buf)
359 | obj_nb = self.objects[i][0]
360 | new_obj_nb = self.translation_table[(obj_nb, 0)]
361 | new_obj_start = offset_obj
362 | size_obj = len(self.build_object((new_obj_nb,
363 | 0,
364 | new_buf)))
365 | new_obj_end = new_obj_start + size_obj
366 |
367 | offset_obj = new_obj_end + 1
368 | obj = (new_obj_nb,
369 | 0,
370 | new_buf)
371 | self.objects[i] = obj
372 |
373 | def fix_trailer_ref(self):
374 | new_obj_nb = self.translation_table[self.trailer["Root"]]
375 | self.trailer["Root"] = (new_obj_nb, 0)
376 |
377 | if "Info" in self.trailer:
378 | new_obj_nb = self.translation_table[self.trailer["Info"]]
379 | self.trailer["Info"] = (new_obj_nb, 0)
380 |
--------------------------------------------------------------------------------
/Truepolyglot/ZipFileTransformer/zipfile.py:
--------------------------------------------------------------------------------
1 | """
2 | Read and write ZIP files.
3 |
4 | XXX references to utf-8 need further investigation.
5 | """
6 | import io
7 | import os
8 | import importlib.util
9 | import sys
10 | import time
11 | import stat
12 | import shutil
13 | import struct
14 | import binascii
15 | import threading
16 |
17 | try:
18 | import zlib # We may need its compression method
19 | crc32 = zlib.crc32
20 | except ImportError:
21 | zlib = None
22 | crc32 = binascii.crc32
23 |
24 | try:
25 | import bz2 # We may need its compression method
26 | except ImportError:
27 | bz2 = None
28 |
29 | try:
30 | import lzma # We may need its compression method
31 | except ImportError:
32 | lzma = None
33 |
# Public API re-exported by ``from zipfile import *``.  Some listed names
# (ZipFile, PyZipFile) are defined further down in the file.
__all__ = ["BadZipFile", "BadZipfile", "error",
           "ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA",
           "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
37 |
class BadZipFile(Exception):
    """Raised when a file is damaged or is not a valid ZIP archive."""
40 |
41 |
class LargeZipFile(Exception):
    """Raised when an archive would need ZIP64 extensions that are disabled.

    Writing data beyond the classic 32-bit size / 16-bit count limits
    requires ZIP64; with ZIP64 support turned off the write fails with this
    exception instead of silently producing a corrupt file.
    """
47 |
error = BadZipfile = BadZipFile # Pre-3.2 compatibility names


# Thresholds beyond which the ZIP64 extensions become necessary.
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = (1 << 16) - 1
ZIP_MAX_COMMENT = (1 << 16) - 1

# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
ZIP_BZIP2 = 12
ZIP_LZMA = 14
# Other ZIP compression methods not supported

# "Version needed to extract" values written into headers.
DEFAULT_VERSION = 20
ZIP64_VERSION = 45
BZIP2_VERSION = 46
LZMA_VERSION = 63
# we recognize (but not necessarily support) all features up to that version
MAX_EXTRACT_VERSION = 63

# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)

# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = b"<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)

_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9

# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)

# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18

# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)

_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11

# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)

# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)

_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
161 |
def _check_zipfile(fp):
    """Return True when *fp* holds a readable end-of-central-directory record."""
    try:
        endrec = _EndRecData(fp)
    except OSError:
        return False
    return bool(endrec)
169 |
def is_zipfile(filename):
    """Quickly check whether a file is a ZIP file via its magic number.

    *filename* may be a path or an already-open file-like object (anything
    with a ``read`` method).  Returns False on any I/O error.
    """
    try:
        if hasattr(filename, "read"):
            return _check_zipfile(fp=filename)
        with open(filename, "rb") as fp:
            return _check_zipfile(fp)
    except OSError:
        return False
185 |
def _EndRecData64(fpin, offset, endrec):
    """Update *endrec* in place from the ZIP64 end-of-archive records.

    *offset* is the (negative) position of the classic end-of-central-dir
    record relative to the end of the file.  When no valid ZIP64 locator or
    record can be read, *endrec* is returned unchanged.
    """
    try:
        fpin.seek(offset - sizeEndCentDir64Locator, 2)
    except OSError:
        # File too small to contain a ZIP64 locator; keep the classic record.
        return endrec

    locator = fpin.read(sizeEndCentDir64Locator)
    if len(locator) != sizeEndCentDir64Locator:
        return endrec
    sig, disk_no, rel_off, disk_count = struct.unpack(
        structEndArchive64Locator, locator)
    if sig != stringEndArchive64Locator:
        return endrec

    if disk_no != 0 or disk_count != 1:
        raise BadZipFile("zipfiles that span multiple disks are not supported")

    # Assume no 'zip64 extensible data' sits between the two records.
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    record = fpin.read(sizeEndCentDir64)
    if len(record) != sizeEndCentDir64:
        return endrec
    (sig, _sz, _create_version, _read_version, disk_num, disk_dir,
     dircount, dircount2, dirsize, diroffset) = struct.unpack(
        structEndArchive64, record)
    if sig != stringEndArchive64:
        return endrec

    # Overwrite the classic record's fields with the 64-bit values.
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
227 |
228 |
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.

    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record."""

    # Determine file size
    fpin.seek(0, 2)
    filesize = fpin.tell()

    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    try:
        fpin.seek(-sizeEndCentDir, 2)
    except OSError:
        # File smaller than a single EOCD record: cannot be a ZIP file.
        return None
    data = fpin.read()
    if (len(data) == sizeEndCentDir and
        data[0:4] == stringEndArchive and
        data[-2:] == b"\000\000"):
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec=list(endrec)

        # Append a blank comment and record start offset
        endrec.append(b"")
        endrec.append(filesize - sizeEndCentDir)

        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)

    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment. Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long. It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start:start+sizeEndCentDir]
        if len(recData) != sizeEndCentDir:
            # Zip file is corrupted.
            return None
        endrec = list(struct.unpack(structEndArchive, recData))
        commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
        comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
        endrec.append(comment)
        endrec.append(maxCommentStart + start)

        # Try to read the "Zip64 end of central directory" structure
        # (offset passed is relative to the end of the file).
        return _EndRecData64(fpin, maxCommentStart + start - filesize,
                             endrec)

    # Unable to find a valid end of central directory structure
    return None
288 |
289 |
class ZipInfo (object):
    """Class with attributes describing each file in the ZIP archive."""

    # __slots__ keeps per-entry memory low: large archives create one
    # ZipInfo per member.
    __slots__ = (
        'orig_filename',
        'filename',
        'date_time',
        'compress_type',
        '_compresslevel',
        'comment',
        'extra',
        'create_system',
        'create_version',
        'extract_version',
        'reserved',
        'flag_bits',
        'volume',
        'internal_attr',
        'external_attr',
        'header_offset',
        'CRC',
        'compress_size',
        'file_size',
        '_raw_time',
    )
315 |
    def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
        """Create an archive member description.

        *filename* is the name stored in the archive; *date_time* is a
        6-tuple (year, month, day, hour, minute, second), 1980 or later.
        """
        self.orig_filename = filename # Original file name in archive

        # Terminate the file name at the first null byte. Null bytes in file
        # names are used as tricks by viruses in archives.
        null_byte = filename.find(chr(0))
        if null_byte >= 0:
            filename = filename[0:null_byte]
        # This is used to ensure paths in generated ZIP files always use
        # forward slashes as the directory separator, as required by the
        # ZIP format specification.
        if os.sep != "/" and os.sep in filename:
            filename = filename.replace(os.sep, "/")

        self.filename = filename # Normalized file name
        self.date_time = date_time # year, month, day, hour, min, sec

        if date_time[0] < 1980:
            raise ValueError('ZIP does not support timestamps before 1980')

        # Standard values:
        self.compress_type = ZIP_STORED # Type of compression for the file
        self._compresslevel = None # Level for the compressor
        self.comment = b"" # Comment for each file
        self.extra = b"" # ZIP extra data
        if sys.platform == 'win32':
            self.create_system = 0 # System which created ZIP archive
        else:
            # Assume everything else is unix-y
            self.create_system = 3 # System which created ZIP archive
        self.create_version = DEFAULT_VERSION # Version which created ZIP archive
        self.extract_version = DEFAULT_VERSION # Version needed to extract archive
        self.reserved = 0 # Must be zero
        self.flag_bits = 0 # ZIP flag bits
        self.volume = 0 # Volume number of file header
        self.internal_attr = 0 # Internal attributes
        self.external_attr = 0 # External file attributes
        # Other attributes are set by class ZipFile:
        # header_offset Byte offset to the file header
        # CRC CRC-32 of the uncompressed file
        # compress_size Size of the compressed file
        # file_size Size of the uncompressed file
358 |
359 | def __repr__(self):
360 | result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)]
361 | if self.compress_type != ZIP_STORED:
362 | result.append(' compress_type=%s' %
363 | compressor_names.get(self.compress_type,
364 | self.compress_type))
365 | hi = self.external_attr >> 16
366 | lo = self.external_attr & 0xFFFF
367 | if hi:
368 | result.append(' filemode=%r' % stat.filemode(hi))
369 | if lo:
370 | result.append(' external_attr=%#x' % lo)
371 | isdir = self.is_dir()
372 | if not isdir or self.file_size:
373 | result.append(' file_size=%r' % self.file_size)
374 | if ((not isdir or self.compress_size) and
375 | (self.compress_type != ZIP_STORED or
376 | self.file_size != self.compress_size)):
377 | result.append(' compress_size=%r' % self.compress_size)
378 | result.append('>')
379 | return ''.join(result)
380 |
381 | def FileHeader(self, zip64=None):
382 | """Return the per-file header as a string."""
383 | dt = self.date_time
384 | dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
385 | dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
386 | if self.flag_bits & 0x08:
387 | # Set these to zero because we write them after the file data
388 | CRC = compress_size = file_size = 0
389 | else:
390 | CRC = self.CRC
391 | compress_size = self.compress_size
392 | file_size = self.file_size
393 |
394 | extra = self.extra
395 |
396 | min_version = 0
397 | if zip64 is None:
398 | zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
399 | if zip64:
400 | fmt = ' ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
404 | if not zip64:
405 | raise LargeZipFile("Filesize would require ZIP64 extensions")
406 | # File is larger than what fits into a 4 byte integer,
407 | # fall back to the ZIP64 extension
408 | file_size = 0xffffffff
409 | compress_size = 0xffffffff
410 | min_version = ZIP64_VERSION
411 |
412 | if self.compress_type == ZIP_BZIP2:
413 | min_version = max(BZIP2_VERSION, min_version)
414 | elif self.compress_type == ZIP_LZMA:
415 | min_version = max(LZMA_VERSION, min_version)
416 |
417 | self.extract_version = max(min_version, self.extract_version)
418 | self.create_version = max(min_version, self.create_version)
419 | filename, flag_bits = self._encodeFilenameFlags()
420 | header = struct.pack(structFileHeader, stringFileHeader,
421 | self.extract_version, self.reserved, flag_bits,
422 | self.compress_type, dostime, dosdate, CRC,
423 | compress_size, file_size,
424 | len(filename), len(extra))
425 | return header + filename + extra
426 |
427 | def _encodeFilenameFlags(self):
428 | try:
429 | return self.filename.encode('ascii'), self.flag_bits
430 | except UnicodeEncodeError:
431 | return self.filename.encode('utf-8'), self.flag_bits | 0x800
432 |
433 | def _decodeExtra(self):
434 | # Try to decode the extra field.
435 | extra = self.extra
436 | unpack = struct.unpack
437 | while len(extra) >= 4:
438 | tp, ln = unpack(' len(extra):
440 | raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln))
441 | if tp == 0x0001:
442 | if ln >= 24:
443 | counts = unpack('> 1) ^ 0xEDB88320
519 | else:
520 | crc >>= 1
521 | return crc
522 |
523 | # ZIP supports a password-based form of encryption. Even though known
524 | # plaintext attacks have been found against it, it is still useful
525 | # to be able to get data out of such a file.
526 | #
527 | # Usage:
528 | # zd = _ZipDecrypter(mypwd)
529 | # plain_bytes = zd(cypher_bytes)
530 |
def _ZipDecrypter(pwd):
    """Return a closure decrypting bytes with ZIP's legacy stream cipher.

    The three keys are initialized to the PKWARE-specified constants and
    then updated with each password byte; the returned ``decrypter``
    continues updating them with every plaintext byte it produces, so calls
    must be made in stream order.
    """
    key0 = 305419896
    key1 = 591751049
    key2 = 878082192

    # Lazily build the shared CRC table used by the key-update primitive.
    global _crctable
    if _crctable is None:
        _crctable = list(map(_gen_crc, range(256)))
    crctable = _crctable

    def crc32(ch, crc):
        """Compute the CRC32 primitive on one byte."""
        return (crc >> 8) ^ crctable[(crc ^ ch) & 0xFF]

    def update_keys(c):
        # Mix one byte into the three-key cipher state (order matters).
        nonlocal key0, key1, key2
        key0 = crc32(c, key0)
        key1 = (key1 + (key0 & 0xFF)) & 0xFFFFFFFF
        key1 = (key1 * 134775813 + 1) & 0xFFFFFFFF
        key2 = crc32(key1 >> 24, key2)

    # Prime the keys with the password bytes.
    for p in pwd:
        update_keys(p)

    def decrypter(data):
        """Decrypt a bytes object."""
        result = bytearray()
        append = result.append
        for c in data:
            # Derive the keystream byte from key2 and XOR it in.
            k = key2 | 2
            c ^= ((k * (k^1)) >> 8) & 0xFF
            update_keys(c)
            append(c)
        return bytes(result)

    return decrypter
567 |
568 |
569 | class LZMACompressor:
570 |
571 | def __init__(self):
572 | self._comp = None
573 |
574 | def _init(self):
575 | props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1})
576 | self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[
577 | lzma._decode_filter_properties(lzma.FILTER_LZMA1, props)
578 | ])
579 | return struct.pack('')
819 | return ''.join(result)
820 |
821 | def readline(self, limit=-1):
822 | """Read and return a line from the stream.
823 |
824 | If limit is specified, at most limit bytes will be read.
825 | """
826 |
827 | if limit < 0:
828 | # Shortcut common case - newline found in buffer.
829 | i = self._readbuffer.find(b'\n', self._offset) + 1
830 | if i > 0:
831 | line = self._readbuffer[self._offset: i]
832 | self._offset = i
833 | return line
834 |
835 | return io.BufferedIOBase.readline(self, limit)
836 |
    def peek(self, n=1):
        """Returns buffered bytes without advancing the position.

        Tries to ensure at least *n* bytes are buffered, but never returns
        more than 512 bytes.
        """
        if n > len(self._readbuffer) - self._offset:
            chunk = self.read(n)
            # read() consumed the buffer; splice its result back in front so
            # the stream position is effectively unchanged.
            if len(chunk) > self._offset:
                self._readbuffer = chunk + self._readbuffer[self._offset:]
                self._offset = 0
            else:
                self._offset -= len(chunk)

        # Return up to 512 bytes to reduce allocation overhead for tight loops.
        return self._readbuffer[self._offset: self._offset + 512]
849 |
850 | def readable(self):
851 | return True
852 |
    def read(self, n=-1):
        """Read and return up to n bytes.

        If the argument is omitted, None, or negative, data is read and
        returned until EOF is reached.
        """
        if n is None or n < 0:
            # Drain the buffer, then decompress everything remaining.
            buf = self._readbuffer[self._offset:]
            self._readbuffer = b''
            self._offset = 0
            while not self._eof:
                buf += self._read1(self.MAX_N)
            return buf

        end = n + self._offset
        if end < len(self._readbuffer):
            # Entire request satisfied from the buffer.
            buf = self._readbuffer[self._offset:end]
            self._offset = end
            return buf

        # Take what is buffered, then pull the remaining n bytes from disk.
        n = end - len(self._readbuffer)
        buf = self._readbuffer[self._offset:]
        self._readbuffer = b''
        self._offset = 0
        while n > 0 and not self._eof:
            data = self._read1(n)
            if n < len(data):
                # Keep the surplus for the next call.
                self._readbuffer = data
                self._offset = n
                buf += data[:n]
                break
            buf += data
            n -= len(data)
        return buf
885 |
886 | def _update_crc(self, newdata):
887 | # Update the CRC using the given data.
888 | if self._expected_crc is None:
889 | # No need to compute the CRC if we don't have a reference value
890 | return
891 | self._running_crc = crc32(newdata, self._running_crc)
892 | # Check the CRC if we're at the end of the file
893 | if self._eof and self._running_crc != self._expected_crc:
894 | raise BadZipFile("Bad CRC-32 for file %r" % self.name)
895 |
    def read1(self, n):
        """Read up to n bytes with at most one read() system call."""

        if n is None or n < 0:
            # Return the buffer plus at most one chunk of fresh data.
            buf = self._readbuffer[self._offset:]
            self._readbuffer = b''
            self._offset = 0
            while not self._eof:
                data = self._read1(self.MAX_N)
                if data:
                    buf += data
                    break
            return buf

        end = n + self._offset
        if end < len(self._readbuffer):
            # Entire request satisfied from the buffer.
            buf = self._readbuffer[self._offset:end]
            self._offset = end
            return buf

        # Take what is buffered, then perform at most one more read.
        n = end - len(self._readbuffer)
        buf = self._readbuffer[self._offset:]
        self._readbuffer = b''
        self._offset = 0
        if n > 0:
            while not self._eof:
                data = self._read1(n)
                if n < len(data):
                    # Keep the surplus for the next call.
                    self._readbuffer = data
                    self._offset = n
                    buf += data[:n]
                    break
                if data:
                    buf += data
                    break
        return buf
932 |
    def _read1(self, n):
        # Read up to n compressed bytes with at most one read() system call,
        # decrypt and decompress them.
        if self._eof or n <= 0:
            return b''

        # Read from file.
        if self._compress_type == ZIP_DEFLATED:
            ## Handle unconsumed data.
            data = self._decompressor.unconsumed_tail
            if n > len(data):
                data += self._read2(n - len(data))
        else:
            data = self._read2(n)

        if self._compress_type == ZIP_STORED:
            # Stored data needs no decompression; EOF == raw bytes exhausted.
            self._eof = self._compress_left <= 0
        elif self._compress_type == ZIP_DEFLATED:
            n = max(n, self.MIN_READ_SIZE)
            data = self._decompressor.decompress(data, n)
            # EOF when the deflate stream ends, or when the raw bytes are
            # exhausted and nothing remains in the decompressor.
            self._eof = (self._decompressor.eof or
                         self._compress_left <= 0 and
                         not self._decompressor.unconsumed_tail)
            if self._eof:
                data += self._decompressor.flush()
        else:
            # bz2/lzma decompressors take no size limit.
            data = self._decompressor.decompress(data)
            self._eof = self._decompressor.eof or self._compress_left <= 0

        # Never hand out more than the declared uncompressed size.
        data = data[:self._left]
        self._left -= len(data)
        if self._left <= 0:
            self._eof = True
        self._update_crc(data)
        return data
968 |
    def _read2(self, n):
        """Read up to n raw (still compressed/encrypted) bytes from the
        underlying file, decrypting them when a decrypter is set."""
        if self._compress_left <= 0:
            return b''

        # Clamp the request between MIN_READ_SIZE and the bytes remaining.
        n = max(n, self.MIN_READ_SIZE)
        n = min(n, self._compress_left)

        data = self._fileobj.read(n)
        self._compress_left -= len(data)
        if not data:
            # The entry claims more bytes than the file provides.
            raise EOFError

        if self._decrypter is not None:
            data = self._decrypter(data)
        return data
984 |
985 | def close(self):
986 | try:
987 | if self._close_fileobj:
988 | self._fileobj.close()
989 | finally:
990 | super().close()
991 |
    def seekable(self):
        # True only when the underlying stream supported tell()/seek()
        # when this ZipExtFile was constructed.
        return self._seekable
994 |
    def seek(self, offset, whence=0):
        """Seek to a position within the *uncompressed* data.

        Seeking backwards past the read buffer rewinds to the start of the
        member and re-reads forward, so backward seeks on compressed
        members cost O(new position).
        """
        if not self._seekable:
            raise io.UnsupportedOperation("underlying stream is not seekable")
        curr_pos = self.tell()
        if whence == 0: # Seek from start of file
            new_pos = offset
        elif whence == 1: # Seek from current position
            new_pos = curr_pos + offset
        elif whence == 2: # Seek from EOF
            new_pos = self._orig_file_size + offset
        else:
            raise ValueError("whence must be os.SEEK_SET (0), "
                             "os.SEEK_CUR (1), or os.SEEK_END (2)")

        # Clamp the target into [0, uncompressed size].
        if new_pos > self._orig_file_size:
            new_pos = self._orig_file_size

        if new_pos < 0:
            new_pos = 0

        read_offset = new_pos - curr_pos
        buff_offset = read_offset + self._offset

        if buff_offset >= 0 and buff_offset < len(self._readbuffer):
            # Just move the _offset index if the new position is in the _readbuffer
            self._offset = buff_offset
            read_offset = 0
        elif read_offset < 0:
            # Position is before the current position. Reset the ZipExtFile
            # to its initial state (saved _orig_* values) and decompress
            # forward from the start of the member.
            self._fileobj.seek(self._orig_compress_start)
            self._running_crc = self._orig_start_crc
            self._compress_left = self._orig_compress_size
            self._left = self._orig_file_size
            self._readbuffer = b''
            self._offset = 0
            self._decompressor = _get_decompressor(self._compress_type)
            self._eof = False
            read_offset = new_pos

        # Consume forward in bounded chunks until the target is reached.
        while read_offset > 0:
            read_len = min(self.MAX_SEEK_READ, read_offset)
            self.read(read_len)
            read_offset -= read_len

        return self.tell()
1040 |
1041 | def tell(self):
1042 | if not self._seekable:
1043 | raise io.UnsupportedOperation("underlying stream is not seekable")
1044 | filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset
1045 | return filepos
1046 |
1047 |
class _ZipWriteFile(io.BufferedIOBase):
    """Writable stream returned by ZipFile.open(..., mode='w').

    Tracks the CRC and the (un)compressed sizes as data is written, and on
    close() patches the member's ZipInfo / local file header accordingly.
    """

    def __init__(self, zf, zinfo, zip64):
        self._zinfo = zinfo          # ZipInfo being written
        self._zip64 = zip64          # whether the header uses ZIP64 fields
        self._zipfile = zf           # owning ZipFile
        self._compressor = _get_compressor(zinfo.compress_type,
                                           zinfo._compresslevel)
        self._file_size = 0          # uncompressed bytes written so far
        self._compress_size = 0      # compressed bytes written so far
        self._crc = 0                # running CRC-32 of the uncompressed data

    @property
    def _fileobj(self):
        # Always go through the owning ZipFile so a swapped fp is honored.
        return self._zipfile.fp

    def writable(self):
        return True

    def write(self, data):
        """Write 'data', updating size/CRC bookkeeping; return len(data)."""
        if self.closed:
            raise ValueError('I/O operation on closed file.')
        nbytes = len(data)
        self._file_size += nbytes
        self._crc = crc32(data, self._crc)
        if self._compressor:
            data = self._compressor.compress(data)
            self._compress_size += len(data)
        self._fileobj.write(data)
        return nbytes

    def close(self):
        """Flush the compressor and fix up the member's header metadata.

        NOTE(review): the data-descriptor branch below was garbled in the
        vendored copy ('<'-stripped); restored from upstream CPython 3.7
        zipfile — verify against the intended baseline.
        """
        if self.closed:
            return
        super().close()
        # Flush any data from the compressor, and update header info
        if self._compressor:
            buf = self._compressor.flush()
            self._compress_size += len(buf)
            self._fileobj.write(buf)
            self._zinfo.compress_size = self._compress_size
        else:
            self._zinfo.compress_size = self._file_size
        self._zinfo.CRC = self._crc
        self._zinfo.file_size = self._file_size

        # Write updated header info
        if self._zinfo.flag_bits & 0x08:
            # Non-seekable output: write CRC and file sizes in a data
            # descriptor record placed after the file data.
            fmt = '<LLQQ' if self._zip64 else '<LLLL'
            self._fileobj.write(struct.pack(fmt, _DD_SIGNATURE, self._zinfo.CRC,
                self._zinfo.compress_size, self._zinfo.file_size))
            self._zipfile.start_dir = self._fileobj.tell()
        else:
            # Seekable output: the header was written with placeholder
            # sizes; make sure the real sizes still fit without ZIP64.
            if not self._zip64:
                if self._file_size > ZIP64_LIMIT:
                    raise RuntimeError('File size unexpectedly exceeded ZIP64 '
                                       'limit')
                if self._compress_size > ZIP64_LIMIT:
                    raise RuntimeError('Compressed size unexpectedly exceeded '
                                       'ZIP64 limit')
            # Seek backwards and write file header (which will now include
            # correct CRC and file sizes)

            # Preserve current position in file
            self._zipfile.start_dir = self._fileobj.tell()
            self._fileobj.seek(self._zinfo.header_offset)
            self._fileobj.write(self._zinfo.FileHeader(self._zip64))
            self._fileobj.seek(self._zipfile.start_dir)

        self._zipfile._writing = False

        # Successfully written: Add file to our caches
        self._zipfile.filelist.append(self._zinfo)
        self._zipfile.NameToInfo[self._zinfo.filename] = self._zinfo
1122 |
class ZipFile:
    """ Class with methods to open, read, write, close, list zip files.

    z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True,
                compresslevel=None)

    file: Either the path to the file, or a file-like object.
          If it is a path, the file will be opened and closed by ZipFile.
    mode: The mode can be either read 'r', write 'w', exclusive create 'x',
          or append 'a'.
    compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib),
                 ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma).
    allowZip64: if True ZipFile will create files with ZIP64 extensions when
                needed, otherwise it will raise an exception when this would
                be necessary.
    compresslevel: None (default for the given compression type) or an integer
                   specifying the level to pass to the compressor.
                   When using ZIP_STORED or ZIP_LZMA this keyword has no effect.
                   When using ZIP_DEFLATED integers 0 through 9 are accepted.
                   When using ZIP_BZIP2 integers 1 through 9 are accepted.

    """

    fp = None                   # Set here since __del__ checks it
    # Lazily-built translate table; see _sanitize_windows_name.
    _windows_illegal_name_trans_table = None
1148 |
    def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True,
                 compresslevel=None):
        """Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
        or append 'a'."""
        if mode not in ('r', 'w', 'x', 'a'):
            raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'")

        _check_compression(compression)

        self._allowZip64 = allowZip64
        self._didModify = False
        self.debug = 0  # Level of printing: 0 through 3
        self.NameToInfo = {}  # Find file info given name
        self.filelist = []  # List of ZipInfo instances for archive
        self.compression = compression  # Method of compression
        self.compresslevel = compresslevel
        self.mode = mode
        self.pwd = None
        self._comment = b''

        # Check if we were passed a file-like object
        if isinstance(file, os.PathLike):
            file = os.fspath(file)
        if isinstance(file, str):
            # No, it's a filename
            self._filePassed = 0
            self.filename = file
            # modeDict doubles as a fallback chain: if opening with the
            # preferred binary mode fails, retry with the mode it maps to
            # (e.g. 'a' -> 'r+b' -> 'w+b' for a nonexistent file).
            modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b',
                        'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
            filemode = modeDict[mode]
            while True:
                try:
                    self.fp = io.open(file, filemode)
                except OSError:
                    if filemode in modeDict:
                        filemode = modeDict[filemode]
                        continue
                    raise
                break
        else:
            self._filePassed = 1
            self.fp = file
            self.filename = getattr(file, 'name', None)
        self._fileRefCnt = 1
        self._lock = threading.RLock()
        self._seekable = True
        self._writing = False

        try:
            if mode == 'r':
                self._RealGetContents()
            elif mode in ('w', 'x'):
                # set the modified flag so central directory gets written
                # even if no files are added to the archive
                self._didModify = True
                try:
                    self.start_dir = self.fp.tell()
                except (AttributeError, OSError):
                    # No tell(): wrap so we can still track the position.
                    self.fp = _Tellable(self.fp)
                    self.start_dir = 0
                    self._seekable = False
                else:
                    # Some file-like objects can provide tell() but not seek()
                    try:
                        self.fp.seek(self.start_dir)
                    except (AttributeError, OSError):
                        self._seekable = False
            elif mode == 'a':
                try:
                    # See if file is a zip file
                    self._RealGetContents()
                    # seek to start of directory and overwrite
                    self.fp.seek(self.start_dir)
                except BadZipFile:
                    # file is not a zip file, just append
                    self.fp.seek(0, 2)

                # set the modified flag so central directory gets written
                # even if no files are added to the archive
                self._didModify = True
                self.start_dir = self.fp.tell()
            else:
                raise ValueError("Mode must be 'r', 'w', 'x', or 'a'")
        except:
            # On any setup failure, release the file handle before
            # propagating so a filename-opened fp is not leaked.
            fp = self.fp
            self.fp = None
            self._fpclose(fp)
            raise
1237 |
    def __enter__(self):
        # Support "with ZipFile(...) as zf:" usage.
        return self

    def __exit__(self, type, value, traceback):
        # Close on context-manager exit, including after an exception.
        self.close()
1243 |
1244 | def __repr__(self):
1245 | result = ['<%s.%s' % (self.__class__.__module__,
1246 | self.__class__.__qualname__)]
1247 | if self.fp is not None:
1248 | if self._filePassed:
1249 | result.append(' file=%r' % self.fp)
1250 | elif self.filename is not None:
1251 | result.append(' filename=%r' % self.filename)
1252 | result.append(' mode=%r' % self.mode)
1253 | else:
1254 | result.append(' [closed]')
1255 | result.append('>')
1256 | return ''.join(result)
1257 |
    def _RealGetContents(self):
        """Read in the table of contents for the ZIP file."""
        fp = self.fp
        try:
            endrec = _EndRecData(fp)
        except OSError:
            raise BadZipFile("File is not a zip file")
        if not endrec:
            raise BadZipFile("File is not a zip file")
        if self.debug > 1:
            print(endrec)
        size_cd = endrec[_ECD_SIZE]             # bytes in central directory
        offset_cd = endrec[_ECD_OFFSET]         # offset of central directory
        self._comment = endrec[_ECD_COMMENT]    # archive comment

        # "concat" is zero, unless zip was concatenated to another file
        concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
        if endrec[_ECD_SIGNATURE] == stringEndArchive64:
            # If Zip64 extension structures are present, account for them
            concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)

        if self.debug > 2:
            inferred = concat + offset_cd
            print("given, inferred, offset", offset_cd, inferred, concat)
        # self.start_dir: Position of start of central directory
        self.start_dir = offset_cd + concat
        fp.seek(self.start_dir, 0)
        data = fp.read(size_cd)
        # Parse the central directory from an in-memory copy.
        fp = io.BytesIO(data)
        total = 0
        while total < size_cd:
            centdir = fp.read(sizeCentralDir)
            if len(centdir) != sizeCentralDir:
                raise BadZipFile("Truncated central directory")
            centdir = struct.unpack(structCentralDir, centdir)
            if centdir[_CD_SIGNATURE] != stringCentralDir:
                raise BadZipFile("Bad magic number for central directory")
            if self.debug > 2:
                print(centdir)
            filename = fp.read(centdir[_CD_FILENAME_LENGTH])
            flags = centdir[5]
            if flags & 0x800:
                # UTF-8 file names extension
                filename = filename.decode('utf-8')
            else:
                # Historical ZIP filename encoding
                filename = filename.decode('cp437')
            # Create ZipInfo instance to store file information
            x = ZipInfo(filename)
            x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
            x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
            x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
            (x.create_version, x.create_system, x.extract_version, x.reserved,
             x.flag_bits, x.compress_type, t, d,
             x.CRC, x.compress_size, x.file_size) = centdir[1:12]
            if x.extract_version > MAX_EXTRACT_VERSION:
                raise NotImplementedError("zip file version %.1f" %
                                          (x.extract_version / 10))
            x.volume, x.internal_attr, x.external_attr = centdir[15:18]
            # Convert date/time code to (year, month, day, hour, min, sec)
            x._raw_time = t
            x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
                            t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )

            x._decodeExtra()
            # Local header offsets are relative to the zip data, which may
            # be preceded by arbitrary bytes ("concat").
            x.header_offset = x.header_offset + concat
            self.filelist.append(x)
            self.NameToInfo[x.filename] = x

            # update total bytes read from central directory
            total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
                     + centdir[_CD_EXTRA_FIELD_LENGTH]
                     + centdir[_CD_COMMENT_LENGTH])

        if self.debug > 2:
            print("total", total)
1334 |
1335 |
1336 | def namelist(self):
1337 | """Return a list of file names in the archive."""
1338 | return [data.filename for data in self.filelist]
1339 |
    def infolist(self):
        """Return a list of class ZipInfo instances for files in the
        archive."""
        # Note: this is the live internal list, not a copy.
        return self.filelist
1344 |
1345 | def printdir(self, file=None):
1346 | """Print a table of contents for the zip file."""
1347 | print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"),
1348 | file=file)
1349 | for zinfo in self.filelist:
1350 | date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
1351 | print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size),
1352 | file=file)
1353 |
1354 | def testzip(self):
1355 | """Read all the files and check the CRC."""
1356 | chunk_size = 2 ** 20
1357 | for zinfo in self.filelist:
1358 | try:
1359 | # Read by chunks, to avoid an OverflowError or a
1360 | # MemoryError with very large embedded files.
1361 | with self.open(zinfo.filename, "r") as f:
1362 | while f.read(chunk_size): # Check CRC-32
1363 | pass
1364 | except BadZipFile:
1365 | return zinfo.filename
1366 |
1367 | def getinfo(self, name):
1368 | """Return the instance of ZipInfo given 'name'."""
1369 | info = self.NameToInfo.get(name)
1370 | if info is None:
1371 | raise KeyError(
1372 | 'There is no item named %r in the archive' % name)
1373 |
1374 | return info
1375 |
1376 | def setpassword(self, pwd):
1377 | """Set default password for encrypted files."""
1378 | if pwd and not isinstance(pwd, bytes):
1379 | raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
1380 | if pwd:
1381 | self.pwd = pwd
1382 | else:
1383 | self.pwd = None
1384 |
    @property
    def comment(self):
        """The comment text associated with the ZIP file."""
        return self._comment

    @comment.setter
    def comment(self, comment):
        # Setting the comment marks the archive modified so the end record
        # (which carries the comment) is rewritten on close().
        if not isinstance(comment, bytes):
            raise TypeError("comment: expected bytes, got %s" % type(comment).__name__)
        # check for valid comment length
        if len(comment) > ZIP_MAX_COMMENT:
            import warnings
            warnings.warn('Archive comment is too long; truncating to %d bytes'
                          % ZIP_MAX_COMMENT, stacklevel=2)
            comment = comment[:ZIP_MAX_COMMENT]
        self._comment = comment
        self._didModify = True
1402 |
    def read(self, name, pwd=None):
        """Return the file bytes for 'name'; 'pwd' decrypts if needed."""
        with self.open(name, "r", pwd) as fp:
            return fp.read()
1407 |
    def open(self, name, mode="r", pwd=None, *, force_zip64=False):
        """Return file-like object for 'name'.

        name is a string for the file name within the ZIP file, or a ZipInfo
        object.

        mode should be 'r' to read a file already in the ZIP file, or 'w' to
        write to a file newly added to the archive.

        pwd is the password to decrypt files (only used for reading).

        When writing, if the file size is not known in advance but may exceed
        2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large
        files. If the size is known in advance, it is best to pass a ZipInfo
        instance for name, with zinfo.file_size set.
        """
        if mode not in {"r", "w"}:
            raise ValueError('open() requires mode "r" or "w"')
        if pwd and not isinstance(pwd, bytes):
            raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__)
        if pwd and (mode == "w"):
            raise ValueError("pwd is only supported for reading files")
        if not self.fp:
            raise ValueError(
                "Attempt to use ZIP archive that was already closed")

        # Make sure we have an info object
        if isinstance(name, ZipInfo):
            # 'name' is already an info object
            zinfo = name
        elif mode == 'w':
            zinfo = ZipInfo(name)
            zinfo.compress_type = self.compression
            zinfo._compresslevel = self.compresslevel
        else:
            # Get info object for name
            zinfo = self.getinfo(name)

        if mode == 'w':
            return self._open_to_write(zinfo, force_zip64=force_zip64)

        if self._writing:
            raise ValueError("Can't read from the ZIP file while there "
                             "is an open writing handle on it. "
                             "Close the writing handle before trying to read.")

        # Open for reading:
        self._fileRefCnt += 1
        # _SharedFile gives this reader an independent position on self.fp;
        # the refcount is released via self._fpclose when it is closed.
        zef_file = _SharedFile(self.fp, zinfo.header_offset,
                               self._fpclose, self._lock, lambda: self._writing)
        try:
            # Skip the file header:
            fheader = zef_file.read(sizeFileHeader)
            if len(fheader) != sizeFileHeader:
                raise BadZipFile("Truncated file header")
            fheader = struct.unpack(structFileHeader, fheader)
            if fheader[_FH_SIGNATURE] != stringFileHeader:
                raise BadZipFile("Bad magic number for file header")

            fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
            if fheader[_FH_EXTRA_FIELD_LENGTH]:
                zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])

            if zinfo.flag_bits & 0x20:
                # Zip 2.7: compressed patched data
                raise NotImplementedError("compressed patched data (flag bit 5)")

            if zinfo.flag_bits & 0x40:
                # strong encryption
                raise NotImplementedError("strong encryption (flag bit 6)")

            if zinfo.flag_bits & 0x800:
                # UTF-8 filename
                fname_str = fname.decode("utf-8")
            else:
                fname_str = fname.decode("cp437")

            # Sanity check: local header name must match central directory.
            if fname_str != zinfo.orig_filename:
                raise BadZipFile(
                    'File name in directory %r and header %r differ.'
                    % (zinfo.orig_filename, fname))

            # check for encrypted flag & handle password
            is_encrypted = zinfo.flag_bits & 0x1
            zd = None
            if is_encrypted:
                if not pwd:
                    pwd = self.pwd
                if not pwd:
                    raise RuntimeError("File %r is encrypted, password "
                                       "required for extraction" % name)

                zd = _ZipDecrypter(pwd)
                # The first 12 bytes in the cypher stream is an encryption header
                # used to strengthen the algorithm. The first 11 bytes are
                # completely random, while the 12th contains the MSB of the CRC,
                # or the MSB of the file time depending on the header type
                # and is used to check the correctness of the password.
                header = zef_file.read(12)
                h = zd(header[0:12])
                if zinfo.flag_bits & 0x8:
                    # compare against the file type from extended local headers
                    check_byte = (zinfo._raw_time >> 8) & 0xff
                else:
                    # compare against the CRC otherwise
                    check_byte = (zinfo.CRC >> 24) & 0xff
                if h[11] != check_byte:
                    raise RuntimeError("Bad password for file %r" % name)

            return ZipExtFile(zef_file, mode, zinfo, zd, True)
        except:
            # Release the shared handle (and its refcount) on any failure.
            zef_file.close()
            raise
1521 |
    def _open_to_write(self, zinfo, force_zip64=False):
        """Return a _ZipWriteFile set up to write the member described by
        'zinfo'; writes the (provisional) local file header first."""
        if force_zip64 and not self._allowZip64:
            raise ValueError(
                "force_zip64 is True, but allowZip64 was False when opening "
                "the ZIP file."
            )
        if self._writing:
            raise ValueError("Can't write to the ZIP file while there is "
                             "another write handle open on it. "
                             "Close the first handle before opening another.")

        # Sizes and CRC are overwritten with correct data after processing the file
        if not hasattr(zinfo, 'file_size'):
            zinfo.file_size = 0
        zinfo.compress_size = 0
        zinfo.CRC = 0

        zinfo.flag_bits = 0x00
        if zinfo.compress_type == ZIP_LZMA:
            # Compressed data includes an end-of-stream (EOS) marker
            zinfo.flag_bits |= 0x02
        if not self._seekable:
            # Cannot come back to patch the header later, so request a data
            # descriptor (flag bit 3) written after the file data instead.
            zinfo.flag_bits |= 0x08

        if not zinfo.external_attr:
            zinfo.external_attr = 0o600 << 16  # permissions: ?rw-------

        # Compressed size can be larger than uncompressed size
        zip64 = self._allowZip64 and \
            (force_zip64 or zinfo.file_size * 1.05 > ZIP64_LIMIT)

        if self._seekable:
            self.fp.seek(self.start_dir)
        zinfo.header_offset = self.fp.tell()

        self._writecheck(zinfo)
        self._didModify = True

        self.fp.write(zinfo.FileHeader(zip64))

        self._writing = True
        return _ZipWriteFile(self, zinfo, zip64)
1564 |
1565 | def extract(self, member, path=None, pwd=None):
1566 | """Extract a member from the archive to the current working directory,
1567 | using its full name. Its file information is extracted as accurately
1568 | as possible. `member' may be a filename or a ZipInfo object. You can
1569 | specify a different directory using `path'.
1570 | """
1571 | if path is None:
1572 | path = os.getcwd()
1573 | else:
1574 | path = os.fspath(path)
1575 |
1576 | return self._extract_member(member, path, pwd)
1577 |
1578 | def extractall(self, path=None, members=None, pwd=None):
1579 | """Extract all members from the archive to the current working
1580 | directory. `path' specifies a different directory to extract to.
1581 | `members' is optional and must be a subset of the list returned
1582 | by namelist().
1583 | """
1584 | if members is None:
1585 | members = self.namelist()
1586 |
1587 | if path is None:
1588 | path = os.getcwd()
1589 | else:
1590 | path = os.fspath(path)
1591 |
1592 | for zipinfo in members:
1593 | self._extract_member(zipinfo, path, pwd)
1594 |
1595 | @classmethod
1596 | def _sanitize_windows_name(cls, arcname, pathsep):
1597 | """Replace bad characters and remove trailing dots from parts."""
1598 | table = cls._windows_illegal_name_trans_table
1599 | if not table:
1600 | illegal = ':<>|"?*'
1601 | table = str.maketrans(illegal, '_' * len(illegal))
1602 | cls._windows_illegal_name_trans_table = table
1603 | arcname = arcname.translate(table)
1604 | # remove trailing dots
1605 | arcname = (x.rstrip('.') for x in arcname.split(pathsep))
1606 | # rejoin, removing empty parts.
1607 | arcname = pathsep.join(x for x in arcname if x)
1608 | return arcname
1609 |
    def _extract_member(self, member, targetpath, pwd):
        """Extract the ZipInfo object 'member' to a physical
        file on the path targetpath.
        """
        if not isinstance(member, ZipInfo):
            member = self.getinfo(member)

        # build the destination pathname, replacing
        # forward slashes to platform specific separators.
        arcname = member.filename.replace('/', os.path.sep)

        if os.path.altsep:
            arcname = arcname.replace(os.path.altsep, os.path.sep)
        # interpret absolute pathname as relative, remove drive letter or
        # UNC path, redundant separators, "." and ".." components.
        arcname = os.path.splitdrive(arcname)[1]
        invalid_path_parts = ('', os.path.curdir, os.path.pardir)
        arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
                                   if x not in invalid_path_parts)
        if os.path.sep == '\\':
            # filter illegal characters on Windows
            arcname = self._sanitize_windows_name(arcname, os.path.sep)

        targetpath = os.path.join(targetpath, arcname)
        targetpath = os.path.normpath(targetpath)

        # Create all upper directories if necessary.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            os.makedirs(upperdirs)

        if member.is_dir():
            # Directory member: just ensure the directory exists on disk.
            if not os.path.isdir(targetpath):
                os.mkdir(targetpath)
            return targetpath

        with self.open(member, pwd=pwd) as source, \
             open(targetpath, "wb") as target:
            shutil.copyfileobj(source, target)

        return targetpath
1651 |
    def _writecheck(self, zinfo):
        """Check for errors before writing a file to the archive."""
        if zinfo.filename in self.NameToInfo:
            import warnings
            warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
        if self.mode not in ('w', 'x', 'a'):
            raise ValueError("write() requires mode 'w', 'x', or 'a'")
        if not self.fp:
            raise ValueError(
                "Attempt to write ZIP archive that was already closed")
        _check_compression(zinfo.compress_type)
        if not self._allowZip64:
            # ZIP64 disabled: refuse anything that would need its extensions.
            requires_zip64 = None
            if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
                requires_zip64 = "Files count"
            elif zinfo.file_size > ZIP64_LIMIT:
                requires_zip64 = "Filesize"
            elif zinfo.header_offset > ZIP64_LIMIT:
                requires_zip64 = "Zipfile size"
            if requires_zip64:
                raise LargeZipFile(requires_zip64 +
                                   " would require ZIP64 extensions")
1674 |
    def write(self, filename, arcname=None,
              compress_type=None, compresslevel=None):
        """Put the bytes from filename into the archive under the name
        arcname."""
        if not self.fp:
            raise ValueError(
                "Attempt to write to ZIP archive that was already closed")
        if self._writing:
            raise ValueError(
                "Can't write to ZIP archive while an open writing handle exists"
            )

        zinfo = ZipInfo.from_file(filename, arcname)

        if zinfo.is_dir():
            zinfo.compress_size = 0
            zinfo.CRC = 0
        else:
            # Per-call compression options override the archive defaults.
            if compress_type is not None:
                zinfo.compress_type = compress_type
            else:
                zinfo.compress_type = self.compression

            if compresslevel is not None:
                zinfo._compresslevel = compresslevel
            else:
                zinfo._compresslevel = self.compresslevel

        if zinfo.is_dir():
            # Directory entry: written directly, there is no data payload.
            with self._lock:
                if self._seekable:
                    self.fp.seek(self.start_dir)
                zinfo.header_offset = self.fp.tell()  # Start of header bytes
                if zinfo.compress_type == ZIP_LZMA:
                    # Compressed data includes an end-of-stream (EOS) marker
                    zinfo.flag_bits |= 0x02

                self._writecheck(zinfo)
                self._didModify = True

                self.filelist.append(zinfo)
                self.NameToInfo[zinfo.filename] = zinfo
                self.fp.write(zinfo.FileHeader(False))
                self.start_dir = self.fp.tell()
        else:
            # Regular file: stream the contents through an open('w') handle
            # so compression and size/CRC bookkeeping happen there.
            with open(filename, "rb") as src, self.open(zinfo, 'w') as dest:
                shutil.copyfileobj(src, dest, 1024*8)
1722 |
    def writestr(self, zinfo_or_arcname, data,
                 compress_type=None, compresslevel=None):
        """Write a file into the archive. The contents is 'data', which
        may be either a 'str' or a 'bytes' instance; if it is a 'str',
        it is encoded as UTF-8 first.
        'zinfo_or_arcname' is either a ZipInfo instance or
        the name of the file in the archive."""
        if isinstance(data, str):
            data = data.encode("utf-8")
        if not isinstance(zinfo_or_arcname, ZipInfo):
            # Build a ZipInfo from the name, stamped with the current time.
            zinfo = ZipInfo(filename=zinfo_or_arcname,
                            date_time=time.localtime(time.time())[:6])
            zinfo.compress_type = self.compression
            zinfo._compresslevel = self.compresslevel
            if zinfo.filename[-1] == '/':
                zinfo.external_attr = 0o40775 << 16  # drwxrwxr-x
                zinfo.external_attr |= 0x10  # MS-DOS directory flag
            else:
                zinfo.external_attr = 0o600 << 16  # ?rw-------
        else:
            zinfo = zinfo_or_arcname

        if not self.fp:
            raise ValueError(
                "Attempt to write to ZIP archive that was already closed")
        if self._writing:
            raise ValueError(
                "Can't write to ZIP archive while an open writing handle exists."
            )

        # Per-call compression options override whatever zinfo carries.
        if compress_type is not None:
            zinfo.compress_type = compress_type

        if compresslevel is not None:
            zinfo._compresslevel = compresslevel

        zinfo.file_size = len(data)  # Uncompressed size
        with self._lock:
            with self.open(zinfo, mode='w') as dest:
                dest.write(data)
1763 |
    def __del__(self):
        """Call the "close()" method in case the user forgot."""
        # Safe to call repeatedly: close() returns immediately once
        # self.fp is None.
        self.close()
1767 |
    def close(self):
        """Close the file, and for mode 'w', 'x' and 'a' write the ending
        records."""
        if self.fp is None:
            # Already closed (or never successfully opened).
            return

        if self._writing:
            raise ValueError("Can't close the ZIP file while there is "
                             "an open writing handle on it. "
                             "Close the writing handle before closing the zip.")

        try:
            if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
                with self._lock:
                    if self._seekable:
                        self.fp.seek(self.start_dir)
                    self._write_end_record()
        finally:
            # Release our reference to the underlying file even if writing
            # the end records failed.
            fp = self.fp
            self.fp = None
            self._fpclose(fp)
1789 |
1790 | def _write_end_record(self):
1791 | for zinfo in self.filelist: # write central directory
1792 | dt = zinfo.date_time
1793 | dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
1794 | dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
1795 | extra = []
1796 | if zinfo.file_size > ZIP64_LIMIT \
1797 | or zinfo.compress_size > ZIP64_LIMIT:
1798 | extra.append(zinfo.file_size)
1799 | extra.append(zinfo.compress_size)
1800 | file_size = 0xffffffff
1801 | compress_size = 0xffffffff
1802 | else:
1803 | file_size = zinfo.file_size
1804 | compress_size = zinfo.compress_size
1805 |
1806 | if zinfo.header_offset > ZIP64_LIMIT:
1807 | extra.append(zinfo.header_offset)
1808 | header_offset = 0xffffffff
1809 | else:
1810 | header_offset = zinfo.header_offset
1811 |
1812 | extra_data = zinfo.extra
1813 | min_version = 0
1814 | if extra:
1815 | # Append a ZIP64 field to the extra's
1816 | extra_data = struct.pack(
1817 | ' ZIP_FILECOUNT_LIMIT:
1860 | requires_zip64 = "Files count"
1861 | elif centDirOffset > ZIP64_LIMIT:
1862 | requires_zip64 = "Central directory offset"
1863 | elif centDirSize > ZIP64_LIMIT:
1864 | requires_zip64 = "Central directory size"
1865 | if requires_zip64:
1866 | # Need to write the ZIP64 end-of-archive records
1867 | if not self._allowZip64:
1868 | raise LargeZipFile(requires_zip64 +
1869 | " would require ZIP64 extensions")
1870 | zip64endrec = struct.pack(
1871 | structEndArchive64, stringEndArchive64,
1872 | 44, 45, 45, 0, 0, centDirCount, centDirCount,
1873 | centDirSize, centDirOffset)
1874 | self.fp.write(zip64endrec)
1875 |
1876 | zip64locrec = struct.pack(
1877 | structEndArchive64Locator,
1878 | stringEndArchive64Locator, 0, pos2, 1)
1879 | self.fp.write(zip64locrec)
1880 | centDirCount = min(centDirCount, 0xFFFF)
1881 | centDirSize = min(centDirSize, 0xFFFFFFFF)
1882 | centDirOffset = min(centDirOffset, 0xFFFFFFFF)
1883 |
1884 | endrec = struct.pack(structEndArchive, stringEndArchive,
1885 | 0, 0, centDirCount, centDirCount,
1886 | centDirSize, centDirOffset, len(self._comment))
1887 | self.fp.write(endrec)
1888 | self.fp.write(self._comment)
1889 | self.fp.flush()
1890 |
1891 | def _fpclose(self, fp):
1892 | assert self._fileRefCnt > 0
1893 | self._fileRefCnt -= 1
1894 | if not self._fileRefCnt and not self._filePassed:
1895 | fp.close()
1896 |
1897 |
1898 | class PyZipFile(ZipFile):
1899 | """Class to create ZIP archives with Python library files and packages."""
1900 |
    def __init__(self, file, mode="r", compression=ZIP_STORED,
                 allowZip64=True, optimize=-1):
        # 'optimize' is stored for later byte-compilation of .py sources
        # (-1 presumably means "current interpreter optimization level" —
        # the consumer is outside this chunk; confirm against writepy).
        ZipFile.__init__(self, file, mode=mode, compression=compression,
                         allowZip64=allowZip64)
        self._optimize = optimize
1906 |
1907 | def writepy(self, pathname, basename="", filterfunc=None):
1908 | """Add all files from "pathname" to the ZIP archive.
1909 |
1910 | If pathname is a package directory, search the directory and
1911 | all package subdirectories recursively for all *.py and enter
1912 | the modules into the archive. If pathname is a plain
1913 | directory, listdir *.py and enter all modules. Else, pathname
1914 | must be a Python *.py file and the module will be put into the
1915 | archive. Added modules are always module.pyc.
1916 | This method will compile the module.py into module.pyc if
1917 | necessary.
1918 | If filterfunc(pathname) is given, it is called with every argument.
1919 | When it is False, the file or directory is skipped.
1920 | """
1921 | pathname = os.fspath(pathname)
1922 | if filterfunc and not filterfunc(pathname):
1923 | if self.debug:
1924 | label = 'path' if os.path.isdir(pathname) else 'file'
1925 | print('%s %r skipped by filterfunc' % (label, pathname))
1926 | return
1927 | dir, name = os.path.split(pathname)
1928 | if os.path.isdir(pathname):
1929 | initname = os.path.join(pathname, "__init__.py")
1930 | if os.path.isfile(initname):
1931 | # This is a package directory, add it
1932 | if basename:
1933 | basename = "%s/%s" % (basename, name)
1934 | else:
1935 | basename = name
1936 | if self.debug:
1937 | print("Adding package in", pathname, "as", basename)
1938 | fname, arcname = self._get_codename(initname[0:-3], basename)
1939 | if self.debug:
1940 | print("Adding", arcname)
1941 | self.write(fname, arcname)
1942 | dirlist = sorted(os.listdir(pathname))
1943 | dirlist.remove("__init__.py")
1944 | # Add all *.py files and package subdirectories
1945 | for filename in dirlist:
1946 | path = os.path.join(pathname, filename)
1947 | root, ext = os.path.splitext(filename)
1948 | if os.path.isdir(path):
1949 | if os.path.isfile(os.path.join(path, "__init__.py")):
1950 | # This is a package directory, add it
1951 | self.writepy(path, basename,
1952 | filterfunc=filterfunc) # Recursive call
1953 | elif ext == ".py":
1954 | if filterfunc and not filterfunc(path):
1955 | if self.debug:
1956 | print('file %r skipped by filterfunc' % path)
1957 | continue
1958 | fname, arcname = self._get_codename(path[0:-3],
1959 | basename)
1960 | if self.debug:
1961 | print("Adding", arcname)
1962 | self.write(fname, arcname)
1963 | else:
1964 | # This is NOT a package directory, add its files at top level
1965 | if self.debug:
1966 | print("Adding files from directory", pathname)
1967 | for filename in sorted(os.listdir(pathname)):
1968 | path = os.path.join(pathname, filename)
1969 | root, ext = os.path.splitext(filename)
1970 | if ext == ".py":
1971 | if filterfunc and not filterfunc(path):
1972 | if self.debug:
1973 | print('file %r skipped by filterfunc' % path)
1974 | continue
1975 | fname, arcname = self._get_codename(path[0:-3],
1976 | basename)
1977 | if self.debug:
1978 | print("Adding", arcname)
1979 | self.write(fname, arcname)
1980 | else:
1981 | if pathname[-3:] != ".py":
1982 | raise RuntimeError(
1983 | 'Files added with writepy() must end with ".py"')
1984 | fname, arcname = self._get_codename(pathname[0:-3], basename)
1985 | if self.debug:
1986 | print("Adding file", arcname)
1987 | self.write(fname, arcname)
1988 |
1989 | def _get_codename(self, pathname, basename):
1990 | """Return (filename, archivename) for the path.
1991 |
1992 | Given a module name path, return the correct file path and
1993 | archive name, compiling if necessary. For example, given
1994 | /python/lib/string, return (/python/lib/string.pyc, string).
1995 | """
1996 | def _compile(file, optimize=-1):
1997 | import py_compile
1998 | if self.debug:
1999 | print("Compiling", file)
2000 | try:
2001 | py_compile.compile(file, doraise=True, optimize=optimize)
2002 | except py_compile.PyCompileError as err:
2003 | print(err.msg)
2004 | return False
2005 | return True
2006 |
2007 | file_py = pathname + ".py"
2008 | file_pyc = pathname + ".pyc"
2009 | pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='')
2010 | pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1)
2011 | pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2)
2012 | if self._optimize == -1:
2013 | # legacy mode: use whatever file is present
2014 | if (os.path.isfile(file_pyc) and
2015 | os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime):
2016 | # Use .pyc file.
2017 | arcname = fname = file_pyc
2018 | elif (os.path.isfile(pycache_opt0) and
2019 | os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime):
2020 | # Use the __pycache__/*.pyc file, but write it to the legacy pyc
2021 | # file name in the archive.
2022 | fname = pycache_opt0
2023 | arcname = file_pyc
2024 | elif (os.path.isfile(pycache_opt1) and
2025 | os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime):
2026 | # Use the __pycache__/*.pyc file, but write it to the legacy pyc
2027 | # file name in the archive.
2028 | fname = pycache_opt1
2029 | arcname = file_pyc
2030 | elif (os.path.isfile(pycache_opt2) and
2031 | os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime):
2032 | # Use the __pycache__/*.pyc file, but write it to the legacy pyc
2033 | # file name in the archive.
2034 | fname = pycache_opt2
2035 | arcname = file_pyc
2036 | else:
2037 | # Compile py into PEP 3147 pyc file.
2038 | if _compile(file_py):
2039 | if sys.flags.optimize == 0:
2040 | fname = pycache_opt0
2041 | elif sys.flags.optimize == 1:
2042 | fname = pycache_opt1
2043 | else:
2044 | fname = pycache_opt2
2045 | arcname = file_pyc
2046 | else:
2047 | fname = arcname = file_py
2048 | else:
2049 | # new mode: use given optimization level
2050 | if self._optimize == 0:
2051 | fname = pycache_opt0
2052 | arcname = file_pyc
2053 | else:
2054 | arcname = file_pyc
2055 | if self._optimize == 1:
2056 | fname = pycache_opt1
2057 | elif self._optimize == 2:
2058 | fname = pycache_opt2
2059 | else:
2060 | msg = "invalid value for 'optimize': {!r}".format(self._optimize)
2061 | raise ValueError(msg)
2062 | if not (os.path.isfile(fname) and
2063 | os.stat(fname).st_mtime >= os.stat(file_py).st_mtime):
2064 | if not _compile(file_py, optimize=self._optimize):
2065 | fname = arcname = file_py
2066 | archivename = os.path.split(arcname)[1]
2067 | if basename:
2068 | archivename = "%s/%s" % (basename, archivename)
2069 | return (fname, archivename)
2070 |
2071 |
def main(args=None):
    """Simple command-line interface for the zipfile module.

    Exactly one of four mutually exclusive operations is required:
      -l/--list     show the archive's table of contents
      -e/--extract  extract an archive into a target directory
      -t/--test     CRC-check every member of an archive
      -c/--create   build a new archive from the given files/directories

    args -- optional list of argument strings; defaults to sys.argv[1:]
    (argparse's behavior when parse_args receives None).
    """
    import argparse

    description = 'A simple command-line interface for zipfile module.'
    parser = argparse.ArgumentParser(description=description)
    group = parser.add_mutually_exclusive_group(required=True)
    # The metavar placeholders had been stripped to empty strings
    # (angle brackets lost in transit); restore them so --help and
    # usage/error messages are readable.
    group.add_argument('-l', '--list', metavar='<zipfile>',
                       help='Show listing of a zipfile')
    group.add_argument('-e', '--extract', nargs=2,
                       metavar=('<zipfile>', '<output_dir>'),
                       help='Extract zipfile into target dir')
    group.add_argument('-c', '--create', nargs='+',
                       metavar=('<name>', '<file>'),
                       help='Create zipfile from sources')
    group.add_argument('-t', '--test', metavar='<zipfile>',
                       help='Test if a zipfile is valid')
    args = parser.parse_args(args)

    if args.test is not None:
        src = args.test
        with ZipFile(src, 'r') as zf:
            badfile = zf.testzip()
        if badfile:
            print("The following enclosed file is corrupted: {!r}".format(badfile))
        print("Done testing")

    elif args.list is not None:
        src = args.list
        with ZipFile(src, 'r') as zf:
            zf.printdir()

    elif args.extract is not None:
        src, curdir = args.extract
        with ZipFile(src, 'r') as zf:
            zf.extractall(curdir)

    elif args.create is not None:
        # First positional is the archive name, the rest are sources.
        zip_name = args.create.pop(0)
        files = args.create

        def addToZip(zf, path, zippath):
            # Regular files are deflated; directories get a stored entry
            # and are recursed into so the tree structure is preserved.
            if os.path.isfile(path):
                zf.write(path, zippath, ZIP_DEFLATED)
            elif os.path.isdir(path):
                if zippath:
                    zf.write(path, zippath)
                for nm in sorted(os.listdir(path)):
                    addToZip(zf,
                             os.path.join(path, nm), os.path.join(zippath, nm))
            # else: ignore special files (sockets, devices, ...)

        with ZipFile(zip_name, 'w') as zf:
            for path in files:
                zippath = os.path.basename(path)
                if not zippath:
                    # Trailing slash: fall back to the directory's name.
                    zippath = os.path.basename(os.path.dirname(path))
                if zippath in ('', os.curdir, os.pardir):
                    zippath = ''
                addToZip(zf, path, zippath)

if __name__ == "__main__":
    main()
2134 |
--------------------------------------------------------------------------------