16 | Bakthat is an MIT licensed backup framework written in Python. It's both a command line tool and a Python module that helps you manage backups on Amazon S3/Glacier and OpenStack Swift. It automatically compresses, encrypts (symmetric encryption) and uploads your files.
17 |
18 |
19 |
You may also check out BakServer, a self-hosted Python server that helps you manage backups anywhere and keep multiple bakthat clients synchronized across servers.
20 |
21 |
--------------------------------------------------------------------------------
/bakthat/utils.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 | import logging
3 | from datetime import timedelta
4 | import re
5 |
6 | log = logging.getLogger(__name__)
7 |
8 |
9 | def _timedelta_total_seconds(td):
10 | """Python 2.6 backward compatibility function for timedelta.total_seconds.
11 |
12 | :type td: timedelta object
13 | :param td: timedelta object
14 |
15 | :rtype: float
16 | :return: The total number of seconds for the given timedelta object.
17 |
18 | """
19 | if hasattr(timedelta, "total_seconds"):
20 | return getattr(td, "total_seconds")()
21 |
22 | # Python 2.6 backward compatibility
23 | return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / float(10**6)
24 |
25 |
26 | def _interval_string_to_seconds(interval_string):
27 | """Convert internal string like 1M, 1Y3M, 3W to seconds.
28 |
29 | :type interval_string: str
30 | :param interval_string: Interval string like 1M, 1W, 1M3W4h2s...
31 | (s => seconds, m => minutes, h => hours, D => days, W => weeks, M => months, Y => Years).
32 |
33 | :rtype: int
34 | :return: The conversion in seconds of interval_string.
35 |
36 | """
37 | interval_exc = "Bad interval format for {0}".format(interval_string)
38 | interval_dict = {"s": 1, "m": 60, "h": 3600, "D": 86400,
39 | "W": 7*86400, "M": 30*86400, "Y": 365*86400}
40 |
41 | interval_regex = re.compile("^(?P<num>[0-9]+)(?P<ext>[smhDWMY])")
42 | seconds = 0
43 |
44 | while interval_string:
45 | match = interval_regex.match(interval_string)
46 | if match:
47 | num, ext = int(match.group("num")), match.group("ext")
48 | if num > 0 and ext in interval_dict:
49 | seconds += num * interval_dict[ext]
50 | interval_string = interval_string[match.end():]
51 | else:
52 | raise Exception(interval_exc)
53 | else:
54 | raise Exception(interval_exc)
55 | return seconds
56 |
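# A quick sanity check of the two helpers above (an illustrative sketch of
# the expected values, runnable as a script):
if __name__ == "__main__":
    assert _timedelta_total_seconds(timedelta(days=1, seconds=30)) == 86430.0
    # 1M3W4h2s => 30*86400 + 3*7*86400 + 4*3600 + 2 = 4420802 seconds
    assert _interval_string_to_seconds("1M3W4h2s") == 4420802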
--------------------------------------------------------------------------------
/docs/_themes/LICENSE:
--------------------------------------------------------------------------------
1 | Modifications:
2 |
3 | Copyright (c) 2010 Kenneth Reitz.
4 |
5 |
6 | Original Project:
7 |
8 | Copyright (c) 2010 by Armin Ronacher.
9 |
10 |
11 | Some rights reserved.
12 |
13 | Redistribution and use in source and binary forms of the theme, with or
14 | without modification, are permitted provided that the following conditions
15 | are met:
16 |
17 | * Redistributions of source code must retain the above copyright
18 | notice, this list of conditions and the following disclaimer.
19 |
20 | * Redistributions in binary form must reproduce the above
21 | copyright notice, this list of conditions and the following
22 | disclaimer in the documentation and/or other materials provided
23 | with the distribution.
24 |
25 | * The names of the contributors may not be used to endorse or
26 | promote products derived from this software without specific
27 | prior written permission.
28 |
29 | We kindly ask you to only use these themes in an unmodified manner just
30 | for Flask and Flask-related products, not for unrelated projects. If you
31 | like the visual style and want to use it for your own projects, please
32 | consider making some larger changes to the themes (such as changing
33 | font faces, sizes, colors or margins).
34 |
35 | THIS THEME IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
36 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
39 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
40 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
41 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
42 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
43 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
44 | ARISING IN ANY WAY OUT OF THE USE OF THIS THEME, EVEN IF ADVISED OF THE
45 | POSSIBILITY OF SUCH DAMAGE.
46 |
--------------------------------------------------------------------------------
/docs/api.rst:
--------------------------------------------------------------------------------
1 | .. _api:
2 |
3 | Bakthat API
4 | ===========
5 |
6 | Bakthat
7 | -------
8 |
9 | These functions are called when using bakthat in command line mode and are the foundation of the bakthat module.
10 |
11 |
12 | .. module:: bakthat
13 |
14 | backup
15 | ~~~~~~
16 |
17 | .. autofunction:: backup
18 |
19 | restore
20 | ~~~~~~~
21 |
22 | .. autofunction:: restore
23 |
24 | info
25 | ~~~~
26 |
27 | .. autofunction:: info
28 |
29 | show
30 | ~~~~
31 |
32 | .. autofunction:: show
33 |
34 | delete
35 | ~~~~~~
36 |
37 | .. autofunction:: delete
38 |
39 | delete_older_than
40 | ~~~~~~~~~~~~~~~~~
41 |
42 | .. autofunction:: delete_older_than
43 |
44 | rotate_backups
45 | ~~~~~~~~~~~~~~
46 |
47 | .. autofunction:: rotate_backups
48 |
49 |
50 | Backends
51 | --------
52 |
53 | BakthatBackend
54 | ~~~~~~~~~~~~~~
55 |
56 | .. autoclass:: bakthat.backends.BakthatBackend
57 | :members:
58 |
59 | GlacierBackend
60 | ~~~~~~~~~~~~~~
61 |
62 | .. autoclass:: bakthat.backends.GlacierBackend
63 | :members:
64 |
65 | S3Backend
66 | ~~~~~~~~~
67 |
68 | .. autoclass:: bakthat.backends.S3Backend
69 | :members:
70 |
71 | SwiftBackend
72 | ~~~~~~~~~~~~
73 |
74 | .. autoclass:: bakthat.backends.SwiftBackend
75 | :members:
76 |
77 |
78 | RotationConfig
79 | ~~~~~~~~~~~~~~
80 |
81 | .. autoclass:: bakthat.backends.RotationConfig
82 | :members:
83 |
84 | Helper
85 | ------
86 |
87 | BakHelper
88 | ~~~~~~~~~
89 |
90 | .. autoclass:: bakthat.helper.BakHelper
91 | :members:
92 |
93 | KeyValue
94 | ~~~~~~~~
95 |
96 | .. autoclass:: bakthat.helper.KeyValue
97 | :members:
98 |
99 | Sync
100 | ----
101 |
102 | BakSyncer
103 | ~~~~~~~~~
104 |
105 | .. autoclass:: bakthat.sync.BakSyncer
106 | :members:
107 |
108 | bakmanager_hook
109 | ~~~~~~~~~~~~~~~
110 |
111 | .. autofunction:: bakthat.sync.bakmanager_hook
112 |
113 | Utils
114 | -----
115 |
116 | .. autofunction:: bakthat.utils._timedelta_total_seconds
117 |
118 | .. autofunction:: bakthat.utils._interval_string_to_seconds
119 |
120 | Models
121 | ------
122 |
123 | .. autoclass:: bakthat.models.Backups
124 | :members:
125 |
126 |
127 | .. autoclass:: bakthat.models.Inventory
128 | :members:
129 |
130 |
131 | .. autoclass:: bakthat.models.Jobs
132 | :members:
133 |
134 |
135 | .. autoclass:: bakthat.models.Config
136 | :members:
137 |
--------------------------------------------------------------------------------
/bakthat/plugin.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | import importlib
3 | import os
4 | import sys
5 | import logging
6 | import atexit
7 |
8 | from bakthat.conf import PLUGINS_DIR, events
9 |
10 | log = logging.getLogger(__name__)
11 | plugin_setup = False
12 |
13 |
14 | def setup_plugins(conf=None):
15 | """ Add the plugin dir to the PYTHON_PATH,
16 | and activate them."""
17 | global plugin_setup
18 | if not plugin_setup:
19 | log.debug("Setting up plugins")
20 | plugins_dir = conf.get("plugins_dir", PLUGINS_DIR)
21 |
22 | if os.path.isdir(plugins_dir):
23 | log.debug("Adding {0} to plugins dir".format(plugins_dir))
24 | sys.path.append(plugins_dir)
25 |
26 | for plugin in conf.get("plugins", []):
27 | p = load_class(plugin)
28 | if issubclass(p, Plugin):
29 | load_plugin(p, conf)
30 | else:
31 | raise Exception("Plugin must be a bakthat.plugin.Plugin subclass!")
32 | plugin_setup = True
33 |
34 |
35 | def load_class(full_class_string):
36 | """ Dynamically load a class from a string. """
37 | class_data = full_class_string.split(".")
38 | module_path = ".".join(class_data[:-1])
39 | class_str = class_data[-1]
40 |
41 | module = importlib.import_module(module_path)
42 | return getattr(module, class_str)
43 |
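# For example, load_class("myplugin.MyPlugin") (a hypothetical plugin module
# living in the plugins dir appended to sys.path above) imports "myplugin"
# and returns its MyPlugin class object without instantiating it.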
44 |
45 | def load_plugin(plugin, conf):
46 | p = plugin(conf)
47 | log.debug("Activating {0}".format(p))
48 | p.activate()
49 |
50 | def deactivate_plugin():
51 | try:
52 | p.deactivate()
53 | except NotImplementedError:
54 | pass
55 | atexit.register(deactivate_plugin)
56 |
57 |
58 | class Plugin(object):
59 | """ Abstract plugin class.
60 | Plugin should implement activate, and optionnaly deactivate.
61 | """
62 | def __init__(self, conf):
63 | self.conf = conf
64 | self.events = events
65 | self.log = log
66 |
67 | def __getattr__(self, attr):
68 | if attr in ["before_backup",
69 | "on_backup",
70 | "before_restore",
71 | "on_restore",
72 | "before_delete",
73 | "on_delete",
74 | "before_delete_older_than",
75 | "on_delete_older_than",
76 | "before_rotate_backups",
77 | "on_rotate_backups"]:
78 | return getattr(self.events, attr)
79 | else:
80 | raise Exception("Event {0} does not exist!".format(attr))
81 |
82 | def __repr__(self):
83 | return "".format(self.__class__.__name__)
84 |
85 | def __str__(self):
86 | return self.__repr__()
87 |
88 | def activate(self):
89 | raise NotImplementedError("Plugin should implement this!")
90 |
91 | def deactivate(self):
92 | raise NotImplementedError("Plugin may implement this!")
93 |
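# A minimal example plugin (an illustrative sketch): it subscribes to the
# "on_backup" event, one of the names whitelisted in Plugin.__getattr__
# above (self.on_backup proxies to self.events.on_backup). The handler
# signature is an assumption, so it accepts whatever bakthat passes along.
class LogBackupPlugin(Plugin):
    def activate(self):
        self.on_backup += self.handle_backup

    def handle_backup(self, *args, **kwargs):
        self.log.info("Backup event: {0} {1}".format(args, kwargs))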
--------------------------------------------------------------------------------
/docs/_themes/flask_theme_support.py:
--------------------------------------------------------------------------------
1 | # flasky extensions. flasky pygments style based on tango style
2 | from pygments.style import Style
3 | from pygments.token import Keyword, Name, Comment, String, Error, \
4 | Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
5 |
6 |
7 | class FlaskyStyle(Style):
8 | background_color = "#f8f8f8"
9 | default_style = ""
10 |
11 | styles = {
12 | # No corresponding class for the following:
13 | #Text: "", # class: ''
14 | Whitespace: "underline #f8f8f8", # class: 'w'
15 | Error: "#a40000 border:#ef2929", # class: 'err'
16 | Other: "#000000", # class 'x'
17 |
18 | Comment: "italic #8f5902", # class: 'c'
19 | Comment.Preproc: "noitalic", # class: 'cp'
20 |
21 | Keyword: "bold #004461", # class: 'k'
22 | Keyword.Constant: "bold #004461", # class: 'kc'
23 | Keyword.Declaration: "bold #004461", # class: 'kd'
24 | Keyword.Namespace: "bold #004461", # class: 'kn'
25 | Keyword.Pseudo: "bold #004461", # class: 'kp'
26 | Keyword.Reserved: "bold #004461", # class: 'kr'
27 | Keyword.Type: "bold #004461", # class: 'kt'
28 |
29 | Operator: "#582800", # class: 'o'
30 | Operator.Word: "bold #004461", # class: 'ow' - like keywords
31 |
32 | Punctuation: "bold #000000", # class: 'p'
33 |
34 | # because special names such as Name.Class, Name.Function, etc.
35 | # are not recognized as such later in the parsing, we choose them
36 | # to look the same as ordinary variables.
37 | Name: "#000000", # class: 'n'
38 | Name.Attribute: "#c4a000", # class: 'na' - to be revised
39 | Name.Builtin: "#004461", # class: 'nb'
40 | Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
41 | Name.Class: "#000000", # class: 'nc' - to be revised
42 | Name.Constant: "#000000", # class: 'no' - to be revised
43 | Name.Decorator: "#888", # class: 'nd' - to be revised
44 | Name.Entity: "#ce5c00", # class: 'ni'
45 | Name.Exception: "bold #cc0000", # class: 'ne'
46 | Name.Function: "#000000", # class: 'nf'
47 | Name.Property: "#000000", # class: 'py'
48 | Name.Label: "#f57900", # class: 'nl'
49 | Name.Namespace: "#000000", # class: 'nn' - to be revised
50 | Name.Other: "#000000", # class: 'nx'
51 | Name.Tag: "bold #004461", # class: 'nt' - like a keyword
52 | Name.Variable: "#000000", # class: 'nv' - to be revised
53 | Name.Variable.Class: "#000000", # class: 'vc' - to be revised
54 | Name.Variable.Global: "#000000", # class: 'vg' - to be revised
55 | Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
56 |
57 | Number: "#990000", # class: 'm'
58 |
59 | Literal: "#000000", # class: 'l'
60 | Literal.Date: "#000000", # class: 'ld'
61 |
62 | String: "#4e9a06", # class: 's'
63 | String.Backtick: "#4e9a06", # class: 'sb'
64 | String.Char: "#4e9a06", # class: 'sc'
65 | String.Doc: "italic #8f5902", # class: 'sd' - like a comment
66 | String.Double: "#4e9a06", # class: 's2'
67 | String.Escape: "#4e9a06", # class: 'se'
68 | String.Heredoc: "#4e9a06", # class: 'sh'
69 | String.Interpol: "#4e9a06", # class: 'si'
70 | String.Other: "#4e9a06", # class: 'sx'
71 | String.Regex: "#4e9a06", # class: 'sr'
72 | String.Single: "#4e9a06", # class: 's1'
73 | String.Symbol: "#4e9a06", # class: 'ss'
74 |
75 | Generic: "#000000", # class: 'g'
76 | Generic.Deleted: "#a40000", # class: 'gd'
77 | Generic.Emph: "italic #000000", # class: 'ge'
78 | Generic.Error: "#ef2929", # class: 'gr'
79 | Generic.Heading: "bold #000080", # class: 'gh'
80 | Generic.Inserted: "#00A000", # class: 'gi'
81 | Generic.Output: "#888", # class: 'go'
82 | Generic.Prompt: "#745334", # class: 'gp'
83 | Generic.Strong: "bold #000000", # class: 'gs'
84 | Generic.Subheading: "bold #800080", # class: 'gu'
85 | Generic.Traceback: "bold #a40000", # class: 'gt'
86 | }
87 |
--------------------------------------------------------------------------------
/test_bakthat_swift.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 | import bakthat
3 | import tempfile
4 | import hashlib
5 | import os
6 | import time
7 | import unittest
8 | import logging
9 |
10 | log = logging.getLogger()
11 |
12 | handler = logging.StreamHandler()
13 | handler.setLevel(logging.DEBUG)
14 | handler.addFilter(bakthat.BakthatFilter())
15 | handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
16 | log.addHandler(handler)
17 | log.setLevel(logging.DEBUG)
18 |
19 |
20 | class BakthatSwiftBackendTestCase(unittest.TestCase):
21 | """This test cases use profile test_swift """
22 |
23 | def setUp(self):
24 | self.test_file = tempfile.NamedTemporaryFile()
25 | self.test_file.write("Bakthat Test File")
26 | self.test_file.seek(0)
27 | self.test_filename = self.test_file.name.split("/")[-1]
28 | self.test_hash = hashlib.sha1(self.test_file.read()).hexdigest()
29 | self.password = "bakthat_encrypted_test"
30 | self.test_profile = "test_swift"
31 |
32 | def test_swift_backup_restore(self):
33 | backup_data = bakthat.backup(self.test_file.name, "swift", password="",
34 | profile=self.test_profile)
35 | log.info(backup_data)
36 |
37 | #self.assertEqual(bakthat.match_filename(self.test_filename, "swift",
38 | # profile=self.test_profile
39 | # )[0]["filename"],
40 | # self.test_filename)
41 |
42 | bakthat.restore(self.test_filename, "swift", profile=self.test_profile)
43 |
44 | restored_hash = hashlib.sha1(
45 | open(self.test_filename).read()).hexdigest()
46 |
47 | self.assertEqual(self.test_hash, restored_hash)
48 |
49 | os.remove(self.test_filename)
50 |
51 | bakthat.delete(backup_data["stored_filename"], "swift", profile=self.test_profile)
52 |
53 | #self.assertEqual(bakthat.match_filename(self.test_filename, "swift",
54 | # profile=self.test_profile), [])
55 |
56 | def test_swift_delete_older_than(self):
57 | backup_res = bakthat.backup(self.test_file.name, "swift", password="",
58 | profile=self.test_profile)
59 |
60 | #self.assertEqual(bakthat.match_filename(self.test_filename, "swift",
61 | # profile=self.test_profile
62 | # )[0]["filename"],
63 | # self.test_filename)
64 |
65 | bakthat.restore(self.test_filename, "swift",
66 | profile=self.test_profile)
67 |
68 | restored_hash = hashlib.sha1(
69 | open(self.test_filename).read()).hexdigest()
70 |
71 | self.assertEqual(self.test_hash, restored_hash)
72 |
73 | os.remove(self.test_filename)
74 |
75 | test_deleted = bakthat.delete_older_than(self.test_filename, "1Y",
76 | "swift",
77 | profile=self.test_profile)
78 |
79 | self.assertEqual(test_deleted, [])
80 |
81 | time.sleep(10)
82 |
83 | test_deleted = bakthat.delete_older_than(self.test_filename, "9s",
84 | "swift",
85 | profile=self.test_profile)
86 |
87 | key_deleted = test_deleted[0]
88 |
89 | self.assertEqual(key_deleted, backup_res["stored_filename"])
90 |
91 | #self.assertEqual(bakthat.match_filename(self.test_filename,
92 | # "swift",
93 | # profile=self.test_profile),
94 | # [])
95 |
96 | def test_swift_encrypted_backup_restore(self):
97 | backup_data = bakthat.backup(self.test_file.name, "swift", password=self.password,
98 | profile=self.test_profile)
99 |
100 | #self.assertEqual(bakthat.match_filename(self.test_filename, "swift",
101 | # profile=self.test_profile)
102 | # [0]["filename"], self.test_filename)
103 |
104 | # Check if stored file is encrypted
105 | #self.assertTrue(bakthat.match_filename(self.test_filename, "swift",
106 | # profile=self.test_profile)
107 | # [0]["is_enc"])
108 |
109 | bakthat.restore(self.test_filename, "swift", password=self.password,
110 | profile=self.test_profile)
111 |
112 | restored_hash = hashlib.sha1(
113 | open(self.test_filename).read()).hexdigest()
114 |
115 | self.assertEqual(self.test_hash, restored_hash)
116 |
117 | os.remove(self.test_filename)
118 |
119 | bakthat.delete(backup_data["stored_filename"], "swift",
120 | profile=self.test_profile)
121 |
122 | #self.assertEqual(bakthat.match_filename(self.test_filename,
123 | # "swift",
124 | # profile=self.test_profile),
125 | # [])
126 |
127 | if __name__ == '__main__':
128 | unittest.main()
129 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | REM Command file for Sphinx documentation
4 |
5 | if "%SPHINXBUILD%" == "" (
6 | set SPHINXBUILD=sphinx-build
7 | )
8 | set BUILDDIR=_build
9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
10 | set I18NSPHINXOPTS=%SPHINXOPTS% .
11 | if NOT "%PAPER%" == "" (
12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
14 | )
15 |
16 | if "%1" == "" goto help
17 |
18 | if "%1" == "help" (
19 | :help
20 | echo.Please use `make ^<target^>` where ^<target^> is one of
21 | echo. html to make standalone HTML files
22 | echo. dirhtml to make HTML files named index.html in directories
23 | echo. singlehtml to make a single large HTML file
24 | echo. pickle to make pickle files
25 | echo. json to make JSON files
26 | echo. htmlhelp to make HTML files and a HTML help project
27 | echo. qthelp to make HTML files and a qthelp project
28 | echo. devhelp to make HTML files and a Devhelp project
29 | echo. epub to make an epub
30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
31 | echo. text to make text files
32 | echo. man to make manual pages
33 | echo. texinfo to make Texinfo files
34 | echo. gettext to make PO message catalogs
35 | echo. changes to make an overview of all changed/added/deprecated items
36 | echo. linkcheck to check all external links for integrity
37 | echo. doctest to run all doctests embedded in the documentation if enabled
38 | goto end
39 | )
40 |
41 | if "%1" == "clean" (
42 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
43 | del /q /s %BUILDDIR%\*
44 | goto end
45 | )
46 |
47 | if "%1" == "html" (
48 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
49 | if errorlevel 1 exit /b 1
50 | echo.
51 | echo.Build finished. The HTML pages are in %BUILDDIR%/html.
52 | goto end
53 | )
54 |
55 | if "%1" == "dirhtml" (
56 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
57 | if errorlevel 1 exit /b 1
58 | echo.
59 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
60 | goto end
61 | )
62 |
63 | if "%1" == "singlehtml" (
64 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
65 | if errorlevel 1 exit /b 1
66 | echo.
67 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
68 | goto end
69 | )
70 |
71 | if "%1" == "pickle" (
72 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
73 | if errorlevel 1 exit /b 1
74 | echo.
75 | echo.Build finished; now you can process the pickle files.
76 | goto end
77 | )
78 |
79 | if "%1" == "json" (
80 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
81 | if errorlevel 1 exit /b 1
82 | echo.
83 | echo.Build finished; now you can process the JSON files.
84 | goto end
85 | )
86 |
87 | if "%1" == "htmlhelp" (
88 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
89 | if errorlevel 1 exit /b 1
90 | echo.
91 | echo.Build finished; now you can run HTML Help Workshop with the ^
92 | .hhp project file in %BUILDDIR%/htmlhelp.
93 | goto end
94 | )
95 |
96 | if "%1" == "qthelp" (
97 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
98 | if errorlevel 1 exit /b 1
99 | echo.
100 | echo.Build finished; now you can run "qcollectiongenerator" with the ^
101 | .qhcp project file in %BUILDDIR%/qthelp, like this:
102 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Bakthat.qhcp
103 | echo.To view the help file:
104 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Bakthat.qhc
105 | goto end
106 | )
107 |
108 | if "%1" == "devhelp" (
109 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
110 | if errorlevel 1 exit /b 1
111 | echo.
112 | echo.Build finished.
113 | goto end
114 | )
115 |
116 | if "%1" == "epub" (
117 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
118 | if errorlevel 1 exit /b 1
119 | echo.
120 | echo.Build finished. The epub file is in %BUILDDIR%/epub.
121 | goto end
122 | )
123 |
124 | if "%1" == "latex" (
125 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
126 | if errorlevel 1 exit /b 1
127 | echo.
128 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
129 | goto end
130 | )
131 |
132 | if "%1" == "text" (
133 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
134 | if errorlevel 1 exit /b 1
135 | echo.
136 | echo.Build finished. The text files are in %BUILDDIR%/text.
137 | goto end
138 | )
139 |
140 | if "%1" == "man" (
141 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
142 | if errorlevel 1 exit /b 1
143 | echo.
144 | echo.Build finished. The manual pages are in %BUILDDIR%/man.
145 | goto end
146 | )
147 |
148 | if "%1" == "texinfo" (
149 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
150 | if errorlevel 1 exit /b 1
151 | echo.
152 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
153 | goto end
154 | )
155 |
156 | if "%1" == "gettext" (
157 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
158 | if errorlevel 1 exit /b 1
159 | echo.
160 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
161 | goto end
162 | )
163 |
164 | if "%1" == "changes" (
165 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
166 | if errorlevel 1 exit /b 1
167 | echo.
168 | echo.The overview file is in %BUILDDIR%/changes.
169 | goto end
170 | )
171 |
172 | if "%1" == "linkcheck" (
173 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
174 | if errorlevel 1 exit /b 1
175 | echo.
176 | echo.Link check complete; look for any errors in the above output ^
177 | or in %BUILDDIR%/linkcheck/output.txt.
178 | goto end
179 | )
180 |
181 | if "%1" == "doctest" (
182 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
183 | if errorlevel 1 exit /b 1
184 | echo.
185 | echo.Testing of doctests in the sources finished, look at the ^
186 | results in %BUILDDIR%/doctest/output.txt.
187 | goto end
188 | )
189 |
190 | :end
191 |
--------------------------------------------------------------------------------
/docs/_themes/kr_small/static/flasky.css_t:
--------------------------------------------------------------------------------
1 | /*
2 | * flasky.css_t
3 | * ~~~~~~~~~~~~
4 | *
5 | * Sphinx stylesheet -- flasky theme based on nature theme.
6 | *
7 | * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS.
8 | * :license: BSD, see LICENSE for details.
9 | *
10 | */
11 |
12 | @import url("basic.css");
13 | /* -- page layout ----------------------------------------------------------- */
14 |
15 | body {
16 | font-family: 'Georgia', serif;
17 | font-size: 17px;
18 | color: #000;
19 | background: white;
20 | margin: 0;
21 | padding: 0;
22 | }
23 |
24 | div.documentwrapper {
25 | float: left;
26 | width: 100%;
27 | }
28 |
29 | div.bodywrapper {
30 | margin: 40px auto 0 auto;
31 | width: 700px;
32 | }
33 |
34 | hr {
35 | border: 1px solid #B1B4B6;
36 | }
37 |
38 | div.body {
39 | background-color: #ffffff;
40 | color: #3E4349;
41 | padding: 0 30px 30px 30px;
42 | }
43 |
44 | img.floatingflask {
45 | padding: 0 0 10px 10px;
46 | float: right;
47 | }
48 |
49 | div.footer {
50 | text-align: right;
51 | color: #888;
52 | padding: 10px;
53 | font-size: 14px;
54 | width: 650px;
55 | margin: 0 auto 40px auto;
56 | }
57 |
58 | div.footer a {
59 | color: #888;
60 | text-decoration: underline;
61 | }
62 |
63 | div.related {
64 | line-height: 32px;
65 | color: #888;
66 | }
67 |
68 | div.related ul {
69 | padding: 0 0 0 10px;
70 | }
71 |
72 | div.related a {
73 | color: #444;
74 | }
75 |
76 | /* -- body styles ----------------------------------------------------------- */
77 |
78 | a {
79 | color: #004B6B;
80 | text-decoration: underline;
81 | }
82 |
83 | a:hover {
84 | color: #6D4100;
85 | text-decoration: underline;
86 | }
87 |
88 | div.body {
89 | padding-bottom: 40px; /* saved for footer */
90 | }
91 |
92 | div.body h1,
93 | div.body h2,
94 | div.body h3,
95 | div.body h4,
96 | div.body h5,
97 | div.body h6 {
98 | font-family: 'Garamond', 'Georgia', serif;
99 | font-weight: normal;
100 | margin: 30px 0px 10px 0px;
101 | padding: 0;
102 | }
103 |
104 | {% if theme_index_logo %}
105 | div.indexwrapper h1 {
106 | text-indent: -999999px;
107 | background: url({{ theme_index_logo }}) no-repeat center center;
108 | height: {{ theme_index_logo_height }};
109 | }
110 | {% endif %}
111 |
112 | div.body h2 { font-size: 180%; }
113 | div.body h3 { font-size: 150%; }
114 | div.body h4 { font-size: 130%; }
115 | div.body h5 { font-size: 100%; }
116 | div.body h6 { font-size: 100%; }
117 |
118 | a.headerlink {
119 | color: white;
120 | padding: 0 4px;
121 | text-decoration: none;
122 | }
123 |
124 | a.headerlink:hover {
125 | color: #444;
126 | background: #eaeaea;
127 | }
128 |
129 | div.body p, div.body dd, div.body li {
130 | line-height: 1.4em;
131 | }
132 |
133 | div.admonition {
134 | background: #fafafa;
135 | margin: 20px -30px;
136 | padding: 10px 30px;
137 | border-top: 1px solid #ccc;
138 | border-bottom: 1px solid #ccc;
139 | }
140 |
141 | div.admonition p.admonition-title {
142 | font-family: 'Garamond', 'Georgia', serif;
143 | font-weight: normal;
144 | font-size: 24px;
145 | margin: 0 0 10px 0;
146 | padding: 0;
147 | line-height: 1;
148 | }
149 |
150 | div.admonition p.last {
151 | margin-bottom: 0;
152 | }
153 |
154 | div.highlight {
155 | background-color: white;
156 | }
157 |
158 | dt:target, .highlight {
159 | background: #FAF3E8;
160 | }
161 |
162 | div.note {
163 | background-color: #eee;
164 | border: 1px solid #ccc;
165 | }
166 |
167 | div.seealso {
168 | background-color: #ffc;
169 | border: 1px solid #ff6;
170 | }
171 |
172 | div.topic {
173 | background-color: #eee;
174 | }
175 |
176 | div.warning {
177 | background-color: #ffe4e4;
178 | border: 1px solid #f66;
179 | }
180 |
181 | p.admonition-title {
182 | display: inline;
183 | }
184 |
185 | p.admonition-title:after {
186 | content: ":";
187 | }
188 |
189 | pre, tt {
190 | font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
191 | font-size: 0.85em;
192 | }
193 |
194 | img.screenshot {
195 | }
196 |
197 | tt.descname, tt.descclassname {
198 | font-size: 0.95em;
199 | }
200 |
201 | tt.descname {
202 | padding-right: 0.08em;
203 | }
204 |
205 | img.screenshot {
206 | -moz-box-shadow: 2px 2px 4px #eee;
207 | -webkit-box-shadow: 2px 2px 4px #eee;
208 | box-shadow: 2px 2px 4px #eee;
209 | }
210 |
211 | table.docutils {
212 | border: 1px solid #888;
213 | -moz-box-shadow: 2px 2px 4px #eee;
214 | -webkit-box-shadow: 2px 2px 4px #eee;
215 | box-shadow: 2px 2px 4px #eee;
216 | }
217 |
218 | table.docutils td, table.docutils th {
219 | border: 1px solid #888;
220 | padding: 0.25em 0.7em;
221 | }
222 |
223 | table.field-list, table.footnote {
224 | border: none;
225 | -moz-box-shadow: none;
226 | -webkit-box-shadow: none;
227 | box-shadow: none;
228 | }
229 |
230 | table.footnote {
231 | margin: 15px 0;
232 | width: 100%;
233 | border: 1px solid #eee;
234 | }
235 |
236 | table.field-list th {
237 | padding: 0 0.8em 0 0;
238 | }
239 |
240 | table.field-list td {
241 | padding: 0;
242 | }
243 |
244 | table.footnote td {
245 | padding: 0.5em;
246 | }
247 |
248 | dl {
249 | margin: 0;
250 | padding: 0;
251 | }
252 |
253 | dl dd {
254 | margin-left: 30px;
255 | }
256 |
257 | pre {
258 | padding: 0;
259 | margin: 15px -30px;
260 | padding: 8px;
261 | line-height: 1.3em;
262 | padding: 7px 30px;
263 | background: #eee;
264 | border-radius: 2px;
265 | -moz-border-radius: 2px;
266 | -webkit-border-radius: 2px;
267 | }
268 |
269 | dl pre {
270 | margin-left: -60px;
271 | padding-left: 60px;
272 | }
273 |
274 | tt {
275 | background-color: #ecf0f3;
276 | color: #222;
277 | /* padding: 1px 2px; */
278 | }
279 |
280 | tt.xref, a tt {
281 | background-color: #FBFBFB;
282 | }
283 |
284 | a:hover tt {
285 | background: #EEE;
286 | }
287 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # Internal variables.
11 | PAPEROPT_a4 = -D latex_paper_size=a4
12 | PAPEROPT_letter = -D latex_paper_size=letter
13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
14 | # the i18n builder cannot share the environment and doctrees with the others
15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
16 |
17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
18 |
19 | help:
20 | @echo "Please use \`make ' where is one of"
21 | @echo " html to make standalone HTML files"
22 | @echo " dirhtml to make HTML files named index.html in directories"
23 | @echo " singlehtml to make a single large HTML file"
24 | @echo " pickle to make pickle files"
25 | @echo " json to make JSON files"
26 | @echo " htmlhelp to make HTML files and a HTML help project"
27 | @echo " qthelp to make HTML files and a qthelp project"
28 | @echo " devhelp to make HTML files and a Devhelp project"
29 | @echo " epub to make an epub"
30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
31 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
32 | @echo " text to make text files"
33 | @echo " man to make manual pages"
34 | @echo " texinfo to make Texinfo files"
35 | @echo " info to make Texinfo files and run them through makeinfo"
36 | @echo " gettext to make PO message catalogs"
37 | @echo " changes to make an overview of all changed/added/deprecated items"
38 | @echo " linkcheck to check all external links for integrity"
39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
40 |
41 | clean:
42 | -rm -rf $(BUILDDIR)/*
43 |
44 | html:
45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
46 | @echo
47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
48 |
49 | dirhtml:
50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
51 | @echo
52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
53 |
54 | singlehtml:
55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
56 | @echo
57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
58 |
59 | pickle:
60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
61 | @echo
62 | @echo "Build finished; now you can process the pickle files."
63 |
64 | json:
65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
66 | @echo
67 | @echo "Build finished; now you can process the JSON files."
68 |
69 | htmlhelp:
70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
71 | @echo
72 | @echo "Build finished; now you can run HTML Help Workshop with the" \
73 | ".hhp project file in $(BUILDDIR)/htmlhelp."
74 |
75 | qthelp:
76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
77 | @echo
78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Bakthat.qhcp"
81 | @echo "To view the help file:"
82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Bakthat.qhc"
83 |
84 | devhelp:
85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
86 | @echo
87 | @echo "Build finished."
88 | @echo "To view the help file:"
89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Bakthat"
90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Bakthat"
91 | @echo "# devhelp"
92 |
93 | epub:
94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
95 | @echo
96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
97 |
98 | latex:
99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
100 | @echo
101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
103 | "(use \`make latexpdf' here to do that automatically)."
104 |
105 | latexpdf:
106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
107 | @echo "Running LaTeX files through pdflatex..."
108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
110 |
111 | text:
112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
113 | @echo
114 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
115 |
116 | man:
117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
118 | @echo
119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
120 |
121 | texinfo:
122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
123 | @echo
124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
125 | @echo "Run \`make' in that directory to run these through makeinfo" \
126 | "(use \`make info' here to do that automatically)."
127 |
128 | info:
129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
130 | @echo "Running Texinfo files through makeinfo..."
131 | make -C $(BUILDDIR)/texinfo info
132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
133 |
134 | gettext:
135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
136 | @echo
137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
138 |
139 | changes:
140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
141 | @echo
142 | @echo "The overview file is in $(BUILDDIR)/changes."
143 |
144 | linkcheck:
145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
146 | @echo
147 | @echo "Link check complete; look for any errors in the above output " \
148 | "or in $(BUILDDIR)/linkcheck/output.txt."
149 |
150 | doctest:
151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
152 | @echo "Testing of doctests in the sources finished, look at the " \
153 | "results in $(BUILDDIR)/doctest/output.txt."
154 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. Bakthat documentation master file, created by
2 | sphinx-quickstart on Fri Mar 1 10:32:38 2013.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Bakthat: Python backup framework and command line tool
7 | ======================================================
8 |
9 | Release v\ |version|.
10 |
11 | Bakthat is an MIT licensed backup framework written in Python. It's both a command line tool and a Python module that helps you manage backups on Amazon `S3 `_/`Glacier `_ and OpenStack `Swift `_. It automatically compresses, encrypts (symmetric encryption) and uploads your files.
12 |
13 | Here are some features:
14 |
15 | * Compress with `tarfile `_
16 | * Encrypt with `beefish `_ (**optional**)
17 | * Upload/download to S3 or Glacier with `boto `_
18 | * Local backups inventory stored in a SQLite database with `peewee `_
19 | * Delete older than, and `Grandfather-father-son backup rotation `_ supported
20 | * Possibility to sync backups database between multiple clients via a centralized server
21 | * Exclude files using a .gitignore-like file
22 | * Extendable with plugins
23 |
24 | You can restore backups **with** or **without** bakthat: just download the backup, decrypt it with the `Beefish `_ command-line tool and untar it.
25 |
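As an illustration, here is a minimal restore-without-bakthat sketch in Python. The beefish import and call are assumptions about its API (verify against the beefish documentation); only ``tarfile`` is from the standard library:

.. code-block:: python

    import tarfile

    from beefish import decrypt_file  # assumed helper; check beefish docs

    enc_name = "mydir.20130305193615.tgz.enc"  # the downloaded backup
    tgz_name = enc_name[:-len(".enc")]

    # Decrypt first (only needed for encrypted backups, i.e. the .enc suffix).
    decrypt_file(enc_name, tgz_name, password="mypassword")

    # Then untar with the standard library.
    with tarfile.open(tgz_name) as tar:
        tar.extractall()
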
26 | You may also check out `BakServer `_, a self-hosted Python server that helps you manage backups anywhere and keep multiple bakthat clients synchronized across servers.
27 |
28 |
29 | Requirements
30 | ------------
31 |
32 | Bakthat's requirements are installed automatically when you install bakthat, but you can also install them manually:
33 |
34 | ::
35 |
36 | $ pip install -r requirements.txt
37 |
38 |
39 | * `aaargh `_
40 | * `pycrypto `_
41 | * `beefish `_
42 | * `boto `_
43 | * `GrandFatherSon `_
44 | * `peewee `_
45 | * `byteformat `_
46 | * `pyyaml `_
47 | * `sh `_
48 | * `requests `_
49 | * `events `_
50 |
51 | If you want to use OpenStack Swift, the following additional packages are also required.
52 |
53 | * `python-swiftclient `_
54 | * `python-keystoneclient `_
55 |
56 |
57 |
58 | Overview
59 | --------
60 |
61 | Bakthat command line tool
62 | ~~~~~~~~~~~~~~~~~~~~~~~~~
63 |
64 | ::
65 |
66 | $ pip install bakthat
67 |
68 | $ bakthat configure
69 |
70 | $ bakthat backup mydir
71 | Backing up mydir
72 | Password (blank to disable encryption):
73 | Password confirmation:
74 | Compressing...
75 | Encrypting...
76 | Uploading...
77 | Upload completion: 0%
78 | Upload completion: 100%
79 |
80 | or
81 |
82 | $ cd mydir
83 | $ bakthat backup
84 |
85 | $ bakthat show
86 | 2013-03-05T19:36:15 s3 3.1 KB mydir.20130305193615.tgz.enc
87 |
88 | $ bakthat restore mydir
89 | Restoring mydir.20130305193615.tgz.enc
90 | Password:
91 | Downloading...
92 | Decrypting...
93 | Uncompressing...
94 |
95 | $ bakthat delete mydir.20130305193615.tgz.enc
96 | Deleting mydir.20130305193615.tgz.enc
97 |
98 | Bakthat Python API
99 | ~~~~~~~~~~~~~~~~~~
100 |
101 | .. code-block:: python
102 |
103 | import logging
104 | import sh
105 | logging.basicConfig(level=logging.INFO)
106 |
107 | from bakthat.helper import BakHelper
108 |
109 | BACKUP_NAME = "myhost_mysql"
110 | BACKUP_PASSWORD = "mypassword"
111 | MYSQL_USER = "root"
112 | MYSQL_PASSWORD = "mypassword"
113 |
114 | with BakHelper(BACKUP_NAME, password=BACKUP_PASSWORD, tags=["mysql"]) as bh:
115 | sh.mysqldump("-p{0}".format(MYSQL_PASSWORD),
116 | u=MYSQL_USER,
117 | all_databases=True,
118 | _out="dump.sql")
119 | bh.backup()
120 | bh.rotate()
121 |
122 |
123 | Installation
124 | ------------
125 |
126 | With pip/easy_install:
127 |
128 | ::
129 |
130 | $ pip install bakthat
131 |
132 | From source:
133 |
134 | ::
135 |
136 | $ git clone https://github.com/tsileo/bakthat.git
137 | $ cd bakthat
138 | $ sudo python setup.py install
139 |
140 |
141 | Next, you need to set your AWS credentials:
142 |
143 | ::
144 |
145 | $ bakthat configure
146 |
147 |
148 | User Guide
149 | ----------
150 |
151 | .. toctree::
152 | :maxdepth: 3
153 |
154 | user_guide
155 |
156 | Developer's Guide
157 | -----------------
158 |
159 | .. toctree::
160 | :maxdepth: 3
161 |
162 | developer_guide
163 |
164 | API Documentation
165 | -----------------
166 |
167 | .. toctree::
168 | :maxdepth: 2
169 |
170 | api
171 |
172 | Articles
173 | --------
174 |
175 | * `Bakthat 0.5.0 Released With OpenStack Swift Support and BakManager Integration `_
176 | * `Backing Up MongoDB to Amazon Glacier/S3 With Python Using Sh and Bakthat `_
177 | * `Bakthat 0.4.5 Released, Introducing a New Helper: KeyValue `_
178 | * `Bakthat 0.2.0 Released Adding Amazon Glacier Support `_
179 |
180 | Indices and tables
181 | ==================
182 |
183 | * :ref:`genindex`
184 | * :ref:`modindex`
185 | * :ref:`search`
186 |
--------------------------------------------------------------------------------
/bakthat/sync.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 | import logging
3 | import socket
4 | from bakthat.models import Backups, Config
5 | from bakthat.conf import config
6 | import requests
7 | import json
8 |
9 | log = logging.getLogger(__name__)
10 |
11 |
12 | def bakmanager_periodic_backups(conf):
13 | """Fetch periodic backups info from bakmanager.io API."""
14 | if conf.get("bakmanager_token"):
15 | bakmanager_backups_endpoint = conf.get("bakmanager_api", "https://bakmanager.io/api/keys/")
16 | r = requests.get(bakmanager_backups_endpoint, auth=(conf.get("bakmanager_token"), ""))
17 | r.raise_for_status()
18 | for key in r.json().get("_items", []):
19 | latest_date = key.get("latest", {}).get("date_human")
20 | line = "{key:20} status: {status:5} interval: {interval_human:6} total: {total_size_human:10}".format(**key)
21 | line += " latest: {0} ".format(latest_date)
22 | log.info(line)
23 | else:
24 | log.error("No bakmanager_token setting for the current profile.")
25 |
26 |
27 | def bakmanager_hook(conf, backup_data, key=None):
28 | """First version of a hook for monitoring periodic backups with BakManager
29 | (https://bakmanager.io).
30 |
31 | :type conf: dict
32 | :param conf: Current profile config
33 |
34 | :type backup_data: dict
35 | :param backup_data: Backup data (size)
36 |
37 | :type key: str
38 | :param key: Periodic backup identifier
39 | """
40 | try:
41 | if conf.get("bakmanager_token"):
42 | bakmanager_backups_endpoint = conf.get("bakmanager_api", "https://bakmanager.io/api/backups/")
43 | bak_backup = {"key": key, "host": socket.gethostname(), "size": backup_data["size"]}
44 | bak_payload = {"backup": json.dumps(bak_backup)}
45 | r = requests.post(bakmanager_backups_endpoint, bak_payload, auth=(conf.get("bakmanager_token"), ""))
46 | r.raise_for_status()
47 | else:
48 | log.error("No bakmanager_token setting for the current profile.")
49 | except Exception, exc:
50 | log.error("Error while submitting periodic backup to BakManager.")
51 | log.exception(exc)
52 |
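# Example call (a sketch; "myhost_mysql_daily" is a hypothetical periodic
# backup identifier registered with BakManager):
#
#     bakmanager_hook(conf, {"size": 123456}, key="myhost_mysql_daily")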
53 |
54 | class BakSyncer():
55 | """Helper to synchronize change on a backup set via a REST API.
56 |
57 | No sensitive information is transmitted except (you should be using https):
58 | - API user/password
59 | - a hash (hashlib.sha512) of your access_key concatenated with
60 | your s3_bucket or glacier_vault, to be able to sync multiple
61 | clients with the same configuration, stored as metadata for each backup.
62 |
63 | :type conf: dict
64 | :param conf: Config (url, username, password)
65 | """
66 | def __init__(self, conf=None):
67 | conf = {} if conf is None else conf
68 | sync_conf = dict(url=config.get("sync", {}).get("url"),
69 | username=config.get("sync", {}).get("username"),
70 | password=config.get("sync", {}).get("password"))
71 | sync_conf.update(conf)
72 |
73 | self.sync_auth = (sync_conf["username"], sync_conf["password"])
74 | self.api_url = sync_conf["url"]
75 |
76 | self.request_kwargs = dict(auth=self.sync_auth)
77 |
78 | self.request_kwargs["headers"] = {'content-type': 'application/json', 'bakthat-client': socket.gethostname()}
79 |
80 | self.get_resource = lambda x: self.api_url + "/{0}".format(x)
81 |
82 | def register(self):
83 | """Register/create the current host on the remote server if not already registered."""
84 | if not Config.get_key("client_id"):
85 | r_kwargs = self.request_kwargs.copy()
86 | r = requests.post(self.get_resource("clients"), **r_kwargs)
87 | if r.status_code == 200:
88 | client = r.json()
89 | if client:
90 | Config.set_key("client_id", client["_id"])
91 | else:
92 | log.error("An error occured during sync: {0}".format(r.text))
93 | else:
94 | log.debug("Already registered ({0})".format(Config.get_key("client_id")))
95 |
96 | def sync(self):
97 | """Draft for implementing bakthat clients (hosts) backups data synchronization.
98 |
99 | Synchronize Bakthat sqlite database via a HTTP POST request.
100 |
101 | Backups are never really deleted from sqlite database, we just update the is_deleted key.
102 |
103 | It sends the last server sync timestamp along with data updated since last sync.
104 | Then the server returns backups that have been updated on the server since last sync.
105 |
106 | On both sides, backups are either created if they don't exist or updated if the incoming version is newer.
107 | """
108 | log.debug("Start syncing")
109 |
110 | self.register()
111 |
112 | last_sync_ts = Config.get_key("sync_ts", 0)
113 | to_insert_in_mongo = [b._data for b in Backups.search(last_updated_gt=last_sync_ts)]
114 | data = dict(sync_ts=last_sync_ts, new=to_insert_in_mongo)
115 | r_kwargs = self.request_kwargs.copy()
116 | log.debug("Initial payload: {0}".format(data))
117 | r_kwargs.update({"data": json.dumps(data)})
118 | r = requests.post(self.get_resource("backups/sync"), **r_kwargs)
119 | if r.status_code != 200:
120 | log.error("An error occured during sync: {0}".format(r.text))
121 | return
122 |
123 | log.debug("Sync result: {0}".format(r.json()))
124 | to_insert_in_bakthat = r.json().get("updated", [])
125 | sync_ts = r.json().get("sync_ts")
126 | for newbackup in to_insert_in_bakthat:
127 | log.debug("Upsert {0}".format(newbackup))
128 | Backups.upsert(**newbackup)
129 |
130 | Config.set_key("sync_ts", sync_ts)
131 |
132 | log.debug("Sync succcesful")
133 |
134 | def reset_sync(self):
135 | log.debug("reset sync")
136 | Config.set_key("sync_ts", 0)
137 | Config.set_key("client_id", None)
138 |
139 | def sync_auto(self):
140 | """Trigger sync if autosync is enabled."""
141 | if config.get("sync", {}).get("auto", False):
142 | self.sync()
143 |
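# Example usage (a sketch; the URL is a placeholder for whatever REST server
# you run, e.g. a BakServer instance; the keys mirror the "sync" section of
# the bakthat config):
#
#     syncer = BakSyncer({"url": "https://backups.example.com/api",
#                         "username": "user",
#                         "password": "secret"})
#     syncer.sync()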
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | =======
2 | Bakthat
3 | =======
4 |
5 | I stopped working on bakthat; I'm now working on another backup-related project: `blobstash `_/`blobsnap `_.
6 | If somebody wants to become a collaborator and continue development, please open an issue.
7 |
8 | Bakthat is an MIT licensed backup framework written in Python. It's both a command line tool and a Python module that helps you manage backups on Amazon `S3 `_/`Glacier `_ and OpenStack `Swift `_. It automatically compresses, encrypts (symmetric encryption) and uploads your files.
9 |
10 | Here are some features:
11 |
12 | * Compress with `tarfile `_
13 | * Encrypt with `beefish `_ (**optional**)
14 | * Upload/download to S3 or Glacier with `boto `_
15 | * Local backups inventory stored in a SQLite database with `peewee `_
16 | * Delete older than, and `Grandfather-father-son backup rotation `_ supported
17 | * Possibility to sync backups database between multiple clients via a centralized server
18 | * Exclude files using a .gitignore-like file
19 | * Extendable with plugins
20 |
21 | You can restore backups **with** or **without** bakthat: just download the backup, decrypt it with the `Beefish `_ command-line tool and untar it.
22 |
23 | Check out `the documentation to get started `_.
24 |
25 |
26 | Overview
27 | --------
28 |
29 | Bakthat command line tool
30 | ~~~~~~~~~~~~~~~~~~~~~~~~~
31 |
32 | ::
33 |
34 | $ pip install bakthat
35 |
36 | $ bakthat configure
37 |
38 | $ bakthat backup mydir
39 | Backing up mydir
40 | Password (blank to disable encryption):
41 | Password confirmation:
42 | Compressing...
43 | Encrypting...
44 | Uploading...
45 | Upload completion: 0%
46 | Upload completion: 100%
47 |
48 | or
49 |
50 | $ cd mydir
51 | $ bakthat backup
52 |
53 | $ bakthat show
54 | 2013-03-05T19:36:15 s3 3.1 KB mydir.20130305193615.tgz.enc
55 |
56 | $ bakthat restore mydir
57 | Restoring mydir.20130305193615.tgz.enc
58 | Password:
59 | Downloading...
60 | Decrypting...
61 | Uncompressing...
62 |
63 | $ bakthat delete mydir.20130305193615.tgz.enc
64 | Deleting mydir.20130305193615.tgz.enc
65 |
66 | Bakthat Python API
67 | ~~~~~~~~~~~~~~~~~~
68 |
69 | .. code-block:: python
70 |
71 | import logging
72 | import sh
73 | logging.basicConfig(level=logging.INFO)
74 |
75 | from bakthat.helper import BakHelper
76 |
77 | BACKUP_NAME = "myhost_mysql"
78 | BACKUP_PASSWORD = "mypassword"
79 | MYSQL_USER = "root"
80 | MYSQL_PASSWORD = "mypassword"
81 |
82 | with BakHelper(BACKUP_NAME, password=BACKUP_PASSWORD, tags=["mysql"]) as bh:
83 | sh.mysqldump("-p{0}".format(MYSQL_PASSWORD),
84 | u=MYSQL_USER,
85 | all_databases=True,
86 | _out="dump.sql")
87 | bh.backup()
88 | bh.rotate()
89 |
90 |
91 | Changelog
92 | ---------
93 |
94 | 0.7.0
95 | ~~~~~
96 |
97 | **Not released yet**, developed in the **develop** branch.
98 |
99 | - Incremental backups support, with `Incremental-Backups-Tools `_.
100 | - Revamped configuration handling
101 | - Stronger unit tests
102 | - Plugin architecture improved
103 | - Switch from aaargh to cliff for the CLI handling
104 |
105 | 0.6.0
106 | ~~~~~
107 |
108 | **June 5 2013**
109 |
110 | - Event hooks handling
111 | - Support for plugins
112 |
113 | 0.5.5
114 | ~~~~~
115 |
116 | **May 26 2013**
117 |
118 | - Support for excluding files, using a .bakthatexclude/.gitignore file, or a custom file.
119 | - Added support for reduced redundancy when using S3
120 |
121 | 0.5.4
122 | ~~~~~
123 |
124 | **May 8 2013**
125 |
126 | - Better log handling
127 | - Allow more complex rotation schemes
128 |
129 | 0.5.3
130 | ~~~~~
131 |
132 | **May 6 2013**
133 |
134 | - Bugfix config
135 |
136 | 0.5.2
137 | ~~~~~
138 |
139 | **May 6 2013**
140 |
141 | - new BAKTHAT_PASSWORD environment variable to set password from command line.
142 |
143 | 0.5.1
144 | ~~~~~
145 |
146 | **May 5 2013**
147 |
148 | - New **-c**/**--config** argument.
149 | - New periodic_backups command tied to `BakManager API `_.
150 |
151 | 0.5.0
152 | ~~~~~
153 |
154 | **April 21 2013**
155 |
156 | - New Swift backend, thanks to @yoyama
157 | - ls command removed in favor of the show command
158 | - Compression can now be disabled with the compress setting
159 | - Bugfix default destination
160 |
161 | 0.4.5
162 | ~~~~~
163 |
164 | **March 20 2013**
165 |
166 | - bugfix configure (cancelling the configure cmd caused an empty yml), thanks to @yoyama
167 | - new bakthat.helper.KeyValue
168 | - BakSyncer improvement
169 |
170 | 0.4.4
171 | ~~~~~
172 |
173 | **March 10 2013**
174 |
175 | - bugfix (forgot to remove a dumptruck import)
176 |
177 | 0.4.3
178 | ~~~~~
179 |
180 | **March 10 2013**
181 |
182 | - bakthat show bugfix
183 |
184 | 0.4.2
185 | ~~~~~
186 |
187 | **March 10 2013**
188 |
189 | - Using `peewee `_ instead of dumptruck, should be Python2.6 compatible again.
190 |
191 |
192 | 0.4.1
193 | ~~~~~
194 |
195 | **March 8 2013**
196 |
197 | - small bugfix when restoring from glacier
198 | - bakhelper now support custom configuration and profiles
199 | - aligned date in show command
200 |
201 | 0.4.0
202 | ~~~~~
203 |
204 | If you come from bakthat 0.3.x, you need to run:
205 |
206 | ::
207 |
208 | $ bakthat upgrade_to_dump_truck
209 |
210 | And you also need to run **bakthat configure** again.
211 |
212 | ::
213 |
214 | $ cat ~/.bakthat.conf
215 | $ bakthat configure
216 |
217 | **Changes:**
218 |
219 | - The filename is now a positional argument for all commands
220 | - Using `DumpTruck `_ instead of `shelve `_
221 | - Save backups metadata for both backends
222 | - BakHelper to help build backup scripts
223 | - BakSyncer to help keep your backups list in sync via a custom REST API
224 | - Now adding a dot between the original filename and the date component
225 | - Tags support (-t/--tags argument)
226 | - Profiles support (-p/--profile argument)
227 | - New show command, with search support (tags/filename/destination)
228 | - `Hosted documentation `_
229 |
230 |
231 | 0.3.10
232 | ~~~~~~
233 |
234 | - bug fix glacier upload
235 |
236 | 0.3.9
237 | ~~~~~
238 |
239 | - small bug fixes (when updating an existing configuration)
240 |
241 | 0.3.8
242 | ~~~~~
243 |
244 | - Added **delete_older_than** command
245 | - Added **rotate_backups** command (Grandfather-father-son backup rotation scheme)
246 |
247 |
248 | Contributors
249 | ------------
250 |
251 | - Eric Chamberlain
252 | - Darius Braziunas
253 | - Sławomir Żak
254 | - Andreyev Dias de Melo
255 | - Jake McGraw
256 | - You Yamagata
257 | - Jordi Funollet
258 |
259 |
260 | License (MIT)
261 | -------------
262 |
263 | Copyright (c) 2012 Thomas Sileo
264 |
265 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
266 |
267 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
268 |
269 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
270 |
--------------------------------------------------------------------------------
/test_bakthat.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 | import bakthat
3 | import tempfile
4 | import hashlib
5 | import os
6 | import time
7 | import unittest
8 | import logging
9 |
10 | log = logging.getLogger()
11 |
12 | handler = logging.StreamHandler()
13 | handler.setLevel(logging.DEBUG)
14 | handler.addFilter(bakthat.BakthatFilter())
15 | handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
16 | log.addHandler(handler)
17 | log.setLevel(logging.DEBUG)
18 |
19 |
20 | class BakthatTestCase(unittest.TestCase):
21 |
22 | def setUp(self):
23 | self.test_file = tempfile.NamedTemporaryFile()
24 | self.test_file.write("Bakthat Test File")
25 | self.test_file.seek(0)
26 | self.test_filename = self.test_file.name.split("/")[-1]
27 | self.test_hash = hashlib.sha1(self.test_file.read()).hexdigest()
28 | self.password = "bakthat_encrypted_test"
29 |
30 | def test_internals(self):
31 | with self.assertRaises(Exception):
32 | bakthat._interval_string_to_seconds("1z")
33 |
34 | self.assertEqual(bakthat._interval_string_to_seconds("2D1h"), 86400 * 2 + 3600)
35 | self.assertEqual(bakthat._interval_string_to_seconds("3M"), 3*30*86400)
36 |
37 | def test_keyvalue_helper(self):
38 | from bakthat.helper import KeyValue
39 | kv = KeyValue()
40 | test_string = "Bakthat Test str"
41 | test_key = "bakthat-unittest"
42 | test_key_enc = "bakthat-unittest-testenc"
43 | test_key2 = "itshouldfail"
44 | test_password = "bakthat-password"
45 | kv.set_key(test_key, test_string)
46 | kv.set_key(test_key_enc, test_string, password=test_password)
47 | self.assertEqual(test_string, kv.get_key(test_key))
48 | self.assertEqual(test_string, kv.get_key(test_key_enc, password=test_password))
49 | #from urllib2 import urlopen, HTTPError
50 | #test_url = kv.get_key_url(test_key, 10)
51 | #self.assertEqual(json.loads(urlopen(test_url).read()), test_string)
52 | #time.sleep(30)
53 | #with self.assertRaises(HTTPError):
54 | # urlopen(test_url).read()
55 | kv.delete_key(test_key_enc)
56 | kv.delete_key(test_key)
57 | self.assertEqual(kv.get_key(test_key), None)
58 | self.assertEqual(kv.get_key(test_key2), None)
59 |
60 |
61 | def test_s3_backup_restore(self):
62 | backup_data = bakthat.backup(self.test_file.name, "s3", password="")
63 | log.info(backup_data)
64 |
65 | #self.assertEqual(bakthat.match_filename(self.test_filename, "s3")[0]["filename"],
66 | # self.test_filename)
67 |
68 | bakthat.restore(self.test_filename, "s3")
69 |
70 | restored_hash = hashlib.sha1(open(self.test_filename).read()).hexdigest()
71 |
72 | self.assertEqual(self.test_hash, restored_hash)
73 |
74 | os.remove(self.test_filename)
75 |
76 | bakthat.delete(self.test_filename, "s3")
77 |
78 | #self.assertEqual(bakthat.match_filename(self.test_filename), [])
79 |
80 | def test_s3_delete_older_than(self):
81 | backup_res = bakthat.backup(self.test_file.name, "s3", password="")
82 |
83 | #self.assertEqual(bakthat.match_filename(self.test_filename, "s3")[0]["filename"],
84 | # self.test_filename)
85 |
86 | bakthat.restore(self.test_filename, "s3")
87 |
88 | restored_hash = hashlib.sha1(open(self.test_filename).read()).hexdigest()
89 |
90 | self.assertEqual(self.test_hash, restored_hash)
91 |
92 | os.remove(self.test_filename)
93 |
94 | test_deleted = bakthat.delete_older_than(self.test_filename, "1Y", destination="s3")
95 |
96 | self.assertEqual(test_deleted, [])
97 |
98 | time.sleep(10)
99 |
100 | test_deleted = bakthat.delete_older_than(self.test_filename, "9s", destination="s3")
101 |
102 | key_deleted = test_deleted[0]
103 |
104 | self.assertEqual(key_deleted.stored_filename, backup_res.stored_filename)
105 |
106 | #self.assertEqual(bakthat.match_filename(self.test_filename), [])
107 |
108 | def test_s3_encrypted_backup_restore(self):
109 | bakthat.backup(self.test_file.name, "s3", password=self.password)
110 |
111 | #self.assertEqual(bakthat.match_filename(self.test_filename, "s3")[0]["filename"],
112 | # self.test_filename)
113 |
114 | # Check if stored file is encrypted
115 | #self.assertTrue(bakthat.match_filename(self.test_filename, "s3")[0]["is_enc"])
116 |
117 | bakthat.restore(self.test_filename, "s3", password=self.password)
118 |
119 | restored_hash = hashlib.sha1(open(self.test_filename).read()).hexdigest()
120 |
121 | self.assertEqual(self.test_hash, restored_hash)
122 |
123 | os.remove(self.test_filename)
124 |
125 | bakthat.delete(self.test_filename, "s3")
126 |
127 | #self.assertEqual(bakthat.match_filename(self.test_filename), [])
128 |
129 | def test_glacier_backup_restore(self):
130 | if raw_input("Test glacier upload/download ? It can take up to 4 hours ! (y/N): ").lower() == "y":
131 |
132 | # Backup dummy file
133 | bakthat.backup(self.test_file.name, "glacier", password="")
134 |
135 | # Check that file is showing up in bakthat ls
136 | #self.assertEqual(bakthat.match_filename(self.test_filename, "glacier")[0]["filename"],
137 | # self.test_filename)
138 | # TODO replace by a Backups.search
139 |
140 | # We initialize glacier backend
141 | # to check that the file appear in both local and remote (S3) inventory
142 | #glacier_backend = GlacierBackend(None)
143 |
144 | #archives = glacier_backend.load_archives()
145 | #archives_s3 = glacier_backend.load_archives_from_s3()
146 |
147 | # Check that local and remote custom inventory are equal
148 | #self.assertEqual(archives, archives_s3)
149 |
150 | # Next we check that the file is stored in both inventories
151 | #inventory_key_name = bakthat.match_filename(self.test_filename, "glacier")[0]["key"]
152 |
153 | #self.assertTrue(inventory_key_name in [a.get("filename") for a in archives])
154 | #self.assertTrue(inventory_key_name in [a.get("filename") for a in archives_s3])
155 |
156 | # Restore backup
157 | job = bakthat.restore(self.test_filename, "glacier", job_check=True)
158 |
159 | # Check that a job is initiated
160 | self.assertEqual(job.__dict__["action"], "ArchiveRetrieval")
161 | self.assertEqual(job.__dict__["status_code"], "InProgress")
162 |
163 | while 1:
164 | # Check every ten minutes if the job is done
165 | result = bakthat.restore(self.test_filename, "glacier")
166 |
167 | # If job is done, we can download the file
168 | if result:
169 | restored_hash = hashlib.sha1(open(self.test_filename).read()).hexdigest()
170 |
171 | # Check if the hash of the restored file is equal to inital file hash
172 | self.assertEqual(self.test_hash, restored_hash)
173 |
174 | os.remove(self.test_filename)
175 |
176 | # Now, we can delete the restored file
177 | bakthat.delete(self.test_filename, "glacier")
178 |
179 | # Check that the file is deleted
180 | #self.assertEqual(bakthat.match_filename(self.test_filename, "glacier"), [])
181 | # TODO Backups.search
182 |
183 | #archives = glacier_backend.load_archives()
184 | #archives_s3 = glacier_backend.load_archives_from_s3()
185 |
186 | # Check if the file has been removed from both archives
187 | #self.assertEqual(archives, archives_s3)
188 | #self.assertTrue(inventory_key_name not in archives)
189 | #self.assertTrue(inventory_key_name not in archives_s3)
190 |
191 | break
192 | else:
193 | time.sleep(600)
194 |
195 | if __name__ == '__main__':
196 | unittest.main()
197 |
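198 | # Usage note (hypothetical, not part of the original file): unittest's CLI
199 | # also lets you run a single test case or method, e.g.:
200 | #
201 | #     python test_bakthat.py BakthatTestCase.test_internals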
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Bakthat documentation build configuration file, created by
4 | # sphinx-quickstart on Fri Mar 1 10:32:38 2013.
5 | #
6 | # This file is execfile()d with the current directory set to its containing dir.
7 | #
8 | # Note that not all possible configuration values are present in this
9 | # autogenerated file.
10 | #
11 | # All configuration values have a default; values that are commented out
12 | # serve to show the default.
13 |
14 | import sys, os
15 |
16 | # If extensions (or modules to document with autodoc) are in another directory,
17 | # add these directories to sys.path here. If the directory is relative to the
18 | # documentation root, use os.path.abspath to make it absolute, like shown here.
19 | #sys.path.insert(0, os.path.abspath('.'))
20 |
21 | # -- General configuration -----------------------------------------------------
22 |
23 | # If your documentation needs a minimal Sphinx version, state it here.
24 | #needs_sphinx = '1.0'
25 |
26 | # Add any Sphinx extension module names here, as strings. They can be extensions
27 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
28 | extensions = ['sphinx.ext.autodoc']
29 |
30 | # Add any paths that contain templates here, relative to this directory.
31 | templates_path = ['_templates']
32 |
33 | # The suffix of source filenames.
34 | source_suffix = '.rst'
35 |
36 | # The encoding of source files.
37 | #source_encoding = 'utf-8-sig'
38 |
39 | # The master toctree document.
40 | master_doc = 'index'
41 |
42 | # General information about the project.
43 | project = u'Bakthat'
44 | copyright = u'2013, Thomas Sileo'
45 |
46 | # The version info for the project you're documenting, acts as replacement for
47 | # |version| and |release|, also used in various other places throughout the
48 | # built documents.
49 | #
50 | # The short X.Y version.
51 | version = '0.6.0'
52 | # The full version, including alpha/beta/rc tags.
53 | release = '0.6.0'
54 |
55 | # The language for content autogenerated by Sphinx. Refer to documentation
56 | # for a list of supported languages.
57 | #language = None
58 |
59 | # There are two options for replacing |today|: either, you set today to some
60 | # non-false value, then it is used:
61 | #today = ''
62 | # Else, today_fmt is used as the format for a strftime call.
63 | #today_fmt = '%B %d, %Y'
64 |
65 | # List of patterns, relative to source directory, that match files and
66 | # directories to ignore when looking for source files.
67 | exclude_patterns = ['_build']
68 |
69 | # The reST default role (used for this markup: `text`) to use for all documents.
70 | #default_role = None
71 |
72 | # If true, '()' will be appended to :func: etc. cross-reference text.
73 | #add_function_parentheses = True
74 |
75 | # If true, the current module name will be prepended to all description
76 | # unit titles (such as .. function::).
77 | #add_module_names = True
78 |
79 | # If true, sectionauthor and moduleauthor directives will be shown in the
80 | # output. They are ignored by default.
81 | #show_authors = False
82 |
83 | # The name of the Pygments (syntax highlighting) style to use.
84 | pygments_style = 'sphinx'
85 |
86 | # A list of ignored prefixes for module index sorting.
87 | #modindex_common_prefix = []
88 |
89 |
90 | # -- Options for HTML output ---------------------------------------------------
91 | sys.path.append(os.path.abspath('_themes'))
92 | html_theme_path = ['_themes']
93 | html_theme = 'kr'
94 | # The theme to use for HTML and HTML Help pages. See the documentation for
95 | # a list of builtin themes.
96 | #html_theme = 'default'
97 |
98 | # Theme options are theme-specific and customize the look and feel of a theme
99 | # further. For a list of options available for each theme, see the
100 | # documentation.
101 | #html_theme_options = {}
102 |
103 | # Add any paths that contain custom themes here, relative to this directory.
104 | #html_theme_path = []
105 |
106 | # The name for this set of Sphinx documents. If None, it defaults to
107 | # " v documentation".
108 | #html_title = None
109 |
110 | # A shorter title for the navigation bar. Default is the same as html_title.
111 | #html_short_title = None
112 |
113 | # The name of an image file (relative to this directory) to place at the top
114 | # of the sidebar.
115 | #html_logo = None
116 |
117 | # The name of an image file (within the static path) to use as favicon of the
118 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
119 | # pixels large.
120 | #html_favicon = None
121 |
122 | # Add any paths that contain custom static files (such as style sheets) here,
123 | # relative to this directory. They are copied after the builtin static files,
124 | # so a file named "default.css" will overwrite the builtin "default.css".
125 | html_static_path = ['_static']
126 |
127 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
128 | # using the given strftime format.
129 | #html_last_updated_fmt = '%b %d, %Y'
130 |
131 | # If true, SmartyPants will be used to convert quotes and dashes to
132 | # typographically correct entities.
133 | #html_use_smartypants = True
134 |
135 | # Custom sidebar templates, maps document names to template names.
136 | html_sidebars = {
137 | 'index': ['sidebarintro.html', 'localtoc.html', 'searchbox.html', 'sidebarend.html'],
138 |     '**': ['sidebarintro.html', 'localtoc.html', 'relations.html',
139 |            'searchbox.html', 'sidebarend.html'],
140 | }
141 |
142 | # Additional templates that should be rendered to pages, maps page names to
143 | # template names.
144 | #html_additional_pages = {}
145 |
146 | # If false, no module index is generated.
147 | #html_domain_indices = True
148 |
149 | # If false, no index is generated.
150 | #html_use_index = True
151 |
152 | # If true, the index is split into individual pages for each letter.
153 | #html_split_index = False
154 |
155 | # If true, links to the reST sources are added to the pages.
156 | #html_show_sourcelink = True
157 |
158 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
159 | #html_show_sphinx = True
160 |
161 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
162 | #html_show_copyright = True
163 |
164 | # If true, an OpenSearch description file will be output, and all pages will
165 | # contain a <link> tag referring to it. The value of this option must be the
166 | # base URL from which the finished HTML is served.
167 | #html_use_opensearch = ''
168 |
169 | # This is the file name suffix for HTML files (e.g. ".xhtml").
170 | #html_file_suffix = None
171 |
172 | # Output file base name for HTML help builder.
173 | htmlhelp_basename = 'Bakthatdoc'
174 |
175 |
176 | # -- Options for LaTeX output --------------------------------------------------
177 |
178 | latex_elements = {
179 | # The paper size ('letterpaper' or 'a4paper').
180 | #'papersize': 'letterpaper',
181 |
182 | # The font size ('10pt', '11pt' or '12pt').
183 | #'pointsize': '10pt',
184 |
185 | # Additional stuff for the LaTeX preamble.
186 | #'preamble': '',
187 | }
188 |
189 | # Grouping the document tree into LaTeX files. List of tuples
190 | # (source start file, target name, title, author, documentclass [howto/manual]).
191 | latex_documents = [
192 | ('index', 'Bakthat.tex', u'Bakthat Documentation',
193 | u'Thomas Sileo', 'manual'),
194 | ]
195 |
196 | # The name of an image file (relative to this directory) to place at the top of
197 | # the title page.
198 | #latex_logo = None
199 |
200 | # For "manual" documents, if this is true, then toplevel headings are parts,
201 | # not chapters.
202 | #latex_use_parts = False
203 |
204 | # If true, show page references after internal links.
205 | #latex_show_pagerefs = False
206 |
207 | # If true, show URL addresses after external links.
208 | #latex_show_urls = False
209 |
210 | # Documents to append as an appendix to all manuals.
211 | #latex_appendices = []
212 |
213 | # If false, no module index is generated.
214 | #latex_domain_indices = True
215 |
216 |
217 | # -- Options for manual page output --------------------------------------------
218 |
219 | # One entry per manual page. List of tuples
220 | # (source start file, name, description, authors, manual section).
221 | man_pages = [
222 | ('index', 'bakthat', u'Bakthat Documentation',
223 | [u'Thomas Sileo'], 1)
224 | ]
225 |
226 | # If true, show URL addresses after external links.
227 | #man_show_urls = False
228 |
229 |
230 | # -- Options for Texinfo output ------------------------------------------------
231 |
232 | # Grouping the document tree into Texinfo files. List of tuples
233 | # (source start file, target name, title, author,
234 | # dir menu entry, description, category)
235 | texinfo_documents = [
236 | ('index', 'Bakthat', u'Bakthat Documentation',
237 | u'Thomas Sileo', 'Bakthat', 'One line description of project.',
238 | 'Miscellaneous'),
239 | ]
240 |
241 | # Documents to append as an appendix to all manuals.
242 | #texinfo_appendices = []
243 |
244 | # If false, no module index is generated.
245 | #texinfo_domain_indices = True
246 |
247 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
248 | #texinfo_show_urls = 'footnote'
249 |
--------------------------------------------------------------------------------
/docs/_themes/kr/static/flasky.css_t:
--------------------------------------------------------------------------------
1 | /*
2 | * flasky.css_t
3 | * ~~~~~~~~~~~~
4 | *
5 | * :copyright: Copyright 2010 by Armin Ronacher. Modifications by Kenneth Reitz.
6 | * :license: Flask Design License, see LICENSE for details.
7 | */
8 |
9 | {% set page_width = '940px' %}
10 | {% set sidebar_width = '220px' %}
11 |
12 | @import url("basic.css");
13 | @import url("//netdna.bootstrapcdn.com/font-awesome/3.1.1/css/font-awesome.css");
14 | /* -- page layout ----------------------------------------------------------- */
15 |
16 | @import url('//fonts.googleapis.com/css?family=Open+Sans:400italic,700italic,400,700|Inconsolata');
17 |
18 | body {
19 | font-family: "Open Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;
20 | font-size: 15px;
21 | background-color: white;
22 | color: #000;
23 | margin: 0;
24 | padding: 0;
25 | }
26 |
27 | div.document {
28 | width: {{ page_width }};
29 | margin: 30px auto 0 auto;
30 | }
31 |
32 | div.documentwrapper {
33 | float: left;
34 | width: 100%;
35 | }
36 |
37 | div.bodywrapper {
38 | margin: 0 0 0 {{ sidebar_width }};
39 | }
40 |
41 | div.sphinxsidebar {
42 | width: {{ sidebar_width }};
43 | }
44 |
45 | hr {
46 | border: 1px solid #B1B4B6;
47 | }
48 |
49 | div.body {
50 | background-color: #ffffff;
51 | color: #3E4349;
52 | padding: 0 30px 0 30px;
53 | }
54 |
55 | img.floatingflask {
56 | padding: 0 0 10px 10px;
57 | float: right;
58 | }
59 |
60 | div.footer {
61 | width: {{ page_width }};
62 | margin: 20px auto 30px auto;
63 | font-size: 14px;
64 | color: #888;
65 | text-align: right;
66 | }
67 |
68 | div.footer a {
69 | color: #888;
70 | }
71 |
72 | div.related {
73 | display: none;
74 | }
75 |
76 | div.sphinxsidebar a {
77 | color: #444;
78 | text-decoration: none;
79 | border-bottom: 1px dotted #999;
80 | }
81 |
82 | div.sphinxsidebar a:hover {
83 | border-bottom: 1px solid #999;
84 | }
85 |
86 | div.sphinxsidebar {
87 | font-size: 14px;
88 | line-height: 1.5;
89 | }
90 |
91 | div.sphinxsidebarwrapper {
92 | padding: 18px 10px;
93 | }
94 |
95 | div.sphinxsidebarwrapper p.logo {
96 | padding: 0;
97 | margin: -10px 0 0 -20px;
98 | text-align: center;
99 | }
100 |
101 | div.sphinxsidebar h3,
102 | div.sphinxsidebar h4 {
103 | color: #444;
104 | font-size: 24px;
105 | font-weight: normal;
106 | margin: 0 0 5px 0;
107 | padding: 0;
108 | }
109 |
110 | div.sphinxsidebar h4 {
111 | font-size: 20px;
112 | }
113 |
114 | div.sphinxsidebar h3 a {
115 | color: #444;
116 | }
117 |
118 | div.sphinxsidebar p.logo a,
119 | div.sphinxsidebar h3 a,
120 | div.sphinxsidebar p.logo a:hover,
121 | div.sphinxsidebar h3 a:hover {
122 | border: none;
123 | }
124 |
125 | div.sphinxsidebar p {
126 | color: #555;
127 | margin: 10px 0;
128 | }
129 |
130 | div.sphinxsidebar ul {
131 | margin: 10px 0;
132 | padding: 0;
133 | color: #000;
134 | }
135 |
136 | div.sphinxsidebar input {
137 | border: 1px solid #ccc;
138 | font-size: 1em;
139 | }
140 |
141 | /* -- body styles ----------------------------------------------------------- */
142 |
143 | a {
144 | color: #004B6B;
145 | text-decoration: underline;
146 | }
147 |
148 | a:hover {
149 | color: #6D4100;
150 | text-decoration: underline;
151 | }
152 |
153 | div.body h1,
154 | div.body h2,
155 | div.body h3,
156 | div.body h4,
157 | div.body h5,
158 | div.body h6 {
159 | font-weight: normal;
160 | margin: 30px 0px 10px 0px;
161 | padding: 0;
162 | }
163 |
164 | div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; }
165 | div.body h2 { font-size: 180%; }
166 | div.body h3 { font-size: 150%; }
167 | div.body h4 { font-size: 130%; }
168 | div.body h5 { font-size: 100%; }
169 | div.body h6 { font-size: 100%; }
170 |
171 | a.headerlink {
172 | color: #ddd;
173 | padding: 0 4px;
174 | text-decoration: none;
175 | }
176 |
177 | a.headerlink:hover {
178 | color: #444;
179 | background: #eaeaea;
180 | }
181 |
182 | div.body p, div.body dd, div.body li {
183 | line-height: 1.4em;
184 | }
185 |
186 | div.admonition {
187 | background: #fafafa;
188 | margin: 20px -30px;
189 | padding: 10px 30px;
190 | border-top: 1px solid #ccc;
191 | border-bottom: 1px solid #ccc;
192 | }
193 |
194 | div.admonition tt.xref, div.admonition a tt {
195 | border-bottom: 1px solid #fafafa;
196 | }
197 |
198 | dd div.admonition {
199 | margin-left: -60px;
200 | padding-left: 60px;
201 | }
202 |
203 | div.admonition p.admonition-title {
204 | font-weight: normal;
205 | font-size: 24px;
206 | margin: 0 0 10px 0;
207 | padding: 0;
208 | line-height: 1;
209 | }
210 |
211 | div.admonition p.last {
212 | margin-bottom: 0;
213 | }
214 |
215 | div.highlight {
216 | background-color: white;
217 | }
218 |
219 | dt:target, .highlight {
220 | background: #FAF3E8;
221 | }
222 |
223 | div.note {
224 | background-color: #eee;
225 | border: 1px solid #ccc;
226 | }
227 |
228 | div.seealso {
229 | background-color: #ffc;
230 | border: 1px solid #ff6;
231 | }
232 |
233 | div.topic {
234 | background-color: #eee;
235 | }
236 |
237 | p.admonition-title {
238 | display: inline;
239 | }
240 |
241 | p.admonition-title:after {
242 | content: ":";
243 | }
244 |
245 | pre, tt {
246 | font-family: 'Inconsolata', 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
247 | }
248 |
249 | img.screenshot {
250 | }
251 |
252 | tt.descname, tt.descclassname {
253 | font-size: 0.95em;
254 | }
255 |
256 | tt.descname {
257 | padding-right: 0.08em;
258 | }
259 |
260 | img.screenshot {
261 | -moz-box-shadow: 2px 2px 4px #eee;
262 | -webkit-box-shadow: 2px 2px 4px #eee;
263 | box-shadow: 2px 2px 4px #eee;
264 | }
265 |
266 | table.docutils {
267 | border: 1px solid #888;
268 | -moz-box-shadow: 2px 2px 4px #eee;
269 | -webkit-box-shadow: 2px 2px 4px #eee;
270 | box-shadow: 2px 2px 4px #eee;
271 | }
272 |
273 | table.docutils td, table.docutils th {
274 | border: 1px solid #888;
275 | padding: 0.25em 0.7em;
276 | }
277 |
278 | table.field-list, table.footnote {
279 | border: none;
280 | -moz-box-shadow: none;
281 | -webkit-box-shadow: none;
282 | box-shadow: none;
283 | }
284 |
285 | table.footnote {
286 | margin: 15px 0;
287 | width: 100%;
288 | border: 1px solid #eee;
289 | background: #fdfdfd;
290 | font-size: 0.9em;
291 | }
292 |
293 | table.footnote + table.footnote {
294 | margin-top: -15px;
295 | border-top: none;
296 | }
297 |
298 | table.field-list th {
299 | padding: 0 0.8em 0 0;
300 | font-size: 12px;
301 | width: 100px;
302 | }
303 |
304 | table.field-list td {
305 | padding: 0;
306 | }
307 |
308 | table.footnote td.label {
309 | width: 0px;
310 | padding: 0.3em 0 0.3em 0.5em;
311 | }
312 |
313 | table.footnote td {
314 | padding: 0.3em 0.5em;
315 | }
316 |
317 | dl {
318 | margin: 0;
319 | padding: 0;
320 | }
321 |
322 | dl dd {
323 | margin-left: 30px;
324 | }
325 |
326 | blockquote {
327 | margin: 0 0 0 30px;
328 | padding: 0;
329 | }
330 |
331 | ul, ol {
332 | margin: 10px 0 10px 30px;
333 | padding: 0;
334 | }
335 |
336 | pre {
337 | background: #eee;
338 | padding: 7px 30px;
339 | margin: 15px -30px;
340 | line-height: 1.3em;
341 | }
342 |
343 | dl pre, blockquote pre, li pre {
344 | margin-left: -60px;
345 | padding-left: 60px;
346 | }
347 |
348 | dl dl pre {
349 | margin-left: -90px;
350 | padding-left: 90px;
351 | }
352 |
353 | tt {
354 | background-color: #ecf0f3;
355 | color: #222;
356 | /* padding: 1px 2px; */
357 | }
358 |
359 | tt.xref, a tt {
360 | background-color: #FBFBFB;
361 | border-bottom: 1px solid white;
362 | }
363 |
364 | a.reference {
365 | text-decoration: none;
366 | border-bottom: 1px dotted #004B6B;
367 | }
368 |
369 | a.reference:hover {
370 | border-bottom: 1px solid #6D4100;
371 | }
372 |
373 | a.footnote-reference {
374 | text-decoration: none;
375 | font-size: 0.7em;
376 | vertical-align: top;
377 | border-bottom: 1px dotted #004B6B;
378 | }
379 |
380 | a.footnote-reference:hover {
381 | border-bottom: 1px solid #6D4100;
382 | }
383 |
384 | a:hover tt {
385 | background: #EEE;
386 | }
387 |
388 |
389 | @media screen and (max-width: 600px) {
390 |
391 | div.sphinxsidebar {
392 | display: none;
393 | }
394 |
395 | div.document {
396 | width: 100%;
397 |
398 | }
399 |
400 | div.documentwrapper {
401 | margin-left: 0;
402 | margin-top: 0;
403 | margin-right: 0;
404 | margin-bottom: 0;
405 | }
406 |
407 | div.bodywrapper {
408 | margin-top: 0;
409 | margin-right: 0;
410 | margin-bottom: 0;
411 | margin-left: 0;
412 | }
413 |
414 | ul {
415 | margin-left: 0;
416 | }
417 |
418 | .document {
419 | width: auto;
420 | }
421 |
422 | .footer {
423 | width: auto;
424 | }
425 |
426 | .bodywrapper {
427 | margin: 0;
428 | }
429 |
430 | .footer {
431 | width: auto;
432 | }
433 |
434 | .github {
435 | display: none;
436 | }
437 |
438 | }
439 |
440 | /* misc. */
441 |
442 | .revsys-inline {
443 | display: none!important;
444 | }
445 |
446 | div.sphinxsidebar #searchbox input[type="text"] {
447 | width: 150px;
448 | }
--------------------------------------------------------------------------------
/bakthat/models.py:
--------------------------------------------------------------------------------
1 | import peewee
2 | from datetime import datetime
3 | from bakthat.conf import config, load_config, DATABASE
4 | import hashlib
5 | import json
6 | import sqlite3
7 | import os
8 | import requests
9 | import logging
10 |
11 | log = logging.getLogger(__name__)
12 |
13 | database = peewee.SqliteDatabase(DATABASE)
14 |
15 |
16 | class JsonField(peewee.CharField):
17 | """Custom JSON field."""
18 | def db_value(self, value):
19 | return json.dumps(value)
20 |
21 | def python_value(self, value):
22 | try:
23 | return json.loads(value)
24 |         except (TypeError, ValueError):
25 | return value
26 |
27 |
28 | class BaseModel(peewee.Model):
29 | class Meta:
30 | database = database
31 |
32 |
33 | class SyncedModel(peewee.Model):
34 | class Meta:
35 | database = database
36 |
37 |
38 | class History(BaseModel):
39 | """History for sync."""
40 | data = JsonField()
41 | ts = peewee.IntegerField(index=True)
42 | action = peewee.CharField(index=True)
43 | model = peewee.CharField(index=True)
44 | pk = peewee.CharField(index=True)
45 |
46 | class Meta:
47 | db_table = 'history'
48 |
49 |
50 | class Backups(SyncedModel):
51 | """Backups Model."""
52 | backend = peewee.CharField(index=True)
53 | backend_hash = peewee.CharField(index=True, null=True)
54 | backup_date = peewee.IntegerField(index=True)
55 | filename = peewee.TextField(index=True)
56 | is_deleted = peewee.BooleanField()
57 | last_updated = peewee.IntegerField()
58 | metadata = JsonField()
59 | size = peewee.IntegerField()
60 | stored_filename = peewee.TextField(index=True, unique=True)
61 | tags = peewee.CharField()
62 |
63 | def __repr__(self):
64 |         return "<Backups: {0}>".format(self._data.get("stored_filename"))
65 |
66 | @classmethod
67 | def match_filename(cls, filename, destination, **kwargs):
68 | conf = config
69 | if kwargs.get("config"):
70 | conf = load_config(kwargs.get("config"))
71 |
72 | profile = conf.get(kwargs.get("profile", "default"))
73 |
74 | s3_key = hashlib.sha512(profile.get("access_key") +
75 | profile.get("s3_bucket")).hexdigest()
76 | glacier_key = hashlib.sha512(profile.get("access_key") +
77 | profile.get("glacier_vault")).hexdigest()
78 |
79 | try:
80 | fquery = "{0}*".format(filename)
81 | query = Backups.select().where(Backups.filename % fquery |
82 | Backups.stored_filename % fquery,
83 | Backups.backend == destination,
84 | Backups.backend_hash << [s3_key, glacier_key])
85 | query = query.order_by(Backups.backup_date.desc())
86 | return query.get()
87 | except Backups.DoesNotExist:
88 | return
89 |
90 | @classmethod
91 | def search(cls, query="", destination="", **kwargs):
92 | conf = config
93 | if kwargs.get("config"):
94 | conf = load_config(kwargs.get("config"))
95 |
96 | if not destination:
97 | destination = ["s3", "glacier"]
98 | if isinstance(destination, (str, unicode)):
99 | destination = [destination]
100 |
101 | query = "*{0}*".format(query)
102 | wheres = []
103 |
104 | if kwargs.get("profile"):
105 | profile = conf.get(kwargs.get("profile"))
106 |
107 | s3_key = hashlib.sha512(profile.get("access_key") +
108 | profile.get("s3_bucket")).hexdigest()
109 | glacier_key = hashlib.sha512(profile.get("access_key") +
110 | profile.get("glacier_vault")).hexdigest()
111 |
112 | wheres.append(Backups.backend_hash << [s3_key, glacier_key])
113 |
114 | wheres.append(Backups.filename % query |
115 | Backups.stored_filename % query)
116 | wheres.append(Backups.backend << destination)
117 | wheres.append(Backups.is_deleted == False)
118 |
119 | older_than = kwargs.get("older_than")
120 | if older_than:
121 | wheres.append(Backups.backup_date < older_than)
122 |
123 | backup_date = kwargs.get("backup_date")
124 | if backup_date:
125 | wheres.append(Backups.backup_date == backup_date)
126 |
127 | last_updated_gt = kwargs.get("last_updated_gt")
128 | if last_updated_gt:
129 | wheres.append(Backups.last_updated >= last_updated_gt)
130 |
131 | tags = kwargs.get("tags", [])
132 | if tags:
133 | if isinstance(tags, (str, unicode)):
134 | tags = tags.split()
135 | tags_query = ["Backups.tags % '*{0}*'".format(tag) for tag in tags]
136 | tags_query = eval("({0})".format(" and ".join(tags_query)))
137 | wheres.append(tags_query)
138 |
139 | return Backups.select().where(*wheres).order_by(Backups.last_updated.desc())
140 |
141 | def set_deleted(self):
142 | self.is_deleted = True
143 | self.last_updated = int(datetime.utcnow().strftime("%s"))
144 | self.save()
145 |
146 | def is_encrypted(self):
147 | return self.stored_filename.endswith(".enc") or self.metadata.get("is_enc")
148 |
149 | def is_gzipped(self):
150 | return self.metadata.get("is_gzipped")
151 |
152 | @classmethod
153 | def upsert(cls, **backup):
154 | q = Backups.select()
155 | q = q.where(Backups.stored_filename == backup.get("stored_filename"))
156 | if q.count():
157 |             stored_filename = backup.pop("stored_filename")
158 |             Backups.update(**backup).where(Backups.stored_filename == stored_filename).execute()
159 | else:
160 | Backups.create(**backup)
161 |
162 | class Meta:
163 | db_table = 'backups'
164 |
165 | class Sync:
166 | pk = 'stored_filename'
167 |
168 |
169 | class Config(BaseModel):
170 | """key => value config store."""
171 | key = peewee.CharField(index=True, unique=True)
172 | value = JsonField()
173 |
174 | @classmethod
175 | def get_key(self, key, default=None):
176 | try:
177 | return Config.get(Config.key == key).value
178 | except Config.DoesNotExist:
179 | return default
180 |
181 | @classmethod
182 | def set_key(self, key, value=None):
183 | q = Config.select().where(Config.key == key)
184 | if q.count():
185 | Config.update(value=value).where(Config.key == key).execute()
186 | else:
187 | Config.create(key=key, value=value)
188 |
189 | class Meta:
190 | db_table = 'config'
191 |
192 |
193 | class Inventory(SyncedModel):
194 | """Filename => archive_id mapping for glacier archives."""
195 | archive_id = peewee.CharField(index=True, unique=True)
196 | filename = peewee.CharField(index=True)
197 |
198 | @classmethod
199 | def get_archive_id(self, filename):
200 | return Inventory.get(Inventory.filename == filename).archive_id
201 |
202 | class Meta:
203 | db_table = 'inventory'
204 |
205 | class Sync:
206 | pk = 'filename'
207 |
208 |
209 | class Jobs(SyncedModel):
210 | """filename => job_id mapping for glacier archives."""
211 | filename = peewee.CharField(index=True)
212 | job_id = peewee.CharField()
213 |
214 | @classmethod
215 | def get_job_id(cls, filename):
216 | """Try to retrieve the job id for a filename.
217 |
218 | :type filename: str
219 | :param filename: Filename
220 |
221 | :rtype: str
222 | :return: Job Id for the given filename
223 | """
224 | try:
225 | return Jobs.get(Jobs.filename == filename).job_id
226 | except Jobs.DoesNotExist:
227 | return
228 |
229 | @classmethod
230 | def update_job_id(cls, filename, job_id):
231 | """Update job_id for the given filename.
232 |
233 | :type filename: str
234 | :param filename: Filename
235 |
236 | :type job_id: str
237 | :param job_id: New job_id
238 |
239 | :return: None
240 | """
241 | q = Jobs.select().where(Jobs.filename == filename)
242 | if q.count():
243 | Jobs.update(job_id=job_id).where(Jobs.filename == filename).execute()
244 | else:
245 | Jobs.create(filename=filename, job_id=job_id)
246 |
247 | class Meta:
248 | db_table = 'jobs'
249 |
250 |
251 | for table in [Backups, Jobs, Inventory, Config, History]:
252 | if not table.table_exists():
253 | table.create_table()
254 |
255 |
256 | def backup_sqlite(filename):
257 | """Backup bakthat SQLite database to file."""
258 | con = sqlite3.connect(DATABASE)
259 | with open(filename, 'w') as f:
260 | for line in con.iterdump():
261 | f.write("{0}\n".format(line))
262 |
263 |
264 | def restore_sqlite(filename):
265 | """Restore a dump into bakthat SQLite database."""
266 | con = sqlite3.connect(DATABASE)
267 | con.executescript(open(filename).read())
268 |
269 |
270 | def switch_from_dt_to_peewee():
271 | if os.path.isfile(os.path.expanduser("~/.bakthat.dt")):
272 | import dumptruck
273 | import time
274 | dt = dumptruck.DumpTruck(dbname=os.path.expanduser("~/.bakthat.dt"), vars_table="config")
275 | for backup in dt.dump("backups"):
276 | try:
277 | backup["tags"] = " ".join(backup.get("tags", []))
278 | Backups.upsert(**backup)
279 | time.sleep(0.1)
280 | except Exception, exc:
281 | print exc
282 | for ivt in dt.dump("inventory"):
283 | try:
284 | Inventory.create(filename=ivt["filename"],
285 | archive_id=ivt["archive_id"])
286 | except Exception, exc:
287 | print exc
288 | os.remove(os.path.expanduser("~/.bakthat.dt"))
289 |
290 | switch_from_dt_to_peewee()
291 |
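292 | # Usage sketch (hypothetical, not part of the original module): the models
293 | # can be queried directly through the peewee API, e.g.:
294 | #
295 | #     for b in Backups.search(query="mydir", destination="s3", tags="mysql"):
296 | #         print b.stored_filename, b.size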
--------------------------------------------------------------------------------
/docs/developer_guide.rst:
--------------------------------------------------------------------------------
1 | .. _developer-guide:
2 |
3 | Developer's Guide
4 | =================
5 |
6 | Low level API
7 | -------------
8 |
9 | You can access low level API (the same used when using bakthat in command line mode) from **bakthat** root module.
10 |
11 | .. code-block:: python
12 |
13 | import bakthat
14 |
15 |     # rotation is optional
16 | bakthat_conf = {'access_key': 'YOURACCESSKEY',
17 | 'secret_key': 'YOURSECRETKEY',
18 | 'glacier_vault': 'yourvault',
19 | 's3_bucket': 'yours3bucket',
20 | 'region_name': 'eu-west-1',
21 | 'rotation': {'days': 7,
22 | 'first_week_day': 5,
23 | 'months': 6,
24 | 'weeks': 6}}
25 |
26 | bakthat.backup("/dir/i/wanto/bak", conf=bakthat_conf)
27 |
28 | bakthat.backup("/dir/i/wanto/bak", conf=bakthat_conf, destination="glacier")
29 |
30 |     # or, if you have generated the configuration file with "bakthat configure" or created ~/.bakthat.yml:
31 | bakthat.backup("/dir/i/wanto/bak")
32 |
33 | bakthat.ls()
34 |
35 | # restore in the current working directory
36 | bakthat.restore("bak", conf=bakthat_conf)
37 |
38 |
39 | Event Hooks
40 | ~~~~~~~~~~~
41 |
42 | .. versionadded:: 0.6.0
43 |
44 | You can configure hooks to be executed on the following events:
45 |
46 | * before_backup
47 | * on_backup
48 | * before_restore
49 | * on_restore
50 | * before_delete
51 | * on_delete
52 | * before_delete_older_than
53 | * on_delete_older_than
54 | * before_rotate_backups
55 | * on_rotate_backups
56 |
57 | So, **before_** events are executed at the beginning of the action, and **on_** events are executed just before the end.
58 |
59 | For each action, a **session_id** (a uuid4) is assigned, so you can match up **before_** and **on_** events.
60 |
61 | Every callback receives the session_id as its first argument, and **on_** callbacks also receive the result of the action, most of the time a Backup object or a list of Backup objects, depending on the context.
62 |
63 | .. code-block:: python
64 |
65 | from bakthat import backup, events
66 |
67 | def before_backup_callback(session_id):
68 | print session_id, "before_backup"
69 |
70 | def on_backup_callback(session_id, backup):
71 | print session_id, "on_backup", backup
72 |
73 | events.before_backup += before_backup_callback
74 | events.on_backup += on_backup_callback
75 |
76 |     backup("/home/thomas/mydir")
77 |
78 |
79 | Bakthat makes use of `Events <https://github.com/pyeve/events>`_ to handle all the "event things".
80 |
81 | Plugins
82 | -------
83 |
84 | .. versionadded:: 0.6.0
85 |
86 | You can create plugins to extend bakthat features, all you need to do is to subclass ``bakthat.plugin.Plugin`` and implement an ``activate`` (and optionally ``deactivate``, executed just before exiting) method.
87 |
88 | The ``activate`` and ``deactivate`` methods are each called only once: ``activate`` is called when the plugin is initialized, and ``deactivate`` (you can see it as a cleanup function) is called at exit.
89 |
90 | .. note::
91 |
92 |     For now, you can't create new commands with a plugin (maybe in the future).
93 |
94 |
95 | By default, plugins are stored in **~/.bakthat_plugins/**, but you can change this location with the ``plugins_dir`` setting in your configuration file.
96 |
97 | .. code-block:: yaml
98 |
99 | default:
100 | plugins_dir: /home/thomas/.bakthat_plugins
101 |
102 |
103 | And to enable plugins, add them to the ``plugins`` array:
104 |
105 | .. code-block:: yaml
106 |
107 | default:
108 | plugins: [test_plugin.TestPlugin, filename.MyPlugin]
109 |
110 |
111 | You can access the **raw profile configuration** using ``self.conf``, and the **bakthat logger** using ``self.log`` (e.g. ``self.log.info("hello")``) in any method.
112 | You can also hook events directly on ``self``, like ``self.on_backup += mycallback``.
113 |
114 | Your First Plugin
115 | ~~~~~~~~~~~~~~~~~
116 |
117 | Here is a basic plugin example, a ``TimerPlugin`` in **test_plugin.py**:
118 |
119 | .. code-block:: python
120 |
121 | import time
122 | from bakthat.plugin import Plugin
123 |
124 | class TestPlugin(Plugin):
125 | def activate(self):
126 | self.start = {}
127 | self.stop = {}
128 | self.before_backup += self.before_backup_callback
129 | self.on_backup += self.on_backup_callback
130 |
131 | def before_backup_callback(self, session_id):
132 | self.start[session_id] = time.time()
133 | self.log.info("before_backup {0}".format(session_id))
134 |
135 | def on_backup_callback(self, session_id, backup):
136 | self.stop[session_id] = time.time()
137 | self.log.info("on_backup {0} {1}".format(session_id, backup))
138 | self.log.info("Job duration: {0}s".format(self.stop[session_id] - self.start[session_id]))
139 |
140 |
141 | Now, we can enable it:
142 |
143 | .. code-block:: yaml
144 |
145 | default:
146 | plugins: [test_plugin.TestPlugin]
147 |
148 |
149 | Finally, we can check that our plugin is actually working:
150 |
151 | ::
152 |
153 | $ bakthat backup mydir
154 | before_backup 4028dfc7-7a17-4a99-b3fe-88f6e4879bda
155 | Backing up /home/thomas/mydir
156 | Password (blank to disable encryption):
157 | Compressing...
158 | Uploading...
159 | Upload completion: 0%
160 | Upload completion: 100%
161 | Upload completion: 0%
162 | Upload completion: 100%
163 | on_backup 4028dfc7-7a17-4a99-b3fe-88f6e4879bda
164 | Job duration: 4.34407806396s
165 |
166 | Monkey Patching
167 | ~~~~~~~~~~~~~~~
168 |
169 | With plugins, you have the ability to extend or modify everything in the ``activate`` method.
170 |
171 | Here is an example, which updates the ``Backups`` model at runtime:
172 |
173 | .. code-block:: python
174 |
175 | from bakthat.plugin import Plugin
176 | from bakthat.models import Backups
177 |
178 |
179 | class MyBackups(Backups):
180 | @classmethod
181 | def my_custom_method(self):
182 | return True
183 |
184 |
185 | class ChangeModelPlugin(Plugin):
186 | """ A basic plugin implementation. """
187 | def activate(self):
188 | global Backups
189 | self.log.info("Replace Backups")
190 | Backups = MyBackups
191 |
192 |
193 | More on event hooks
194 | ~~~~~~~~~~~~~~~~~~~
195 |
196 | See **Event Hooks** for more information and the `Events <https://github.com/pyeve/events>`_ documentation.
197 |
198 |
199 | Helpers
200 | -------
201 |
202 | BakHelper
203 | ~~~~~~~~~
204 |
205 | BakHelper is a context manager that makes creating backup scripts with bakthat an easy task (and it works well with `sh <https://github.com/amoffat/sh>`_).
206 |
207 | It takes care of creating a temporary directory and making it the current working directory, so you can just dump the files to back up, or call system command line tools like mysqldump/mongodump with the help of sh.
208 |
209 | Here is a minimal example.
210 |
211 | .. code-block:: python
212 |
213 | import logging
214 | logging.basicConfig(level=logging.INFO)
215 |
216 | from bakthat.helper import BakHelper
217 |
218 | with BakHelper("mybackup", tags=["mybackup"]) as bh:
219 |
220 | with open("myfile.txt", "w") as f:
221 | f.write("mydata")
222 |
223 | bh.backup()
224 | bh.rotate()
225 |
226 |
227 | Now test the script:
228 |
229 | ::
230 |
231 | $ python mybackupscript.py
232 | INFO:root:Backing up /tmp/mybackup_JVTGOM
233 | INFO:root:Compressing...
234 | INFO:root:Uploading...
235 | INFO:bakthat.backends:Upload completion: 0%
236 | INFO:bakthat.backends:Upload completion: 100%
237 |
238 | You can also use it like a normal class:
239 |
240 | .. code-block:: python
241 |
242 | import logging
243 | import sh
244 | logging.basicConfig(level=logging.INFO)
245 |
246 | from bakthat.helper import BakHelper
247 |
248 | bakthat_conf = {'access_key': 'YOURACCESSKEY',
249 | 'secret_key': 'YOURSECRETKEY',
250 | 'glacier_vault': 'yourvault',
251 | 's3_bucket': 'yours3bucket',
252 | 'region_name': 'eu-west-1',
253 | 'rotation': {'days': 7,
254 | 'first_week_day': 5,
255 | 'months': 6,
256 | 'weeks': 6}}
257 |
258 | bh = BakHelper(conf=bakthat_conf)
259 | with open("myfile.txt", "w") as f:
260 | f.write("mydata")
261 | bh.backup("myfile.txt")
262 | bh.rotate("myfile.txt")
263 |
264 |
265 | Create a MySQL backup script with BakHelper
266 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
267 |
268 | Here is a MySQL backup script; it makes use of `sh <https://github.com/amoffat/sh>`_ to call the system **mysqldump**.
269 |
270 | .. seealso::
271 |
272 | You can also check out a `MongoDB backup script example here `_.
273 |
274 | .. code-block:: python
275 |
276 | import logging
277 | import sh
278 | logging.basicConfig(level=logging.INFO)
279 |
280 | from bakthat.helper import BakHelper
281 |
282 | BACKUP_NAME = "myhost_mysql"
283 | BACKUP_PASSWORD = "mypassword"
284 | MYSQL_USER = "root"
285 | MYSQL_PASSWORD = "mypassword"
286 |
287 | with BakHelper(BACKUP_NAME, password=BACKUP_PASSWORD, tags=["mysql"]) as bh:
288 | sh.mysqldump("-p{0}".format(MYSQL_PASSWORD),
289 | u=MYSQL_USER,
290 | all_databases=True,
291 | _out="dump.sql")
292 | bh.backup()
293 | bh.rotate()
294 |
295 |
296 | .. _keyvalue:
297 |
298 | KeyValue
299 | ~~~~~~~~
300 |
301 | .. versionadded:: 0.4.5
302 |
303 | KeyValue is a simple "key value store" that allows you to quickly store/retrieve strings/objects on Amazon S3.
304 | All values are serialized with json, so **you can directly back up any json-serializable value**.
305 |
306 | It can also take care of compressing (with gzip) and encrypting (optional).
307 | 
308 | Compression is enabled by default; you can disable it by passing compress=False when setting a key.
309 |
310 | Also, backups stored with KeyValue can be restored with bakthat restore and show up in bakthat show.
311 |
312 | .. code-block:: python
313 |
314 | from bakthat.helper import KeyValue
315 | import json
316 |
317 | bakthat_conf = {'access_key': 'YOURACCESSKEY',
318 | 'secret_key': 'YOURSECRETKEY',
319 | 'glacier_vault': 'yourvault',
320 | 's3_bucket': 'yours3bucket',
321 |                     'region_name': 'us-east-1'}
322 |
323 | kv = KeyValue(conf=bakthat_conf)
324 |
325 | mydata = {"some": "data"}
326 | kv.set_key("mykey", mydata)
327 |
328 | mydata_restored = kv.get_key("mykey")
329 |
330 |     data_url = kv.get_key_url("mykey", 60)  # url expires in 60 seconds
331 |
332 | kv.delete_key("mykey")
333 |
334 | kv.set_key("my_encrypted_key", "myvalue", password="mypassword")
335 | kv.get_key("my_encrypted_key", password="mypassword")
336 |
337 | # You can also disable gzip compression if you want:
338 | kv.set_key("my_non_compressed_key", {"my": "data"}, compress=False)
339 |
340 |
341 | Accessing bakthat SQLite database
342 | ---------------------------------
343 |
344 | Since bakthat stores custom backup metadata (see :ref:`stored-metadata`) in a local SQLite database, you can execute custom SQL queries against it.
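345 | 
346 | For example, here is a minimal sketch (assuming the default database location, ``~/.bakthat.sqlite``; adjust the path if your setup differs) that lists every backup not flagged as deleted, using the standard library ``sqlite3`` module:
347 | 
348 | .. code-block:: python
349 | 
350 |     import os
351 |     import sqlite3
352 | 
353 |     # Path assumed from the default bakthat configuration.
354 |     con = sqlite3.connect(os.path.expanduser("~/.bakthat.sqlite"))
355 | 
356 |     # The "backups" table and its columns come from the Backups model.
357 |     query = "SELECT stored_filename, size, backend FROM backups WHERE is_deleted = 0"
358 |     for stored_filename, size, backend in con.execute(query):
359 |         print stored_filename, size, backend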
--------------------------------------------------------------------------------
/bakthat/backends.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 | import tempfile
3 | import os
4 | import logging
5 | import shelve
6 | import json
7 | import socket
8 | import httplib
9 |
10 | import boto
11 | from boto.s3.key import Key
12 | import math
13 | from boto.glacier.exceptions import UnexpectedHTTPResponseError
14 | from boto.exception import S3ResponseError
15 |
16 | from bakthat.conf import config, DEFAULT_LOCATION, CONFIG_FILE
17 | from bakthat.models import Inventory, Jobs
18 |
19 | log = logging.getLogger(__name__)
20 |
21 |
22 | class glacier_shelve(object):
23 | """Context manager for shelve.
24 |
25 | Deprecated, here for backward compatibility.
26 |
27 | """
28 |
29 | def __enter__(self):
30 | self.shelve = shelve.open(os.path.expanduser("~/.bakthat.db"))
31 |
32 | return self.shelve
33 |
34 | def __exit__(self, exc_type, exc_value, traceback):
35 | self.shelve.close()
36 |
37 |
38 | class BakthatBackend(object):
39 | """Handle Configuration for Backends.
40 |
41 |     The profile is only taken into account when no custom conf is provided.
42 |
43 | :type conf: dict
44 | :param conf: Custom configuration
45 |
46 | :type profile: str
47 | :param profile: Profile name
48 |
49 | """
50 | def __init__(self, conf={}, profile="default"):
51 | self.conf = conf
52 | if not conf:
53 | self.conf = config.get(profile)
54 | if not self.conf:
55 | log.error("No {0} profile defined in {1}.".format(profile, CONFIG_FILE))
56 | if not "access_key" in self.conf or not "secret_key" in self.conf:
57 | log.error("Missing access_key/secret_key in {0} profile ({1}).".format(profile, CONFIG_FILE))
58 |
59 |
60 | class RotationConfig(BakthatBackend):
61 | """Hold backups rotation configuration."""
62 | def __init__(self, conf={}, profile="default"):
63 | BakthatBackend.__init__(self, conf, profile)
64 | self.conf = self.conf.get("rotation", {})
65 |
66 |
67 | class S3Backend(BakthatBackend):
68 | """Backend to handle S3 upload/download."""
69 | def __init__(self, conf={}, profile="default"):
70 | BakthatBackend.__init__(self, conf, profile)
71 |
72 | con = boto.connect_s3(self.conf["access_key"], self.conf["secret_key"])
73 |
74 | region_name = self.conf["region_name"]
75 | if region_name == DEFAULT_LOCATION:
76 | region_name = ""
77 |
78 | try:
79 | self.bucket = con.get_bucket(self.conf["s3_bucket"])
80 | except S3ResponseError, e:
81 | if e.code == "NoSuchBucket":
82 | self.bucket = con.create_bucket(self.conf["s3_bucket"], location=region_name)
83 | else:
84 | raise e
85 |
86 | self.container = self.conf["s3_bucket"]
87 | self.container_key = "s3_bucket"
88 |
89 | def download(self, keyname):
90 | k = Key(self.bucket)
91 | k.key = keyname
92 |
93 | encrypted_out = tempfile.TemporaryFile()
94 | k.get_contents_to_file(encrypted_out)
95 | encrypted_out.seek(0)
96 |
97 | return encrypted_out
98 |
99 | def cb(self, complete, total):
100 | """Upload callback to log upload percentage."""
101 | percent = int(complete * 100.0 / total)
102 | log.info("Upload completion: {0}%".format(percent))
103 |
104 | def upload(self, keyname, filename, **kwargs):
105 | k = Key(self.bucket)
106 | k.key = keyname
107 | upload_kwargs = {"reduced_redundancy": kwargs.get("s3_reduced_redundancy", False)}
108 | if kwargs.get("cb", True):
109 |             upload_kwargs.update(dict(cb=self.cb, num_cb=10))
110 | k.set_contents_from_filename(filename, **upload_kwargs)
111 | k.set_acl("private")
112 |
113 | def ls(self):
114 | return [key.name for key in self.bucket.get_all_keys()]
115 |
116 | def delete(self, keyname):
117 | k = Key(self.bucket)
118 | k.key = keyname
119 | self.bucket.delete_key(k)
120 |
121 |
122 | class GlacierBackend(BakthatBackend):
123 | """Backend to handle Glacier upload/download."""
124 | def __init__(self, conf={}, profile="default"):
125 | BakthatBackend.__init__(self, conf, profile)
126 |
127 | con = boto.connect_glacier(aws_access_key_id=self.conf["access_key"], aws_secret_access_key=self.conf["secret_key"], region_name=self.conf["region_name"])
128 |
129 | self.vault = con.create_vault(self.conf["glacier_vault"])
130 | self.backup_key = "bakthat_glacier_inventory"
131 | self.container = self.conf["glacier_vault"]
132 | self.container_key = "glacier_vault"
133 |
134 | def load_archives(self):
135 | return []
136 |
137 | def backup_inventory(self):
138 | """Backup the local inventory from shelve as a json string to S3."""
139 | if config.get("aws", "s3_bucket"):
140 | archives = self.load_archives()
141 |
142 | s3_bucket = S3Backend(self.conf).bucket
143 | k = Key(s3_bucket)
144 | k.key = self.backup_key
145 |
146 | k.set_contents_from_string(json.dumps(archives))
147 |
148 | k.set_acl("private")
149 |
150 | def load_archives_from_s3(self):
151 | """Fetch latest inventory backup from S3."""
152 | s3_bucket = S3Backend(self.conf).bucket
153 | try:
154 | k = Key(s3_bucket)
155 | k.key = self.backup_key
156 |
157 | return json.loads(k.get_contents_as_string())
158 | except S3ResponseError, exc:
159 | log.error(exc)
160 | return {}
161 |
162 | # def restore_inventory(self):
163 | # """Restore inventory from S3 to DumpTruck."""
164 | # if config.get("aws", "s3_bucket"):
165 | # loaded_archives = self.load_archives_from_s3()
166 |
167 | # # TODO faire le restore
168 | # else:
169 | # raise Exception("You must set s3_bucket in order to backup/restore inventory to/from S3.")
170 |
171 | def restore_inventory(self):
172 | """Restore inventory from S3 to local shelve."""
173 | if config.get("aws", "s3_bucket"):
174 | loaded_archives = self.load_archives_from_s3()
175 |
176 | with glacier_shelve() as d:
177 | archives = {}
178 | for a in loaded_archives:
179 | print a
180 | archives[a["filename"]] = a["archive_id"]
181 | d["archives"] = archives
182 | else:
183 | raise Exception("You must set s3_bucket in order to backup/restore inventory to/from S3.")
184 |
185 | def upload(self, keyname, filename, **kwargs):
186 | archive_id = self.vault.concurrent_create_archive_from_file(filename, keyname)
187 | Inventory.create(filename=keyname, archive_id=archive_id)
188 |
189 | #self.backup_inventory()
190 |
191 | def get_job_id(self, filename):
192 | """Get the job_id corresponding to the filename.
193 |
194 | :type filename: str
195 | :param filename: Stored filename.
196 |
197 | """
198 | return Jobs.get_job_id(filename)
199 |
200 | def delete_job(self, filename):
201 | """Delete the job entry for the filename.
202 |
203 | :type filename: str
204 | :param filename: Stored filename.
205 |
206 | """
207 | job = Jobs.get(Jobs.filename == filename)
208 | job.delete_instance()
209 |
210 | def download(self, keyname, job_check=False):
211 | """Initiate a Job, check its status, and download the archive if it's completed."""
212 | archive_id = Inventory.get_archive_id(keyname)
213 | if not archive_id:
214 |             log.error("{0} not found !".format(keyname))
215 | # check if the file exist on S3 ?
216 | return
217 |
218 | job = None
219 |
220 | job_id = Jobs.get_job_id(keyname)
221 | log.debug("Job: {0}".format(job_id))
222 |
223 | if job_id:
224 | try:
225 | job = self.vault.get_job(job_id)
226 | except UnexpectedHTTPResponseError: # Return a 404 if the job is no more available
227 | self.delete_job(keyname)
228 |
229 | if not job:
230 | job = self.vault.retrieve_archive(archive_id)
231 | job_id = job.id
232 | Jobs.update_job_id(keyname, job_id)
233 |
234 | log.info("Job {action}: {status_code} ({creation_date}/{completion_date})".format(**job.__dict__))
235 |
236 | if job.completed:
237 | log.info("Downloading...")
238 | encrypted_out = tempfile.TemporaryFile()
239 |
240 | # Boto related, download the file in chunk
241 | chunk_size = 4 * 1024 * 1024
242 | num_chunks = int(math.ceil(job.archive_size / float(chunk_size)))
243 | job._download_to_fileob(encrypted_out, num_chunks, chunk_size, True, (socket.error, httplib.IncompleteRead))
244 |
245 | encrypted_out.seek(0)
246 | return encrypted_out
247 | else:
248 | log.info("Not completed yet")
249 | if job_check:
250 | return job
251 | return
252 |
253 | def retrieve_inventory(self, jobid):
254 |         """Initiate a job to retrieve Glacier inventory or output inventory."""
255 | if jobid is None:
256 | return self.vault.retrieve_inventory(sns_topic=None, description="Bakthat inventory job")
257 | else:
258 | return self.vault.get_job(jobid)
259 |
260 | def retrieve_archive(self, archive_id, jobid):
261 |         """Initiate a job to retrieve Glacier archive or download archive."""
262 | if jobid is None:
263 | return self.vault.retrieve_archive(archive_id, sns_topic=None, description='Retrieval job')
264 | else:
265 | return self.vault.get_job(jobid)
266 |
267 | def ls(self):
268 | return [ivt.filename for ivt in Inventory.select()]
269 |
270 | def delete(self, keyname):
271 | archive_id = Inventory.get_archive_id(keyname)
272 | if archive_id:
273 | self.vault.delete_archive(archive_id)
274 | archive_data = Inventory.get(Inventory.filename == keyname)
275 | archive_data.delete_instance()
276 |
277 | #self.backup_inventory()
278 |
279 | def upgrade_from_shelve(self):
280 | try:
281 | with glacier_shelve() as d:
282 | archives = d["archives"]
283 | if "archives" in d:
284 | for key, archive_id in archives.items():
285 | #print {"filename": key, "archive_id": archive_id}
286 | Inventory.create(**{"filename": key, "archive_id": archive_id})
287 | del archives[key]
288 | d["archives"] = archives
289 | except Exception, exc:
290 | log.exception(exc)
291 |
292 | class SwiftBackend(BakthatBackend):
293 | """Backend to handle OpenStack Swift upload/download."""
294 | def __init__(self, conf={}, profile="default"):
295 | BakthatBackend.__init__(self, conf, profile)
296 |
297 | from swiftclient import Connection, ClientException
298 |
299 | self.con = Connection(self.conf["auth_url"], self.conf["access_key"],
300 | self.conf["secret_key"],
301 | auth_version=self.conf["auth_version"],
302 | insecure=True)
303 |
304 | region_name = self.conf["region_name"]
305 | if region_name == DEFAULT_LOCATION:
306 | region_name = ""
307 |
308 | try:
309 | self.con.head_container(self.conf["s3_bucket"])
310 | except ClientException, e:
311 | self.con.put_container(self.conf["s3_bucket"])
312 |
313 | self.container = self.conf["s3_bucket"]
314 | self.container_key = "s3_bucket"
315 |
316 | def download(self, keyname):
317 | headers, data = self.con.get_object(self.container, keyname,
318 | resp_chunk_size=65535)
319 |
320 | encrypted_out = tempfile.TemporaryFile()
321 | for chunk in data:
322 | encrypted_out.write(chunk)
323 | encrypted_out.seek(0)
324 |
325 | return encrypted_out
326 |
327 | def cb(self, complete, total):
328 |         """Upload callback to log upload percentage.
329 |         Note: the Swift client does not support upload callbacks."""
330 | percent = int(complete * 100.0 / total)
331 | log.info("Upload completion: {0}%".format(percent))
332 |
333 | def upload(self, keyname, filename, **kwargs):
334 |         with open(filename, "rb") as fp:
335 |             self.con.put_object(self.container, keyname, fp)
336 |
337 | def ls(self):
338 | headers, objects = self.con.get_container(self.conf["s3_bucket"])
339 | return [key['name'] for key in objects]
340 |
341 | def delete(self, keyname):
342 | self.con.delete_object(self.container, keyname)
343 |
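344 | # Usage sketch (hypothetical, not part of the original module): the backends
345 | # share the same upload/download/ls/delete interface. Assuming a "default"
346 | # profile with valid credentials in ~/.bakthat.yml:
347 | #
348 | #     backend = S3Backend(profile="default")
349 | #     backend.upload("mykey", "/tmp/myfile")
350 | #     print backend.ls()
351 | #     backend.delete("mykey")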
--------------------------------------------------------------------------------
/bakthat/helper.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 | import logging
3 | import tempfile
4 | import sh
5 | import os
6 | import shutil
7 | import time
8 | import hashlib
9 | import json
10 | from StringIO import StringIO
11 | from gzip import GzipFile
12 |
13 | from beefish import encrypt, decrypt
14 | from boto.s3.key import Key
15 |
16 | import bakthat
17 | from bakthat.conf import DEFAULT_DESTINATION
18 | from bakthat.backends import S3Backend
19 | from bakthat.models import Backups
20 |
21 | log = logging.getLogger(__name__)
22 |
23 |
24 | class KeyValue(S3Backend):
25 | """A Key Value store to store/retrieve object/string on S3.
26 |
27 | Data is gzipped and json encoded before uploading,
28 | compression can be disabled.
29 | """
30 | def __init__(self, conf={}, profile="default"):
31 | S3Backend.__init__(self, conf, profile)
32 | self.profile = profile
33 |
34 | def set_key(self, keyname, value, **kwargs):
35 | """Store a string as keyname in S3.
36 |
37 | :type keyname: str
38 | :param keyname: Key name
39 |
40 | :type value: str
41 | :param value: Value to save, will be json encoded.
42 |
43 |         :type compress: bool
44 | :keyword compress: Compress content with gzip,
45 | True by default
46 | """
47 | k = Key(self.bucket)
48 | k.key = keyname
49 |
50 | backup_date = int(time.time())
51 | backup = dict(filename=keyname,
52 | stored_filename=keyname,
53 | backup_date=backup_date,
54 | last_updated=backup_date,
55 | backend="s3",
56 | is_deleted=False,
57 | tags="",
58 | metadata={"KeyValue": True,
59 | "is_enc": False,
60 | "is_gzipped": False})
61 |
62 | fileobj = StringIO(json.dumps(value))
63 |
64 | if kwargs.get("compress", True):
65 | backup["metadata"]["is_gzipped"] = True
66 | out = StringIO()
67 | f = GzipFile(fileobj=out, mode="w")
68 | f.write(fileobj.getvalue())
69 | f.close()
70 | fileobj = StringIO(out.getvalue())
71 |
72 | password = kwargs.get("password")
73 | if password:
74 | backup["metadata"]["is_enc"] = True
75 | out = StringIO()
76 | encrypt(fileobj, out, password)
77 | fileobj = out
78 | # Creating the object on S3
79 | k.set_contents_from_string(fileobj.getvalue())
80 | k.set_acl("private")
81 | backup["size"] = k.size
82 |
83 | access_key = self.conf.get("access_key")
84 | container_key = self.conf.get(self.container_key)
85 | backup["backend_hash"] = hashlib.sha512(access_key + container_key).hexdigest()
86 | Backups.upsert(**backup)
87 |
88 | def get_key(self, keyname, **kwargs):
89 | """Return the object stored under keyname.
90 |
91 | :type keyname: str
92 | :param keyname: Key name
93 |
94 | :type default: str
95 | :keyword default: Default value if key name does not exist, None by default
96 |
97 | :rtype: str
98 | :return: The key content as string, or default value.
99 | """
100 | k = Key(self.bucket)
101 | k.key = keyname
102 | if k.exists():
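# "%" is peewee's LIKE operator, matching against the stored_filename column.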
103 | backup = Backups.get(Backups.stored_filename % keyname, Backups.backend == "s3")
104 | fileobj = StringIO(k.get_contents_as_string())
105 |
106 | if backup.is_encrypted():
107 | out = StringIO()
108 | decrypt(fileobj, out, kwargs.get("password"))
109 | fileobj = out
110 | fileobj.seek(0)
111 |
112 | if backup.is_gzipped():
113 | f = GzipFile(fileobj=fileobj, mode="r")
114 | out = f.read()
115 | f.close()
116 | fileobj = StringIO(out)
117 | return json.loads(fileobj.getvalue())
118 | return kwargs.get("default")
119 |
120 | def delete_key(self, keyname):
121 | """Delete the given key.
122 |
123 | :type keyname: str
124 | :param keyname: Key name
125 | """
126 | k = Key(self.bucket)
127 | k.key = keyname
128 | if k.exists():
129 | k.delete()
130 | backup = Backups.match_filename(keyname, "s3", profile=self.profile)
131 | if backup:
132 | backup.set_deleted()
133 | return True
134 |
135 | def get_key_url(self, keyname, expires_in, method="GET"):
136 | """Generate a URL for the keyname object.
137 |
138 | Be careful, the downloaded content is JSON encoded.
139 |
140 | :type keyname: str
141 | :param keyname: Key name
142 |
143 | :type expires_in: int
144 | :param expires_in: Number of seconds before the link expires.
145 |
146 | :type method: str
147 | :param method: HTTP method for access
148 |
149 | :rtype: str
150 | :return: The URL to download the content of the given keyname
151 | """
152 | k = Key(self.bucket)
153 | k.key = keyname
154 | if k.exists():
155 | return k.generate_url(expires_in, method)
156 |
157 |
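# A minimal usage sketch for KeyValue (assumes a configured "default"
# profile with S3 credentials; the key names are illustrative):
#
#     kv = KeyValue()
#     kv.set_key("app/state", {"counter": 1})
#     kv.get_key("app/state")                  # => {u"counter": 1}
#     kv.get_key("missing", default="n/a")     # => "n/a"
#     kv.get_key_url("app/state", 60)          # temporary signed URL
#     kv.delete_key("app/state")
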
158 | class BakHelper:
159 | """Helper that makes building scripts with bakthat better faster stronger.
160 |
161 | Designed to be used as a context manager.
162 |
163 | :type backup_name: str
164 | :param backup_name: Backup name,
165 | also used as the prefix for the created temporary directory.
166 |
167 | :type destination: str
168 | :keyword destination: Destination (s3|glacier|swift)
169 |
170 | :type password: str
171 | :keyword password: Password (an empty string disables encryption; encryption is disabled by default)
172 |
173 | :type profile: str
174 | :keyword profile: Profile name, only valid if no custom conf is provided
175 |
176 | :type conf: dict
177 | :keyword conf: Override profiles configuration
178 |
179 | :type tags: list
180 | :keyword tags: List of tags
181 | """
182 | def __init__(self, backup_name, **kwargs):
183 | self.backup_name = backup_name
184 | self.dir_prefix = "{0}_".format(backup_name)
185 | self.destination = kwargs.get("destination", DEFAULT_DESTINATION)
186 | self.password = kwargs.get("password", "")
187 | self.profile = kwargs.get("profile", "default")
188 | self.conf = kwargs.get("conf", {})
189 | self.tags = kwargs.get("tags", [])
190 | # Key for bakmanager.io hook
191 | self.key = kwargs.get("key", None)
192 | self.syncer = None
193 |
194 | def __enter__(self):
195 | """Save the old current working directory,
196 | create a temporary directory,
197 | and make it the new current working directory.
198 | """
199 | self.old_cwd = os.getcwd()
200 | self.tmpd = tempfile.mkdtemp(prefix=self.dir_prefix)
201 | sh.cd(self.tmpd)
202 | log.info("New current working directory: {0}.".format(self.tmpd))
203 | return self
204 |
205 | def __exit__(self, type, value, traceback):
206 | """Reseting the current working directory,
207 | and run synchronization if enabled.
208 | """
209 | sh.cd(self.old_cwd)
210 | log.info("Back to {0}".format(self.old_cwd))
211 | shutil.rmtree(self.tmpd)
212 | if self.syncer:
213 | log.debug("auto sync")
214 | self.sync()
215 |
216 | def sync(self):
217 | """Shortcut for calling BakSyncer."""
218 | if self.syncer:
219 | try:
220 | return self.syncer.sync()
221 | except Exception, exc:
222 | log.exception(exc)
223 |
224 | def enable_sync(self, api_url, auth=None):
225 | """Enable synchronization with :class:`bakthat.sync.BakSyncer` (optional).
226 |
227 | :type api_url: str
228 | :param api_url: Base API URL.
229 |
230 | :type auth: tuple
231 | :param auth: Optional, tuple/list (username, password) for API authentication.
232 | """
233 | log.debug("Enabling BakSyncer to {0}".format(api_url))
234 | from bakthat.sync import BakSyncer
235 | self.syncer = BakSyncer(api_url, auth)
236 |
237 | def backup(self, filename=None, **kwargs):
238 | """Perform backup.
239 |
240 | :type filename: str
241 | :param filename: File/directory to backup.
242 |
243 | :type password: str
244 | :keyword password: Override already set password.
245 |
246 | :type destination: str
247 | :keyword destination: Override already set destination.
248 |
249 | :type tags: list
250 | :keyword tags: Tags list
251 |
252 | :type profile: str
253 | :keyword profile: Profile name
254 |
255 | :type conf: dict
256 | :keyword conf: Override profiles configuration
257 |
258 | :rtype: dict
259 | :return: A dict containing the following keys: stored_filename, size, metadata and filename.
260 | """
261 | if filename is None:
262 | filename = self.tmpd
263 |
264 | return bakthat.backup(filename,
265 | destination=kwargs.get("destination", self.destination),
266 | password=kwargs.get("password", self.password),
267 | tags=kwargs.get("tags", self.tags),
268 | profile=kwargs.get("profile", self.profile),
269 | conf=kwargs.get("conf", self.conf),
270 | key=kwargs.get("key", self.key),
271 | custom_filename=self.backup_name)
272 |
273 | def restore(self, filename, **kwargs):
274 | """Restore backup in the current working directory.
275 |
276 | :type filename: str
277 | :param filename: File/directory to restore.
278 |
279 | :type password: str
280 | :keyword password: Override already set password.
281 |
282 | :type destination: str
283 | :keyword destination: Override already set destination.
284 |
285 | :type profile: str
286 | :keyword profile: Profile name
287 |
288 | :type conf: dict
289 | :keyword conf: Override profiles configuration
290 |
291 | :rtype: bool
292 | :return: True if successful.
293 | """
294 | return bakthat.restore(filename,
295 | destination=kwargs.get("destination", self.destination),
296 | password=kwargs.get("password", self.password),
297 | profile=kwargs.get("profile", self.profile),
298 | conf=kwargs.get("conf", self.conf))
299 |
300 | def delete_older_than(self, filename=None, interval=None, **kwargs):
301 | """Delete backups older than the given interval string.
302 |
303 | :type filename: str
304 | :param filename: File/directory name.
305 |
306 | :type interval: str
307 | :param interval: Interval string like 1M, 1W, 1M3W4h2s...
308 | (s => seconds, m => minutes, h => hours, D => days, W => weeks, M => months, Y => Years).
309 |
310 | :type destination: str
311 | :keyword destination: Override already set destination.
312 |
313 | :type profile: str
314 | :keyword profile: Profile name
315 |
316 | :type conf: dict
317 | :keyword conf: Override profiles configuration
318 |
319 | :rtype: list
320 | :return: A list containing the deleted keys (S3) or archives (Glacier).
321 | """
322 | if filename is None:
323 | filename = self.tmpd
324 |
325 | return bakthat.delete_older_than(filename, interval,
326 | destination=kwargs.get("destination", self.destination),
327 | profile=kwargs.get("profile", self.profile),
328 | conf=kwargs.get("conf", self.conf))
329 |
330 | def rotate(self, filename=None, **kwargs):
331 | """Rotate backup using grandfather-father-son rotation scheme.
332 |
333 | :type filename: str
334 | :param filename: File/directory name.
335 |
336 | :type destination: str
337 | :keyword destination: Override already set destination.
338 |
339 | :type profile: str
340 | :keyword profile: Profile name
341 |
342 | :type conf: dict
343 | :keyword conf: Override profiles configuration
344 |
345 | :rtype: list
346 | :return: A list containing the deleted keys (S3) or archives (Glacier).
347 | """
348 | if filename is None:
349 | filename = self.backup_name
350 |
351 | return bakthat.rotate_backups(filename,
352 | destination=kwargs.pop("destination", self.destination),
353 | profile=kwargs.get("profile", self.profile),
354 | conf=kwargs.get("conf", self.conf))
355 |
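# A minimal sketch of BakHelper used as a context manager (dump_db and the
# "myhost.mydb" name are hypothetical): a temporary working directory is
# created on __enter__, backed up and rotated, then cleaned up on __exit__.
#
#     with BakHelper("myhost.mydb", destination="s3", password="") as bh:
#         dump_db()      # write dump files into the temporary directory
#         bh.backup()
#         bh.rotate()
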
--------------------------------------------------------------------------------
/docs/user_guide.rst:
--------------------------------------------------------------------------------
1 | .. _user_guide:
2 |
3 | User Guide
4 | ==========
5 |
6 | Everything you need to know as a user.
7 |
8 |
9 | Getting Started
10 | ---------------
11 |
12 | Basic usage: run "bakthat -h" or "bakthat --help" to show the help.
13 |
14 |
15 | If you haven't configured bakthat yet, you should run:
16 |
17 | ::
18 |
19 | $ bakthat configure
20 |
21 |
22 | .. note::
23 |
24 | Even if you have set a default destination, you can use a different destination using the ``-d``/``--destination`` parameter, for example, if S3 is the default destination, to use Glacier just add "-d glacier" or "--destination glacier".
25 |
26 |
27 | Backup
28 | ------
29 |
30 | ::
31 |
32 | $ bakthat backup --help
33 | usage: bakthat backup [-h] [-d DESTINATION] [--prompt PROMPT] [-t TAGS]
34 | [-p PROFILE] [-c CONFIG] [-k KEY]
35 | [filename]
36 |
37 | positional arguments:
38 | filename
39 |
40 | optional arguments:
41 | -h, --help show this help message and exit
42 | -d DESTINATION, --destination DESTINATION
43 | s3|glacier|swift
44 | --prompt PROMPT yes|no
45 | -t TAGS, --tags TAGS space separated tags
46 | -p PROFILE, --profile PROFILE
47 | profile name (default by default)
48 | -c CONFIG, --config CONFIG
49 | path to config file
50 | -k KEY, --key KEY Custom key for periodic backups (works only with
51 | BakManager.io hook.)
52 |
53 |
54 | When backing up a file, bakthat stores it as a gzipped tarball named **originaldirname.utctime.tgz**, where utctime is a UTC datetime (%Y%m%d%H%M%S).
55 |
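For example, backing up **/var/www** on 2013-03-05 19:36:15 UTC would be stored as **www.20130305193615.tgz** (with a ``.enc`` suffix appended when encryption is enabled).
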
56 | .. note::
57 |
58 | If you try to back up a file that is already gzipped, bakthat will only rename it (change the extension to .tgz and append utctime).
59 |
60 | You can also disable compression by setting ``compress: false`` in your configuration file (~/.bakthat.yml by default).
61 |
62 |
63 | Bakthat lets you tag backups to retrieve them faster. When backing up a file, just append the ``--tags``/``-t`` argument; tags are space separated, so when adding multiple tags, quote the whole string (e.g. ``--tags "tag1 tag2 tag3"``).
64 |
65 | Since version **0.5.2**, you can set the password with the BAKTHAT_PASSWORD environment variable.
66 |
67 | ::
68 |
69 | $ BAKTHAT_PASSWORD=mypassword bakthat backup myfile
70 |
71 |
72 | If you don't specify a filename/dirname, bakthat will back up the current working directory.
73 |
74 | ::
75 |
76 | $ cd /dir/i/want/to/bak
77 | backup to S3
78 | $ bakthat backup
79 | or
80 | $ bakthat backup /dir/i/want/to/bak
81 |
82 | $ bakthat backup /my/dir -t "tag1 tag2"
83 |
84 | you can also backup a single file
85 | $ bakthat backup /home/thomas/mysuperfile.txt
86 |
87 | backup to Glacier
88 | $ bakthat backup myfile -d glacier
89 |
90 | set the password with BAKTHAT_PASSWORD environment variable
91 | $ BAKTHAT_PASSWORD=mypassword bakthat backup myfile
92 |
93 | disable password prompt
94 | $ bakthat backup myfile --prompt no
95 |
96 |
97 | Excluding files
98 | ~~~~~~~~~~~~~~~
99 |
100 | .. versionadded:: 0.5.5
101 |
102 | Bakthat uses a ".gitignore style" exclusion mechanism based on Unix shell-style wildcards.
103 |
104 | There are two ways to exclude files:
105 |
106 | - by creating a **.bakthatexclude** file at the root of the directory you want to backup.
107 | - by specifying a file directly with the ``--exclude-file`` argument.
108 |
109 | By default, when performing a backup and no exclude file is specified, bakthat will look for either a **.bakthatexclude** file or a **.gitignore** file. So if you back up a git repository, it will use the existing .gitignore if available.
110 |
111 | Here is an example **.bakthatexclude** file, which excludes all .pyc and .log files, and both the tmp and cache directories.
112 |
113 | ::
114 |
115 | *.pyc
116 | *.log
117 | tmp
118 | cache
119 |
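Under the hood, patterns are matched with Python's ``fnmatch`` module. Here is a minimal sketch of the matching logic (simplified from ``bakthat/__init__.py``, reusing the example patterns above)::

    import fnmatch
    import re

    patterns = ["*.pyc", "*.log", "tmp", "cache"]

    def excluded(filename):
        # A file is excluded as soon as one pattern matches.
        return any(re.search(fnmatch.translate(p), filename)
                   for p in patterns)

    excluded("app/views.pyc")  # True
    excluded("app/views.py")   # False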
120 |
121 | Reduced redundancy using S3
122 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~
123 |
124 | .. versionadded:: 0.5.5
125 |
126 | If you back up to S3, you can activate reduced redundancy storage by using the ``--s3-reduced-redundancy`` flag.
127 |
128 | ::
129 |
130 | bakthat backup --s3-reduced-redundancy
131 |
132 | Temp directory
133 | ~~~~~~~~~~~~~~
134 |
135 | If a backup is too big to fit in the default temp directory, you can change the temp directory location by setting the TMPDIR, TEMP or TMP environment variable.
136 |
137 | ::
138 |
139 | $ export TMP=/home/thomas
140 |
141 |
142 | Restore
143 | -------
144 |
145 | ::
146 |
147 | $ bakthat restore --help
148 | usage: bakthat restore [-h] [-d DESTINATION] [-p PROFILE] [-c CONFIG]
149 | filename
150 |
151 | positional arguments:
152 | filename
153 |
154 | optional arguments:
155 | -h, --help show this help message and exit
156 | -d DESTINATION, --destination DESTINATION
157 | s3|glacier|swift
158 | -p PROFILE, --profile PROFILE
159 | profile name (default by default)
160 | -c CONFIG, --config CONFIG
161 | path to config file
162 |
163 |
164 | When restoring a backup, you can:
165 |
166 | - specify the **filename**: the latest backup will be restored
167 | - specify the **stored filename** directly if you want to restore an older version.
168 |
169 | ::
170 |
171 | $ bakthat restore bak
172 |
173 | if you want to restore an older version
174 | $ bakthat restore bak20120927
175 | or
176 | $ bakthat restore bak20120927.tgz.enc
177 |
178 | restore from Glacier
179 | $ bakthat restore bak -d glacier
180 |
181 | .. note::
182 |
183 | When restoring from Glacier, the first time you call the restore command the job is initiated; you can then check manually whether the job is completed (it takes 3-5 hours), and once it is, the file will be downloaded and restored.
184 |
185 |
186 | Listing backups
187 | ---------------
188 |
189 | Let's start with the help for the show subcommand:
190 |
191 | ::
192 |
193 | $ bakthat show --help
194 | usage: bakthat show [-h] [-d DESTINATION] [-t TAGS] [-p PROFILE]
195 | [-c CONFIG]
196 | [query]
197 |
198 | positional arguments:
199 | query search filename for query
200 |
201 | optional arguments:
202 | -h, --help show this help message and exit
203 | -d DESTINATION, --destination DESTINATION
204 | glacier|s3|swift, show every destination by default
205 | -t TAGS, --tags TAGS tags space separated
206 | -p PROFILE, --profile PROFILE
207 | profile name (all profiles are displayed by default)
208 | -c CONFIG, --config CONFIG
209 | path to config file
210 |
211 | So when listing backups, you can:
212 |
213 | - filter by query (filename/stored filename)
214 | - filter by destination (s3, glacier or swift)
215 | - filter by tags
216 | - filter by profile (if you manage multiple AWS/bucket/vault)
217 |
218 | Example:
219 |
220 | ::
221 |
222 | show everything
223 | $ bakthat show
224 |
225 | search for a file stored on s3:
226 | $ bakthat show myfile -d s3
227 |
228 |
229 | Delete
230 | ------
231 |
232 | If the backup is not stored in the default destination, you have to specify it manually.
233 |
234 | .. note::
235 |
236 | Remember that the delete command deletes only the most recent matching backup.
237 |
238 | ::
239 |
240 | $ bakthat delete bak
241 |
242 | $ bakthat delete bak -d glacier
243 |
244 |
245 | Delete older than
246 | -----------------
247 |
248 | Delete backups older than the given interval string, like 1M for one month, and so on.
249 |
250 | - **s** seconds
251 | - **m** minutes
252 | - **h** hours
253 | - **D** days
254 | - **W** weeks
255 | - **M** months
256 | - **Y** Years
257 |
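For instance, ``3M2D8h20m5s`` resolves to::

    3*30*86400 + 2*86400 + 8*3600 + 20*60 + 5 = 7978805 seconds

(internally, a month counts as 30 days and a year as 365 days).
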
258 | ::
259 |
260 | $ bakthat delete_older_than bakname 3M
261 |
262 | $ bakthat delete_older_than bakname 3M2D8h20m5s
263 |
264 | $ bakthat delete_older_than bakname 3M -d glacier
265 |
266 |
267 | Backup rotation
268 | ---------------
269 |
270 | If you make automated backups with bakthat, it makes sense to rotate them.
271 |
272 | Bakthat allows you to rotate backups using the Grandfather-father-son backup rotation scheme, and you can set a default rotation configuration.
273 |
274 | ::
275 |
276 | $ bakthat configure_backups_rotation
277 |
278 | Now you can rotate a backup set:
279 |
280 | ::
281 |
282 | $ bakthat rotate_backups bakname
283 |
284 |
285 | .. note::
286 |
287 | Bakthat relies on the GrandFatherSon module to compute rotations, so if you need to set up a more complex rotation scheme (like hourly backups), refer to its docs and change the rotation settings manually in your configuration file.
288 |
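After running it, the rotation settings are stored under the profile in your configuration file. Here is what such a section could look like (the values are examples; ``first_week_day`` is a ``calendar`` weekday number, 5 = saturday):

.. code-block:: yaml

    default:
      rotation:
        days: 7
        weeks: 4
        months: 6
        first_week_day: 5
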
289 |
290 | Accessing bakthat Python API
291 | ----------------------------
292 |
293 | Check out the :ref:`developer-guide`.
294 |
295 |
296 | Configuration
297 | -------------
298 |
299 | Bakthat stores its configuration in YAML format, so that configuration handling is the same for both the command line and the Python module.
300 |
301 | You can also handle **multiple profiles** if you need to manage multiple AWS accounts or vaults/buckets.
302 |
303 | By default, your configuration is stored in **~/.bakthat.yml**, but you can specify a different file with the ``-c``/``--config`` parameter.
304 |
305 | To get started, you can run ``bakthat configure``.
306 |
307 | ::
308 |
309 | $ bakthat configure
310 |
311 | Here is what a configuration object looks like:
312 |
313 | .. code-block:: yaml
314 |
315 | access_key: YOUR_ACCESS_KEY
316 | secret_key: YOUR_SECRET_KEY
317 | region_name: us-east-1
318 | glacier_vault: myvault
319 | s3_bucket: mybucket
320 |
321 | The **region_name** key is optional if you want to use **us-east-1**.
322 |
323 |
324 | Managing profiles
325 | ~~~~~~~~~~~~~~~~~
326 |
327 | Here is how profiles are stored; you can create them either manually or from the command line.
328 |
329 | .. code-block:: yaml
330 |
331 | default:
332 | access_key: YOUR_ACCESS_KEY
333 | secret_key: YOUR_SECRET_KEY
334 | region_name: us-east-1
335 | glacier_vault: myvault
336 | s3_bucket: mybucket
337 | myprofile:
338 | access_key: YOUR_ACCESS_KEY
339 | secret_key: YOUR_SECRET_KEY
340 | region_name: us-east-1
341 | glacier_vault: myvault
342 | s3_bucket: mybucket
343 |
344 |
345 | To create a profile from the command line with bakthat:
346 |
347 | ::
348 |
349 | $ bakthat configure --profile mynewprofile
350 |
351 | $ bakthat configure -h
352 | usage: bakthat configure [-h] [-p PROFILE]
353 |
354 | optional arguments:
355 | -h, --help show this help message and exit
356 | -p PROFILE, --profile PROFILE
357 | profile name (default by default)
358 |
359 |
360 | Once your profile is configured, you can use it with ``--profile``/``-p`` argument.
361 |
362 | ::
363 |
364 | $ bakthat backup -p myprofile
365 | $ bakthat show -p myprofile
366 |
367 | .. _swift-support:
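The same profile can be used from the Python API; a quick sketch (``bakthat.backup`` accepts a ``profile`` keyword)::

    import bakthat

    bakthat.backup("/dir/i/want/to/bak", profile="myprofile")
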
368 |
369 | OpenStack Swift support
370 | ~~~~~~~~~~~~~~~~~~~~~~~
371 |
372 | .. versionadded:: 0.5.0
373 |
374 | If you use OpenStack Swift as a backend, the **auth_version** and **auth_url** keys are required in the configuration.
375 | Below are sample configurations for both temp_auth and keystone auth.
376 |
377 | .. code-block:: yaml
378 |
379 | temp_auth:
380 | access_key: ACCOUNT:USER
381 | secret_key: YOUR_SECRET_KEY
382 | region_name:
383 | glacier_vault:
384 | s3_bucket: mybucket
385 | default_destination: swift
386 | auth_url: https://AUTH_HOST/auth/v1.0
387 | auth_version: '1'
388 | keystone:
389 | access_key: ACCOUNT:USER
390 | secret_key: YOUR_SECRET_KEY
391 | region_name:
392 | glacier_vault:
393 | s3_bucket: mybucket
394 | default_destination: swift
395 | auth_url: https://AUTH_HOST/v2.0
396 | auth_version: '2'
397 |
398 | .. _stored-metadata:
399 |
400 | Stored metadata
401 | ---------------
402 |
403 | Bakthat stores some data about your backups in a SQLite database (using peewee as the ORM) for a few reasons:
404 |
405 | - to allow you to filter them efficiently.
406 | - to avoid making a lot of requests to AWS.
407 | - to let you sync your bakthat data with multiple servers.
408 |
409 | Here is an example of the data stored in the SQLite database:
410 |
411 | .. code-block:: python
412 |
413 | {u'backend': u's3',
414 | u'backend_hash': u'9813aa99062d7a226f3327478eff3f63bf5603cd86999a42a2655f5d460e8e143c63822cb8e2f8998a694afee8d30c4924923dff695c6e5f739dffdd65768408',
415 | u'backup_date': 1362508575,
416 | u'filename': u'mydir',
417 | u'is_deleted': 0,
418 | u'last_updated': 1362508727,
419 | u'metadata': {u'is_enc': True},
420 | u'size': 3120,
421 | u'stored_filename': u'mydir.20130305193615.tgz.enc',
422 | u'tags': []}
423 |
424 | All the keys are self-explanatory, except **backend_hash**, which is the hash of your AWS access key concatenated with either the S3 bucket or the Glacier vault name. This key is used when syncing backups across multiple servers.
425 |
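If you need to query this metadata from Python, the same model can be imported directly. A short sketch (``Backups.search`` is the same helper the ``show`` command uses)::

    from bakthat.models import Backups

    for backup in Backups.search("mydir", "s3"):
        print backup.stored_filename
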
426 |
427 | Backup/Restore Glacier inventory
428 | --------------------------------
429 |
430 | Bakthat automatically backs up the local Glacier inventory (a dict with filename => archive_id mapping) to your S3 bucket under the "bakthat_glacier_inventory" key.
431 |
432 | You can retrieve bakthat's custom inventory without waiting:
433 |
434 | ::
435 |
436 | $ bakthat show_glacier_inventory
437 |
438 | or
439 |
440 | ::
441 |
442 | $ bakthat show_local_glacier_inventory
443 |
444 | You can trigger a backup manually:
445 |
446 | ::
447 |
448 | $ bakthat backup_glacier_inventory
449 |
450 | And here is how to restore the Glacier inventory from S3:
451 |
452 | ::
453 |
454 | $ bakthat restore_glacier_inventory
455 |
456 |
457 | S3 and Glacier IAM permissions
458 | ------------------------------
459 |
460 | ::
461 |
462 | {
463 | "Statement": [
464 | {
465 | "Effect": "Allow",
466 | "Action": "s3:*",
467 | "Resource": "arn:aws:s3:::S3_BUCKET_NAME*"
468 | },
469 | {
470 | "Effect": "Allow",
471 | "Action": "glacier:*"
472 | "Resource": "arn:aws:glacier:AWS_REGION:AWS_ACCOUNT_ID:vaults/GLACIER_VAULT_NAME",
473 | }
474 | ]
475 | }
476 |
--------------------------------------------------------------------------------
/bakthat/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 | import tarfile
3 | import tempfile
4 | import os
5 | from datetime import datetime
6 | from getpass import getpass
7 | import logging
8 | import hashlib
9 | import uuid
10 | import socket
11 | import re
12 | import fnmatch
13 | import mimetypes
14 | import calendar
15 | import functools
16 | from contextlib import closing # for Python2.6 compatibility
17 | from gzip import GzipFile
18 |
19 | import yaml
20 | from beefish import decrypt, encrypt_file
21 | import aaargh
22 | import grandfatherson
23 | from byteformat import ByteFormatter
24 |
25 | from bakthat.backends import GlacierBackend, S3Backend, RotationConfig, SwiftBackend
26 | from bakthat.conf import config, events, load_config, DEFAULT_DESTINATION, DEFAULT_LOCATION, CONFIG_FILE, EXCLUDE_FILES
27 | from bakthat.utils import _interval_string_to_seconds
28 | from bakthat.models import Backups
29 | from bakthat.sync import BakSyncer, bakmanager_hook, bakmanager_periodic_backups
30 | from bakthat.plugin import setup_plugins, plugin_setup
31 |
32 | __version__ = "0.6.0"
33 |
34 | app = aaargh.App(description="Compress, encrypt and upload files directly to Amazon S3/Glacier/Swift.")
35 |
36 | log = logging.getLogger("bakthat")
37 |
38 |
39 | class BakthatFilter(logging.Filter):
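# Let log records emitted by bakthat itself (or the root logger) through;
# records from other libraries are only shown at WARNING and above.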
40 | def filter(self, rec):
41 | if rec.name.startswith("bakthat") or rec.name == "root":
42 | return True
43 | else:
44 | return rec.levelno >= logging.WARNING
45 |
46 |
47 | STORAGE_BACKEND = dict(s3=S3Backend, glacier=GlacierBackend, swift=SwiftBackend)
48 |
49 |
50 | def _get_store_backend(conf, destination=None, profile="default"):
51 | if not isinstance(conf, dict):
52 | conf = load_config(conf)
53 | conf = conf.get(profile)
54 | setup_plugins(conf)
55 | if not destination:
56 | destination = conf.get("default_destination", DEFAULT_DESTINATION)
57 | return STORAGE_BACKEND[destination](conf, profile), destination, conf
58 |
59 |
60 | @app.cmd(help="Delete backups older than the given interval string.")
61 | @app.cmd_arg('filename', type=str, help="Filename to delete")
62 | @app.cmd_arg('interval', type=str, help="Interval string like 1M, 1W, 1M3W4h2s")
63 | @app.cmd_arg('-d', '--destination', type=str, help="s3|glacier|swift", default=None)
64 | @app.cmd_arg('-p', '--profile', type=str, default="default", help="profile name (default by default)")
65 | @app.cmd_arg('-c', '--config', type=str, default=CONFIG_FILE, help="path to config file")
66 | def delete_older_than(filename, interval, profile="default", config=CONFIG_FILE, destination=None, **kwargs):
67 | """Delete backups matching the given filename older than the given interval string.
68 |
69 | :type filename: str
70 | :param filename: File/directory name.
71 |
72 | :type interval: str
73 | :param interval: Interval string like 1M, 1W, 1M3W4h2s...
74 | (s => seconds, m => minutes, h => hours, D => days, W => weeks, M => months, Y => Years).
75 |
76 | :type destination: str
77 | :param destination: glacier|s3|swift
78 |
79 | :type conf: dict
80 | :keyword conf: Override/set AWS configuration.
81 |
82 | :rtype: list
83 | :return: A list containing the deleted keys (S3) or archives (Glacier).
84 |
85 | """
86 | storage_backend, destination, conf = _get_store_backend(config, destination, profile)
87 |
88 | session_id = str(uuid.uuid4())
89 | events.before_delete_older_than(session_id)
90 |
91 | interval_seconds = _interval_string_to_seconds(interval)
92 |
93 | deleted = []
94 |
95 | backup_date_filter = int(datetime.utcnow().strftime("%s")) - interval_seconds
96 | for backup in Backups.search(filename, destination, older_than=backup_date_filter, profile=profile, config=config):
97 | real_key = backup.stored_filename
98 | log.info("Deleting {0}".format(real_key))
99 |
100 | storage_backend.delete(real_key)
101 | backup.set_deleted()
102 | deleted.append(backup)
103 |
104 | events.on_delete_older_than(session_id, deleted)
105 |
106 | return deleted
107 |
108 |
109 | @app.cmd(help="Rotate backups using Grandfather-father-son backup rotation scheme.")
110 | @app.cmd_arg('filename', type=str)
111 | @app.cmd_arg('-d', '--destination', type=str, help="s3|glacier|swift", default=None)
112 | @app.cmd_arg('-p', '--profile', type=str, default="default", help="profile name (default by default)")
113 | @app.cmd_arg('-c', '--config', type=str, default=CONFIG_FILE, help="path to config file")
114 | def rotate_backups(filename, destination=None, profile="default", config=CONFIG_FILE, **kwargs):
115 | """Rotate backup using grandfather-father-son rotation scheme.
116 |
117 | :type filename: str
118 | :param filename: File/directory name.
119 |
120 | :type destination: str
121 | :param destination: s3|glacier|swift
122 |
123 | :type conf: dict
124 | :keyword conf: Override/set AWS configuration.
125 |
126 | :type days: int
127 | :keyword days: Number of days to keep.
128 |
129 | :type weeks: int
130 | :keyword weeks: Number of weeks to keep.
131 |
132 | :type months: int
133 | :keyword months: Number of months to keep.
134 |
135 | :type first_week_day: str
136 | :keyword first_week_day: First week day (used to determine which weekly backup to keep, saturday by default).
137 |
138 | :rtype: list
139 | :return: A list containing the deleted keys (S3) or archives (Glacier).
140 |
141 | """
142 | storage_backend, destination, conf = _get_store_backend(config, destination, profile)
143 | rotate = RotationConfig(conf, profile)
144 | if not rotate:
145 | raise Exception("You must run bakthat configure_backups_rotation or provide rotation configuration.")
146 |
147 | session_id = str(uuid.uuid4())
148 | events.before_rotate_backups(session_id)
149 |
150 | deleted = []
151 |
152 | backups = Backups.search(filename, destination, profile=profile, config=config)
153 | backups_date = [datetime.fromtimestamp(float(backup.backup_date)) for backup in backups]
154 |
155 | rotate_kwargs = rotate.conf.copy()
156 | del rotate_kwargs["first_week_day"]
157 | for k, v in rotate_kwargs.iteritems():
158 | rotate_kwargs[k] = int(v)
159 | rotate_kwargs["firstweekday"] = int(rotate.conf["first_week_day"])
160 | rotate_kwargs["now"] = datetime.utcnow()
161 |
162 | to_delete = grandfatherson.to_delete(backups_date, **rotate_kwargs)
163 | for delete_date in to_delete:
164 | try:
165 | backup_date = int(delete_date.strftime("%s"))
166 | backup = Backups.search(filename, destination, backup_date=backup_date, profile=profile, config=config).get()
167 |
168 | if backup:
169 | real_key = backup.stored_filename
170 | log.info("Deleting {0}".format(real_key))
171 |
172 | storage_backend.delete(real_key)
173 | backup.set_deleted()
174 | deleted.append(backup)
175 | except Exception, exc:
176 | log.error("Error when deleting {0}".format(backup))
177 | log.exception(exc)
178 |
179 | events.on_rotate_backups(session_id, deleted)
180 |
181 | return deleted
182 |
183 |
184 | def _get_exclude(exclude_file):
185 | """ Load a .gitignore like file to exclude files/dir from backups.
186 |
187 | :type exclude_file: str
188 | :param exclude_file: Path to the exclude file
189 |
190 | :rtype: function
191 | :return: A function ready to inject in tar.add(exclude=_exclude)
192 |
193 | """
194 | with open(exclude_file) as exclude_fobj:
195 | patterns = filter(None, exclude_fobj.read().split("\n"))
196 | def _exclude(filename):
197 | for pattern in patterns:
198 | if re.search(fnmatch.translate(pattern), filename):
199 | log.debug("{0} excluded".format(filename))
200 | return True
201 | return False
202 | return _exclude
203 |
204 |
205 | @app.cmd(help="Backup a file or a directory, backup the current directory if no arg is provided.")
206 | @app.cmd_arg('filename', type=str, default=os.getcwd(), nargs="?")
207 | @app.cmd_arg('-d', '--destination', type=str, help="s3|glacier|swift", default=None)
208 | @app.cmd_arg('--prompt', type=str, help="yes|no", default="yes")
209 | @app.cmd_arg('-t', '--tags', type=str, help="space separated tags", default="")
210 | @app.cmd_arg('-p', '--profile', type=str, default="default", help="profile name (default by default)")
211 | @app.cmd_arg('-c', '--config', type=str, default=CONFIG_FILE, help="path to config file")
212 | @app.cmd_arg('-k', '--key', type=str, default=None, help="Custom key for periodic backups (works only with BakManager.io hook.)")
213 | @app.cmd_arg('--exclude-file', type=str, default=None)
214 | @app.cmd_arg('--s3-reduced-redundancy', action="store_true")
215 | def backup(filename=os.getcwd(), destination=None, profile="default", config=CONFIG_FILE, prompt="yes", tags=[], key=None, exclude_file=None, s3_reduced_redundancy=False, **kwargs):
216 | """Perform backup.
217 |
218 | :type filename: str
219 | :param filename: File/directory to backup.
220 |
221 | :type destination: str
222 | :param destination: s3|glacier|swift
223 |
224 | :type prompt: str
225 | :param prompt: Set to "no" to disable the password prompt (and thus encryption);
226 | only useful when using bakthat in command line mode.
227 |
228 | :type tags: str or list
229 | :param tags: Tags, either as a space-separated string,
230 | or directly as a list of str (when calling from Python).
231 |
232 | :type password: str
233 | :keyword password: Password, empty string to disable encryption.
234 |
235 | :type conf: dict
236 | :keyword conf: Override/set AWS configuration.
237 |
238 | :type custom_filename: str
239 | :keyword custom_filename: Override the original filename (only in metadata)
240 |
241 | :rtype: dict
242 | :return: A dict containing the following keys: stored_filename, size, metadata, backend and filename.
243 |
244 | """
245 | storage_backend, destination, conf = _get_store_backend(config, destination, profile)
246 | backup_file_fmt = "{0}.{1}.tgz"
247 |
248 | session_id = str(uuid.uuid4())
249 | events.before_backup(session_id)
250 |
251 | # Check if compression is disabled on the configuration.
252 | if conf:
253 | compress = conf.get("compress", True)
254 | else:
255 | compress = config.get(profile).get("compress", True)
256 |
257 | if not compress:
258 | backup_file_fmt = "{0}.{1}"
259 |
260 | log.info("Backing up " + filename)
261 |
262 | if exclude_file and os.path.isfile(exclude_file):
263 | EXCLUDE_FILES.insert(0, exclude_file)
264 |
265 | _exclude = lambda filename: False
266 | if os.path.isdir(filename):
267 | join = functools.partial(os.path.join, filename)
268 | for efile in EXCLUDE_FILES:
269 | efile = join(efile)
270 | if os.path.isfile(efile):
271 | _exclude = _get_exclude(efile)
272 | log.info("Using {0} to exclude files.".format(efile))
273 |
274 | arcname = filename.strip('/').split('/')[-1]
275 | now = datetime.utcnow()
276 | date_component = now.strftime("%Y%m%d%H%M%S")
277 | stored_filename = backup_file_fmt.format(arcname, date_component)
278 |
279 | backup_date = int(now.strftime("%s"))
280 | backup_data = dict(filename=kwargs.get("custom_filename", arcname),
281 | backup_date=backup_date,
282 | last_updated=backup_date,
283 | backend=destination,
284 | is_deleted=False)
285 |
286 | # Useful only when using bakmanager.io hook
287 | backup_key = key
288 |
289 | password = kwargs.get("password", os.environ.get("BAKTHAT_PASSWORD"))
290 | if password is None and prompt.lower() != "no":
291 | password = getpass("Password (blank to disable encryption): ")
292 | if password:
293 | password2 = getpass("Password confirmation: ")
294 | if password != password2:
295 | log.error("Password confirmation doesn't match")
296 | return
297 |
298 | if not compress:
299 | log.info("Compression disabled")
300 | outname = filename
301 | with open(outname) as outfile:
302 | backup_data["size"] = os.fstat(outfile.fileno()).st_size
303 | bakthat_compression = False
304 |
305 | # Check if the file is not already compressed
306 | elif mimetypes.guess_type(arcname) == ('application/x-tar', 'gzip'):
307 | log.info("File already compressed")
308 | outname = filename
309 |
310 | # removing extension to reformat filename
311 | new_arcname = re.sub(r'(\.t(ar\.)?gz)', '', arcname)
312 | stored_filename = backup_file_fmt.format(new_arcname, date_component)
313 |
314 | with open(outname) as outfile:
315 | backup_data["size"] = os.fstat(outfile.fileno()).st_size
316 |
317 | bakthat_compression = False
318 | else:
319 | # If not we compress it
320 | log.info("Compressing...")
321 |
322 | with tempfile.NamedTemporaryFile(delete=False) as out:
323 | with closing(tarfile.open(fileobj=out, mode="w:gz")) as tar:
324 | tar.add(filename, arcname=arcname, exclude=_exclude)
325 | outname = out.name
326 | out.seek(0)
327 | backup_data["size"] = os.fstat(out.fileno()).st_size
328 | bakthat_compression = True
329 |
330 | bakthat_encryption = False
331 | if password:
332 | bakthat_encryption = True
333 | log.info("Encrypting...")
334 | encrypted_out = tempfile.NamedTemporaryFile(delete=False)
335 | encrypt_file(outname, encrypted_out.name, password)
336 | stored_filename += ".enc"
337 |
338 | # We only remove the file if the archive is created by bakthat
339 | if bakthat_compression:
340 | os.remove(outname) # remove non-encrypted tmp file
341 |
342 | outname = encrypted_out.name
343 |
344 | encrypted_out.seek(0)
345 | backup_data["size"] = os.fstat(encrypted_out.fileno()).st_size
346 |
347 | # Handling tags metadata
348 | if isinstance(tags, list):
349 | tags = " ".join(tags)
350 |
351 | backup_data["tags"] = tags
352 |
353 | backup_data["metadata"] = dict(is_enc=bakthat_encryption,
354 | client=socket.gethostname())
355 | backup_data["stored_filename"] = stored_filename
356 |
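# backend_hash identifies the (access key, bucket/vault) pair the backup
# belongs to; it is used when syncing metadata across multiple servers.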
357 | access_key = storage_backend.conf.get("access_key")
358 | container_key = storage_backend.conf.get(storage_backend.container_key)
359 | backup_data["backend_hash"] = hashlib.sha512(access_key + container_key).hexdigest()
360 |
361 | log.info("Uploading...")
362 | storage_backend.upload(stored_filename, outname, s3_reduced_redundancy=s3_reduced_redundancy)
363 |
364 | # We only remove the file if the archive is created by bakthat
365 | if bakthat_compression or bakthat_encryption:
366 | os.remove(outname)
367 |
368 | log.debug(backup_data)
369 |
370 | # Insert backup metadata in SQLite
371 | backup = Backups.create(**backup_data)
372 |
373 | BakSyncer(conf).sync_auto()
374 |
375 | # bakmanager.io hook, enabled with the -k/--key parameter
376 | if backup_key:
377 | bakmanager_hook(conf, backup_data, backup_key)
378 |
379 | events.on_backup(session_id, backup)
380 |
381 | return backup
382 |
383 |
384 | @app.cmd(help="Show backups list.")
385 | @app.cmd_arg('query', type=str, default="", help="search filename for query", nargs="?")
386 | @app.cmd_arg('-d', '--destination', type=str, default="", help="glacier|s3|swift, show every destination by default")
387 | @app.cmd_arg('-t', '--tags', type=str, default="", help="tags space separated")
388 | @app.cmd_arg('-p', '--profile', type=str, default="default", help="profile name (all profiles are displayed by default)")
389 | @app.cmd_arg('-c', '--config', type=str, default=CONFIG_FILE, help="path to config file")
390 | def show(query="", destination="", tags="", profile="default", config=CONFIG_FILE):
391 | backups = Backups.search(query, destination, profile=profile, tags=tags, config=config)
392 | _display_backups(backups)
393 |
394 |
395 | def _display_backups(backups):
396 | bytefmt = ByteFormatter()
397 | for backup in backups:
398 | backup = backup._data
399 | backup["backup_date"] = datetime.fromtimestamp(float(backup["backup_date"])).isoformat()
400 | backup["size"] = bytefmt(backup["size"])
401 | if backup.get("tags"):
402 | backup["tags"] = "({0})".format(backup["tags"])
403 |
404 | log.info("{backup_date}\t{backend:8}\t{size:8}\t{stored_filename} {tags}".format(**backup))
405 |
406 |
407 | @app.cmd(help="Set AWS S3/Glacier credentials.")
408 | @app.cmd_arg('-p', '--profile', type=str, default="default", help="profile name (default by default)")
409 | def configure(profile="default"):
410 | try:
411 | new_conf = config.copy()
412 | new_conf[profile] = config.get(profile, {})
413 |
414 | new_conf[profile]["access_key"] = raw_input("AWS Access Key: ")
415 | new_conf[profile]["secret_key"] = raw_input("AWS Secret Key: ")
416 | new_conf[profile]["s3_bucket"] = raw_input("S3 Bucket Name: ")
417 | new_conf[profile]["glacier_vault"] = raw_input("Glacier Vault Name: ")
418 |
419 | while 1:
420 | default_destination = raw_input("Default destination ({0}): ".format(DEFAULT_DESTINATION))
421 | if default_destination:
422 | default_destination = default_destination.lower()
423 | if default_destination in ("s3", "glacier", "swift"):
424 | break
425 | else:
426 | log.error("Invalid default_destination, should be s3 or glacier, swift, try again.")
427 | else:
428 | default_destination = DEFAULT_DESTINATION
429 | break
430 |
431 | new_conf[profile]["default_destination"] = default_destination
432 | region_name = raw_input("Region Name ({0}): ".format(DEFAULT_LOCATION))
433 | if not region_name:
434 | region_name = DEFAULT_LOCATION
435 | new_conf[profile]["region_name"] = region_name
436 |
437 | if default_destination == "swift":
438 | new_conf[profile]["auth_version"] = raw_input("Swift Auth Version: ")
439 | new_conf[profile]["auth_url"] = raw_input("Swift Auth URL: ")
440 |
441 | yaml.dump(new_conf, open(CONFIG_FILE, "w"), default_flow_style=False)
442 |
443 | log.info("Config written in %s" % CONFIG_FILE)
444 | log.info("Run bakthat configure_backups_rotation if needed.")
445 | except KeyboardInterrupt:
446 | log.error("Cancelled by user")
447 |
448 |
449 | @app.cmd(help="Configure backups rotation")
450 | @app.cmd_arg('-p', '--profile', type=str, default="default", help="profile name (default by default)")
451 | def configure_backups_rotation(profile="default"):
452 | rotation_conf = {"rotation": {}}
453 | rotation_conf["rotation"]["days"] = int(raw_input("Number of days to keep: "))
454 | rotation_conf["rotation"]["weeks"] = int(raw_input("Number of weeks to keep: "))
455 | rotation_conf["rotation"]["months"] = int(raw_input("Number of months to keep: "))
456 | while 1:
457 | first_week_day = raw_input("First week day (used to determine which weekly backup to keep, saturday by default): ")
458 | if first_week_day:
459 | if hasattr(calendar, first_week_day.upper()):
460 | first_week_day = getattr(calendar, first_week_day.upper())
461 | break
462 | else:
463 | log.error("Invalid first_week_day, please choose from sunday to saturday.")
464 | else:
465 | first_week_day = calendar.SATURDAY
466 | break
467 | rotation_conf["rotation"]["first_week_day"] = int(first_week_day)
468 | conf_file = open(CONFIG_FILE, "w")
469 | new_conf = config.copy()
470 | new_conf[profile].update(rotation_conf)
471 | yaml.dump(new_conf, conf_file, default_flow_style=False)
472 | log.info("Config written in %s" % CONFIG_FILE)
473 |
474 |
475 | @app.cmd(help="Restore backup in the current directory.")
476 | @app.cmd_arg('filename', type=str)
477 | @app.cmd_arg('-d', '--destination', type=str, help="s3|glacier|swift", default=None)
478 | @app.cmd_arg('-p', '--profile', type=str, default="default", help="profile name (default by default)")
479 | @app.cmd_arg('-c', '--config', type=str, default=CONFIG_FILE, help="path to config file")
480 | def restore(filename, destination=None, profile="default", config=CONFIG_FILE, **kwargs):
481 | """Restore backup in the current working directory.
482 |
483 | :type filename: str
484 | :param filename: File/directory to backup.
485 |
486 | :type destination: str
487 | :param destination: s3|glacier|swift
488 |
489 | :type profile: str
490 | :param profile: Profile name (default by default).
491 |
492 | :type conf: dict
493 | :keyword conf: Override/set AWS configuration.
494 |
495 | :rtype: bool
496 | :return: True if successful.
497 | """
498 | storage_backend, destination, conf = _get_store_backend(config, destination, profile)
499 |
500 | if not filename:
501 | log.error("No file to restore, use -f to specify one.")
502 | return
503 |
504 | backup = Backups.match_filename(filename, destination, profile=profile, config=config)
505 |
506 | if not backup:
507 | log.error("No file matched.")
508 | return
509 |
510 | session_id = str(uuid.uuid4())
511 | events.before_restore(session_id)
512 |
513 | key_name = backup.stored_filename
514 | log.info("Restoring " + key_name)
515 |
516 | # Asking password before actually download to avoid waiting
517 | if key_name and backup.is_encrypted():
518 | password = kwargs.get("password")
519 | if not password:
520 | password = getpass()
521 |
522 | log.info("Downloading...")
523 |
524 | download_kwargs = {}
525 | if kwargs.get("job_check"):
526 | download_kwargs["job_check"] = True
527 | log.info("Job Check: " + repr(download_kwargs))
528 |
529 | out = storage_backend.download(key_name, **download_kwargs)
530 | if kwargs.get("job_check"):
531 | log.info("Job Check Request")
532 | # If it's a job_check call, we return Glacier job data
533 | return out
534 |
535 | if out and backup.is_encrypted():
536 | log.info("Decrypting...")
537 | decrypted_out = tempfile.TemporaryFile()
538 | decrypt(out, decrypted_out, password)
539 | out = decrypted_out
540 |
541 | if out and (key_name.endswith(".tgz") or key_name.endswith(".tgz.enc")):
542 | log.info("Uncompressing...")
543 | out.seek(0)
544 | if not backup.metadata.get("KeyValue"):
545 | tar = tarfile.open(fileobj=out)
546 | tar.extractall()
547 | tar.close()
548 | else:
549 | with closing(GzipFile(fileobj=out, mode="r")) as f:
550 | with open(backup.stored_filename, "w") as restored:
551 | restored.write(f.read())
552 | elif out:
553 | log.info("Backup is not compressed")
554 | with open(backup.filename, "w") as restored:
555 | out.seek(0)
556 | restored.write(out.read())
557 |
558 | events.on_restore(session_id, backup)
559 |
560 | return backup
561 |
562 |
563 | @app.cmd(help="Delete a backup.")
564 | @app.cmd_arg('filename', type=str)
565 | @app.cmd_arg('-d', '--destination', type=str, help="s3|glacier|swift", default=None)
566 | @app.cmd_arg('-p', '--profile', type=str, default="default", help="profile name (default by default)")
567 | @app.cmd_arg('-c', '--config', type=str, default=CONFIG_FILE, help="path to config file")
568 | def delete(filename, destination=None, profile="default", config=CONFIG_FILE, **kwargs):
569 | """Delete a backup.
570 |
571 | :type filename: str
572 | :param filename: stored filename to delete.
573 |
574 | :type destination: str
575 | :param destination: glacier|s3|swift
576 |
577 | :type profile: str
578 | :param profile: Profile name (default by default).
579 |
580 | :type conf: dict
581 | :keyword conf: A dict with a custom configuration.
582 |
583 | :type conf: dict
584 | :keyword conf: Override/set AWS configuration.
585 |
586 | :rtype: bool
587 | :return: True if the file is deleted.
588 |
589 | """
590 | if not filename:
591 | log.error("No file to delete, use -f to specify one.")
592 | return
593 |
594 | backup = Backups.match_filename(filename, destination, profile=profile, config=config)
595 |
596 | if not backup:
597 | log.error("No file matched.")
598 | return
599 |
600 | key_name = backup.stored_filename
601 |
602 | storage_backend, destination, conf = _get_store_backend(config, destination, profile)
603 |
604 | session_id = str(uuid.uuid4())
605 | events.before_delete(session_id)
606 |
607 | log.info("Deleting {0}".format(key_name))
608 |
609 | storage_backend.delete(key_name)
610 | backup.set_deleted()
611 |
612 | events.on_delete(session_id, backup)
613 |
614 | return backup
615 |
616 |
617 | @app.cmd(help="Periodic backups status (bakmanager.io API)")
618 | @app.cmd_arg('-p', '--profile', type=str, default="default", help="profile name (default by default)")
619 | @app.cmd_arg('-c', '--config', type=str, default=CONFIG_FILE, help="path to config file")
620 | def periodic_backups(config=CONFIG_FILE, profile="default"):
621 | conf = load_config(config).get(profile)
622 | bakmanager_periodic_backups(conf)
623 |
624 |
625 | @app.cmd(help="Trigger synchronization")
626 | def sync(**kwargs):
627 | """Trigger synchronization."""
628 | conf = kwargs.get("conf")
629 | BakSyncer(conf).sync()
630 |
631 |
632 | @app.cmd(help="Reset synchronization")
633 | def reset_sync(**kwargs):
634 | """Reset synchronization."""
635 | conf = kwargs.get("conf")
636 | BakSyncer(conf).reset_sync()
637 |
638 |
639 | def main():
640 |
641 | if not log.handlers:
642 | # logging.basicConfig(level=logging.INFO, format='%(message)s')
643 | handler = logging.StreamHandler()
644 | handler.addFilter(BakthatFilter())
645 | handler.setFormatter(logging.Formatter('%(message)s'))
646 | log.addHandler(handler)
647 | log.setLevel(logging.INFO)
648 |
649 | app.run()
650 |
651 |
652 | if __name__ == '__main__':
653 | main()
654 |
--------------------------------------------------------------------------------