├── .gitignore
├── aging
│   ├── ceph.conf
│   ├── makecephconf.py
│   ├── makecephconf.yaml
│   ├── runme.sh
│   ├── runtests.btrfs.yaml
│   ├── runtests.ext4.yaml
│   ├── runtests.py
│   ├── runtests.xfs.yaml
│   └── runtests.yaml
├── analysis
│   ├── log_analyzer.py
│   ├── log_threadpool_analyzer.py
│   └── strace_parser.py
├── cbt
│   ├── .gitignore
│   └── readme.md
├── models
│   ├── performance
│   │   ├── FileStore.py
│   │   ├── Poisson.py
│   │   ├── Rados.py
│   │   ├── SimDisk.py
│   │   ├── SimFS.py
│   │   ├── TESTburnupi.py
│   │   ├── disktest.py
│   │   ├── filestoretest.py
│   │   ├── fstest.py
│   │   ├── radostest.py
│   │   └── test.py
│   └── reliability
│       ├── ColumnPrint.py
│       ├── Config.py
│       ├── DiskRely.py
│       ├── MultiRely.py
│       ├── README.html
│       ├── RadosRely.py
│       ├── RaidRely.py
│       ├── RelyFuncts.py
│       ├── RelyGUI.py
│       ├── Run.py
│       ├── SiteRely.py
│       ├── TODO.txt
│       ├── inktank.ico
│       ├── main.py
│       ├── sizes.py
│       └── test.py
├── nose
│   └── plugins
│       └── inventory
│           ├── inventory.py
│           └── setup.py
├── redmine
│   ├── Bugparse.pm
│   ├── age.plot
│   ├── bugage.pl
│   ├── buggraphs.sh
│   ├── bugmunch.pl
│   ├── bugplot.sh
│   ├── bugs.plot
│   ├── index.html
│   ├── progress.pl
│   ├── progress.plot
│   ├── redmine_dump.pl
│   ├── sourcemunch.pl
│   ├── sources.plot
│   ├── sprintmunch.pl
│   └── sprints.plot
└── regression
    ├── burnupi-available
    │   ├── 3xRep
    │   │   ├── ceph.conf
    │   │   ├── runtests.btrfs.yaml
    │   │   └── runtests.xfs.yaml
    │   ├── EC
    │   │   ├── ceph.conf
    │   │   ├── runtests.EC31.yaml
    │   │   ├── runtests.EC62.yaml
    │   │   └── runtests.EC93.yaml
    │   ├── backfill
    │   │   ├── ceph.conf.high_rp
    │   │   ├── ceph.conf.low_rp
    │   │   ├── ceph.conf.norm_rp
    │   │   ├── runtests.high_rp.yaml
    │   │   ├── runtests.low_rp.yaml
    │   │   └── runtests.norm_rp.yaml
    │   └── tiering
    │       ├── ceph.base.conf
    │       ├── ceph.cache.conf
    │       ├── ceph.tiered.conf
    │       └── runtests.cache.yaml
    ├── magna-available
    │   ├── 3xRep
    │   │   ├── ceph.conf
    │   │   ├── runtests.btrfs.yaml
    │   │   └── runtests.xfs.yaml
    │   ├── EC
    │   │   ├── ceph.conf
    │   │   ├── runtests.EC31.yaml
    │   │   ├── runtests.EC62.yaml
    │   │   └── runtests.EC93.yaml
    │   └── backfill
    │       ├── ceph.conf.high_rp
    │       ├── ceph.conf.low_rp
    │       ├── ceph.conf.norm_rp
    │       ├── runtests.high_rp.yaml
    │       ├── runtests.low_rp.yaml
    │       └── runtests.norm_rp.yaml
    └── runtests.sh
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | *.diff
3 | *.deb
4 | *.tar.gz
5 | *.tar.bz2
6 | *.swp
7 | *.swo
8 | *.tmp
9 | *.orig
10 | *.patch
11 | *.pyc
12 | *.pyo
13 |
14 | py-compile
15 |
--------------------------------------------------------------------------------
/aging/ceph.conf:
--------------------------------------------------------------------------------
1 | [global]
2 | auth supported = none
3 | log to syslog = false
4 | log file = /var/log/ceph/$name.log
5 | filestore xattr use omap = true
6 | auth cluster required = none
7 | auth service required = none
8 | auth client required = none
9 |
10 | # filestore flusher = false
11 | # objecter inflight op bytes = 2147483648
12 | # ms nocrc = 1
13 | [mon]
14 | mon osd data = /srv/mon.$id
15 |
16 | [osd]
17 | # debug osd = 20
18 | # debug ms = 1
19 | # debug filestore = 20
20 | # debug journal = 20
21 |
22 | [mon.a]
23 | host = burnupiX
24 | mon addr = 127.0.0.1:6789
25 |
26 | [osd.0]
27 | host = burnupiX
28 | osd data = /srv/osd-device-0-data
29 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
30 |
31 | [osd.1]
32 | host = burnupiX
33 | osd data = /srv/osd-device-1-data
34 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
35 |
36 | [osd.2]
37 | host = burnupiX
38 | osd data = /srv/osd-device-2-data
39 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
40 |
41 | [osd.3]
42 | host = burnupiX
43 | osd data = /srv/osd-device-3-data
44 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
45 |
46 | [osd.4]
47 | host = burnupiX
48 | osd data = /srv/osd-device-4-data
49 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
50 |
51 | [osd.5]
52 | host = burnupiX
53 | osd data = /srv/osd-device-5-data
54 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
55 |
56 | [osd.6]
57 | host = burnupiX
58 | osd data = /srv/osd-device-6-data
59 | osd journal = /dev/disk/by-partlabel/osd-device-6-journal
60 |
61 | [osd.7]
62 | host = burnupiX
63 | osd data = /srv/osd-device-7-data
64 | osd journal = /dev/disk/by-partlabel/osd-device-7-journal
65 |
66 | #[osd.8]
67 | # host = burnupiX
68 | # osd data = /srv/osd-device-8-data
69 | # osd journal = /dev/disk/by-partlabel/osd-device-8-journal
70 |
71 | #[osd.9]
72 | # host = burnupiX
73 | # osd data = /srv/osd-device-9-data
74 | # osd journal = /dev/disk/by-partlabel/osd-device-9-journal
75 |
76 | #[osd.10]
77 | # host = burnupiX
78 | # osd data = /srv/osd-device-10-data
79 | # osd journal = /dev/disk/by-partlabel/osd-device-10-journal
80 |
81 | #[osd.11]
82 | # host = burnupiX
83 | # osd data = /srv/osd-device-11-data
84 | # osd journal = /dev/disk/by-partlabel/osd-device-11-journal
85 |
86 | #[osd.12]
87 | # host = burnupiX
88 | # osd data = /srv/osd-device-12-data
89 | # osd journal = /dev/disk/by-partlabel/osd-device-12-journal
90 |
91 | #[osd.13]
92 | # host = burnupiX
93 | # osd data = /srv/osd-device-13-data
94 | # osd journal = /dev/disk/by-partlabel/osd-device-13-journal
95 |
96 | #[osd.14]
97 | # host = burnupiX
98 | # osd data = /srv/osd-device-14-data
99 | # osd journal = /dev/disk/by-partlabel/osd-device-14-journal
100 |
101 | #[osd.15]
102 | # host = burnupiX
103 | # osd data = /srv/osd-device-15-data
104 | # osd journal = /dev/disk/by-partlabel/osd-device-15-journal
105 |
106 | #[osd.16]
107 | # host = burnupiX
108 | # osd data = /srv/osd-device-16-data
109 | # osd journal = /dev/disk/by-partlabel/osd-device-16-journal
110 |
111 | #[osd.17]
112 | # host = burnupiX
113 | # osd data = /srv/osd-device-17-data
114 | # osd journal = /dev/disk/by-partlabel/osd-device-17-journal
115 |
116 | #[osd.18]
117 | # host = burnupiX
118 | # osd data = /srv/osd-device-18-data
119 | # osd journal = /dev/disk/by-partlabel/osd-device-18-journal
120 |
121 | #[osd.19]
122 | # host = burnupiX
123 | # osd data = /srv/osd-device-19-data
124 | # osd journal = /dev/disk/by-partlabel/osd-device-19-journal
125 |
126 | #[osd.20]
127 | # host = burnupiX
128 | # osd data = /srv/osd-device-20-data
129 | # osd journal = /dev/disk/by-partlabel/osd-device-20-journal
130 |
131 | #[osd.21]
132 | # host = burnupiX
133 | # osd data = /srv/osd-device-21-data
134 | # osd journal = /dev/disk/by-partlabel/osd-device-21-journal
135 |
136 | #[osd.22]
137 | # host = burnupiX
138 | # osd data = /srv/osd-device-22-data
139 | # osd journal = /dev/disk/by-partlabel/osd-device-22-journal
140 |
141 | #[osd.23]
142 | # host = burnupiX
143 | # osd data = /srv/osd-device-23-data
144 | # osd journal = /dev/disk/by-partlabel/osd-device-23-journal
145 |
146 |
--------------------------------------------------------------------------------
/aging/makecephconf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | import argparse
3 | import os
4 | import yaml
5 | import copy
6 |
7 | def read_config(config_file):
8 | config = {}
9 | try:
10 | with file(config_file) as f:
11 | g = yaml.safe_load_all(f)
12 | for new in g:
13 | config.update(new)
14 | except IOError, e:
15 | raise argparse.ArgumentTypeError(str(e))
16 | return config
17 |
18 | def parse_args():
19 | parser = argparse.ArgumentParser(description='Continuously run ceph tests.')
20 | parser.add_argument(
21 | '--target',
22 | required=True,
23 | help='Directory where the config files should go.',
24 | )
25 | parser.add_argument(
26 | 'config_file',
27 | help='YAML config file.',
28 | )
29 | args = parser.parse_args()
30 | return args
31 |
32 | def populate(l, name, value):
33 | name = name.replace("_", " ")
34 | l.append(" %s = %s" % (name, value))
35 |
36 | def mkosds(lists, yaml):
37 | i = 0
38 | for server in yaml.get('osd_servers', []):
39 | for j in xrange(0, yaml.get('osds_per_server', 0)):
40 | name = "osd.%d" % i
41 | lists[name] = []
42 | lists[name].append(" host = %s" % server)
43 | lists[name].append(" osd data = /srv/osd-device-%d-data" % j)
44 | lists[name].append(" osd journal = /dev/disk/by-partlabel/osd-device-%d-journal" % j)
45 | i += 1
46 |
47 | def writescript(f, param, value, conf):
48 | for fs, rtconf in sorted(runtests_conf.iteritems()):
49 | pdir = param
50 | if value:
51 | pdir = "%s_%s" % (param, value)
52 | f.write("%s --conf %s --archive %s/%s/%s %s\n" % (runtests_exec, conf, outdir, fs, pdir, rtconf))
53 |
54 | def parametric(lists, yaml):
55 | if "global" not in lists:
56 | lists["global"] = []
57 | scriptname = "%s/runme.sh" % target
58 | f = open(scriptname, 'w')
59 | f.write("#!/bin/bash\n")
60 |
61 | # the default
62 | filename = "%s/default.ceph.conf" % target
63 | writefile(lists, filename)
64 | writescript(f, "default", "", filename)
65 |
66 | for param, value in sorted(yaml.iteritems()):
67 | if isinstance(value, dict):
68 | lc = copy.deepcopy(lists)
69 | for k, v in sorted(value.iteritems()):
70 | populate(lc.get("global"), k, v)
71 | filename = "%s/%s.ceph.conf" % (target, param)
72 | writefile(lc, filename)
73 | writescript(f, param, "", filename)
74 | elif isinstance(value, list):
75 | for vi in value:
76 | lc = copy.deepcopy(lists)
77 | populate(lc.get("global"), param, vi)
78 | filename = "%s/%s_%s.ceph.conf" % (target, param, vi)
79 | writefile(lc, filename)
80 | writescript(f, param, vi, filename)
81 | else:
82 | lc = copy.deepcopy(lists)
83 | populate(lc.get("global"), param, value)
84 | filename = "%s/%s_%s.ceph.conf" % (target, param, value)
85 | writefile(lc, filename)
86 | writescript(f, param, value, filename)
87 | f.close()
88 | os.chmod(scriptname, 0755)
89 |
90 | def writefile(lists, out):
91 | f = open(out, 'w')
92 | # print out
93 | for k, v in sorted(lists.iteritems()):
94 | f.write("[%s]\n" % k)
95 | for line in v:
96 | f.write("%s\n" % line)
97 | f.write("\n")
98 | f.close()
99 |
100 | target = ""
101 | outdir = ""
102 | runtests_exec = ""
103 | runtests_conf = {}
104 |
105 | if __name__ == '__main__':
106 | ctx = parse_args()
107 | config = read_config(ctx.config_file)
108 |
109 | target = os.path.abspath(ctx.target)
110 | os.system("mkdir -p -m0755 -- %s" % target)
111 |
112 | settings = config.get("settings", {})
113 | runtests_exec = settings.get("runtests_exec", "")
114 | runtests_conf = settings.get("runtests_conf", {})
115 | outdir = settings.get("outdir", "")
116 |
117 | default = config.get("default", {})
118 | lists = {}
119 | for section in default:
120 | lists[section] = []
121 | for k, v in default.get(section).iteritems():
122 | populate(lists.get(section), k, v)
123 | mkosds(lists, config.get("settings", {}))
124 | parametric(lists, config.get("parametric", {}))
125 |
--------------------------------------------------------------------------------
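
As a quick illustration of the helpers above, the sketch below (not part of the original tool, and assuming PyYAML is installed so the module imports cleanly under Python 2) shows how populate() and writefile() turn a key/value pair into a ceph.conf section:

    import makecephconf

    lists = {"global": []}
    # populate() swaps underscores for spaces and indents the option line
    makecephconf.populate(lists["global"], "osd_op_threads", 8)
    makecephconf.writefile(lists, "/tmp/example.ceph.conf")
    # /tmp/example.ceph.conf now reads:
    # [global]
    #         osd op threads = 8
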
/aging/makecephconf.yaml:
--------------------------------------------------------------------------------
1 | settings:
2 | osd_servers: [burnupiX]
3 | osds_per_server: 8
4 |
5 | outdir: "/data/parametric/bobtail/SAS2208-JBOD-8spinning"
6 | runtests_exec: "/home/nhm/src/ceph-tools/aging/runtests.py"
7 | runtests_conf:
8 | btrfs: "/home/nhm/src/ceph-tools/aging/runtests.btrfs.yaml"
9 | xfs: "/home/nhm/src/ceph-tools/aging/runtests.xfs.yaml"
10 | ext4: "/home/nhm/src/ceph-tools/aging/runtests.ext4.yaml"
11 |
12 |
13 | default:
14 | global:
15 | log_to_syslog: "false"
16 | log_file: "/var/log/ceph/$name.log"
17 | auth_cluster_required: "none"
18 | auth_service_required: "none"
19 | auth_client_required: "none"
20 | filestore_xattr_use_omap: "true"
21 |
22 | mon:
23 | mon_osd_data: "/srv/mon.$id"
24 | mon.a:
25 | host: "burnupiX"
26 | mon_addr: "127.0.0.1:6789"
27 |
28 | parametric:
29 | debugging:
30 | debug_lockdep: "0/0"
31 | debug_context: "0/0"
32 | debug_crush: "0/0"
33 | debug_mds: "0/0"
34 | debug_mds_balancer: "0/0"
35 | debug_mds_locker: "0/0"
36 | debug_mds_log: "0/0"
37 | debug_mds_log_expire: "0/0"
38 | debug_mds_migrator: "0/0"
39 | debug_buffer: "0/0"
40 | debug_timer: "0/0"
41 | debug_filer: "0/0"
42 | debug_objecter: "0/0"
43 | debug_rados: "0/0"
44 | debug_rbd: "0/0"
45 | debug_journaler: "0/0"
46 | debug_objectcacher: "0/0"
47 | debug_client: "0/0"
48 | debug_osd: "0/0"
49 | debug_optracker: "0/0"
50 | debug_objclass: "0/0"
51 | debug_filestore: "0/0"
52 | debug_journal: "0/0"
53 | debug_ms: "0/0"
54 | debug_mon: "0/0"
55 | debug_monc: "0/0"
56 | debug_paxos: "0/0"
57 | debug_tp: "0/0"
58 | debug_auth: "0/0"
59 | debug_finisher: "0/0"
60 | debug_heartbeatmap: "0/0"
61 | debug_perfcounter: "0/0"
62 | debug_rgw: "0/0"
63 | debug_hadoop: "0/0"
64 | debug_asok: "0/0"
65 | debug_throttle: "0/0"
66 |
67 | osd_op_threads: [1, 4, 8]
68 | osd_disk_threads: [2, 4, 8]
69 | filestore_op_threads: [1, 4, 8]
70 |
71 | flush_true:
72 | filestore_flush_min: 0
73 | filestore_flusher: "true"
74 |
75 | flush_false:
76 | filestore_flush_min: 0
77 | filestore_flusher: "false"
78 |
79 | journal_aio: ["true"]
80 | ms_nocrc: ["true"]
81 |
82 | big_bytes:
83 | filestore_queue_max_bytes: 1048576000
84 | filestore_queue_committing_max_bytes: 1048576000
85 | journal_max_write_bytes: 1048576000
86 | journal_queue_max_bytes: 1048576000
87 | ms_dispatch_throttle_bytes: 1048576000
88 |     objecter_inflight_op_bytes: 1048576000
89 |
90 | big_ops:
91 | filestore_queue_max_ops: 5000
92 | filestore_queue_committing_max_ops: 5000
93 | journal_max_write_entries: 1000
94 | journal_queue_max_ops: 5000
95 | objecter_inflight_ops: 8192
96 |
97 | small_bytes:
98 | filestore_queue_max_bytes: 10485760
99 | filestore_queue_committing_max_bytes: 10485760
100 | journal_max_write_bytes: 10485760
101 | journal_queue_max_bytes: 10485760
102 | ms_dispatch_throttle_bytes: 10485760
103 |     objecter_inflight_op_bytes: 10485760
104 |
105 | small_ops:
106 | filestore_queue_max_ops: 50
107 | filestore_queue_committing_max_ops: 50
108 | journal_max_write_entries: 10
109 | journal_queue_max_ops: 50
110 | objecter_inflight_ops: 128
111 |
112 |
--------------------------------------------------------------------------------
/aging/runme.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ./runtests.py --archive=/data/8spinning-8proc-dc/SAS2208-r0x8-bobtail/btrfs/ runtests.btrfs.yaml
4 | ./runtests.py --archive=/data/8spinning-8proc-dc/SAS2208-r0x8-bobtail/xfs/ runtests.xfs.yaml
5 | ./runtests.py --archive=/data/8spinning-8proc-dc/SAS2208-r0x8-bobtail/ext4/ runtests.ext4.yaml
6 |
7 |
--------------------------------------------------------------------------------
/aging/runtests.btrfs.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | head: nhm@localhost
3 | clients: nhm@localhost
4 | servers: nhm@localhost
5 | mons: nhm@localhost
6 | osds_per_node: 8
7 | fs: btrfs
8 | mkfs_opts: -l 16k -n 16k
9 | mount_opts: -o noatime
10 | ceph.conf: /home/nhm/src/ceph-tools/aging/ceph.conf
11 | iterations: 1
12 | rebuild_every_test: True
13 | radosbench:
14 | op_sizes: [4096,131072,4194304]
15 | modes: ['write', 'seq']
16 | time: 300
17 | concurrent_ops: [2,32]
18 | concurrent_procs: 8
19 |
--------------------------------------------------------------------------------
/aging/runtests.ext4.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | head: nhm@localhost
3 | clients: nhm@localhost
4 | servers: nhm@localhost
5 | mons: nhm@localhost
6 | osds_per_node: 8
7 | fs: ext4
8 | mount_opts: -o noatime,user_xattr
9 | ceph.conf: /home/nhm/src/ceph-tools/aging/ceph.conf
10 | iterations: 1
11 | rebuild_every_test: True
12 | radosbench:
13 | op_sizes: [4096,131072,4194304]
14 | modes: ['write', 'seq']
15 | time: 300
16 | concurrent_ops: [2,32]
17 | concurrent_procs: 8
18 |
--------------------------------------------------------------------------------
/aging/runtests.xfs.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | head: nhm@localhost
3 | clients: nhm@localhost
4 | servers: nhm@localhost
5 | mons: nhm@localhost
6 | osds_per_node: 8
7 | fs: xfs
8 | mkfs_opts: -f -i size=2048
9 | mount_opts: -o noatime
10 | ceph.conf: /home/nhm/src/ceph-tools/aging/ceph.conf
11 | iterations: 1
12 | rebuild_every_test: True
13 | radosbench:
14 | op_sizes: [4096, 131072, 4194304]
15 | modes: ['write', 'seq']
16 | time: 300
17 | concurrent_ops: [2,32]
18 | concurrent_procs: 8
19 |
--------------------------------------------------------------------------------
/aging/runtests.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | head: nhm@localhost
3 | clients: nhm@localhost
4 | servers: nhm@localhost
5 | mons: nhm@localhost
6 | osds_per_node: 6
7 | fs: btrfs
8 | mkfs_opts: -l 16k -n 16k
9 | mount_opts: -o noatime
10 | ceph.conf: /home/nhm/src/ceph-tools/aging/ceph.conf
11 | iterations: 1
12 | rebuild_every_test: True
13 | radosbench:
14 | pool: rados-bench
15 | op_sizes: [4096,131072,4194304]
16 | time: 300
17 | concurrent_ops: [16,256]
18 |
--------------------------------------------------------------------------------
/analysis/log_analyzer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | import gzip
4 | import os
5 | import os.path
6 | import re
7 | import sys
8 | import datetime
9 | from datetime import datetime
10 |
11 | tracker_regex = re.compile('.*reqid: (.+), seq: ([0-9]+), time: (\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d\.\d\d\d\d\d\d), event: (.*), request: (.*)')
12 |
13 | def wrapgz(gfilename):
14 | def retval():
15 | gfile = gzip.open(gfilename, 'rb')
16 | gfile.__exit__ = lambda: gfile.close()
17 | return gfile
18 | return (gfilename, retval)
19 |
20 | def wrap(filename):
21 | def retval():
22 | nfile = open(filename, 'rb')
23 | return nfile
24 | return (filename, retval)
25 |
26 | def get_logs(path):
27 | output = {}
28 | output['config'] = lambda: open(os.path.join(path, 'config.yaml'), 'r')
29 | output['osd'] = {}
30 | output['client'] = {}
31 | for path, dirs, files in os.walk(os.path.abspath(path)):
32 | for filename in files:
33 | match = re.match('osd.([0-9]+).log.gz', filename)
34 | if match:
35 | fn = os.path.join(path, filename)
36 | output['osd'][int(match.group(1))] = wrapgz(
37 | os.path.join(path, filename))
38 | match = re.match('osd.([0-9]+).log', filename)
39 | if match and not int(match.group(1)) in output['osd']:
40 | fn = os.path.join(path, filename)
41 | output['osd'][int(match.group(1))] = wrap(
42 | os.path.join(path, filename))
43 | match = re.match('client.([0-9]+).log.gz', filename)
44 | if match:
45 | fn = os.path.join(path, filename)
46 | output['client'][int(match.group(1))] = wrapgz(
47 | os.path.join(path, filename))
48 | match = re.match('client.([0-9]+).log', filename)
49 | if match and not int(match.group(1)) in output['client']:
50 | fn = os.path.join(path, filename)
51 | output['client'][int(match.group(1))] = wrap(
52 | os.path.join(path, filename))
53 | return output
54 |
55 | def parse_tracker_line(line):
56 | retval = {}
57 | match = tracker_regex.match(line)
58 | if match:
59 | retval['reqid'] = match.group(1)
60 | retval['seq'] = int(match.group(2))
61 | retval['time'] = datetime.strptime(
62 | match.group(3), '%Y-%m-%d %H:%M:%S.%f'
63 | )
64 | retval['event'] = match.group(4)
65 | retval['request'] = match.group(5)
66 | return retval
67 | return None
68 |
69 | class Request:
70 | def __init__(self):
71 | self.parsed = []
72 | self.events = []
73 | self.last_event = None
74 | self.first_event = None
75 | self._primary = -1
76 | self.osds = []
77 |
78 |
79 | def add_event(self, parsed):
80 | if self.parsed == []:
81 | self.last_event = parsed['time']
82 | self.first_event = parsed['time']
83 | self.parsed.append(parsed)
84 | self.events.append((parsed['time'], parsed['event'], parsed['osd']))
85 | self.events.sort()
86 | if self.last_event < parsed['time']:
87 | self.last_event = parsed['time']
88 | if self.first_event > parsed['time']:
89 | self.first_event = parsed['time']
90 | if parsed['event'] == 'op_applied':
91 | self._primary = parsed['osd']
92 | if parsed['osd'] not in self.osds:
93 | self.osds.append(parsed['osd'])
94 | self.osds.sort()
95 |
96 | def duration(self):
97 | return (self.last_event - self.first_event).total_seconds()
98 |
99 | def __repr__(self):
100 | return str(self.events) + " " + \
101 | str(self.duration()) + " " + self.parsed[0]['reqid']
102 |
103 | def pretty_print(self):
104 | outstr = "reqid: %s, duration: %s"%(
105 | self.parsed[0]['reqid'],str(self.duration()))
106 | outstr += "\n=====================\n"
107 | for (time, event, osd) in self.events:
108 | outstr += "%s (osd.%s): %s\n"%(str(time), str(osd), event)
109 | outstr += "=====================\n"
110 | return outstr
111 |
112 | def primary(self):
113 | return self._primary
114 |
115 | def replicas(self):
116 | return self.osds
117 |
118 |
119 | requests = {}
120 |
121 | logs = get_logs(sys.argv[1])
122 |
123 | for i, (fn, func) in logs['osd'].iteritems():
124 | with func() as f:
125 | for line in f.readlines():
126 | parsed = parse_tracker_line(line)
127 | if not parsed or parsed['reqid'] == 'unknown.0.0:0':
128 | continue
129 | parsed['osd'] = i
130 | if parsed['reqid'] not in requests:
131 | requests[parsed['reqid']] = Request()
132 | requests[parsed['reqid']].add_event(parsed)
133 |
134 | all_requests = [(i.duration(), i) for i in requests.itervalues()]
135 | all_requests.sort()
136 |
137 | pairs = {}
138 | for _, i in all_requests:
139 | if tuple(i.replicas()) not in pairs:
140 | pairs[tuple(i.replicas())] = 0
141 | pairs[tuple(i.replicas())] += 1
142 | print pairs
143 |
144 | osds = {}
145 | for _, i in all_requests:
146 | if i.primary() not in osds:
147 | osds[i.primary()] = 0
148 | osds[i.primary()] += 1
149 |
150 | print osds
151 |
152 | for _, i in all_requests[:-100:-1]:
153 | print i.pretty_print()
154 |
--------------------------------------------------------------------------------
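
For reference, parse_tracker_line() only matches OSD optracker dump lines of the shape captured by tracker_regex above. The line below is a hypothetical example of that shape, shown only to illustrate the fields the script extracts:

    # assumes parse_tracker_line() from log_analyzer.py is in scope
    sample = ("... reqid: client.4121.0:7, seq: 7, "
              "time: 2013-01-01 12:00:00.000123, event: op_applied, "
              "request: osd_op(...)")
    parsed = parse_tracker_line(sample)
    print parsed['reqid'], parsed['seq'], parsed['event']
    # -> client.4121.0:7 7 op_applied
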
/analysis/log_threadpool_analyzer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | # This program is used to parse ceph osd log files with threadpool debugging
4 | # set at 15 or higher. These can be generated with ceph-osd or
5 | # test_filestore_workloadgen.
6 | #
7 | # Usage:
8 | #
9 | # log_threadpool_analyzer.py OUT_FILE
10 |
11 | import os
12 | import os.path
13 | import re
14 | import sys
15 | import decimal
16 | import datetime
17 | from datetime import datetime,timedelta
18 |
19 | threads = {}
20 | waits = {}
21 | wc = 0
22 | waitstate = False
23 | sdate = ""
24 | edate = ""
25 |
26 | df = "%Y-%m-%d %H:%M:%S.%f"
27 | dsf = "%Y-%m-%d %H:%M:%S"
28 |
29 | def fcell(item, width):
30 | if isinstance(item, str):
31 | return item.rjust(width)[:width]
32 | if isinstance(item, int):
33 | return str(item).rjust(width)[:width]
34 | if isinstance(item, float):
35 | return ("%.2f" % item).rjust(width)[:width]
36 |
37 | filename = sys.argv[1]
38 | f = open(filename, 'rb')
39 | for line in f:
40 | words = line.split()
41 | date = "%s %s" % (words[0], words[1])
42 |
43 | edate = date
44 | if sdate == "":
45 | sdate = date
46 |
47 | if words[4] == 'FileStore::op_tp' and words[5] == 'worker':
48 | # Handle wait periods
49 | if waitstate == True:
50 | waits[wc]['done'] = date
51 | waitstate = False
52 | wc += 1
53 | if words[6] == 'waiting':
54 | waits[wc] = {}
55 | waits[wc]['start'] = date
56 | waitstate = True
57 | elif words[6] == "wq":
58 | blah,thread = words[7].split('::', 1)
59 | if thread not in threads:
60 | threads[thread] = {}
61 | action = words[8]
62 | item = words[10]
63 |
64 | if item not in threads[thread]:
65 | threads[thread][item] = {}
66 | length = len(threads[thread][item])
67 | if action == 'start':
68 | length += 1;
69 | threads[thread][item][length] = {}
70 |
71 | threads[thread][item][length][action] = date
72 |
73 | seconds = {}
74 | for wait in waits:
75 | if 'start' not in waits[wait] or 'done' not in waits[wait]:
76 | continue
77 | start = datetime.strptime(waits[wait]['start'], df)
78 | done = datetime.strptime(waits[wait]['done'], df)
79 |
80 | while (done - start).total_seconds() > 0:
81 | second = start.strftime(dsf)
82 | nt = start.replace(microsecond=0) + timedelta(seconds=1)
83 | if (nt > done):
84 | nt = done
85 | delta = nt - start
86 |
87 | if second not in seconds:
88 | seconds[second] = {}
89 | if 'wait' in seconds[second]:
90 | seconds[second]['wait'] += delta.total_seconds()
91 | else:
92 | seconds[second]['wait'] = delta.total_seconds()
93 | start = nt
94 |
95 | for thread in threads:
96 | for item in threads[thread]:
97 | for instance in threads[thread][item]:
98 | if 'start' not in threads[thread][item][instance] or 'done' not in threads[thread][item][instance]:
99 | continue
100 |
101 | start = datetime.strptime(threads[thread][item][instance]['start'], df)
102 | done = datetime.strptime(threads[thread][item][instance]['done'], df)
103 |
104 | while (done - start).total_seconds() > 0:
105 | second = start.strftime(dsf)
106 |
107 | if second not in seconds:
108 | seconds[second] = {}
109 | if 'threads' not in seconds[second]:
110 | seconds[second]['threads'] = {}
111 | if thread not in seconds[second]['threads']:
112 | seconds[second]['threads'][thread] = {}
113 |
114 | nt = start.replace(microsecond=0) + timedelta(seconds=1)
115 | if (nt > done):
116 | nt = done
117 | if 'count' not in seconds[second]['threads'][thread]:
118 | seconds[second]['threads'][thread]['count'] = 1
119 | else:
120 | seconds[second]['threads'][thread]['count'] += 1
121 |
122 | delta = nt - start
123 |
124 | if 'time' not in seconds[second]['threads'][thread]:
125 | seconds[second]['threads'][thread]['time'] = delta.total_seconds()
126 | else:
127 | seconds[second]['threads'][thread]['time'] += delta.total_seconds()
128 |
129 | start = nt
130 |
131 |
132 | d = datetime.strptime(sdate, df).replace(microsecond=0)
133 | ed = datetime.strptime(edate, df).replace(microsecond=0)
134 | print fcell(" " * 19, 19), fcell("Waiting", 10),
135 | for thread in sorted(threads):
136 | print fcell(thread, 10),
137 | print fcell(thread, 10),
138 | print fcell(thread, 10),
139 | print ""
140 | print fcell("Timestamp", 19), fcell("% Time", 10),
141 | for thread in sorted(threads):
142 | print fcell("% Time", 10),
143 | print fcell("Op Count", 10),
144 | print fcell("Avg Op Tm", 10),
145 | print ""
146 | print fcell("-" * 19, 19), fcell("-" * 10, 10),
147 | for thread in sorted(threads):
148 | print fcell("-" * 10, 10),
149 | print fcell("-" * 10, 10),
150 | print fcell("-" * 10, 10),
151 | print ""
152 |
153 | while d <= ed:
154 | second = d.strftime(dsf)
155 | sdict = seconds.get(second, {})
156 | print fcell(second,19),
157 | wait = "%.2f%%" % float(sdict.get('wait', 0) * 100)
158 | print fcell(wait, 10),
159 | for thread in sorted(threads):
160 | trdict = sdict.get('threads', {})
161 | tdict = trdict.get(thread, {})
162 | util = float(tdict.get('time', 0))
163 | count = tdict.get('count', 0)
164 | print fcell("%.2f%%" % float(util * 100), 10),
165 | print fcell(count, 10),
166 | avgoptime = "N/A"
167 | if count > 0:
168 | avgoptime = 1000 * util / count
169 | print fcell(avgoptime, 10),
170 | print ""
171 |
172 | d += timedelta(seconds=1)
173 |
--------------------------------------------------------------------------------
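
The core of the script above is the per-second bucketing: an operation that spans a second boundary is split at each whole second, so every bucket only accumulates the fraction of the operation that actually fell inside it. A standalone sketch of that technique (the timestamps are made up):

    from datetime import datetime, timedelta

    start = datetime(2013, 1, 1, 12, 0, 0, 700000)   # hypothetical interval
    done = datetime(2013, 1, 1, 12, 0, 2, 100000)
    busy = {}
    while (done - start).total_seconds() > 0:
        nt = start.replace(microsecond=0) + timedelta(seconds=1)
        if nt > done:
            nt = done
        key = start.strftime("%Y-%m-%d %H:%M:%S")
        busy[key] = busy.get(key, 0) + (nt - start).total_seconds()
        start = nt
    # busy now holds roughly {..12:00:00: 0.3, ..12:00:01: 1.0, ..12:00:02: 0.1}
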
/analysis/strace_parser.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | # This program is used to parse system calls generated via stracing a program
4 | # like ceph-osd or test_filestore_workloadgen.
5 | #
6 | # strace invocation to use:
7 | #
8 | # strace -q -a1 -s0 -f -tttT -oOUT_FILE -e trace=file,desc,process,socket APPLICATION ARGUMENTS
9 | #
10 | # then run this like:
11 | #
12 | # strace_parser.py OUT_FILE
13 |
14 | import os
15 | import os.path
16 | import re
17 | import sys
18 | import decimal
19 | import datetime
20 | from datetime import datetime
21 |
22 | ops = ["writev", "syscall_306", "ftruncate", "openat", "open", "stat", "setxattr", "removexattr", "close", "lseek", "read", "write", "pwrite", "clone", "sync_file_range", "fsync", "getdents", "link", "unlink", "mkdir", "rmdir", "ioctl", "access", "fcntl", "rename"]
23 | threads = {}
24 | seconds = {}
25 | writev_bucket = {}
26 | first = 0
27 |
28 | def fcell(item):
29 | if isinstance(item, str):
30 | return item.rjust(9)[:9]
31 | if isinstance(item, int):
32 | return str(item).rjust(9)[:9]
33 | # return str(item).rjust(9)[:9]
34 | if isinstance(item, float):
35 | return ("%.2f" % item).rjust(9)[:9]
36 |
37 | last_sec = 0
38 | filename = sys.argv[1]
39 | f = open(filename, 'rb')
40 | for line in f:
41 | line = ' '.join(line.split())
42 | # print line
43 | words = line.split(" ", 2)
44 | thread = words[0]
45 | unixtime = words[1].split(".")[0]
46 |     if not (thread.isdigit() and unixtime.isdigit()):
47 | print "malformed line: %s" % line
48 | continue
49 |
50 | if first == 0:
51 | first = int(unixtime)
52 |
53 | thread = words[0]
54 | if thread not in threads:
55 | threads[thread] = {}
56 | second = int(unixtime) - first
57 | for s in xrange(last_sec, second+1):
58 | if s not in seconds:
59 | seconds[s] = {}
60 | for thread in threads:
61 | seconds[s][thread] = {}
62 | last_sec = second
63 |
64 | if thread not in seconds[second]:
65 | seconds[second][thread] = {}
66 |
67 | op_string = words[2]
68 | found = False
69 | for op in ops:
70 | add = False
71 | if op_string.startswith("<... %s " % op):
72 | found = True
73 | add = True
74 | elif op_string.startswith("%s(" % op):
75 | found = True
76 | if "unfinished" not in op_string:
77 | add = True
78 |
79 | if add is True:
80 | regex = "(\<)(\d+\.\d+)(\>)"
81 | match = re.search(regex, op_string)
82 | latency = float(match.group(2))
83 |             if op == "writev":
84 | regex = "(= )(\d+)( \<)"
85 | match = re.search(regex, op_string)
86 | return_code = int(match.group(2))
87 | if return_code not in writev_bucket:
88 | writev_bucket[return_code] = 1
89 | else:
90 | writev_bucket[return_code] += 1
91 |
92 |             if op == "syscall_306":
93 | print "syscall_306 latency: %s" % latency
94 | if op not in seconds[second][thread]:
95 | seconds[second][thread][op] = {}
96 | seconds[second][thread][op]['count'] = 1
97 | seconds[second][thread][op]['latency'] = latency
98 | seconds[second][thread][op]['latsum'] = latency
99 | else:
100 | cur_count = seconds[second][thread][op]['count']
101 | cur_latency = seconds[second][thread][op]['latency']
102 | seconds[second][thread][op]['count'] = cur_count + 1
103 | seconds[second][thread][op]['latency'] = (cur_latency * cur_count + latency) / (cur_count + 1)
104 | seconds[second][thread][op]['latsum'] += latency
105 |
106 | if found is False:
107 | print "Didn't find op in: %s" % op_string
108 |
109 | print fcell("second"),
110 | for op in ops:
111 | print fcell(op),
112 | print ""
113 |
114 | for second in seconds:
115 | counts = {}
116 | latencies = {}
117 | latsums = {}
118 | for op in ops:
119 | counts[op] = 0
120 | latencies[op] = 0
121 | latsums[op] = 0
122 |
123 | for thread in seconds[second]:
124 | th = seconds[second][thread]
125 | for op in ops:
126 | opdict = th.get(op, {})
127 | cur_count = counts[op]
128 | cur_latency = latencies[op]
129 | counts[op] += opdict.get('count', 0)
130 | if counts[op] > 0:
131 | latencies[op] = (cur_latency * cur_count + opdict.get('latency', 0)) / counts[op]
132 | latsums[op] += opdict.get('latsum', 0)
133 |
134 | print fcell(second),
135 | for op in ops:
136 |         if op == "writev":
137 | print fcell(counts.get(op, 0)),
138 | else:
139 | print fcell(latsums.get(op, 0)),
140 | print ""
141 | print ""
142 | print "writev call statistics:"
143 | print ""
144 | print "Write Size, Frequency"
145 | for key in sorted(writev_bucket.keys()):
146 | print "%s, %s" % (key, writev_bucket[key])
147 |
--------------------------------------------------------------------------------
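
The per-call latency comes from the trailing "<seconds>" field that strace appends when run with -T, as in the invocation shown at the top of the file. A small sketch of the extraction (the sample line is hypothetical; only its shape matters):

    import re

    op_string = 'write(3, ""..., 4096) = 4096 <0.000042>'
    match = re.search(r"(\<)(\d+\.\d+)(\>)", op_string)
    print float(match.group(2))   # -> 4.2e-05 seconds spent in the call
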
/cbt/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.pyo
3 |
--------------------------------------------------------------------------------
/cbt/readme.md:
--------------------------------------------------------------------------------
1 | CBT has moved to its own repository. You can find the new CBT location at:
2 |
3 | http://github.com/ceph/cbt
4 |
5 |
--------------------------------------------------------------------------------
/models/performance/FileStore.py:
--------------------------------------------------------------------------------
1 | #
2 | # This is intended to be able to simulate the overhead that
3 | # the RADOS filestore adds to standard I/O test patterns
4 | #
5 | # NOTE:
6 | # we are modeling the time to perform a single operation,
7 | # but these are fundamentally throughput models, so it should
8 | # be assumed that another operation will come immediately
9 | # after the one we are simulating.
10 | #
11 |
12 | import Poisson
13 |
14 | GIG = 1000000000
15 |
16 |
17 | class FileStore(object):
18 | """ Performance Modeling FileStore Simulation. """
19 |
20 | # vaguely reasonable parameters
21 | md_bsize = 4096 # unit of metadata read/write
22 | j_header = 4096 # size of a journal record header
23 | block_sz = 512 * 1024 # unit of write aggregation
24 | sync_time = 5000000 # flush interval (in micro-seconds)
25 |
26 | # magic tunables (to which we shouldn't be all that sensitive)
27 | md_fraction = .001 # fraction of disk containing metadata
28 | md_cache_sz = 2500 # number of objects we cache
29 | rd_cache_sz = 1 * GIG # available data cache
30 |
31 | CACHE_WARN = 0.05 # too high a hit rate is probably wrong
32 | warnings = "" # I didn't want these to come out in mid test
33 |
34 | def __init__(self, data_fs, journal_fs=None, journal_share=1):
35 | """ create a file store simulation
36 | data_fs -- SimFS for the data file system
37 |             journal_fs -- SimFS for the journal (None if using the data disk)
38 | journal_share -- how many OSDs share the journal device
39 | """
40 | self.data_fs = data_fs
41 | self.journal_fs = journal_fs
42 | self.journal_share = journal_share
43 |
44 | # bad approximations of typical seek distances
45 | self.md_seek = self.md_fraction * data_fs.disk.size
46 | self.seek = data_fs.disk.size
47 |
48 | def md_miss_rate(self, nobj):
49 | """ expected meta-data lookup cache miss rate """
50 | r = float(self.md_cache_sz) / float(nobj)
51 | #if r > self.CACHE_WARN and not "to meta-data cache" in self.warnings:
52 | # msg = "\n\t%d objects too few relative to meta-data cache" % (nobj)
53 | # self.warnings += msg
54 | return 1 - r if r < 1 else 0
55 |
56 | def d_miss_rate(self, nobj, obj_size):
57 | """ expected data lookup cache miss rate """
58 | r = float(self.rd_cache_sz) / (nobj * obj_size * self.journal_share)
59 | if r > self.CACHE_WARN and not "to data cache" in self.warnings:
60 | msg = "\n\t%d x %d byte objects too small relative to data cache"
61 | self.warnings += msg % (nobj, obj_size)
62 | return 1 - r if r < 1 else 0
63 |
64 | def md_reads(self, bsize, obj_size):
65 | """ number of metadata reads to find desired data """
66 | return 1
67 |
68 | def md_writes(self, bsize, obj_size):
69 | """ number of metadata updates to write to an object """
70 | return 0.7 # this is a fudge factor
71 |
72 | def read(self, bsize, obj_size, depth=1, nobj=2500):
73 | """ average time for reads """
74 |
75 | # figure out how much metadata we will actually read
76 | mdr = self.md_reads(bsize, obj_size) * self.md_miss_rate(nobj)
77 |
78 | # figure out how long it will take to do the I/O
79 | mt = self.data_fs.read(self.md_bsize, self.seek,
80 | seq=False, depth=depth)
81 | dt = self.data_fs.read(bsize, self.seek, seq=False, depth=depth)
82 | #print("FS-r: raw mdr=%f, mt=%d, dt=%d" % (mdr, mt, dt))
83 | dt *= self.d_miss_rate(nobj, obj_size)
84 | #print("FS-r: adj dt=%d" % (dt))
85 | return dt + (mdr * mt)
86 |
87 | def write(self, bsize, obj_size, depth=1, nobj=2500):
88 | """ average time for object writes """
89 |
90 | # figure out how much metadata we will actually read
91 | mdr = self.md_reads(bsize, obj_size) * self.md_miss_rate(nobj)
92 | lt = mdr * self.data_fs.read(self.md_bsize, self.seek,
93 | seq=False, depth=depth)
94 |
95 | mdw = self.md_writes(bsize, obj_size)
96 | if self.journal_fs == None: # journal on the data device
97 | jt = self.data_fs.write(self.j_header + bsize, self.seek,
98 | seq=True, sync=True, depth=depth)
99 | dt = self.data_fs.write(bsize, self.seek,
100 | seq=False, sync=True, depth=depth)
101 | dt *= self.d_miss_rate(nobj, obj_size)
102 | mt = mdw * self.data_fs.write(self.md_bsize, self.seek,
103 | seq=False, sync=True, depth=depth)
104 | return lt + jt + dt + mt
105 | else: # separate journal
106 | jt = self.journal_fs.write(self.j_header + bsize, self.seek,
107 | seq=False, sync=True, depth=depth)
108 | jt *= self.journal_share # FIX this seems wrong
109 | dt = self.data_fs.write(bsize, obj_size,
110 | seq=False, depth=depth)
111 | mt = mdw * self.data_fs.write(self.md_bsize, self.md_seek,
112 | seq=False, depth=depth)
113 | #print("FS-w: raw lt=%d, jt=%d, dt=%d, mt=%d" % (lt, jt, dt, mt))
114 |
115 | # compute expected metadata write aggregation
116 | ops_per_sync = self.sync_time / (dt + mt)
117 | PsO = Poisson.PnPlus(float(1) / nobj, ops_per_sync, 2)
118 | mt /= 1 + PsO # sloppy math
119 |
120 | # compute expected data aggregation and cache hits
121 | tot_blocks = nobj * obj_size / self.block_sz
122 | PsB = Poisson.PnPlus(float(1) / tot_blocks, ops_per_sync, 2)
123 | dt /= 1 + PsB # sloppy math
124 | dt *= self.d_miss_rate(nobj, obj_size) # sloppy math
125 | #print("FS-w: adj lt=%d, jt=%d, dt=%d, mt=%d" % (lt, jt, dt, mt))
126 |
127 | # in principle, journal and data writes are parallel
128 | if jt > dt + mt:
129 | if not "journal caps" in self.warnings:
130 | msg = "\n\tjournal caps throughput for %d parallel %d byte writes"
131 | self.warnings += msg % (self.journal_share, bsize)
132 | return lt + jt
133 | else:
134 | return lt + dt + mt
135 |
136 | def create(self):
137 | """ new file creation """
138 |
139 | # FIX: I just made this up
140 | HUGE = 1000000000000 # big enough to avoid cache hits
141 | return self.data_fs.create() + self.write(self.md_bsize, HUGE)
142 |
143 | def delete(self):
144 | """ file deletion """
145 |
146 | # FIX: I just made this up
147 | return self.data_fs.delete() + self.write(0, self.block_sz)
148 |
--------------------------------------------------------------------------------
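
To make the write-aggregation math above concrete, here is a worked example of the Poisson term using the class defaults (sync_time = 5,000,000 us, nobj = 2500) and an assumed 1000 us per data-plus-metadata write:

    import Poisson

    ops_per_sync = 5000000 / 1000             # ~5000 writes arrive per sync interval
    PsO = Poisson.PnPlus(1.0 / 2500, ops_per_sync, 2)
    print PsO                                 # ~0.59: chance an object sees 2+ writes
    print 1.0 / (1 + PsO)                     # so the metadata cost is scaled by ~0.63
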
/models/performance/Poisson.py:
--------------------------------------------------------------------------------
1 | #
2 | # probabilities of Poisson-distributed events
3 | #
4 |
5 | import math
6 |
7 |
8 | def Pn(rate, interval, n=1):
9 | """ probability of exactly N events during an interval
10 | rate -- average event rate
11 | interval -- sample period of interest
12 | n -- number of desired events
13 | """
14 | expect = float(rate) * interval
15 | p = math.exp(-expect)
16 | if n > 0:
17 | p *= (expect ** n)
18 | p /= math.factorial(n)
19 | return p
20 |
21 |
22 | def PnPlus(rate, interval, n=1):
23 | """ probability of N or more events during an interval
24 | rate -- average event rate
25 | interval -- sample period of interest
26 | n -- number of desired events
27 | """
28 | p = 1.0
29 | i = 0
30 | while i < n:
31 | p -= Pn(rate, interval, i)
32 | i += 1
33 | return p
34 |
--------------------------------------------------------------------------------
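
A short sanity check of the two helpers above: with an average rate of 0.5 events per unit time over an interval of 4 units, the expected count is 2.

    import Poisson

    print Poisson.Pn(0.5, 4, n=0)       # P(exactly 0) = e**-2, about 0.135
    print Poisson.Pn(0.5, 4, n=2)       # P(exactly 2), about 0.271
    print Poisson.PnPlus(0.5, 4, n=1)   # P(1 or more) = 1 - e**-2, about 0.865
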
/models/performance/Rados.py:
--------------------------------------------------------------------------------
1 | #
2 | # This is intended to be able to simulate the overhead that
3 | # a remote RADOS client experiences for standard test loads
4 | #
5 | # NOTE:
6 | #
7 | # All of the lower level models try to estimate single operation
8 | # latency (at a request depth, with amortized costs for async
9 | # operations) for a single resource (e.g. disk).
10 | #
11 | # This is a model for a parallel system. I found it difficult
12 | # to model parallel latencies, so I decided to do this one as a
13 | # throughput model. This is confusing because all of the quantities
14 | # we are dealing with are latencies ... so when I do some baffling
15 | # multiply or divide to a time, understand it as the inverse operation
16 | # on a throughput. I considered inverting everything to make these
17 | # computations more obvious, but the before-and-after inversions
18 | # were even more confusing.
19 | #
20 |
21 | # useful unit multipliers
22 | GIG = 1000000000
23 |
24 |
25 | class Rados(object):
26 | """ Performance Modeling RADOS Simulation. """
27 |
28 | nic_overhead = 0.00 # fraction of NIC we can't use
29 | null_resp = 1000 # NOP response time
30 | warnings = "" # save these up for reporting later
31 |
32 | def __init__(self, filestore,
33 | front_nic=10 * GIG, back_nic=10 * GIG,
34 | nodes=1, osd_per_node=1):
35 | """ create a RADOS simulation
36 | filestore -- simulation
37 | front_nic -- front side NIC speed
38 | back_nic -- back side NIC speed
39 | nodes -- number of nodes in the cluster
40 | osd_per_node -- number of OSDs per node
41 | """
42 |
43 | self.filestore = filestore
44 | self.num_nodes = nodes
45 | self.num_osds = nodes * osd_per_node
46 | self.osd_per_node = osd_per_node
47 | self.frontside = (1 - self.nic_overhead) * front_nic / 8
48 | self.backside = (1 - self.nic_overhead) * back_nic / 8
49 |
50 | def network(self, bsize, bw):
51 | """ expected time to do send/receive a message
52 | bsize -- size of the message to be sent
53 | bw -- NIC bandwidth for this operation
54 | """
55 | SECOND = 1000000
56 | return SECOND * bsize / bw
57 |
58 | def read(self, bsize, obj_size, nobj=2500, depth=1, clients=1):
59 | """ average time for reads (modeled as throughput)
60 | bsize -- size of the read
61 |             obj_size -- size of the object we are reading from
62 | nobj -- number of objects over which reads are spread
63 | depth -- number of concurrent requests per client
64 | clients -- number of parallel clients generating load
65 | """
66 |
67 | # how does spreading affect depth, numobj
68 | nobj /= self.num_osds
69 | if depth * clients < self.num_osds:
70 | d = 1
71 | else:
72 | d = depth * clients / self.num_osds
73 |
74 | # at what rate can filestore process these requests
75 | ftime = self.filestore.read(bsize, obj_size, depth=d, nobj=nobj)
76 | ftime /= self.num_osds
77 |
78 | # at what rate can (shared) server NIC return responses
79 | stime = self.network(bsize,
80 | self.frontside * self.num_nodes / self.osd_per_node)
81 |
82 | # at what rate can (a single) client NIC accept responses
83 | ctime = self.network(bsize, self.frontside * clients)
84 |
85 | # RADOS throughput is the least of these
86 | if stime > ctime:
87 | net_worst = stime
88 | slowpoke = "server"
89 | else:
90 | net_worst = ctime
91 | slowpoke = "client"
92 | if net_worst > ftime:
93 | worst = net_worst
94 | if "byte reads" not in self.warnings:
95 | msg = "\n\t%s NIC caps throughput for %d byte reads"
96 | self.warnings += msg % (slowpoke, bsize)
97 | else:
98 | worst = ftime
99 |
100 | # and we have to add in something for the req/response
101 | return worst + self.null_resp / depth
102 |
103 | def write(self, bsize, obj_size, nobj=2500, depth=1, clients=1, copies=1):
104 | """ average time for object writes
105 | bsize -- size of the write
106 |             obj_size -- size of the object we are writing to
107 |             nobj -- number of objects over which writes are spread
108 | depth -- number of concurrent requests per client
109 | clients -- number of parallel clients generating load
110 | copies -- number of copies being made
111 | """
112 |
113 | # how does spreading affect depth, numobj
114 | nobj *= float(copies) / self.num_osds
115 | if depth * clients * copies < self.num_osds:
116 | d = 1
117 | else:
118 | d = depth * clients * copies / self.num_osds
119 |
120 | # at what rate can filestores process these requests
121 | ftime = self.filestore.write(bsize, obj_size, depth=d, nobj=nobj)
122 | ftime /= self.num_osds # many operate in parallel
123 | ftime *= copies # but they are also making copies
124 |
125 | # at what rate can (shared) primary server accept/replicate
126 | fsbw = self.frontside * self.num_nodes / self.osd_per_node
127 | bsbw = self.backside * self.num_nodes / self.osd_per_node
128 | stime = self.network(bsize, fsbw)
129 | stime += (copies - 1) * self.network(bsize, bsbw)
130 |
131 | # at what rate can (a single) client NIC generate writes
132 | ctime = self.network(bsize, self.frontside * clients)
133 |
134 | # RADOS throughput is the least of these
135 | if stime > ctime:
136 | net_worst = stime
137 | slowpoke = "server"
138 | else:
139 | net_worst = ctime
140 | slowpoke = "client"
141 | if net_worst > ftime:
142 | worst = net_worst
143 | if "byte writes" not in self.warnings:
144 | msg = "\n\t%s NIC caps throughput for %d-copy %d byte writes"
145 | self.warnings += msg % (slowpoke, copies, bsize)
146 | else:
147 | worst = ftime
148 |
149 | # and we have to add in something for the req/response
150 | return worst + self.null_resp / depth
151 |
152 | def create(self, depth=1):
153 | """ new object creation """
154 |
155 | return self.op_latency + self.filestore.create()
156 |
157 | def delete(self, depth=1):
158 | """ object deletion """
159 |
160 | return self.op_latency + self.filestore.delete()
161 |
--------------------------------------------------------------------------------
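
As a worked example of the network() term above: with the default 10 Gb/s front-side NIC and zero NIC overhead, frontside = 10e9 / 8 = 1.25e9 bytes/s, so a 4 MB (4194304 byte) response costs about 3355 us, which is the per-operation "latency" the throughput model charges to that NIC.

    # the same arithmetic, in microseconds per 4 MB message on a 10 Gb/s NIC
    print 1000000 * 4194304 / 1.25e9   # ~3355.4 us
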
/models/performance/TESTburnupi.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # this is a configuration file for a performance simulation
4 | #
5 |
6 | # size constants
7 | MEG = 1000 * 1000
8 | GIG = 1000 * MEG
9 | TERA = 1000 * GIG
10 |
11 | import test
12 |
13 | data = { # data storage devices
14 | 'fs': "xfs"
15 | }
16 |
17 | journal = { # journal devices
18 | 'device': "ssd",
19 | 'size': 1 * GIG,
20 | 'speed': 400 * MEG,
21 | 'iops': 30000,
22 | 'streams': 8,
23 | 'fs': "xfs",
24 | 'shared': True
25 | }
26 |
27 | cluster = { # cluster configuration
28 | 'front': 10 * GIG,
29 | 'back': 10 * GIG,
30 | 'nodes': 3,
31 | 'osd_per_node': 6
32 | }
33 |
34 | tests = { # what tests to run with what parameters
35 | # raw disk parameters and simulations
36 | 'DiskParms': False,
37 | 'FioJournal': True,
38 | 'FioRdepths': [1, 32],
39 | 'FioRsize': 16 * GIG,
40 |
41 | # FIO performance tests
42 | 'FioFdepths': [1, 32],
43 | 'FioFsize': 16 * GIG,
44 |
45 | # filestore performance tests
46 | 'SioFdepths': [16],
47 | 'SioFsize': 1 * GIG,
48 | 'SioFnobj': 2500,
49 |
50 | # RADOS performance tests
51 | 'SioRdepths': [16],
52 | 'SioRsize': 1 * GIG,
53 | 'SioRnobj': 2500 * 3 * 6, # multiply by number of OSDs
54 | 'SioRcopies': [2],
55 | 'SioRclients': [3],
56 | 'SioRinstances': [4]
57 | }
58 |
59 | test.test(data, journal, cluster, tests)
60 |
--------------------------------------------------------------------------------
/models/performance/disktest.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # disk simulation exerciser
4 | # prints out all of the interesting disk performance
5 | # parameters and simulated bandwidth for standard tests
6 |
7 | # mnemonic scale constants
8 | MILLION = 1000000 # capacities and speeds
9 |
10 | def kb(val):
11 | """ number of kilobytes (1024) in a block """
12 | return val / 1024
13 |
14 |
15 | def meg(val):
16 |     """ number of millions (10^6) of bytes """
17 | return val / 1000000
18 |
19 |
20 | def gig(val):
21 |     """ number of billions (10^9) of bytes """
22 | return val / 1000000000
23 |
24 |
25 | def iops(us):
26 | """ convert a us/operation into IOPS """
27 | return 1000000 / us
28 |
29 |
30 | def bw(bs, us):
31 | """ convert block size and us/operation into MB/s bandwidth """
32 | return bs / us
33 |
34 |
35 | def tptest(disk, filesize, depth):
36 | print "\t bs\t seq read\t seq write\t rnd read\t rnd write"
37 | print "\t -----\t --------\t ---------\t --------\t ---------"
38 | for bs in (4096, 128 * 1024, 4096 * 1024):
39 | tsr = disk.avgTime(bs, filesize, read=True, seq=True, depth=depth)
40 | tsw = disk.avgTime(bs, filesize, read=False, seq=True, depth=depth)
41 | trr = disk.avgTime(bs, filesize, read=True, seq=False, depth=depth)
42 | trw = disk.avgTime(bs, filesize, read=False, seq=False, depth=depth)
43 |
44 | if bw(bs, tsw) >= 10:
45 | format = "\t%5dK\t%7d MB/s\t%7d MB/s\t%7.1f MB/s\t%7.1f MB/s"
46 | else:
47 | format = "\t%5dK\t%7.1f MB/s\t%7.1f MB/s\t%7.1f MB/s\t%7.1f MB/s"
48 | print(format % (kb(bs), bw(bs, float(tsr)), bw(bs, float(tsw)),
49 | bw(bs, float(trr)), bw(bs, float(trw))))
50 | print("\t \t%7d IOPS\t%7d IOPS\t%7d IOPS\t%7d IOPS" %
51 | (iops(tsr), iops(tsw), iops(trr), iops(trw)))
52 |
53 |
54 | def disktest(disk):
55 | """ compute & display basic performance data for a simulated disk """
56 |
57 | print " basic disk parameters:"
58 | print "\tdrive size\t%d GB" % gig(disk.size)
59 | print "\trpm \t%d" % disk.rpm
60 | print "\txfer rate \t%d MB/s" % meg(disk.media_speed)
61 | print("\tseek time \t%d-%dus, avg %dus" %
62 | (disk.settle_read, disk.max_seek, disk.avg_seek))
63 | print "\twrite back\t%s" % ("True" if disk.do_writeback else "False")
64 | print "\tread ahead\t%s" % ("True" if disk.do_readahead else "False")
65 | print "\tmax depth \t%d" % disk.max_depth
66 |
67 | print "\n computed performance parameters:"
68 | rot = 0 if disk.rpm == 0 else (MILLION / (disk.rpm / 60))
69 | print "\trotation \t%dus" % rot
70 | print "\ttrack size \t%d bytes" % disk.trk_size
71 | print "\theads \t%d" % disk.heads
72 | print "\tcylinders \t%d" % disk.cylinders
73 |
74 | print "\n data transfer times:"
75 | print "\t size time iops"
76 | for bs in (4096, 128 * 1024, 4096 * 1024):
77 | t = disk.xferTime(bs)
78 | r = 1000000 / t
79 | print "\t%6dK %7dus %7d" % (kb(bs), t, r)
80 |
81 | print "\n seek times:"
82 | print "\t cyls read write"
83 | cyls = 1
84 | while cyls < disk.cylinders * 10:
85 | print("\t%7d %7dus %7dus" %
86 | (cyls, disk.seekTime(cyls), disk.seekTime(cyls, read=False)))
87 | cyls *= 10
88 |
--------------------------------------------------------------------------------
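
A note on units for the helpers above: simulated times are in microseconds, so bw() yields decimal MB/s (bytes per microsecond) and iops() yields operations per second. A quick check, with the helpers above in scope (floats are used to avoid Python 2 integer division):

    print bw(4096.0, 100)   # 4096 bytes in 100 us -> 40.96 MB/s
    print iops(100)         # 100 us per op -> 10000 IOPS
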
/models/performance/filestoretest.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # filestore simulation exerciser
4 |
5 | # mnemonic scale constants
6 | MILLION = 1000000 # capacities and speeds
7 |
8 | def kb(val):
9 | """ number of kilobytes (1024) in a block """
10 | return val / 1024
11 |
12 |
13 | def iops(us):
14 | """ convert a us/operation into IOPS """
15 | return 1000000 / us
16 |
17 |
18 | def bw(bs, us):
19 | """ convert block size and us/operation into MB/s bandwidth """
20 | return bs / us
21 |
22 |
23 | def fstoretest(fs, obj_size=4 * MILLION, nobj=2500, depth=1, crtdlt=False):
24 | """ compute & display standard filestore test results """
25 |
26 | if crtdlt:
27 | tc = fs.create()
28 | td = fs.delete()
29 | print "\t\t create\t delete"
30 | print "\t\t%6d IOPS\t %6d IOPS" % (iops(tc), iops(td))
31 | print ""
32 |
33 | print "\t bs\t rnd read\t rnd write"
34 | print "\t -----\t --------\t ---------"
35 | for bs in (4096, 128 * 1024, 4096 * 1024):
36 |         trr = fs.read(bs, obj_size, depth=depth, nobj=nobj)
37 | trw = fs.write(bs, obj_size, depth=depth, nobj=nobj)
38 |
39 | format = "\t%5dK\t%7.1f MB/s\t%7.1f MB/s"
40 | print(format %
41 | (kb(bs), bw(bs, float(trr)), bw(bs, float(trw))))
42 | print "\t \t %6d IOPS\t %6d IOPS" % (iops(trr), iops(trw))
43 |
--------------------------------------------------------------------------------
/models/performance/fstest.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # disk simulation exerciser
4 | # prints out all of the interesting disk performance
5 | # parameters and simulated bandwidth for standard tests
6 |
7 | # mnemonic scale constants
8 | MILLION = 1000000 # capacities and speeds
9 |
10 | def kb(val):
11 | """ number of kilobytes (1024) in a block """
12 | return val / 1024
13 |
14 |
15 | def iops(us):
16 | """ convert a us/operation into IOPS """
17 | return 1000000 / us
18 |
19 |
20 | def bw(bs, us):
21 | """ convert block size and us/operation into MB/s bandwidth """
22 | return bs / us
23 |
24 |
25 | def fstest(fs, filesize=16 * MILLION, depth=1, direct=False,
26 | sync=False, crtdlt=False):
27 |     """ compute & display standard fio test results for a filesystem on a disk
28 | fs -- file system to be tested
29 | filesize -- size of file in which I/O is being done
30 | depth -- number of concurrent requests
31 | direct -- I/O is direct (not buffered)
32 | sync -- updates are immediately flushed
33 | """
34 |
35 | if crtdlt:
36 | tc = fs.create(sync=sync)
37 | td = fs.delete(sync=sync)
38 | print "\t\t create\t delete"
39 | print "\t\t%6d IOPS\t %6d IOPS" % (iops(tc), iops(td))
40 | print ""
41 |
42 | print "\t bs\t seq read\t seq write\t rnd read\t rnd write"
43 | print "\t -----\t --------\t ---------\t --------\t ---------"
44 | for bs in (4096, 128 * 1024, 4096 * 1024):
45 | tsr = fs.read(bs, filesize, seq=True, depth=depth, direct=direct)
46 | tsw = fs.write(bs, filesize, seq=True, depth=depth, direct=direct,
47 | sync=sync)
48 | trr = fs.read(bs, filesize, seq=False, depth=depth, direct=direct)
49 | trw = fs.write(bs, filesize, seq=False, depth=depth, direct=direct,
50 | sync=sync)
51 |
52 | if bw(bs, tsw) >= 10:
53 | format = "\t%5dK\t%7d MB/s\t%7d MB/s\t%7.1f MB/s\t%7.1f MB/s"
54 | else:
55 | format = "\t%5dK\t%7.1f MB/s\t%7.1f MB/s\t%7.1f MB/s\t%7.1f MB/s"
56 | print(format %
57 | (kb(bs), bw(bs, tsr), bw(bs, tsw),
58 | bw(bs, float(trr)), bw(bs, float(trw))))
59 | print("\t \t %6d IOPS\t %6d IOPS\t %6d IOPS\t %6d IOPS" %
60 | (iops(tsr), iops(tsw), iops(trr), iops(trw)))
61 |
--------------------------------------------------------------------------------
/models/performance/radostest.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # RADOS simulation exerciser
4 |
5 | # mnemonic scale constants
6 | MILLION = 1000000 # capacities and speeds
7 | SECOND = 1000000 # times are in micro-seconds
8 |
9 | def kb(val):
10 | """ number of kilobytes (1024) in a block """
11 | return val / 1024
12 |
13 |
14 | def iops(us):
15 | """ convert a us/operation into IOPS """
16 | return SECOND / us
17 |
18 |
19 | def bw(bs, us):
20 | """ convert block size and us/operation into MB/s bandwidth """
21 | return bs / us
22 |
23 |
24 | def radostest(fs, obj_size=16 * MILLION, nobj=2500,
25 | clients=1, depth=1, copies=1, crtdlt=False):
26 |     """ compute & display standard RADOS test results """
27 |
28 | if crtdlt:
29 | tc = fs.create(depth=depth)
30 | td = fs.delete(depth=depth)
31 | print "\t\t create\t delete"
32 | print "\t\t%6d IOPS\t %6d IOPS" % (iops(tc), iops(td))
33 | print ""
34 |
35 | print "\t bs\t rnd read\t rnd write"
36 | print "\t -----\t --------\t ---------"
37 | for bs in (4096, 128 * 1024, 4096 * 1024):
38 | trr = fs.read(bs, obj_size, nobj=nobj, clients=clients, depth=depth)
39 | trw = fs.write(bs, obj_size, nobj=nobj, depth=depth,
40 | clients=clients, copies=copies)
41 |
42 | format = "\t%5dK\t%7.1f MB/s\t%7.1f MB/s"
43 | print(format %
44 | (kb(bs), bw(bs, float(trr)), bw(bs, float(trw))))
45 | print "\t \t %6d IOPS\t %6d IOPS" % (iops(trr), iops(trw))
46 |
--------------------------------------------------------------------------------
/models/reliability/ColumnPrint.py:
--------------------------------------------------------------------------------
1 | #
2 | # Ceph - scalable distributed file system
3 | #
4 | # Copyright (C) Inktank
5 | #
6 | # This is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License version 2.1, as published by the Free Software
9 | # Foundation. See file COPYING.
10 | #
11 | #
12 | # This is a class to output model results (times, capacities, durabilities,
13 | # and probabilities) in attractive, standard width columns.
14 | #
15 | # This class defines a standard string format for each line.
16 | # It includes methods to turn many data types into strings.
17 | # Create each output line as a vector of string (values),
18 | # and then print it with the chosen format.
19 | #
20 |
21 | from RelyFuncts import YEAR, DAY, HOUR, MINUTE, SECOND
22 |
23 |
24 | class ColumnPrint:
25 | """ a class to produce attractive columnar output """
26 |
27 | def __init__(self, headings, maxdesc=20):
28 | """ derive the format string """
29 | self.headings = headings
30 | Indent = 4
31 | DescWid = maxdesc
32 | ColWid = 12
33 |
34 | # figure out how wide our columns have to be
35 | wid = 0
36 | for s in self.headings:
37 | if len(s) > wid:
38 | wid = len(s)
39 | if wid >= ColWid:
40 | ColWid = wid + 1
41 |
42 | # generate the format string
43 | f = ""
44 | i = 0
45 | while i < Indent:
46 | f += ' '
47 | i += 1
48 |
49 | col = 0
50 | while col < len(self.headings):
51 | wid = DescWid if col == 0 else ColWid
52 | f += '%'
53 | if col == 0:
54 | f += "-%ds" % wid
55 | else:
56 | f += "%ds" % wid
57 | col += 1
58 |
59 | self.format = f
60 |
61 | def printLine(self, list):
62 | """ print an output line from a list (of string items) """
63 | print(self.format % tuple(list))
64 |
65 | def printHeadings(self):
66 | """ print out a set of column headings and separator line """
67 | print ""
68 | print self.format % tuple(self.headings)
69 |
70 | # how wide should the dash line be
71 | dashes = 0
72 | for s in self.headings:
73 | if len(s) > dashes:
74 | dashes = len(s)
75 |
76 | # create a line with that many dashes
77 | s = ""
78 | while dashes > 0:
79 | s += '-'
80 | dashes -= 1
81 |
82 | # create a tuple with the right number of dash strings
83 | l = list()
84 | i = 0
85 | while i < len(self.headings):
86 | l.append(s)
87 | i += 1
88 |
89 | print self.format % tuple(l)
90 |
91 |
92 | def printSize(sz, unit=1000):
93 | """ print out a size with the appropriate unit suffix """
94 |
95 | fmt10 = ["%dB", "%dKiB", "%dMiB", "%dGiB", "%dTiB", "%dPiB"]
96 | fmt2 = ["%dB", "%dKB", "%dMB", "%dGB", "%dTB", "%dPB"]
97 | fmt = fmt10 if unit == 1000 else fmt2
98 | i = 0
99 | while i < len(fmt):
100 | if sz < unit:
101 | break
102 | sz /= unit
103 | i += 1
104 | return fmt[i] % (sz)
105 |
106 |
107 | def printTime(t):
108 | """ print out a time in an appropriate unit """
109 | if t < 2 * MINUTE:
110 | return "%d seconds" % (t / SECOND)
111 | if t < 5 * HOUR:
112 | return "%d minutes" % (t / MINUTE)
113 | if t < 3 * DAY:
114 | return "%d hours" % (t / HOUR)
115 | if t < YEAR:
116 | return "%d days" % (t / DAY)
117 | if (t % YEAR) == 0:
118 | return "%d years" % (t / YEAR)
119 | else:
120 | return "%5.1f years" % (t / YEAR)
121 |
122 |
123 | def printDurability(d):
124 | """ print out a durability in a reasonable format """
125 | if d < .99999:
126 | return "%6.3f%%" % (d * 100)
127 | else:
128 | nines = 0
129 | while d > .9:
130 | nines += 1
131 | d -= .9
132 | d *= 10
133 | return "%d-nines" % (nines)
134 |
135 |
136 | def printProbability(p):
137 | """ print out a probability in a reasonable format """
138 | if p > .0000001:
139 | return "%9.6f%%" % (p * 100)
140 | else:
141 | return "%9.3e" % (p)
142 |
143 |
144 | def printExp(f):
145 | return "%9.3e" % (f)
146 |
147 |
148 | def printFloat(f):
149 | return "%9.3f" % (f)
150 |
--------------------------------------------------------------------------------
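
A minimal sketch of driving the formatter above (the headings and values are made up for illustration):

    from ColumnPrint import ColumnPrint, printSize, printDurability

    fmt = ColumnPrint(("storage", "durability", "loss/PiB"))
    fmt.printHeadings()
    fmt.printLine(("RAID-1: 2 cp", printDurability(0.999), printSize(2000000000000)))
    # printDurability(0.999) renders as "99.900%"; printSize uses this module's
    # decimal-by-default suffixes, so 2e12 bytes prints as "2TiB"
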
/models/reliability/Config.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # Ceph - scalable distributed file system
4 | #
5 | # Copyright (C) Inktank
6 | #
7 | # This is free software; you can redistribute it and/or
8 | # modify it under the terms of the GNU Lesser General Public
9 | # License version 2.1, as published by the Free Software
10 | # Foundation. See file COPYING.
11 | #
12 |
13 | """
14 | Default values and the object that contains them
15 | (e.g. passed into and back from the GUI)
16 | """
17 | from RelyFuncts import SECOND, MINUTE, HOUR, DAY, YEAR, FitRate
18 | from sizes import GB, MiB, TiB
19 |
20 |
21 | class Config(object):
22 |
23 | def __init__(self):
24 | """ default test parameters """
25 |
26 | self.period = 1.0 * YEAR
27 | self.verbose = "all"
28 |
29 | self.disk_type = "Enterprise"
30 | self.disk_size = 2 * TiB
31 | self.disk_nre = 1E-16
32 | self.disk_fit = 826
33 | self.disk_fit2 = 826
34 | self.nre_model = "fail"
35 |
36 | self.node_fit = 1000
37 |
38 | self.raid_vols = 2
39 | self.raid_replace = 6.0 * HOUR
40 | self.raid_recover = 20 * MiB
41 |
42 | self.rados_copies = 2
43 | self.rados_markout = 10.0 * MINUTE
44 | self.rados_recover = 50 * MiB
45 | self.rados_decluster = 200
46 | self.rados_fullness = 0.75
47 |
48 | self.obj_size = 1 * GB
49 | self.stripe_length = 1
50 |
51 | self.remote_sites = 1
52 | self.remote_recover = 10 * MiB
53 | self.remote_latency = 0.0 * SECOND
54 | self.majeure = FitRate(.001, YEAR)
55 | self.site_recover = 30.0 * DAY
56 |
--------------------------------------------------------------------------------
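
These defaults are plain attributes, so a caller can tweak them before building any models; a small sketch:

    from Config import Config
    from sizes import TiB

    cfg = Config()            # start from the defaults above
    cfg.disk_size = 4 * TiB   # model larger drives
    cfg.rados_copies = 3      # and 3-way replication
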
/models/reliability/DiskRely.py:
--------------------------------------------------------------------------------
1 | #
2 | # Ceph - scalable distributed file system
3 | #
4 | # Copyright (C) Inktank
5 | #
6 | # This is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License version 2.1, as published by the Free Software
9 | # Foundation. See file COPYING.
10 | #
11 |
12 | """
13 | basic disk reliability model
14 | the modeled unit is one drive
15 | """
16 |
17 | from RelyFuncts import YEAR, Pfail, Pn
18 | from sizes import GiB, TiB
19 |
20 | DISKSIZE = 2 * TiB
21 |
22 |
23 | class Disk(object):
24 |
25 | def __init__(self, size, fits, nre, desc, fits2=0):
26 | """ create a disk reliability simulation
27 | size -- in bytes
28 | fits -- failures per billion hours
29 | nre -- non-recoverable read errors per bit
30 | desc -- description for reporting purposes
31 | fits2 -- secondary failure rate
32 | """
33 | self.size = size
34 | self.rawsize = size
35 | self.fits = fits
36 | self.fits2 = fits if fits2 == 0 else fits2
37 | self.nre = nre
38 | self.description = desc
39 |
40 | self.P_rep = 0 # inapplicable
41 | self.L_rep = 0 # inapplicable
42 | self.P_site = 0 # inapplicable
43 | self.L_site = 0 # inapplicable
44 |
45 | def compute(self, period=YEAR, mult=1, secondary=False):
46 | """ compute probabilities and expected data loss for likely failures
47 | period -- time over which we want to model failures
48 | mult -- FIT rate multiplier (e.g. many parallel units)
49 | secondary -- this is a second (more likely) failure
50 | """
51 | fits = self.fits2 if secondary else self.fits
52 | self.P_drive = float(1) - Pfail(fits * mult, period, n=0)
53 | self.L_drive = self.size
54 | self.P_nre = self.p_nre(bytes=self.size * mult)
55 | self.L_nre = self.size
56 | self.dur = 1.0 - (self.P_drive + self.P_nre)
57 |
58 | def p_nre(self, bytes=0):
59 | """ probability of NRE during reading or writing
60 | bytes -- number of bytes to be written or read
61 | """
62 | if bytes == 0:
63 | bytes = self.size
64 |
65 | # uses a different flavor of probability function
66 | p = Pn(self.nre * bytes * 8, 1)
67 | return p
68 |
69 |
70 | class EnterpriseDisk(Disk):
71 | """ Spec'd Enterprise Drive (Seagate Barracuda) """
72 |
73 | def __init__(self, size=DISKSIZE):
74 | Disk.__init__(self, size=size, fits=826, nre=1.0e-15,
75 | desc="Enterprise drive")
76 |
77 |
78 | class ConsumerDisk(Disk):
79 | """ Spec'd Consumer Drive (Seagate Barracuda) """
80 |
81 | def __init__(self, size=DISKSIZE):
82 | Disk.__init__(self, size=size, fits=1320, nre=1.0e-14,
83 | desc="Consumer drive")
84 |
85 |
86 | class RealDisk(Disk):
87 | """ Specs from Schroeders 2007 FAST paper """
88 |
89 | def __init__(self, size=DISKSIZE):
90 | Disk.__init__(self, size=size, fits=7800, nre=1.0e-14,
91 | desc="real-world disk")
92 |
--------------------------------------------------------------------------------
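
A rough worked example of the drive model above (figures are approximate):

    from RelyFuncts import YEAR
    from DiskRely import EnterpriseDisk

    disk = EnterpriseDisk()       # 2 TB, 826 FITs, NRE 1e-15 per bit
    disk.compute(period=YEAR)
    # P_drive = 1 - exp(-826 * 8766 / 1e9), roughly 0.0072 (0.72% per year)
    print(disk.P_drive)
    print(disk.dur)               # roughly 0.977 once the NRE term is included
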
/models/reliability/MultiRely.py:
--------------------------------------------------------------------------------
1 | #
2 | # Ceph - scalable distributed file system
3 | #
4 | # Copyright (C) Inktank
5 | #
6 | # This is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License version 2.1, as published by the Free Software
9 | # Foundation. See file COPYING.
10 | #
11 |
12 | """
13 | Multi-site recovery model
14 |
15 | the modeled unit is, again, a Placement Group, but now we factor
16 | in multi-site replication, force majeure events and site repairs.
17 |
18 | This makes all of the functions in this module more complex than
19 | their single-site counterparts, because they factor in combinations
20 | of device and site failures
21 | """
22 |
23 | from RelyFuncts import SECOND, YEAR
24 | from sizes import MiB
25 |
26 |
27 | class MultiSite(object):
28 |
29 | def __init__(self, rados, site, speed=10 * MiB, latency=0, sites=1):
30 | """ create a site reliability simulation
31 | rados -- single site rados reliability model
32 | site -- site reliability model
33 | speed -- multi-site replication/recovery speed
34 | latency -- replication latency
35 | sites -- number of sites replicating a single object
36 | """
37 | self.rados = rados
38 | self.site = site
39 | self.sites = sites
40 | self.speed = speed
41 | self.latency = latency
42 | self.size = site.size # useful size of each site
43 | self.rawsize = site.size * sites
44 | self.description = "RADOS: %d-site, %d-cp" % (sites, rados.copies)
45 |
46 | def descend(self, period, p, f, survivors):
47 | """ recursive per site failure model
48 | period -- the period during which this site must remain up
49 | p -- accumulated probability of reaching this point
50 | f -- tuple of accumulated failures thus far
51 | (sites, all copies on site, NREs)
52 | survivors -- number of surviving replica sites
53 | """
54 |
55 | # probabilities of site or copy failures during period
56 | self.site.compute(period=period, mult=survivors)
57 | self.rados.compute(period=period)
58 | if survivors > 1:
59 | # we haven't yet reached the bottom of the tree
60 | self.descend(self.site.replace, p * self.site.P_site,
61 | (f[0] + 1, f[1], f[2]), survivors - 1)
62 | self.descend(self.rados.rebuild_time(self.speed),
63 | p * self.rados.P_drive,
64 | (f[0], f[1] + 1, f[2]), survivors - 1)
65 | obj_fetch = SECOND * self.rados.objsize / self.speed
66 | self.descend(obj_fetch, p * self.rados.P_nre,
67 | (f[0], f[1], f[2] + 1), survivors - 1)
68 | return
69 |
70 | # we are down to the last site
71 | if f[0] + f[1] == self.sites - 1: # these are last copies
72 | self.P_drive += p * self.rados.P_drive
73 | self.L_drive = self.rados.L_drive # sb 1/2 PG
74 | self.P_nre += p * self.rados.P_nre # FIX ... wrong bytecount
75 | self.L_nre = self.rados.L_nre # sb one object
76 | if f[0] == self.sites - 1: # this is last site
77 | self.P_site += p * self.site.P_site
78 | self.L_site = self.site.L_site
79 |
80 | def compute(self, period=YEAR):
81 | """ compute the failure tree for multiple sites """
82 |
83 | # initialize probabilities
84 | self.dur = 1.0
85 | self.P_site = 0
86 | self.P_drive = 0
87 | self.P_nre = 0
88 | self.P_rep = 0
89 | self.L_rep = 0
90 | self.L_nre = 0
91 | self.L_drive = 0
92 | self.L_site = 0
93 |
94 | # note a few important sizes
95 | disk_size = self.rados.size * self.rados.full
96 |
97 | # descend the tree of probabilities and tally the damage
98 | self.descend(period=period, p=1.0, f=(0, 0, 0), survivors=self.sites)
99 |
100 | # compute the probability/loss for asynchronous replication failure
101 | if self.latency > 0:
102 | self.site.compute(period=YEAR, mult=self.sites)
103 | self.P_rep = self.site.P_site
104 | self.L_rep = self.latency * self.speed / (2 * SECOND)
105 |
106 | # compute the (loss weighted) overall multi-site durability
107 | self.dur -= self.P_site
108 | self.dur -= self.P_drive * self.L_drive / disk_size
109 | self.dur -= self.P_nre * self.L_nre / disk_size
110 | self.dur -= self.P_rep * self.L_rep / disk_size
111 |
--------------------------------------------------------------------------------
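
A minimal sketch of wiring the pieces together, mirroring how main.py (further down) builds its multi-site tests:

    from DiskRely import EnterpriseDisk
    from RadosRely import RADOS
    from SiteRely import Site
    from MultiRely import MultiSite
    from RelyFuncts import YEAR
    from sizes import MiB

    rados = RADOS(EnterpriseDisk(), copies=2)
    site = Site()                                   # default disaster rate, no site repair
    multi = MultiSite(rados, site, speed=10 * MiB, sites=2)
    multi.compute(period=YEAR)
    print(multi.dur)                                # loss-weighted multi-site durability
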
/models/reliability/RadosRely.py:
--------------------------------------------------------------------------------
1 | # Ceph - scalable distributed file system
2 | #
3 | # Copyright (C) Inktank
4 | #
5 | # This is free software; you can redistribute it and/or
6 | # modify it under the terms of the GNU Lesser General Public
7 | # License version 2.1, as published by the Free Software
8 | # Foundation. See file COPYING.
9 | #
10 |
11 | """
12 | RADOS reliability model
13 | the modeled unit is a Placement Group
14 | """
15 |
16 | from RelyFuncts import SECOND, MINUTE, YEAR
17 | from sizes import KB, MB, GB
18 |
19 | MARKOUT = 10 * MINUTE
20 | RECOVER = 50 * 1000000
21 | FULL = 0.75
22 |
23 |
24 | class RADOS(object):
25 | """ model a single-volume RADOS OSD """
26 |
27 | def __init__(self, disk,
28 | pg=200, # recommended
29 | copies=2, # recommended minimum
30 | speed=RECOVER, # typical large object speed
31 | delay=MARKOUT, # default mark-out
32 | fullness=FULL, # how full are the volumes
33 | objsize=1 * GB, # average object size
34 | stripe=1, # typical stripe length
35 | nre_model="ignore"): # scrub largely eliminates these
36 | """ create a RADOS reliability simulation
37 | pg -- number of placement groups per OSD
38 | copies -- number of copies for these objects
39 | speed -- expected recovery rate (bytes/second)
40 | delay -- automatic mark-out interval (hours)
41 | objsize -- typical object size
42 | stripe -- typical stripe length
43 | nre_model -- how to handle NREs (ignore, error, fail)
44 | """
45 | self.disk = disk
46 | self.speed = speed
47 | self.pgs = pg
48 | self.copies = copies
49 | self.delay = delay
50 | self.full = fullness
51 | self.objsize = objsize
52 | self.stripe = stripe
53 | self.nre_model = nre_model
54 | self.size = disk.size # useful data
55 | self.rawsize = disk.size * copies # space consumed
56 | self.description = "RADOS: %d cp" % (copies)
57 |
58 | self.P_site = 0 # inapplicable
59 | self.L_site = 0 # inapplicable
60 | self.P_rep = 0 # inapplicable
61 | self.L_rep = 0 # inapplicable
62 |
63 | def rebuild_time(self, speed):
64 | """ expected time to recover from a drive failure """
65 | seconds = float(self.disk.size * self.full) / (speed * self.pgs)
66 | return seconds * SECOND
67 |
68 | def loss_fraction(self, sites=1):
69 | """ the fraction of objects that are lost when a drive fails """
70 |
71 | if self.copies <= 1 and sites <= 1:
72 | return 1
73 | return float(1) / (2 * self.pgs)
74 |
75 | def compute(self, period=YEAR, mult=1):
76 | """ probability of an arbitrary object surviving the period
77 | period -- time over which Pfail should be estimated
78 | mult -- FIT rate multiplier
79 | """
80 | self.dur = 1.0
81 |
82 | # probability of an initial failure (of any copy)
83 | n = mult * self.copies * self.stripe
84 | self.disk.compute(period=period, mult=n)
85 | self.P_drive = self.disk.P_drive
86 | self.P_nre = self.disk.P_drive
87 |
88 | # probability of losing the remaining copies
89 | n = self.pgs
90 | recover = float(self.delay) + self.rebuild_time(self.speed)
91 | copies = self.copies - 1
92 | while copies > 0:
93 | self.disk.compute(period=recover, mult=copies * n,
94 | secondary=True)
95 | self.P_drive *= self.disk.P_drive
96 | if copies > 1:
97 | self.P_nre *= self.disk.P_drive
98 | copies -= 1
99 |
100 | # amount of data to be read and written
101 | read_bytes = self.size * self.full
102 | write_bytes = read_bytes if self.copies > 1 else 0
103 |
104 | # fraction of objects affected by this failure
105 | fraction = self.loss_fraction()
106 | self.L_drive = read_bytes * fraction
107 | self.dur = 1.0 - (self.P_drive * fraction)
108 |
109 | # probability of and expected loss due to NREs
110 | if self.nre_model == "ignore":
111 | self.P_nre = 0
112 | self.L_nre = 0
113 | else: # we will lose the lesser of a PG or object
114 | self.P_nre *= self.disk.p_nre(bytes=read_bytes + write_bytes)
115 | pg = self.size * self.full * fraction
116 | self.L_nre = self.objsize if self.objsize < pg else pg
117 | self.dur -= self.P_nre * self.L_nre / (self.size * self.full)
118 |
--------------------------------------------------------------------------------
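
A rough worked example of rebuild_time() with the defaults above (approximate figures):

    from DiskRely import EnterpriseDisk
    from RadosRely import RADOS
    from RelyFuncts import SECOND

    rados = RADOS(EnterpriseDisk(), pg=200, copies=2)
    # 2e12 bytes * 0.75 full, declustered over 200 PGs at 50 MB/s
    # works out to roughly 150 seconds of recovery per peer drive
    print(rados.rebuild_time(rados.speed) / SECOND)   # ~150.0
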
/models/reliability/RaidRely.py:
--------------------------------------------------------------------------------
1 | #
2 | # Ceph - scalable distributed file system
3 | #
4 | # Copyright (C) Inktank
5 | #
6 | # This is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License version 2.1, as published by the Free Software
9 | # Foundation. See file COPYING.
10 | #
11 |
12 | """
13 | RAID reliability model
14 | the modeled unit is a RAID set
15 | """
16 |
17 | from RelyFuncts import SECOND, HOUR, YEAR
18 |
19 | DELAY = 6 * HOUR # pretty fast replacement
20 | RECOVER = 50000000 # recovery rate per disk read in from the set
21 | MODEL = "fail+error" # fail on detected NRE
22 | # undetected errors get through
23 | OBJSIZE = 1000000000 # default object size
24 |
25 |
26 | class RAID(object):
27 | """ model a mirrored raid set """
28 |
29 | def __init__(self, disk, volumes, recovery, delay, nre_model, objsize):
30 | """ create a RAID reliability simulation
31 | disk -- underlying disk
32 | volumes -- number of total volumes in set
33 | recovery -- rebuild rate (bytes/second)
34 | delay -- rebuild delay (hours)
35 | nre_model -- how to handle NREs
36 | objsize -- average object size for NRE damage
37 | """
38 | self.disk = disk
39 | self.speed = recovery
40 | self.volumes = volumes
41 | self.delay = delay
42 | self.nre_model = nre_model
43 | self.objsize = objsize
44 | self.parity = 0
45 | self.copies = 1
46 | self.size = disk.size
47 | self.rawsize = disk.size * volumes # size of a RAID set
48 |
49 | self.P_rep = 0 # inapplicable
50 | self.L_rep = 0 # inapplicable
51 | self.P_site = 0 # inapplicable
52 | self.L_site = 0 # inapplicable
53 |
54 | def rebuild_time(self):
55 | seconds = self.disk.size / self.speed
56 | return seconds * SECOND
57 |
58 | def compute(self, period=YEAR):
59 | """ probability of an arbitrary object surviving the period """
60 |
61 | # probability of an initial failure of any volume in the set
62 | self.disk.compute(period=period, mult=self.volumes)
63 |
64 | # how many disks do we need to do the recovery
65 | survivors = self.volumes - 1
66 | if self.parity > 0:
67 | required = self.volumes - self.parity
68 | elif self.copies > 1:
69 | required = 1
70 | else:
71 | required = self.volumes
72 |
73 | # can we recover from the loss of a single drive
74 | if survivors >= required:
75 | # probability of losing all but the last drive
76 | p = self.disk.P_drive
77 | recover = float(self.delay) + self.rebuild_time()
78 | while survivors > required:
79 | self.disk.compute(period=recover, mult=survivors,
80 | secondary=True)
81 | p *= self.disk.P_drive
82 | survivors -= 1
83 |
84 | # probability of losing the last drive
85 | self.disk.compute(period=recover, mult=survivors,
86 | secondary=True)
87 | self.P_drive = p * self.disk.P_drive
88 | self.L_drive = self.size
89 |
90 | # probability of NRE on the last drive
91 | read_bytes = self.disk.size * required
92 | write_bytes = self.disk.size
93 | self.P_nre = p * self.disk.p_nre(bytes=read_bytes + write_bytes)
94 | else: # we couldn't withstand even a single failure
95 | self.P_drive = self.disk.P_drive
96 | self.L_drive = self.disk.L_drive
97 | self.P_nre = self.disk.P_nre # semi-arbitrary
98 |
99 | # compute the expected loss due to NRE
100 | self.L_nre = self.size
101 | self.dur = 1.0 - (self.P_drive + self.P_nre)
102 | if self.nre_model == "ignore":
103 | self.P_nre = 0
104 | self.L_nre = 0
105 | if self.nre_model == "error":
106 | self.L_nre = self.objsize
107 | elif self.nre_model == "error+fail/2":
108 | self.L_nre = (self.size + self.objsize) / 2
109 |
110 | self.dur = 1.0 - (self.P_drive + self.P_nre)
111 |
112 |
113 | class RAID0(RAID):
114 | """ model a striped RAID set """
115 |
116 | def __init__(self, disk, volumes=2, # default 2 stripes
117 | recovery=0, # no recovery is possible
118 | delay=0, # so no rebuild delay either
119 | nre_model=MODEL, # optimum durability
120 | objsize=OBJSIZE):
121 |
122 | RAID.__init__(self, disk, volumes=volumes, recovery=recovery,
123 | delay=delay, nre_model=nre_model, objsize=objsize)
124 | self.parity = 0
125 | self.size = disk.size * volumes
126 | self.description = "RAID-0: %d vol" % (volumes)
127 |
128 |
129 | class RAID1(RAID):
130 | """ model a mirrored RAID set """
131 |
132 | def __init__(self, disk, volumes=2, # default 2 mirror
133 | recovery=RECOVER, # efficient recovery
134 | delay=DELAY, # moderately responsive
135 | nre_model=MODEL, # optimum durability
136 | objsize=OBJSIZE):
137 |
138 | RAID.__init__(self, disk, volumes=volumes, recovery=recovery,
139 | delay=delay, nre_model=nre_model, objsize=objsize)
140 | self.parity = 0
141 | self.copies = volumes
142 | self.description = "RAID-1: %d cp" % (volumes)
143 |
144 |
145 | class RAID5(RAID):
146 | """ model a RAID set with one parity volume """
147 |
148 | def __init__(self, disk, volumes=4, # default 3+1
149 | recovery=RECOVER / 3, # recovery from three volumes
150 | delay=DELAY, # moderately responsive
151 | nre_model=MODEL, # optimum durability
152 | objsize=OBJSIZE):
153 |
154 | RAID.__init__(self, disk, volumes=volumes, recovery=recovery,
155 | delay=delay, nre_model=nre_model, objsize=objsize)
156 | self.parity = 1
157 | self.size = disk.size * (volumes - 1)
158 | self.description = "RAID-5: %d+%d" % (volumes - 1, 1)
159 |
160 |
161 | class RAID6(RAID):
162 | """ model a RAID set with two parity volumes """
163 |
164 | def __init__(self, disk, volumes=8, # default 6+2
165 | recovery=RECOVER / 6, # recovery from six volumes
166 | delay=DELAY, # moderately responsive
167 | nre_model=MODEL, # optimum durability
168 | objsize=OBJSIZE):
169 |
170 | RAID.__init__(self, disk, volumes=volumes, recovery=recovery,
171 | delay=delay, nre_model=nre_model, objsize=objsize)
172 | self.parity = 2
173 | self.size = disk.size * (volumes - 2)
174 | self.description = "RAID-6: %d+%d" % (volumes - 2, 2)
175 |
--------------------------------------------------------------------------------
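
A minimal sketch of exercising one of the RAID models above (values are illustrative):

    from RelyFuncts import YEAR
    from DiskRely import EnterpriseDisk
    from RaidRely import RAID1

    raid1 = RAID1(EnterpriseDisk())   # 2-way mirror with the defaults above
    raid1.compute(period=YEAR)
    print(raid1.dur)                  # durability over one year
    print(raid1.P_drive)              # probability of losing both mirrors
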
/models/reliability/RelyFuncts.py:
--------------------------------------------------------------------------------
1 | #
2 | # Ceph - scalable distributed file system
3 | #
4 | # Copyright (C) Inktank
5 | #
6 | # This is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License version 2.1, as published by the Free Software
9 | # Foundation. See file COPYING.
10 | #
11 |
12 | """
13 | basic mathematical functions used in reliability modeling
14 |
15 | clients shouldn't have to remember what the denominator for FIT
16 | rates is, so routines are provided here to convert between FITs
17 | and more convenient units
18 | """
19 |
20 | import math
21 |
22 | # units of time (FIT rates)
23 | HOUR = 1
24 | MINUTE = float(HOUR) / 60
25 | SECOND = float(HOUR) / 3600
26 | DAY = float(HOUR * 24)
27 | YEAR = (HOUR * 24 * 365.25)
28 |
29 | BILLION = 1000000000
30 |
31 |
32 | def FitRate(events, period=YEAR):
33 | """ FIT rate corresponding to a rate in other unit
34 | events -- number of events
35 | period -- period in which that many events happen
36 | """
37 | return events * BILLION / period
38 |
39 |
40 | def mttf(fits):
41 | """ MTTF corresponding to an FIT rate
42 | fits --- FIT rate
43 | """
44 | return BILLION / fits
45 |
46 |
47 | def Pfail(fitRate, hours, n=1):
48 | """ probability of exactly n failures during an interval
49 | fitRate -- nominal FIT rate
50 | hours -- number of hours to await event
51 | n -- number of events for which we want estimate
52 | """
53 | expected = float(fitRate) * hours / 1000000000
54 | return Pn(expected, n)
55 |
56 |
57 | def Pfail_lt(fitRate, hours, n=1):
58 | """ probability of less than n failures during an interval
59 | fitRate -- nominal FIT rate
60 | hours -- number of hours to await event
61 | n -- number of events for which we want estimate
62 | """
63 | expected = float(fitRate) * hours / 1000000000
64 | tot = float(0)
65 | i = n - 1
66 | while i >= 0:
67 | p = Pn(expected, i)
68 | tot += p
69 | i -= 1
70 | return tot
71 |
72 |
73 | def Pfail_gt(fitRate, hours, n=1):
74 | """ probability of more than n failures during an interval
75 | fitRate -- nominal FIT rate
76 | hours -- number of hours to await event
77 | n -- number of events for which we want estimate
78 | """
79 | expected = float(fitRate) * hours / 1000000000
80 | tot = float(0)
81 | i = n
82 | while i >= 0:
83 | p = Pn(expected, i)
84 | tot += p
85 | i -= 1
86 | return 1.0E0 - tot
87 |
88 |
89 | def Pn(expected=1, n=0):
90 | """ probability of n events occurring when exp are expected
91 | exp -- number of events expected during this period
92 | n -- number of events for which we want estimate
93 | """
94 | p = math.exp(-expected)
95 | if n > 0:
96 | p *= (expected ** n)
97 | p /= math.factorial(n)
98 | return p
99 |
100 |
101 | def Punion(*probs):
102 | """ probability of the Union of multiple events
103 | probs -- a list of probabilities
104 | """
105 |
106 | # De Morgan: the union is the negation of the conjunction of the negations
107 | Pu = 1.0
108 | for p in probs:
109 | Pu *= (1 - p)
110 | return(1 - Pu)
111 |
112 |
113 | def multiFit(fitRate, total, required, repair, oneRepair=True):
114 | """ effective FIT rate required/total redundant components
115 | fitRate -- FIT rate of a single component
116 | total -- number of redundant components in system
117 | required -- number required for continued operation
118 | repair -- repair time (in hours)
119 | oneRepair -- all failures within a single repair period
120 |
121 | FITs(all_fail) =
122 | FITs(initial failure) * P(rest fail during repair period)
123 | """
124 |
125 | # FIX ... these are only approximations, I should do better
126 | fits = total * fitRate # initial FIT rate
127 | total -= 1 # we are down one
128 | if oneRepair:
129 | # all failures must occur within a single repair period
130 | # note: this very slightly under-estimates P_restfail in cases
131 | # where required > 1
132 | P_restfail = Pfail(total*fitRate, repair, n=total+1-required)
133 | fits *= P_restfail
134 | else:
135 | # each failure starts a new repair period
136 | # note: these numbers are small enough that expected reasonably
137 | # approximates the probability
138 | while total >= required:
139 | P_nextfail = total * fitRate * repair * 1.0E-9
140 | fits *= P_nextfail
141 | total -= 1
142 | return fits
143 |
--------------------------------------------------------------------------------
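
A few rough worked values for the conversion helpers above (approximate):

    from RelyFuncts import FitRate, mttf, Pfail, Punion, YEAR

    print(FitRate(1, YEAR))       # ~114,077 FITs = one expected failure per year
    print(mttf(826))              # ~1.2e6 hours (~138 years) for an 826 FIT drive
    print(Pfail(826, YEAR, n=0))  # ~0.9928, chance of a failure-free year
    print(Punion(0.01, 0.02))     # ~0.0298, union of two independent events
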
/models/reliability/Run.py:
--------------------------------------------------------------------------------
1 | #
2 | # Ceph - scalable distributed file system
3 | #
4 | # Copyright (C) Inktank
5 | #
6 | # This is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License version 2.1, as published by the Free Software
9 | # Foundation. See file COPYING.
10 | #
11 |
12 | from ColumnPrint import ColumnPrint, printTime, printSize, printFloat
13 | from ColumnPrint import printExp, printDurability, printProbability
14 | from RelyFuncts import mttf, YEAR
15 | from sizes import PiB
16 |
17 | """
18 | run a series of tests
19 | This is the module that knows how to run a simulation,
20 | and how to report the results.
21 | """
22 |
23 |
24 | def printParms(fmt, disk, raid, rados, site, multi):
25 | """
26 | print out the parameters associated with a test
27 | """
28 | if disk is not None:
29 | print "Disk Modeling Parameters"
30 | print " size: %10s" % printSize(disk.size)
31 | print(" FIT rate: %10d (MTBF = %s)" %
32 | (disk.fits, printTime(mttf(disk.fits))))
33 | print " NRE rate: %10.1E" % disk.nre
34 |
35 | if raid is not None:
36 | print "RAID parameters"
37 | print " replace: %16s" % printTime(raid.delay)
38 | if raid.speed > 0:
39 | print(" recovery rate: %7s/s (%s)" %
40 | (printSize(raid.speed),
41 | printTime(raid.rebuild_time())))
42 | print " NRE model: %10s" % raid.nre_model
43 | print " object size: %10s" % printSize(raid.objsize)
44 |
45 | if rados is not None:
46 | print "RADOS parameters"
47 | print " auto mark-out: %14s" % printTime(rados.delay)
48 | print(" recovery rate: %8s/s (%s/drive)" %
49 | (printSize(rados.speed),
50 |
51 | printTime(rados.rebuild_time(rados.speed))))
52 | print " osd fullness: %7d%%" % (rados.full * 100)
53 | print " declustering: %7d PG/OSD" % rados.pgs
54 | print " NRE model: %10s" % rados.nre_model
55 | print " object size: %7s" % \
56 | printSize(rados.objsize, unit=1024)
57 | print " stripe length:%7d" % rados.stripe
58 |
59 | if site is not None:
60 | print "Site parameters"
61 | s = 0 if multi is None else multi.sites
62 | if site.fits == 0:
63 | print " disasters: IGNORED"
64 | else:
65 | tf = mttf(site.fits)
66 | print(" disaster rate: %12s (%d FITS)" %
67 | (printTime(tf), site.fits))
68 | if site.replace == 0:
69 | print " site recovery: NEVER"
70 | else:
71 | print " site recovery: %11s" % printTime(site.replace)
72 |
73 | if multi is not None:
74 | print(" recovery rate: %8s/s (%s/PG)" %
75 | (printSize(multi.speed),
76 | printTime(multi.rados.rebuild_time(multi.speed))))
77 | if multi.latency == 0:
78 | print " replication: synchronous"
79 | else:
80 | print(" replication: asynchronous (%s delay)" %
81 | (printTime(multi.latency)))
82 |
83 |
84 | def Run(tests, period=YEAR, verbosity="all"):
85 | """ run and report a set of specified simulations
86 | tests -- actual list of simulations to run
87 | (print a header line for each None test)
88 | period -- simulation period
89 | verbosity -- output options
90 | """
91 |
92 | # column headings
93 | heads = ("storage", "durability",
94 | "PL(site)", "PL(copies)", "PL(NRE)", "PL(rep)", "loss/PiB")
95 |
96 | # column descriptions
97 | legends = [
98 | "storage unit/configuration being modeled",
99 | "probability of object survival*",
100 | "probability of loss due to site failures*",
101 | "probability of loss due to drive failures*",
102 | "probability of loss due to NREs during recovery*",
103 | "probability of loss due to replication failure*",
104 | "expected data loss per Petabyte*"
105 | ]
106 |
107 | # use the headings to generate a format
108 | format = ColumnPrint(heads, maxdesc=20)
109 |
110 | # figure out what output the caller wants
111 | headings = True
112 | parms = True
113 | descr = True
114 | if verbosity == "parameters":
115 | descr = False
116 | elif verbosity == "headings":
117 | parms = False
118 | descr = False
119 | elif verbosity == "data only":
120 | parms = False
121 | descr = False
122 | headings = False
123 |
124 | # introspect the tests to find the disk/raid/rados parameters
125 | disk = None
126 | raid = None
127 | rados = None
128 | site = None
129 | multi = None
130 | for t in tests:
131 | c = t.__class__.__name__
132 | if disk is None and "Disk" in c:
133 | disk = t
134 | if raid is None and c.startswith("RAID"):
135 | raid = t
136 | if rados is None and c.startswith("RADOS"):
137 | rados = t
138 | if site is None and c.startswith("Site"):
139 | site = t
140 | if multi is None and c.startswith("MultiSite"):
141 | multi = t
142 |
143 | # find elements that only exist beneath others
144 | if site is None and multi is not None:
145 | site = multi.site
146 | if rados is None and multi is not None:
147 | rados = multi.rados
148 | if disk is None and rados is not None:
149 | disk = rados.disk
150 | if disk is None and raid is not None:
151 | disk = raid.disk
152 |
153 | if parms:
154 | printParms(format, disk, raid, rados, site, multi)
155 |
156 | if descr:
157 | print ""
158 | print "Column legends"
159 | s = printTime(period)
160 | i = 1
161 | while i <= len(legends):
162 | l = legends[i - 1]
163 | if l.endswith('*'):
164 | print "\t%d %s (per %s)" % (i, l, s)
165 | else:
166 | print "\t%d %s" % (i, l)
167 | i += 1
168 |
169 | if headings:
170 | format.printHeadings()
171 |
172 | # expected data loss after drive failures
173 | for t in tests:
174 | if t is None:
175 | format.printHeadings()
176 | continue
177 |
178 | # calculate the renderable reliabilities and durability
179 | s = list()
180 | t.compute(period=period)
181 | s.append(t.description) # description
182 | s.append(printDurability(t.dur)) # durability
183 | s.append(printProbability(t.P_site)) # P(site failure)
184 | s.append(printProbability(t.P_drive)) # P(drive failure)
185 | s.append(printProbability(t.P_nre)) # P(NRE on recovery)
186 | s.append(printProbability(t.P_rep)) # P(replication failure)
187 | l = (t.P_site * t.L_site) + (t.P_drive * t.L_drive) +\
188 | (t.P_nre * t.L_nre) + (t.P_rep * t.L_rep)
189 | s.append(printExp(l * PiB / t.rawsize)) # expected loss/PiB
190 | format.printLine(s)
191 |
--------------------------------------------------------------------------------
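
A minimal sketch of driving Run() directly, mirroring what defaultTests() in main.py (further down) does:

    from DiskRely import EnterpriseDisk
    from RadosRely import RADOS
    from Run import Run
    from RelyFuncts import YEAR

    disk = EnterpriseDisk()
    tests = [disk, RADOS(disk, copies=2), RADOS(disk, copies=3)]
    Run(tests, period=YEAR, verbosity="headings")
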
/models/reliability/SiteRely.py:
--------------------------------------------------------------------------------
1 | # Ceph - scalable distributed file system
2 | #
3 | # Copyright (C) Inktank
4 | #
5 | # This is free software; you can redistribute it and/or
6 | # modify it under the terms of the GNU Lesser General Public
7 | # License version 2.1, as published by the Free Software
8 | # Foundation. See file COPYING.
9 | #
10 |
11 | """
12 | basic site reliability model
13 | incorporating force majeure events and site replacement time
14 | the modeled unit is a single site
15 | (and is independent of the underlying storage)
16 | """
17 |
18 | from RelyFuncts import YEAR, Pfail, FitRate, mttf
19 | from sizes import GiB, TiB, PiB
20 |
21 | DISASTER = FitRate(.001, YEAR)
22 |
23 |
24 | class Site(object):
25 |
26 | def __init__(self, fits=DISASTER, rplc=0, size=1 * PiB):
27 | """ create a site reliability simulation
28 | fits -- catastrophic site failures per billion hours
29 | rplc -- how long it will take to replace a failed facility
30 | size -- amount of data at this site
31 | """
32 | self.fits = fits
33 | self.replace = rplc
34 | self.size = size
35 | self.rawsize = size
36 | if size >= PiB:
37 | self.description = "Site (%d PB)" % (size / PiB)
38 | else:
39 | self.description = "Site (%d TB)" % (size / TiB)
40 |
41 | self.P_drive = 0 # inapplicable
42 | self.L_drive = 0 # inapplicable
43 | self.P_nre = 0 # inapplicable
44 | self.L_nre = 0 # inapplicable
45 | self.P_rep = 0 # inapplicable
46 | self.L_rep = 0 # inapplicable
47 |
48 | def compute(self, period=YEAR, mult=1):
49 | """ probability of survival for an arbitrary object
50 | period -- time period over which to estimate failures
51 | mult -- FIT rate multiplier
52 | """
53 | self.P_site = float(1) - Pfail(self.fits * mult, period, n=0)
54 | self.L_site = self.size
55 | self.dur = 1.0 - self.P_site
56 |
57 | def availability(self):
58 | """ fraction of the time during which a remote copy is available """
59 | # if we are ignoring failures, availability is 100%
60 | if self.fits == 0:
61 | return 1.0
62 |
63 | # if there is no repair, annual probability of non-failure
64 | if self.replace == 0:
65 | return Pfail(self.fits, YEAR, n=0)
66 |
67 | # one minus the time between failures and repair
68 | ttf = mttf(self.fits)
69 | return float(ttf) / (ttf + self.replace)
70 |
--------------------------------------------------------------------------------
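
A rough worked example of the site model above (approximate values):

    from SiteRely import Site
    from RelyFuncts import FitRate, YEAR, DAY

    site = Site(fits=FitRate(.001, YEAR), rplc=30 * DAY)
    site.compute(period=YEAR)
    print(site.P_site)          # ~0.001, chance of a disaster in a year
    print(site.availability())  # ~0.99992 with a 30-day site rebuild
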
/models/reliability/TODO.txt:
--------------------------------------------------------------------------------
1 | TEST CASES:
2 |
3 | DO:
4 | add copyrights
5 |
6 | tweak default parameters for more meaningful output
7 |
8 |
9 | VALIDATE RESULTS
10 | test my results vs servethehome.com/raid-calculator
11 | they support RAID-0 and get a very different number than I do
12 | we used to agree on RAID-1 (do we still)
13 | 2+1 ... I say .000124, they say .00052
14 | 6+2 ... I say 1.6E-9, they say 6E-6
15 | but they are counting reads and writes, which I should too
16 | http://www.servethehome.com/raid-calculator/raid-reliability-calculator-simple-mttdl-model/
17 |
18 | but reconsider what I want to do with it,
19 | come up with a new model for what it should be in each
20 | disk does NRE probability and loss computations
21 | disk incorporates scrubbing
22 | ?does disk control the nre model?
23 | come up with a new code refactoring for who does what
24 | implement it
25 |
26 | AND ... there was another calculator
27 |
28 |
29 | INVESTIGATE:
30 |
31 | FIX 1 site 3cp is 200x more reliable than 3site 1cp
32 | because my psc computation for the third failure still
33 | assumes that I need NPG drives after 2nd drive failure
34 |
35 | CONSIDER ADD fit2 support to the DiskRely
36 |
37 | CONSIDER ADD scrubbing and scrub efficacy in eliminating NREs
38 |
39 | CONSIDER ADD RAID stripes on top of the others
40 |
--------------------------------------------------------------------------------
/models/reliability/inktank.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ceph/ceph-tools/610504289f7f25db6c3612b1d283de6ccbfce392/models/reliability/inktank.ico
--------------------------------------------------------------------------------
/models/reliability/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | #
3 | # Ceph - scalable distributed file system
4 | #
5 | # Copyright (C) Inktank
6 | #
7 | # This is free software; you can redistribute it and/or
8 | # modify it under the terms of the GNU Lesser General Public
9 | # License version 2.1, as published by the Free Software
10 | # Foundation. See file COPYING.
11 | #
12 |
13 | """
14 | main routine for driving simulations
15 | process args and invoke gui or a default set of tests
16 | """
17 |
18 | from DiskRely import Disk
19 | from RaidRely import RAID0, RAID1, RAID5, RAID6
20 | from RadosRely import RADOS
21 | from SiteRely import Site
22 | from MultiRely import MultiSite
23 | from Config import Config
24 | from Run import Run
25 |
26 |
27 | def oneTest(cfg, which):
28 | """
29 | run a single simulation (call-back from the GUI)
30 | cfg -- configuration values to use
31 | which -- type of simulation to be run
32 | """
33 |
34 | # everybody needs a disk simulation
35 | disk = Disk(size=cfg.disk_size,
36 | fits=cfg.disk_fit, fits2=cfg.disk_fit2,
37 | nre=cfg.disk_nre,
38 | desc="Disk: %s" % (cfg.disk_type))
39 |
40 | if which == "disk":
41 | Run([disk], period=cfg.period, verbosity=cfg.verbose)
42 | return
43 |
44 | if which == "raid":
45 | if cfg.raid_type == "RAID-0":
46 | raid = RAID0(disk, volumes=cfg.raid_vols,
47 | nre_model=cfg.nre_model,
48 | recovery=cfg.raid_recover,
49 | delay=cfg.raid_replace,
50 | objsize=cfg.obj_size)
51 | elif cfg.raid_type == "RAID-1":
52 | raid = RAID1(disk, volumes=cfg.raid_vols,
53 | nre_model=cfg.nre_model,
54 | recovery=cfg.raid_recover,
55 | delay=cfg.raid_replace,
56 | objsize=cfg.obj_size)
57 | elif cfg.raid_type == "RAID-5":
58 | raid = RAID5(disk, volumes=cfg.raid_vols,
59 | nre_model=cfg.nre_model,
60 | recovery=cfg.raid_recover,
61 | delay=cfg.raid_replace,
62 | objsize=cfg.obj_size)
63 | elif cfg.raid_type == "RAID-6":
64 | raid = RAID6(disk, volumes=cfg.raid_vols,
65 | nre_model=cfg.nre_model,
66 | recovery=cfg.raid_recover,
67 | delay=cfg.raid_replace,
68 | objsize=cfg.obj_size)
69 | Run([raid], period=cfg.period, verbosity=cfg.verbose)
70 | return
71 |
72 | rados = RADOS(disk, pg=cfg.rados_decluster,
73 | copies=cfg.rados_copies,
74 | speed=cfg.rados_recover,
75 | fullness=cfg.rados_fullness,
76 | objsize=cfg.obj_size,
77 | stripe=cfg.stripe_length,
78 | nre_model=cfg.nre_model,
79 | delay=cfg.rados_markout)
80 | if which == "rados":
81 | Run([rados], period=cfg.period, verbosity=cfg.verbose)
82 | return
83 |
84 | if which == "multi":
85 | site = Site(fits=cfg.majeure, rplc=cfg.site_recover)
86 | multi = MultiSite(rados, site,
87 | speed=cfg.remote_recover,
88 | latency=cfg.remote_latency,
89 | sites=cfg.remote_sites)
90 | Run([multi], period=cfg.period, verbosity=cfg.verbose)
91 | return
92 |
93 |
94 | def defaultTests(cfg):
95 | """
96 | run a standard set of interesting simulations
97 | cfg -- default configuration values
98 | """
99 | disk = Disk(size=cfg.disk_size, fits=cfg.disk_fit,
100 | nre=cfg.disk_nre,
101 | desc="Disk: %s" % (cfg.disk_type))
102 |
103 | raid0 = RAID0(disk, volumes=2,
104 | nre_model=cfg.nre_model,
105 | recovery=cfg.raid_recover,
106 | delay=cfg.raid_replace,
107 | objsize=cfg.obj_size)
108 | raid1 = RAID1(disk, volumes=2,
109 | nre_model=cfg.nre_model,
110 | recovery=cfg.raid_recover,
111 | delay=cfg.raid_replace,
112 | objsize=cfg.obj_size)
113 | raid5 = RAID5(disk, volumes=4,
114 | nre_model=cfg.nre_model,
115 | recovery=cfg.raid_recover,
116 | delay=cfg.raid_replace,
117 | objsize=cfg.obj_size)
118 | raid6 = RAID6(disk, volumes=8,
119 | nre_model=cfg.nre_model,
120 | recovery=cfg.raid_recover,
121 | delay=cfg.raid_replace,
122 | objsize=cfg.obj_size)
123 |
124 | tests = [disk, raid0, raid5, raid1, raid6]
125 |
126 | # single site RADOS
127 | for cp in (1, 2, 3):
128 | rados = RADOS(disk, pg=cfg.rados_decluster,
129 | copies=cp,
130 | speed=cfg.rados_recover,
131 | fullness=cfg.rados_fullness,
132 | objsize=cfg.obj_size,
133 | stripe=cfg.stripe_length,
134 | nre_model=cfg.nre_model,
135 | delay=cfg.rados_markout)
136 | tests.append(rados)
137 |
138 | # multi-site RADOS
139 | tests.append(None)
140 | site = Site(fits=cfg.majeure, rplc=cfg.site_recover)
141 | tests.append(site)
142 | for sites in (1, 2, 3, 4):
143 | for cp in (1, 2, 3):
144 | rados = RADOS(disk, pg=cfg.rados_decluster,
145 | copies=cp,
146 | speed=cfg.rados_recover,
147 | fullness=cfg.rados_fullness,
148 | objsize=cfg.obj_size,
149 | stripe=cfg.stripe_length,
150 | nre_model=cfg.nre_model,
151 | delay=cfg.rados_markout)
152 |
153 | multi = MultiSite(rados, site,
154 | speed=cfg.remote_recover,
155 | latency=cfg.remote_latency,
156 | sites=sites)
157 | tests.append(multi)
158 |
159 | # and run them all
160 | Run(tests, period=cfg.period, verbosity=cfg.verbose)
161 |
162 |
163 | def main():
164 | """ CLI entry-point:
165 | process command line arguments, run gui or a standard set of tests
166 | """
167 |
168 | # process the command line arguments
169 | from optparse import OptionParser
170 | parser = OptionParser(usage="usage: %prog [options]")
171 | parser.add_option("-g", "--gui", dest="gui", action="store_true",
172 | default=False, help="GUI control panel")
173 | (opts, files) = parser.parse_args()
174 |
175 | for f in files:
176 | if f == "gui" or f == "GUI":
177 | opts.gui = True
178 |
179 | # default configuration parameters
180 | cfg = Config()
181 | if opts.gui: # use the GUI to control the computations
182 | from RelyGUI import RelyGUI
183 | gui = RelyGUI(cfg, oneTest)
184 | gui.mainloop()
185 | else: # run a standard set of models
186 | defaultTests(cfg)
187 |
188 |
189 | if __name__ == "__main__":
190 | main()
191 |
--------------------------------------------------------------------------------
/models/reliability/sizes.py:
--------------------------------------------------------------------------------
1 | #
2 | # Ceph - scalable distributed file system
3 | #
4 | # Copyright (C) Inktank
5 | #
6 | # This is free software; you can redistribute it and/or
7 | # modify it under the terms of the GNU Lesser General Public
8 | # License version 2.1, as published by the Free Software
9 | # Foundation. See file COPYING.
10 | #
11 | """
12 | commonly used size multipliers
13 | """
14 |
15 | # decimal speeds/capacities
16 | KiB = 1000
17 | MiB = KiB * 1000
18 | GiB = MiB * 1000
19 | TiB = GiB * 1000
20 | PiB = TiB * 1000
21 |
22 | # binary capacities
23 | KB = 1024
24 | MB = KB * 1024
25 | GB = MB * 1024
26 | TB = GB * 1024
27 |
--------------------------------------------------------------------------------
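
Note that this module deliberately uses the "i" suffixes for the decimal (power-of-10) multipliers and the plain suffixes for the binary (power-of-2) ones, so for example:

    from sizes import TiB, TB
    print(TiB)   # 1000000000000 (decimal)
    print(TB)    # 1099511627776 (binary)
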
/nose/plugins/inventory/inventory.py:
--------------------------------------------------------------------------------
1 | """
2 | Bypass actual execution and produce an inventory of test cases,
3 | with test-case descriptions based on defined annotation attributes.
4 |
5 | This is a modified version of the standard collect.py plugin (for --collect-only)
6 | """
7 | from nose.plugins.base import Plugin
8 | from nose.case import Test
9 | import logging
10 | import unittest
11 | import sys
12 | import csv
13 |
14 | log = logging.getLogger(__name__)
15 |
16 | class TestInventory(Plugin):
17 | """
18 | Print an inventory of enabled tests,
19 | as annotated by specified properties.
20 | (probably best used with -q)
21 | """
22 | # default test description properties
23 | annoteProps = [ 'resource', 'method', 'operation', 'assertion' ]
24 |
25 | name = "test-inventory"
26 | enableOpt = 'inventory_only'
27 | outputFormat = 'json'
28 | numTests = 0
29 |
30 | def options(self, parser, env):
31 | """Register commandline options for this plugin.
32 | """
33 | parser.add_option('--test-inventory',
34 | action='store_true',
35 | dest=self.enableOpt,
36 | help=self.help())
37 |
38 | parser.add_option('--test-inventory-properties',
39 | action='store',
40 | dest='propertyList',
41 | default=env.get('NOSE_INVENTORY_PROPERTIES'),
42 | help="%s\n[NOSE_INVENTORY_PROPERTIES]\n" %
43 | "ordered list of comma-separated property names")
44 |
45 | parser.add_option('--test-inventory-format',
46 | action='store',
47 | dest='outputFormat',
48 | default=env.get('NOSE_INVENTORY_FORMAT'),
49 | help="supported formats are: col,csv,json\n[NOSE_INVENTORY_FORMAT]\n")
50 |
51 |
52 | def configure(self, options, config):
53 | """Figure out our annotation properties and output format
54 | """
55 | Plugin.configure(self, options, config)
56 | if options.propertyList != None:
57 | self.annoteProps = options.propertyList.split(',')
58 | log.debug("debug: Using annotation properties: %s" % self.annoteProps)
59 |
60 | if options.outputFormat == 'csv':
61 | self.outputFormat = 'csv'
62 | # CSV output should start with column headings
63 | sys.stderr.write('Test')
64 | for p in self.annoteProps:
65 | sys.stderr.write(',' + p)
66 | sys.stderr.write('\n')
67 | elif options.outputFormat == 'json':
68 | self.outputFormat = 'json'
69 | else:
70 | maxlen = 1
71 | for p in self.annoteProps:
72 | if p.__len__() > maxlen:
73 | maxlen = p.__len__();
74 | self.outputFormat = ' %-' + '%d' % maxlen + 's %s\n'
75 | log.debug("debug: Using output format '%s'\n" % self.outputFormat);
76 |
77 | def prepareTestLoader(self, loader):
78 | """Install test-inventory suite class in TestLoader.
79 | """
80 | # Disable context awareness
81 | log.debug("Preparing test loader")
82 | loader.suiteClass = TestSuiteFactory(self.conf)
83 |
84 | def prepareTestCase(self, test):
85 | """Replace actual test with dummy that always passes.
86 | """
87 | # Return something that always passes
88 | log.debug("debug: Inventorying test case %s", test)
89 | if not isinstance(test, Test):
90 | return
91 |
92 | # print out the annotation properties
93 | self._describeTestCase(test)
94 |
95 | # pretend to run this test
96 | def run(result):
97 | # We need to make these plugin calls because there won't be
98 | # a result proxy, due to using a stripped-down test suite
99 | self.conf.plugins.startTest(test)
100 | result.startTest(test)
101 | self.conf.plugins.addSuccess(test)
102 | result.addSuccess(test)
103 | self.conf.plugins.stopTest(test)
104 | result.stopTest(test)
105 | return run
106 |
107 | def _describeTestCase(self, testcase):
108 | """Write out a description of the specified test-case
109 | (in terms of specified attributes, in configured format)
110 | """
111 | # the real testcase (with the annotation decorations) has
112 | # probably been wrapped by many plug-ins, so we have to
113 | # un-peel the onion to find the annotation attributes
114 | o = testcase
115 | while o != None:
116 | if hasattr(o, self.annoteProps[0]):
117 | break
118 |
119 | # stop when we run out of levels
120 | if hasattr(o, "test"):
121 | o = o.test
122 | else:
123 | o = None
124 |
125 | # we never found any annotation properties
126 | if o == None:
127 | # in column format, we list all test cases
128 | if self.outputFormat != 'json' and self.outputFormat != 'csv':
129 | sys.stderr.write("Test Case: %s\n" % testcase);
130 | return
131 |
132 | # write out the properties in the configured output format
133 | if self.outputFormat == 'csv':
134 | # write out the properties as quoted comma separated values
135 | sys.stderr.write("'%s'" % testcase)
136 | for p in self.annoteProps:
137 | sys.stderr.write(",")
138 | value = getattr(o, p, None)
139 | if value != None:
140 | sys.stderr.write("'" + value + "'")
141 | sys.stderr.write("\n");
142 |
143 | # write out the properties in json
144 | elif self.outputFormat == 'json':
145 | if self.numTests > 0:
146 | sys.stderr.write(",")
147 | else:
148 | sys.stderr.write("[")
149 | sys.stderr.write("\n\t{\n")
150 | sys.stderr.write("\t\ttestcase: \"%s\"" % testcase)
151 | for p in self.annoteProps:
152 | value = getattr(o, p, None)
153 | if value == None:
154 | continue
155 | sys.stderr.write(",")
156 | sys.stderr.write("\n\t\t%s: \"%s\"" % (p, value))
157 | sys.stderr.write("\n\t}");
158 | self.numTests += 1
159 |
160 | # write out the properties in neat cols under the test name
161 | else:
162 | sys.stderr.write("Test Case: %s\n" % testcase);
163 | for p in self.annoteProps:
164 | if hasattr(o, p):
165 | value = getattr(o, p, "True")
166 | sys.stderr.write(self.outputFormat % (p, value))
167 |
168 | def report(self, stream):
169 | """ Called after all tests have run to produce final output
170 | """
171 | if self.outputFormat == 'json':
172 | sys.stderr.write("\n]\n")
173 |
174 | class TestSuiteFactory:
175 | """
176 | Factory for producing configured test suites.
177 | """
178 | def __init__(self, conf):
179 | self.conf = conf
180 |
181 | def __call__(self, tests=(), **kw):
182 | return TestSuite(tests, conf=self.conf)
183 |
184 |
185 | class TestSuite(unittest.TestSuite):
186 | """
187 | Basic test suite that bypasses most proxy and plugin calls, but does
188 | wrap tests in a nose.case.Test so prepareTestCase will be called.
189 | """
190 | def __init__(self, tests=(), conf=None):
191 | self.conf = conf
192 | # Exec lazy suites: makes discovery depth-first
193 | if callable(tests):
194 | tests = tests()
195 | log.debug("TestSuite(%r)", tests)
196 | unittest.TestSuite.__init__(self, tests)
197 |
198 | def addTest(self, test):
199 | if isinstance(test, unittest.TestSuite):
200 | self._tests.append(test)
201 | else:
202 | self._tests.append(Test(test, config=self.conf))
203 |
--------------------------------------------------------------------------------
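
A hypothetical test annotated with the plugin's default inventory properties, to show what it would report (the names and values are made up for illustration):

    # the plugin unwraps each test and reads these attributes via getattr()
    def test_object_put():
        assert True

    test_object_put.resource = "object"
    test_object_put.method = "PUT"
    test_object_put.operation = "store an object"
    test_object_put.assertion = "returns success"
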
/nose/plugins/inventory/setup.py:
--------------------------------------------------------------------------------
1 | """
2 | nose plugin for test-case inventory/description
3 |
4 | from this directory, run easy_install .
5 | """
6 | import sys
7 | try:
8 | import ez_setup
9 | ez_setup.use_setuptools()
10 | except ImportError:
11 | pass
12 | from setuptools import setup
13 |
14 | setup(
15 | name='Test Inventory plugin',
16 | version='0.1',
17 | author='Mark Kampe',
18 | author_email = 'mark.kampe@dreamhost.com',
19 | description = 'Test Inventory',
20 | license = 'LGPL',
21 | py_modules = ['inventory'],
22 | entry_points = {
23 | 'nose.plugins': [
24 | 'test_inventory = inventory:TestInventory'
25 | ]
26 | }
27 | )
28 |
--------------------------------------------------------------------------------
/redmine/Bugparse.pm:
--------------------------------------------------------------------------------
1 | #
2 | # module: Bugparse
3 | #
4 | # purpose:
5 | # to figure out which columns of a bug dump contain
6 | # which information, ideally driven by a comment header
7 | #
8 | # the supported literal column keys are:
9 | # bugid
10 | # category
11 | # closed
12 | # created
13 | # history
14 | # points
15 | # priority
16 | # project
17 | # source
18 | # status
19 | # tags
20 | # type
21 | # version
22 | #
23 | use warnings;
24 | use strict;
25 |
26 | package Bugparse;
27 |
28 | require Exporter;
29 | our @ISA = qw(Exporter);
30 | our @EXPORT = qw(parser);
31 |
32 | #
33 | # parse a line that is presumed to contain column headings
34 | # and initialize a mapping from column names to numbers
35 | #
36 | sub parser {
37 | my $str = substr( $_[0], 1 );
38 | my @cols = split( '\t', $str );
39 | my %colMap = ();
40 |
41 | # try to get every column
42 | for( my $i = 0; $i < scalar @cols; $i++ ) {
43 | $cols[$i] =~ s/^\s+//;
44 | $cols[$i] =~ s/\s+$//;
45 |
46 | if ($cols[$i] eq 'bugid') {
47 | $colMap{'bugid'} = $i;
48 | } elsif ($cols[$i] eq 'category') {
49 | $colMap{'category'} = $i;
50 | } elsif ($cols[$i] eq 'issue type') {
51 | $colMap{'type'} = $i;
52 | } elsif ($cols[$i] eq 'source') {
53 | $colMap{'source'} = $i;
54 | } elsif ($cols[$i] eq 'prty') {
55 | $colMap{'priority'} = $i;
56 | } elsif ($cols[$i] eq 'version') {
57 | $colMap{'version'} = $i;
58 | } elsif ($cols[$i] eq 'created') {
59 | $colMap{'created'} = $i;
60 | } elsif ($cols[$i] eq 'closed') {
61 | $colMap{'closed'} = $i;
62 | } elsif ($cols[$i] eq 'status') {
63 | $colMap{'status'} = $i;
64 | } elsif ($cols[$i] eq 'history') {
65 | $colMap{'history'} = $i;
66 | } elsif ($cols[$i] eq 'points') {
67 | $colMap{'points'} = $i;
68 | } elsif ($cols[$i] eq 'project') {
69 | $colMap{'project'} = $i;
70 | } elsif ($cols[$i] eq 'tags') {
71 | $colMap{'tags'} = $i;
72 | }
73 | # don't sweat unrecognized columns
74 | }
75 |
76 | return( %colMap );
77 | }
78 |
79 | 1;
80 |
--------------------------------------------------------------------------------
/redmine/age.plot:
--------------------------------------------------------------------------------
1 | #! /usr/bin/gnuplot
2 | #
3 | # generate plots of time to fix and bug ages
4 | # broken down by tracker-type or priority
5 | #
6 | # usage:
7 | # gnuplot age.plot
8 | #
9 | # expected input format:
10 | # bucket immediate urgent high normal low feature support cleanup tasks doc ...
11 | # first set of columns is for time to fix
12 | # second set of columns is for age (of unfixed) issues
13 | #
14 | # The bucket (in column 1) is merely a label
15 | #
16 | # TODO
17 | # Having this script know what the names and colors of
18 | # the issue classifications ties this to the database
19 | # and the reduction script. Much better would be if
20 | # the reduction script could pass the titles and colors
21 | # in to me. Maybe 'lc variable' can help here.
22 | #
23 | # NOTE:
24 | # the variable BASE, which controls input and output file names,
25 | # must have been initialized ... e.g.
26 | # BASE = "weekly"
27 | # INFILE = "bugs.".BASE
28 | # output files will have names of the form $BASE-{ttf,age}.png
29 | #
30 |
31 | print "Processing input file: ".INFILE." to create output ".BASE."-{ttf,age}.png"
32 |
33 | # output to png files
34 | set term png
35 |
36 | # things usually get busier to the right
37 | set key left top
38 |
39 | set xtics out nomirror
40 | set ytics out nomirror
41 |
42 | set style data linespoints
43 |
44 | set output BASE."-ttf.png"
45 | set title "Issue Fix Times (days)";
46 | plot INFILE u 2:xticlabels(1) \
47 | t "Immediate" lc rgb 'violet',\
48 | '' u 3 t "Urgent" lc rgb 'red', \
49 | '' u 4 t "High" lc rgb 'pink', \
50 | '' u 5 t "Normal" lc rgb 'orange',\
51 | '' u 6 t "Low" lc rgb 'yellow',\
52 | '' u 7 t "Feature" lc rgb 'green', \
53 | '' u 9 t "Support" lc rgb 'blue', \
54 | '' u 9 t "Cleanup" lc rgb 'cyan', \
55 | '' u 10 t "Tasks" lc rgb 'white', \
56 | '' u 11 t "Doc" lc rgb 'grey';
57 |
58 | set output BASE."-age.png"
59 | set title "Issue Ages (days)";
60 | plot INFILE u 12:xticlabels(1) \
61 | t "Immediate" lc rgb 'violet',\
62 | '' u 13 t "Urgent" lc rgb 'red', \
63 | '' u 14 t "High" lc rgb 'pink', \
64 | '' u 15 t "Normal" lc rgb 'orange',\
65 | '' u 16 t "Low" lc rgb 'yellow',\
66 | '' u 17 t "Feature" lc rgb 'green', \
67 | '' u 18 t "Support" lc rgb 'blue', \
68 | '' u 19 t "Cleanup" lc rgb 'cyan', \
69 | '' u 20 t "Tasks" lc rgb 'white', \
70 | '' u 21 t "Doc" lc rgb 'grey';
71 |
--------------------------------------------------------------------------------
/redmine/bugage.pl:
--------------------------------------------------------------------------------
1 | #!/usr/bin/perl
2 | #
3 | # script: bugage.pl
4 | #
5 | # purpose:
6 | # to read through a raw bug list and generate
7 | # aging statistics
8 | #
9 | # output:
10 | # one row per age accumulation bucket,
11 | # with a (time to fix) count for each bug classification
12 | # and an (age) count for each bug classification
13 | #
14 | use warnings;
15 | use strict;
16 | use Carp;
17 |
18 | use Getopt::Std;
19 | use File::Basename;
20 | use Time::Local;
21 |
22 | use Bugparse;
23 |
24 | sub usage()
25 | {
26 | print STDERR "Usage: bugmunch.pl [switches] [file ...]\n";
27 | print STDERR " -b # ...... bucket multiplier\n";
28 | print STDERR " -m # ...... max bucket\n";
29 | print STDERR " -s date ... report start date\n";
30 | print STDERR " -e date ... report end date\n";
31 | }
32 |
33 | # parameters
34 | my $bucket_factor = 2; # bucket multiplier
35 | my $bucket_max = 1000; # maximum bucket
36 | my $start_date; # report starting date
37 | my $end_date; # report end date
38 |
39 | #
40 | # FIX: this shouldn't be hard coded, but I should find a way to
41 | # put them in/get them from the RedMine dump. The trick is
42 | # that these are a function of tracker-type and priority.
43 | #
44 | my @columns = ('Immediate', 'Urgent', 'High', 'Normal', 'Low',
45 | 'Feature', 'Support', 'Cleanup', 'Tasks', 'Documentation' );
46 |
47 | #
48 | # FIX: this shouldn't be hard coded, but should probably be read from
49 | # a product specific table that maps issue types and priorities
50 | # into reporting buckets.
51 | #
52 | sub get_bug_class
53 | { (my $bugtype, my $priority) = ($_[0], $_[1]);
54 | return ($bugtype eq 'Bug') ? "$priority" : "$bugtype";
55 | }
56 |
57 |
58 | # accumulated information
59 | my %fix_times = (); # time to fix counters
60 | my %open_ages = (); # age counters
61 | my @buckets; # list of bucket sizes
62 |
63 | # figure out the index of the bucket for a particular number
64 | sub get_bucket
65 | { (my $count) = ($_[0]);
66 |
67 | for( my $i = 0; $i < scalar @buckets; $i++ ) {
68 | if ($count <= $buckets[$i]) {
69 | return( $i );
70 | }
71 | }
72 |
73 | return scalar @buckets;
74 | }
75 |
76 | # return the number of days in a time interval
77 | sub days
78 | { (my $t) = ($_[0]);
79 |
80 | return ($t / (24 * 60 * 60));
81 | }
82 |
83 | #
84 | # routine: parse_date
85 | #
86 | # parameters: date (mm/dd/yyyy)
87 | #
88 | # returns: time value for that date
89 | #
90 | sub parse_date
91 | {
92 |     # dissect the specified time
93 | (my $mon, my $day, my $year) = split( '/', $_[0] );
94 | return timegm( 0, 0, 0, $day, $mon-1, $year );
95 | }
96 |
97 | #
98 | # routine: process_bug
99 | #
100 | # purpose:
101 | # accumulate another bug report
102 | #
103 | sub process_bug
104 | {
105 | (my $created, my $bugtype, my $priority, my $fixed) = ($_[0], $_[1], $_[2], $_[3]);
106 |
107 | # figure out its class
108 | my $class_name = get_bug_class( $bugtype, $priority );
109 |
110 | # figure out its age
111 | my $c = parse_date( $created );
112 | my $f = ($fixed eq "none") ? time() : parse_date( $fixed );
113 | my $bucket = get_bucket( days( $f - $c ) );
114 | my $hash = "$class_name-$bucket";
115 |
116 | # update the appropriate count
117 | if ($fixed ne "none") {
118 | if (defined $fix_times{$hash}) {
119 | $fix_times{$hash}++;
120 | } else {
121 | $fix_times{$hash} = 1;
122 | }
123 | } else {
124 | if (defined $open_ages{$hash}) {
125 | $open_ages{$hash}++;
126 | } else {
127 | $open_ages{$hash} = 1;
128 | }
129 | }
130 | }
131 |
132 | #
133 | # routine: flush_buckets
134 | #
135 | # purpose: generate the output (bucket names and per-column counts)
136 | #
137 | sub flush_buckets
138 | {
139 | # print out the column headers
140 | printf( "# bucket " );
141 | for ( my $i = 0; $i < scalar @columns; $i++ ) {
142 | printf( "fix-%s ", $columns[$i] );
143 | }
144 | for ( my $i = 0; $i < scalar @columns; $i++ ) {
145 | printf( "age-%s ", $columns[$i] );
146 | }
147 | printf("\n");
148 |
149 | # for each bucket
150 | my $prev = 0;
151 | for ( my $i = 0; $i <= scalar @buckets; $i++ ) {
152 | if ($i < scalar @buckets) {
153 | printf("%d-%d\t", $prev,$buckets[$i]);
154 | } else {
155 | printf(">%d\t", $prev);
156 | }
157 |
158 | # print all of the fix times
159 | for ( my $j = 0; $j < scalar @columns; $j++ ) {
160 | my $hash = "$columns[$j]-$i";
161 | printf("%d\t", defined($fix_times{$hash}) ? $fix_times{$hash} : 0);
162 | }
163 |
164 | # print all of the ages
165 | for ( my $j = 0; $j < scalar @columns; $j++ ) {
166 | my $hash = "$columns[$j]-$i";
167 | printf("%d\t", defined($open_ages{$hash}) ? $open_ages{$hash} : 0);
168 | }
169 | $prev = $buckets[$i];
170 | printf("\n");
171 | }
172 | }
173 |
174 | #
175 | # routine: process_file
176 | #
177 | # purpose:
178 | # to read the lines of an input file and pass the non-comments
179 | # to the appropriate accumulation routines.
180 | #
181 | sub process_file
182 | { (my $file) = ($_[0]);
183 |
184 | # first line should be a headers comment
185 | my $first = <$file>;
186 | my %columns = Bugparse::parser($first);
187 |
188 | # make sure we got all the columns we needed
189 | foreach my $c ('created','priority','type','closed') {
190 | if (!defined( $columns{$c})) {
191 | die("Unable to find column: $c\n");
192 | }
193 | }
194 | my $crt = $columns{'created'};
195 | my $prt = $columns{'priority'};
196 | my $typ = $columns{'type'};
197 | my $cls = $columns{'closed'};
198 |
199 | # use those columns to find what we want in the following lines
200 | while( <$file> ) {
201 | if (!/^#/) { # ignore comments
202 | # carve it into tab separated fields
203 | my @fields = split( '\t', $_ );
204 |
205 | # remove any leading or trailing blanks
206 | for ( my $i = 0; $i < scalar @fields; $i++ ) {
207 | $fields[$i] =~ s/^\s+//;
208 | $fields[$i] =~ s/\s+$//;
209 | }
210 |
211 | # and process the fields we care about
212 | process_bug( $fields[$crt], $fields[$typ], $fields[$prt], $fields[$cls]);
213 | }
214 | }
215 | }
216 |
217 |
218 | #
219 | # routine: main
220 | #
221 | # purpose:
222 | # process arguments
223 | # figure out what operations we are supposed to do
224 | # perform them
225 | #
226 | # notes:
227 | # we require a command just to make sure the caller
228 | # knows what he is doing
229 | #
230 | sub main
231 | {
232 | # parse the input parameters
233 | my %options = ();
234 |     if (!getopts('b:m:s:e:', \%options)) {
235 | usage();
236 | exit(1);
237 | }
238 |
239 | # see what our bucket parameters are
240 | if (defined( $options{'b'} )) {
241 | $bucket_factor = $options{'b'};
242 | }
243 | if (defined( $options{'m'} )) {
244 | $bucket_max = $options{'m'};
245 | }
246 |
247 | # initialize the bucket size array
248 | my $i = 0;
249 | for( my $sz = 1; $sz <= $bucket_max; $sz *= $bucket_factor ) {
250 | $buckets[$i++] = $sz;
251 | }
252 |
253 | # see what our reporting period is
254 | $start_date = defined( $options{'s'} ) ? parse_date($options{'s'}) : 0;
255 | $end_date = defined( $options{'e'} ) ? parse_date($options{'e'}) : time();
256 |
257 | # then process the input file(s)
258 | my $args = scalar @ARGV;
259 | if ( $args < 1 ) {
260 |         process_file( \*STDIN );
261 | } else {
262 | for( my $i = 0; $i < $args; $i++ ) {
263 | open(my $file, "<$ARGV[$i]") ||
264 | die "Unable to open input file $ARGV[$i]";
265 | process_file( $file );
266 | close( $file );
267 | }
268 | }
269 |
270 | # and flush out the accumulated counts
271 | flush_buckets();
272 |
273 | exit(0);
274 | }
275 |
276 | main();
277 |
--------------------------------------------------------------------------------
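A minimal sketch of how this reducer is meant to be driven, mirroring the way buggraphs.sh uses it (the dump file name and scratch directory are illustrative assumptions, not part of the repo):

    # bucket fix times and ages with the default doubling buckets (up to 1000 days)
    perl bugage.pl -b 2 -m 1000 redmine_dump.txt > /tmp/bugtemp/all
    # plot the result; writes /tmp/bugtemp/all-ttf.png and /tmp/bugtemp/all-age.png
    ./bugplot.sh /tmp/bugtemp/all age.plot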
/redmine/buggraphs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # process a Redmine bug dump (produced by redmine_dump.pl) to extract
4 | # and plot interesting statistics
5 |
6 | # input file to process
7 | if [ -z "$1" ]
8 | then
9 | echo "Usage: buggraphs.sh dumpfile [target directory]"
10 | exit 1
11 | else
12 | bugdump="$1"
13 | fi
14 |
15 | # where to put our work products
16 | if [ -z "$2" ]
17 | then
18 | 	TEMP="/tmp/bugtemp.$$"
19 | 	mkdir $TEMP
20 | 	echo "Results can be found in: $TEMP"
21 | else
22 | TEMP="$2"
23 | fi
24 |
25 | # limit how far back we go to keep the X axis readable
26 | WSTART="40w"
27 | MSTART="25m"
28 |
29 | echo "Processing $bugdump for monthly statistics"
30 | perl bugmunch.pl -s $MSTART $bugdump > $TEMP/monthly
31 | if [ $? -ne 0 ]
32 | then
33 | 	echo "FATAL: error in monthly bugmunch"
34 | exit 1
35 | else
36 | ./bugplot.sh $TEMP/monthly bugs.plot
37 | fi
38 |
39 | echo "Processing $bugdump for weekly statistics"
40 | perl bugmunch.pl -w -s $WSTART $bugdump > $TEMP/weekly
41 | if [ $? -ne 0 ]
42 | then
43 | echo "FATAL: error in weekly bugmunch"
44 | exit 1
45 | else
46 | ./bugplot.sh $TEMP/weekly bugs.plot
47 | fi
48 | perl bugmunch.pl -w -r $bugdump > $TEMP/bugstats.txt
49 | if [ $? -ne 0 ]
50 | then
51 | echo "FATAL: error in weekly bugstats"
52 | exit 1
53 | fi
54 |
55 | echo "Processing $bugdump for source statistics"
56 | perl sourcemunch.pl -w -s $WSTART $bugdump > $TEMP/sources
57 | if [ $? -ne 0 ]
58 | then
59 | echo "FATAL: error in weekly sourcemunch"
60 | exit 1
61 | else
62 | grep Urgent $TEMP/sources > $TEMP/Urgent
63 | ./bugplot.sh $TEMP/Urgent sources.plot
64 | grep High $TEMP/sources > $TEMP/High
65 | ./bugplot.sh $TEMP/High sources.plot
66 | grep Normal $TEMP/sources > $TEMP/Normal
67 | ./bugplot.sh $TEMP/Normal sources.plot
68 | grep Feature $TEMP/sources > $TEMP/Feature
69 | ./bugplot.sh $TEMP/Feature sources.plot
70 | fi
71 |
72 | echo "Processing $bugdump for age statistics"
73 | perl bugage.pl $bugdump > $TEMP/all
74 | if [ $? -ne 0 ]
75 | then
76 | echo "FATAL: error in bug age report"
77 | exit 1
78 | else
79 | ./bugplot.sh $TEMP/all age.plot
80 | fi
81 |
82 | # FIX we need to automatically generate a list of interesting sprints
83 | # (probably based on their dates)
84 | echo "^2012" > $TEMP/oldsprints
85 | echo "^2013" >> $TEMP/oldsprints
86 | echo "^v2" >> $TEMP/oldsprints
87 | echo "^v3" >> $TEMP/oldsprints
88 | echo "^v0.1" >> $TEMP/oldsprints
89 | echo "^v0.2" >> $TEMP/oldsprints
90 | echo "^v0.3" >> $TEMP/oldsprints
91 |
92 | echo "Processing $bugdump for sprintly statistics"
93 | perl sprintmunch.pl $bugdump | grep -v -f $TEMP/oldsprints > $TEMP/sprintly
94 | if [ $? -ne 0 ]
95 | then
96 | echo "FATAL: error in sprintly report"
97 | exit 1
98 | else
99 | ./bugplot.sh $TEMP/sprintly sprints.plot
100 | fi
101 |
--------------------------------------------------------------------------------
/redmine/bugplot.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | #
3 | # Gnuplot scripts cannot accept command line parameters.
4 | #
5 | # This script (meant to be run before a generic plotting script)
6 | # sets input and output file names, and then loads the plot script.
7 |
8 | if [ -z "$1" ]
9 | then
10 | echo "usage: bugplot.sh base [plotscript]"
11 | exit 1
12 | fi
13 |
14 | if [ -n "$2" ]
15 | then
16 | plot="$2"
17 | else
18 | plot="bugs.plot"
19 | fi
20 |
21 | {
22 | echo "BASE = \"$1\"";
23 | echo "INFILE = \"$1\"";
24 | echo "load \"$plot\"";
25 | } | gnuplot 2>&1 | grep -v arial
26 |
--------------------------------------------------------------------------------
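Run by hand, the wrapper above is equivalent to the following, which is how the per-plot variables reach gnuplot (the path is illustrative):

    { echo 'BASE = "/tmp/bugtemp/weekly"'
      echo 'INFILE = "/tmp/bugtemp/weekly"'
      echo 'load "bugs.plot"'
    } | gnuplot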
/redmine/bugs.plot:
--------------------------------------------------------------------------------
1 | #! /usr/bin/gnuplot
2 | #
3 | # generate plots of numbers of issues per time period.
4 | # broken down by tracker-type or priority
5 | #
6 | # usage:
7 | # gnuplot weekly.plot bugs.plot
8 | # gnuplot monthly.plot bugs.plot
9 | #
10 | # expected input format:
11 | # date urgent high normal low feature support cleanup tasks doc
12 | #
13 | # This plot file does not care what the time unit is, it just uses
14 | # column 1 as a label
15 | #
16 | # TODO
17 | #
18 | # (1) I'd like to come up with a function I can use for
19 | # generating cumulative backlog (as the sum of new-fix).
20 | # But I'm having trouble using that in a histogram.
21 | #
22 | # (2) Having this script know what the names and colors of
23 | # the issue classifications ties this to the database
24 | # and the reduction script. Much better would be if
25 | # the reduction script could pass the titles and colors
26 | # in to me. Maybe 'lc variable' can help here.
27 | #
28 | # NOTE:
29 | # the variable BASE, which controls input and output file names,
30 | # must have been initialized ... e.g.
31 | # BASE = "weekly"
32 | # INFILE = "bugs.".BASE
33 | # output files will have names of the form $BASE-{new,fix,net}.png
34 | #
35 |
36 | print "Processing input file: ".INFILE." to create output ".BASE."-{new,fix,net}.png"
37 |
38 | # output to png files
39 | set term png font
40 |
41 | # things usually get busier to the right
42 | set key left top
43 |
44 | # dates print better rotated
45 | set xtics out nomirror rotate
46 |
47 | # stacked histograms
48 | set ytics out nomirror
49 | set style data histograms
50 | set style histogram rowstacked
51 | set style fill solid border -1
52 | set boxwidth 0.8 relative
53 |
54 | set output BASE."-new.png"
55 | set title "Issue Arrival Rates";
56 | plot INFILE u 2:xticlabels(1) \
57 | t "Immediate" lc rgb 'violet',\
58 | '' u 3 t "Urgent" lc rgb 'red', \
59 | '' u 4 t "High" lc rgb 'pink', \
60 | '' u 5 t "Normal" lc rgb 'orange',\
61 | '' u 6 t "Low" lc rgb 'yellow',\
62 | '' u 7 t "Feature" lc rgb 'green', \
63 | '' u 8 t "Support" lc rgb 'blue', \
64 | '' u 9 t "Cleanup" lc rgb 'cyan', \
65 | '' u 10 t "Tasks" lc rgb 'white', \
66 | '' u 11 t "Doc" lc rgb 'grey';
67 |
68 | set output BASE."-fix.png"
69 | set title "Issue Fix Rates";
70 | plot INFILE u 12:xticlabels(1) \
71 | t "Immediate" lc rgb 'violet',\
72 | '' u 13 t "Urgent" lc rgb 'red', \
73 | '' u 14 t "High" lc rgb 'pink', \
74 | '' u 15 t "Normal" lc rgb 'orange',\
75 | '' u 16 t "Low" lc rgb 'yellow',\
76 | '' u 17 t "Feature" lc rgb 'green', \
77 | '' u 18 t "Support" lc rgb 'blue', \
78 | '' u 19 t "Cleanup" lc rgb 'cyan', \
79 | '' u 20 t "Tasks" lc rgb 'white', \
80 | '' u 21 t "Doc" lc rgb 'grey';
81 |
82 |
83 | set output BASE."-net.png"
84 | set title "Issue Backlog";
85 | plot INFILE u 22:xticlabels(1) \
86 | t "Immediate" lc rgb 'violet',\
87 | '' u 23 t "Urgent" lc rgb 'red', \
88 | '' u 24 t "High" lc rgb 'pink', \
89 | '' u 25 t "Normal" lc rgb 'orange',\
90 | '' u 26 t "Low" lc rgb 'yellow',\
91 | '' u 27 t "Feature" lc rgb 'green', \
92 | '' u 28 t "Support" lc rgb 'blue', \
93 | '' u 29 t "Cleanup" lc rgb 'cyan', \
94 | '' u 30 t "Tasks" lc rgb 'white', \
95 | '' u 31 t "Doc" lc rgb 'grey';
96 |
97 | #
98 | # functions to compute cumulative bug backlogs
99 | #
100 | b1 = 0
101 | b2 = 0
102 | b3 = 0
103 | b4 = 0
104 | b5 = 0
105 | b6 = 0
106 | b7 = 0
107 | b8 = 0
108 | b9 = 0
109 | f1(n,f) = ( n - f )
110 | f2(n,f) = ( n - f )
111 | f3(n,f) = ( n - f )
112 | f4(n,f) = ( n - f )
113 | f5(n,f) = ( n - f )
114 | f6(n,f) = ( n - f )
115 | f7(n,f) = ( n - f )
116 | f8(n,f) = ( n - f )
117 | f9(n,f) = ( n - f )
118 |
119 | #set output "ceph-NET.png"
120 | #set title "Issue Backlog";
121 | #plot 'buglist' using f1($2,$11):xticlabels(1)
122 |
123 |
--------------------------------------------------------------------------------
/redmine/index.html:
--------------------------------------------------------------------------------
1 | Ceph Issue Breakdowns by type
2 |
3 | Issue Breakdowns
4 | arrival rates, fix rates, backlog
5 | Weekly | Monthly
6 | [table of weekly and monthly plot images]
7 |
8 | Issue/Report Sources
9 | [table of weekly source plot images]
10 |
11 | bug fix times and ages
12 | [table of fix-time and age plot images]
13 |
14 | per Sprint Statistics
15 | [table of per-sprint plot images]
--------------------------------------------------------------------------------
/redmine/progress.pl:
--------------------------------------------------------------------------------
1 | #!/usr/bin/perl
2 | #
3 | # script: progress
4 | #
5 | # purpose:
6 | # to read through an issue status list, pull out
7 | # issues with specified tags, and generate
8 | # per-project done and backlog sizes (tasks and points).
9 | #
10 | # output: (for now)
11 | # one row per reporting period per project
12 | # date project total-tasks total-points done-tasks done-points
13 | #
14 | use warnings;
15 | use strict;
16 | use Carp;
17 |
18 | use Getopt::Std;
19 | use File::Basename;
20 | use Time::Local;
21 |
22 | use Bugparse;
23 |
24 | use constant { true => 1, false => 0 };
25 |
26 | sub usage()
27 | {
28 | print STDERR "Usage: progress.pl [switches] [file ...]\n";
29 | print STDERR " -t ......... tags to be reported\n";
30 | print STDERR " -s date .... report start date\n";
31 | print STDERR " -e date .... report end date\n";
32 | print STDERR " -p prefix .. prefix for output file names\n";
33 | }
34 |
35 | # parameters
36 | my $prefix; # output file prefix
37 | my $start_date; # report starting date
38 | my $end_date; # report end date
39 | my @tag_list; # tags to be reported
40 |
41 | # accumulated information
42 | my %projects;
43 | my %proj_points_todo;
44 | my %proj_tasks_todo;
45 | my %proj_points_done;
46 | my %proj_tasks_done;
47 |
48 | my $points_todo = 0;
49 | my $points_done = 0;
50 | my $tasks_todo = 0;
51 | my $tasks_done = 0;
52 |
53 | #
54 | # routine: parse_date
55 | #
56 | # parameters: date (mm/dd/yyyy)
57 | #
58 | # returns: time value for that date
59 | #
60 | sub parse_date
61 | {
62 |     # dissect the specified time
63 | (my $mon, my $day, my $year) = split( '/', $_[0] );
64 | return timegm( 0, 0, 0, $day, $mon-1, $year );
65 | }
66 |
67 | #
68 | # routine: flush_report
69 | #
70 | # purpose: flush out the accumulated statistics
71 | #
72 | sub flush_report
73 | {
74 | (my $sec, my $min, my $hour, my $d, my $m, my $y, my $wd, my $yd, my $dst) = localtime($end_date);
75 | $m += 1;
76 | $y += 1900;
77 |
78 | print "# date \ttasks\tdone\tpoints\tdone\tsprint\n";
79 | print "# ---- \t-----\t----\t------\t----\t------\n";
80 | printf "%02d/%02d/%04d", $m, $d, $y;
81 | print "\t$tasks_todo\t$tasks_done\t$points_todo\t$points_done\tsprint\n";
82 |
83 | # print "# date \ttasks\tdone\tpoints\tdone\tsprint\tproject\n";
84 | # print "# ---- \t-----\t----\t------\t----\t------\t-------\n";
85 | # foreach my $proj (keys %projects) {
86 | # my $tasks_todo = defined( $proj_tasks_todo{$proj} ) ? $proj_tasks_todo{$proj} : 0;
87 | # my $tasks_done = defined( $proj_tasks_done{$proj} ) ? $proj_tasks_done{$proj} : 0;
88 | # my $points_todo = defined( $proj_points_todo{$proj} ) ? $proj_points_todo{$proj} : 0;
89 | # my $points_done = defined( $proj_points_done{$proj} ) ? $proj_points_done{$proj} : 0;
90 | #
91 | # printf "%02d/%02d/%04d", $m, $d, $y;
92 | # print "\t$tasks_todo\t$tasks_done\t$points_todo\t$points_done\tsprint\t$proj\n";
93 | # }
94 | }
95 |
96 | #
97 | # routine: process_newbug
98 | #
99 | # purpose:
100 | # accumulate another bug report
101 | #
102 | sub process_newbug
103 | {
104 | (my $tag, my $project, my $closed, my $points) = ($_[0], $_[1], $_[2], $_[3]);
105 |
106 | if (!defined( $projects{$project} )) {
107 | $projects{$project} = true;
108 | $proj_points_todo{$project} = 0;
109 | $proj_points_done{$project} = 0;
110 | $proj_tasks_todo{$project} = 0;
111 | $proj_tasks_done{$project} = 0;
112 | }
113 |
114 | if ($closed eq 'none') {
115 | $proj_points_todo{$project} = $proj_points_todo{$project} + $points;
116 | $points_todo += $points;
117 | $proj_tasks_todo{$project} = $proj_tasks_todo{$project} + 1;
118 | $tasks_todo++;
119 | } else {
120 | $proj_points_done{$project} = $proj_points_done{$project} + $points;
121 | $points_done += $points;
122 | $proj_tasks_done{$project} = $proj_tasks_done{$project} + 1;
123 | $tasks_done++;
124 | }
125 | }
126 |
127 | #
128 | # routine: process_file
129 | #
130 | # purpose:
131 | # to read the lines of an input file and pass the non-comments
132 | # to the appropriate accumulation routines.
133 | #
134 | # expected input: lines containing at least ...
135 | # a type, priority, create date and close date
136 | #
137 | sub process_file
138 | { (my $file) = ($_[0]);
139 |
140 | # first line should be a headers comment
141 | my $first = <$file>;
142 | my %columns = Bugparse::parser($first);
143 |
144 | # make sure we got all the columns we needed
145 | foreach my $c ('tags', 'project', 'closed', 'points') {
146 | if (!defined( $columns{$c})) {
147 | die("Unable to find column: $c\n");
148 | }
149 | }
150 | my $tag = $columns{'tags'};
151 | my $prj = $columns{'project'};
152 | my $cls = $columns{'closed'};
153 | my $pts = $columns{'points'};
154 |
155 | # use those columns to find what we want in the following lines
156 | while( <$file> ) {
157 | if (!/^#/) { # ignore comments
158 | # carve it into tab separated fields
159 | my @fields = split( '\t', $_ );
160 |
161 | # remove any leading or trailing blanks
162 | for ( my $i = 0; $i < scalar @fields; $i++ ) {
163 | $fields[$i] =~ s/^\s+//;
164 | $fields[$i] =~ s/\s+$//;
165 | }
166 |
167 | # see if it contains a requested tag
168 | my $good_tag = false;
169 | for( my $i = 0; $i < scalar @tag_list; $i++ ) {
170 | if (index( $fields[$tag], $tag_list[$i] ) != -1) {
171 | $good_tag = true;
172 | }
173 | }
174 |
175 | # and process the fields we care about
176 | if ($good_tag) {
177 | process_newbug( $fields[$tag], $fields[$prj], $fields[$cls], $fields[$pts]);
178 | }
179 | }
180 | }
181 |
182 | flush_report();
183 | }
184 |
185 |
186 | #
187 | # routine: main
188 | #
189 | # purpose:
190 | # process arguments
191 | # figure out what operations we are supposed to do
192 | # perform them
193 | #
194 | # notes:
195 | # we require a command just to make sure the caller
196 | # knows what he is doing
197 | #
198 | sub main
199 | {
200 | # parse the input parameters
201 | my %options = ();
202 | if (!getopts('s:e:p:t:', \%options)) {
203 | usage();
204 | exit(1);
205 | }
206 |
207 | # see what tag we are extracting
208 | if (defined($options{'t'})) {
209 | my $list = $options{'t'};
210 | @tag_list = split(',',$list);
211 | }
212 |
213 | # see what our reporting period is
214 |     $start_date = defined( $options{'s'} ) ? parse_date($options{'s'}) : 0;
215 |     $end_date = defined( $options{'e'} ) ? parse_date($options{'e'}) : time();
216 |
217 | # see if we have a specified output file prefix
218 | $prefix = defined( $options{'p'} ) ? $options{'p'} : '';
219 |
220 | # then process the input file(s)
221 | my $args = scalar @ARGV;
222 | if ( $args < 1 ) {
223 |         process_file( \*STDIN );
224 | } else {
225 | for( my $i = 0; $i < $args; $i++ ) {
226 | open(my $file, "<$ARGV[$i]") ||
227 | die "Unable to open input file $ARGV[$i]";
228 | process_file( $file );
229 | close( $file );
230 | }
231 | }
232 |
233 | exit(0);
234 | }
235 |
236 | main();
237 |
--------------------------------------------------------------------------------
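A hypothetical invocation; the tag names, dates (mm/dd/yyyy, per parse_date), and input file are illustrative only:

    # report done vs outstanding tasks and points for issues tagged rados or rbd
    perl progress.pl -t rados,rbd -s 01/01/2014 -e 06/30/2014 redmine_dump.txt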
/redmine/progress.plot:
--------------------------------------------------------------------------------
1 | #! /usr/bin/gnuplot
2 | #
3 | # generate plots of outstanding and done work,
4 | # per project, over time
5 | #
6 | # usage:
7 | # gnuplot progress.plot bugdata
8 | #
9 | # expected input format:
10 | # date tasks-todo tasks-done points-todo points-done sprint
11 | #
12 | # This plot file does not care what the time unit is, it just uses
13 | # column 1 as a label
14 | #
15 | # NOTE:
16 | # the variable BASE, which controls input and output file names,
17 | # must have been initialized ... e.g.
18 | # BASE = "weekly"
19 | # INFILE = "bugs.".BASE
20 | # output files will have names of the form $BASE-{points,tasks}.png
21 | #
22 |
23 | BASE="CY2012"
24 | INFILE="/tmp/bugtemp/".BASE.".out"
25 | OUTFILE="/tmp/bugtemp/".BASE.".png"
26 |
27 | print "Processing input file: ".INFILE." to create output ".BASE."-{points,tasks}.png"
28 |
29 | # output to png files
30 | set term png font
31 |
32 | # things usually get busier to the right
33 | set key left top
34 |
35 | # dates print better rotated
36 | set xtics out nomirror rotate
37 |
38 | # stacked histograms
39 | set ytics out nomirror
40 | set style data histograms
41 | set style histogram rowstacked
42 | set style fill solid border -1
43 | set boxwidth 0.8 relative
44 |
45 |
46 | set output BASE."-points.png"
47 | set ylabel "Points"
48 | set title "Complete vs TODO (points) for ".BASE
49 | plot INFILE u 4:xticlabels(6) t 'points remaining' lc rgb 'red',\
50 |      '' u 5 t 'points complete' lc rgb 'green'
51 |
52 | set output BASE."-tasks.png"
53 | set ylabel "Tasks"
54 | set title "Complete vs TODO (tasks) for ".BASE
55 | plot INFILE u 2:xticlabels(6) t 'tasks remaining' lc rgb 'red',\
56 |      '' u 3 t 'tasks complete' lc rgb 'green'
57 |
--------------------------------------------------------------------------------
/redmine/sources.plot:
--------------------------------------------------------------------------------
1 | #! /usr/bin/gnuplot
2 | #
3 | # generate plots of numbers of bugs submitted in each time period
4 | # broken down by source and tracker-type or priority
5 | #
6 | # usage:
7 | # gnuplot weekly.plot sources.plot
8 | #
9 | # expected input:
10 | # date bucket dev qa com-dev com-user support other none
11 | #
12 | # This plot file does not care what the time unit is, it just uses
13 | # column 1 as a label
14 | #
15 | # TODO
16 | #
17 | # (1) Having this script know what the names and colors of
18 | # the issue classifications ties this to the database
19 | # and the reduction script. Much better would be if
20 | # the reduction script could pass the titles and colors
21 | # in to me. Maybe 'lc variable' can help here.
22 | #
23 | # NOTE:
24 | # the variable BASE, which controls input and output file names,
25 | # must have been initialized ... e.g.
26 | # BASE = "Urgent"
27 | # INFILE = "sources.".BASE
28 | # output files will have names of the form $BASE-src.png
29 | #
30 |
31 | print "Processing input file: ".INFILE." to create output ".BASE."-src.png"
32 |
33 | # output to png files
34 | set term png font
35 |
36 | # things usually get busier to the right
37 | set key left top
38 |
39 | # dates print better rotated
40 | set xtics out nomirror rotate
41 |
42 | # stacked histograms
43 | set ytics out nomirror
44 | set style data histograms
45 | set style histogram rowstacked
46 | set style fill solid border -1
47 | set boxwidth 0.8 relative
48 |
49 | set output BASE."-src.png"
50 | set title BASE." Issue Sources";
51 | plot INFILE u 3:xticlabels(1) \
52 | t "Developers" lc rgb 'green', \
53 | '' u 4 t "Q/A" lc rgb 'blue', \
54 | '' u 5 t "Comm (dev)" lc rgb 'yellow',\
55 | '' u 6 t "Comm (usr)" lc rgb 'orange',\
56 | '' u 7 t "Support" lc rgb 'red', \
57 | '' u 8 t "other" lc rgb 'grey', \
58 | '' u 9 t "none" lc rgb 'white';
59 |
--------------------------------------------------------------------------------
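As buggraphs.sh does, a per-priority slice of the sourcemunch.pl output is grepped out and handed to this plot (paths are illustrative):

    grep Urgent /tmp/bugtemp/sources > /tmp/bugtemp/Urgent
    ./bugplot.sh /tmp/bugtemp/Urgent sources.plot   # writes /tmp/bugtemp/Urgent-src.png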
/redmine/sprints.plot:
--------------------------------------------------------------------------------
1 | #! /usr/bin/gnuplot
2 | #
3 | # generate plots of numbers of issues assigned to and fixed to each version
4 | # broken down by tracker-type or priority
5 | #
6 | # usage:
7 | # gnuplot sprints.plot
8 | #
9 | # expected input format:
10 | # version name
11 | # assigned: urgent high normal low feature support cleanup tasks doc
12 | # fixed: urgent high normal low feature support cleanup tasks doc
13 | #
14 | # (1) Having this script know what the names and colors of
15 | # the issue classifications ties this to the database
16 | # and the reduction script. Much better would be if
17 | # the reduction script could pass the titles and colors
18 | # in to me. Maybe 'lc variable' can help here.
19 | #
20 |
21 | print "Processing input file: ".INFILE." to create output ".BASE."-{asgn,fix,spill,net}.png"
22 |
23 | # output to png files
24 | set term png font
25 |
26 | # things usually get busier to the right
27 | set key left top
28 |
29 | # dates print better rotated
30 | set xtics out nomirror rotate
31 |
32 | # stacked histograms
33 | set ytics out nomirror
34 | set style data histograms
35 | set style histogram rowstacked
36 | set style fill solid border -1
37 | set boxwidth 0.8 relative
38 |
39 | set output BASE."-asgn.png"
40 | set title "Issues assigned to each sprint";
41 | plot INFILE u 3:xticlabels(1) \
42 | t "Immediate" lc rgb 'violet',\
43 | '' u 4 t "Urgent" lc rgb 'red', \
44 | '' u 5 t "High" lc rgb 'pink', \
45 | '' u 6 t "Normal" lc rgb 'orange',\
46 | '' u 7 t "Low" lc rgb 'yellow',\
47 | '' u 8 t "Feature" lc rgb 'green', \
48 | '' u 9 t "Support" lc rgb 'blue', \
49 | '' u 10 t "Cleanup" lc rgb 'cyan', \
50 | '' u 11 t "Tasks" lc rgb 'white', \
51 | '' u 12 t "Doc" lc rgb 'grey';
52 |
53 | set output BASE."-fix.png"
54 | set title "Issues fixed in each sprint";
55 | plot INFILE u 14:xticlabels(1) \
56 | t "Immediate" lc rgb 'violet',\
57 | '' u 15 t "Urgent" lc rgb 'red', \
58 | '' u 16 t "High" lc rgb 'pink', \
59 | '' u 17 t "Normal" lc rgb 'orange',\
60 | '' u 18 t "Low" lc rgb 'yellow',\
61 | '' u 19 t "Feature" lc rgb 'green', \
62 | '' u 20 t "Support" lc rgb 'blue', \
63 | '' u 21 t "Cleanup" lc rgb 'cyan', \
64 | '' u 22 t "Tasks" lc rgb 'white', \
65 | '' u 23 t "Doc" lc rgb 'grey';
66 |
67 | set output BASE."-spill.png"
68 | set title "Breakdown of unaccomplished work in each sprint";
69 | plot INFILE u ($3-$14):xticlabels(1) \
70 | t "Immediate" lc rgb 'violet',\
71 | '' u ($4-$15) t "Urgent" lc rgb 'red', \
72 | '' u ($5-$16) t "High" lc rgb 'pink', \
73 | '' u ($6-$17) t "Normal" lc rgb 'orange',\
74 | '' u ($7-$18) t "Low" lc rgb 'yellow',\
75 | '' u ($8-$19) t "Feature" lc rgb 'green', \
76 | '' u ($9-$20) t "Support" lc rgb 'blue', \
77 | '' u ($10-$21) t "Cleanup" lc rgb 'cyan', \
78 | '' u ($11-$22) t "Tasks" lc rgb 'white', \
79 | '' u ($12-$23) t "Doc" lc rgb 'grey';
80 |
81 | set output BASE."-net.png"
82 | set title "Committed vs Accomplished for each sprint";
83 | plot INFILE u 13:xticlabels(1) \
84 | t "fixed" lc rgb 'green' ,\
85 | '' u ($2-$13) t "not fixed" lc rgb 'red' ;
86 |
--------------------------------------------------------------------------------
/regression/burnupi-available/3xRep/ceph.conf:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd crush update on start = false
4 | osd crush chooseleaf type = 0
5 | osd pg bits = 10
6 | osd pgp bits = 10
7 | auth client required = none
8 | auth cluster required = none
9 | auth service required = none
10 | keyring = /tmp/cbt/ceph/keyring
11 | log to syslog = false
12 | log file = /tmp/cbt/ceph/log/$name.log
13 | rbd cache = true
14 | filestore merge threshold = 40
15 | filestore split multiple = 8
16 | osd op threads = 8
17 | mon pg warn max object skew = 100000
18 | mon pg warn min per osd = 0
19 | mon pg warn max per osd = 32768
20 | [mon.a]
21 | mon addr = 10.214.144.25:6789
22 | host = plana15
23 | mon data = /tmp/cbt/ceph/mon.$id
24 |
25 | [osd]
26 | public network = 10.214.148.0/20
27 | cluster network = 10.214.148.0/20
28 |
29 | [osd.0]
30 | host = burnupi37
31 | osd data = /tmp/cbt/mnt/osd-device-0-data
32 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
33 |
34 | [osd.1]
35 | host = burnupi37
36 | osd data = /tmp/cbt/mnt/osd-device-1-data
37 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
38 |
39 | [osd.2]
40 | host = burnupi37
41 | osd data = /tmp/cbt/mnt/osd-device-2-data
42 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
43 |
44 | [osd.3]
45 | host = burnupi37
46 | osd data = /tmp/cbt/mnt/osd-device-3-data
47 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
48 |
49 | [osd.4]
50 | host = burnupi37
51 | osd data = /tmp/cbt/mnt/osd-device-4-data
52 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
53 |
54 | [osd.5]
55 | host = burnupi37
56 | osd data = /tmp/cbt/mnt/osd-device-5-data
57 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
58 |
59 | [osd.6]
60 | host = burnupi38
61 | osd data = /tmp/cbt/mnt/osd-device-0-data
62 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
63 |
64 | [osd.7]
65 | host = burnupi38
66 | osd data = /tmp/cbt/mnt/osd-device-1-data
67 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
68 |
69 | [osd.8]
70 | host = burnupi38
71 | osd data = /tmp/cbt/mnt/osd-device-2-data
72 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
73 |
74 | [osd.9]
75 | host = burnupi38
76 | osd data = /tmp/cbt/mnt/osd-device-3-data
77 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
78 |
79 | [osd.10]
80 | host = burnupi38
81 | osd data = /tmp/cbt/mnt/osd-device-4-data
82 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
83 |
84 | [osd.11]
85 | host = burnupi38
86 | osd data = /tmp/cbt/mnt/osd-device-5-data
87 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
88 |
89 | [osd.12]
90 | host = burnupi39
91 | osd data = /tmp/cbt/mnt/osd-device-0-data
92 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
93 |
94 | [osd.13]
95 | host = burnupi39
96 | osd data = /tmp/cbt/mnt/osd-device-1-data
97 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
98 |
99 | [osd.14]
100 | host = burnupi39
101 | osd data = /tmp/cbt/mnt/osd-device-2-data
102 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
103 |
104 | [osd.15]
105 | host = burnupi39
106 | osd data = /tmp/cbt/mnt/osd-device-3-data
107 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
108 |
109 | [osd.16]
110 | host = burnupi39
111 | osd data = /tmp/cbt/mnt/osd-device-4-data
112 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
113 |
114 | [osd.17]
115 | host = burnupi39
116 | osd data = /tmp/cbt/mnt/osd-device-5-data
117 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
118 |
119 |
--------------------------------------------------------------------------------
/regression/burnupi-available/3xRep/runtests.btrfs.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'regression'
3 | head: "plana15"
4 | clients: ["plana15"]
5 | osds: ["burnupi37", "burnupi38", "burnupi39"]
6 | mons:
7 | plana15:
8 | a: "10.214.144.25:6789"
9 | osds_per_node: 6
10 | fs: btrfs
11 | mkfs_opts: -l 16k -n 16k -f
12 | mount_opts: -o noatime
13 | conf_file: '/home/regression/regression/tests-available/3xRep/ceph.conf'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | radosbench:
20 | pg_size: 1024
21 | pgp_size: 1024
22 | replication: 3
23 | rbd:
24 | pg_size: 4096
25 | pgp_size: 4096
26 | replication: 3
27 |
28 | benchmarks:
29 | radosbench:
30 | op_size: [4194304, 131072, 4096]
31 | write_only: False
32 | time: 300
33 | concurrent_ops: [32]
34 | concurrent_procs: 4
35 | osd_ra: [4096]
36 | pool_profile: 'radosbench'
37 | # librbdfio:
38 | # time: 300
39 | # vol_size: 65536
40 | # mode: ['read', 'write', 'randread', 'randwrite', 'rw', 'randrw']
41 | # rwmixread: 50
42 | # op_size: [4194304, 131072, 4096]
43 | # concurrent_procs: [1]
44 | # iodepth: [128]
45 | # osd_ra: [4096]
46 | # cmd_path: '/home/regression/src/fio/fio'
47 | # pool_profile: 'rbd'
48 | # log_avg_msec: 100
49 | # rbdfio:
50 | # time: 300
51 | # vol_size: 65536
52 | # mode: ['read', 'write', 'randread', 'randwrite', 'rw', 'randrw']
53 | # rwmixread: 50
54 | # op_size: [4194304, 131072, 4096]
55 | # concurrent_procs: [1]
56 | # iodepth: [128]
57 | # osd_ra: [4096]
58 | # cmd_path: '/home/regression/src/fio/fio'
59 | # pool_profile: 'rbd'
60 | # log_avg_msec: 100
61 |
62 |
--------------------------------------------------------------------------------
/regression/burnupi-available/3xRep/runtests.xfs.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'regression'
3 | head: "plana15"
4 | clients: ["plana15"]
5 | osds: ["burnupi37", "burnupi38", "burnupi39"]
6 | mons:
7 | plana15:
8 | a: "10.214.144.25:6789"
9 | osds_per_node: 6
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/regression/regression/tests-available/3xRep/ceph.conf'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | radosbench:
20 | pg_size: 1024
21 | pgp_size: 1024
22 | replication: 3
23 | rbd:
24 | pg_size: 4096
25 | pgp_size: 4096
26 | replication: 3
27 |
28 | benchmarks:
29 | radosbench:
30 | op_size: [4194304, 131072, 4096]
31 | write_only: False
32 | time: 300
33 | concurrent_ops: [32]
34 | concurrent_procs: 4
35 | osd_ra: [4096]
36 | pool_profile: 'radosbench'
37 | librbdfio:
38 | time: 300
39 | vol_size: 65536
40 | mode: ['read', 'write', 'randread', 'randwrite', 'rw', 'randrw']
41 | rwmixread: 50
42 | op_size: [4194304, 131072, 4096]
43 | concurrent_procs: [1]
44 | iodepth: [128]
45 | osd_ra: [4096]
46 | cmd_path: '/home/regression/src/fio/fio'
47 | pool_profile: 'rbd'
48 | log_avg_msec: 100
49 | rbdfio:
50 | time: 300
51 | vol_size: 65536
52 | mode: ['read', 'write', 'randread', 'randwrite', 'rw', 'randrw']
53 | rwmixread: 50
54 | op_size: [4194304, 131072, 4096]
55 | concurrent_procs: [1]
56 | iodepth: [128]
57 | osd_ra: [4096]
58 | cmd_path: '/home/regression/src/fio/fio'
59 | pool_profile: 'rbd'
60 | log_avg_msec: 100
61 |
62 |
--------------------------------------------------------------------------------
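These runtests yaml files are consumed by cbt (the Ceph Benchmarking Tool). A sketch of launching one run, assuming cbt.py is on the path and accepts an --archive directory, a --conf file, and a test yaml (the flag names and paths are assumptions based on cbt's usage, not defined in this repo):

    cbt.py --archive=/tmp/cbt/archive \
           --conf=/home/regression/regression/tests-available/3xRep/ceph.conf \
           runtests.xfs.yaml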
/regression/burnupi-available/EC/ceph.conf:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd crush update on start = false
4 | osd crush chooseleaf type = 0
5 | osd pg bits = 10
6 | osd pgp bits = 10
7 | auth client required = none
8 | auth cluster required = none
9 | auth service required = none
10 | keyring = /tmp/cbt/ceph/keyring
11 | log to syslog = false
12 | log file = /tmp/cbt/ceph/log/$name.log
13 | rbd cache = true
14 | filestore merge threshold = 40
15 | filestore split multiple = 8
16 | osd op threads = 8
17 | mon pg warn max object skew = 100000
18 | mon pg warn min per osd = 0
19 | mon pg warn max per osd = 32768
20 | [mon.a]
21 | mon addr = 10.214.144.25:6789
22 | host = plana15
23 | mon data = /tmp/cbt/ceph/mon.$id
24 |
25 | [osd]
26 | public network = 10.214.148.0/20
27 | cluster network = 10.214.148.0/20
28 |
29 | [osd.0]
30 | host = burnupi37
31 | osd data = /tmp/cbt/mnt/osd-device-0-data
32 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
33 |
34 | [osd.1]
35 | host = burnupi37
36 | osd data = /tmp/cbt/mnt/osd-device-1-data
37 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
38 |
39 | [osd.2]
40 | host = burnupi37
41 | osd data = /tmp/cbt/mnt/osd-device-2-data
42 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
43 |
44 | [osd.3]
45 | host = burnupi37
46 | osd data = /tmp/cbt/mnt/osd-device-3-data
47 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
48 |
49 | [osd.4]
50 | host = burnupi37
51 | osd data = /tmp/cbt/mnt/osd-device-4-data
52 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
53 |
54 | [osd.5]
55 | host = burnupi37
56 | osd data = /tmp/cbt/mnt/osd-device-5-data
57 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
58 |
59 | [osd.6]
60 | host = burnupi38
61 | osd data = /tmp/cbt/mnt/osd-device-0-data
62 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
63 |
64 | [osd.7]
65 | host = burnupi38
66 | osd data = /tmp/cbt/mnt/osd-device-1-data
67 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
68 |
69 | [osd.8]
70 | host = burnupi38
71 | osd data = /tmp/cbt/mnt/osd-device-2-data
72 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
73 |
74 | [osd.9]
75 | host = burnupi38
76 | osd data = /tmp/cbt/mnt/osd-device-3-data
77 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
78 |
79 | [osd.10]
80 | host = burnupi38
81 | osd data = /tmp/cbt/mnt/osd-device-4-data
82 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
83 |
84 | [osd.11]
85 | host = burnupi38
86 | osd data = /tmp/cbt/mnt/osd-device-5-data
87 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
88 |
89 | [osd.12]
90 | host = burnupi39
91 | osd data = /tmp/cbt/mnt/osd-device-0-data
92 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
93 |
94 | [osd.13]
95 | host = burnupi39
96 | osd data = /tmp/cbt/mnt/osd-device-1-data
97 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
98 |
99 | [osd.14]
100 | host = burnupi39
101 | osd data = /tmp/cbt/mnt/osd-device-2-data
102 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
103 |
104 | [osd.15]
105 | host = burnupi39
106 | osd data = /tmp/cbt/mnt/osd-device-3-data
107 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
108 |
109 | [osd.16]
110 | host = burnupi39
111 | osd data = /tmp/cbt/mnt/osd-device-4-data
112 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
113 |
114 | [osd.17]
115 | host = burnupi39
116 | osd data = /tmp/cbt/mnt/osd-device-5-data
117 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
118 |
119 |
--------------------------------------------------------------------------------
/regression/burnupi-available/EC/runtests.EC31.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'regression'
3 | head: "plana15"
4 | clients: ["plana15"]
5 | osds: ["burnupi37", "burnupi38", "burnupi39"]
6 | mons:
7 | plana15:
8 | a: "10.214.144.25:6789"
9 | osds_per_node: 6
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/regression/regression/tests-available/EC/ceph.conf'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | radosbench:
20 | pg_size: 1024
21 | pgp_size: 1024
22 | replication: 'erasure'
23 | erasure_profile: 'ec31'
24 | erasure_profiles:
25 | ec31:
26 | erasure_k: 3
27 | erasure_m: 1
28 | benchmarks:
29 | radosbench:
30 | op_size: [4194304, 131072, 4096]
31 | write_only: False
32 | time: 300
33 | concurrent_ops: [32]
34 | concurrent_procs: 4
35 | osd_ra: [4096]
36 | pool_profile: 'radosbench'
37 |
--------------------------------------------------------------------------------
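The ec31 profile above (k=3, m=1) stripes each object across three data chunks plus one coding chunk, so the pool survives the loss of any single OSD. Created by hand with the ceph CLI, the equivalent profile and pool would look roughly like this (the pool name is illustrative; the pg counts match the radosbench pool profile):

    ceph osd erasure-code-profile set ec31 k=3 m=1
    ceph osd pool create ecbench 1024 1024 erasure ec31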
/regression/burnupi-available/EC/runtests.EC62.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'regression'
3 | head: "plana15"
4 | clients: ["plana15"]
5 | osds: ["burnupi37", "burnupi38", "burnupi39"]
6 | mons:
7 | plana15:
8 | a: "10.214.144.25:6789"
9 | osds_per_node: 6
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/regression/regression/tests-available/EC/ceph.conf'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | radosbench:
20 | pg_size: 1024
21 | pgp_size: 1024
22 | replication: 'erasure'
23 | erasure_profile: 'ec62'
24 | erasure_profiles:
25 | ec62:
26 | erasure_k: 6
27 | erasure_m: 2
28 | benchmarks:
29 | radosbench:
30 | op_size: [4194304, 131072, 4096]
31 | write_only: False
32 | time: 300
33 | concurrent_ops: [32]
34 | concurrent_procs: 4
35 | osd_ra: [4096]
36 | pool_profile: 'radosbench'
37 |
--------------------------------------------------------------------------------
/regression/burnupi-available/EC/runtests.EC93.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'regression'
3 | head: "plana15"
4 | clients: ["plana15"]
5 | osds: ["burnupi37", "burnupi38", "burnupi39"]
6 | mons:
7 | plana15:
8 | a: "10.214.144.25:6789"
9 | osds_per_node: 6
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/regression/regression/tests-available/EC/ceph.conf'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | radosbench:
20 | pg_size: 1024
21 | pgp_size: 1024
22 | replication: 'erasure'
23 | erasure_profile: 'ec93'
24 | erasure_profiles:
25 | ec93:
26 | erasure_k: 9
27 | erasure_m: 3
28 | benchmarks:
29 | radosbench:
30 | op_size: [4194304, 131072, 4096]
31 | write_only: False
32 | time: 300
33 | concurrent_ops: [32]
34 | concurrent_procs: 4
35 | osd_ra: [4096]
36 | pool_profile: 'radosbench'
37 |
--------------------------------------------------------------------------------
/regression/burnupi-available/backfill/ceph.conf.high_rp:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd max backfills = 20
4 | osd recovery max active = 30
5 | osd recovery op priority = 20
6 |
7 | osd crush update on start = false
8 | osd crush chooseleaf type = 0
9 | osd pg bits = 10
10 | osd pgp bits = 10
11 | auth client required = none
12 | auth cluster required = none
13 | auth service required = none
14 | keyring = /tmp/cbt/ceph/keyring
15 | log to syslog = false
16 | log file = /tmp/cbt/ceph/log/$name.log
17 | rbd cache = true
18 | filestore merge threshold = 40
19 | filestore split multiple = 8
20 | osd op threads = 8
21 | mon pg warn max object skew = 100000
22 | mon pg warn min per osd = 0
23 | mon pg warn max per osd = 32768
24 |
25 | [mon.a]
26 | mon addr = 10.214.144.25:6789
27 | host = plana15
28 | mon data = /tmp/cbt/ceph/mon.$id
29 |
30 | [osd]
31 | public network = 10.214.148.0/20
32 | cluster network = 10.214.148.0/20
33 |
34 | [osd.0]
35 | host = burnupi37
36 | osd data = /tmp/cbt/mnt/osd-device-0-data
37 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
38 |
39 | [osd.1]
40 | host = burnupi37
41 | osd data = /tmp/cbt/mnt/osd-device-1-data
42 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
43 |
44 | [osd.2]
45 | host = burnupi37
46 | osd data = /tmp/cbt/mnt/osd-device-2-data
47 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
48 |
49 | [osd.3]
50 | host = burnupi37
51 | osd data = /tmp/cbt/mnt/osd-device-3-data
52 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
53 |
54 | [osd.4]
55 | host = burnupi37
56 | osd data = /tmp/cbt/mnt/osd-device-4-data
57 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
58 |
59 | [osd.5]
60 | host = burnupi37
61 | osd data = /tmp/cbt/mnt/osd-device-5-data
62 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
63 |
64 | [osd.6]
65 | host = burnupi38
66 | osd data = /tmp/cbt/mnt/osd-device-0-data
67 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
68 |
69 | [osd.7]
70 | host = burnupi38
71 | osd data = /tmp/cbt/mnt/osd-device-1-data
72 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
73 |
74 | [osd.8]
75 | host = burnupi38
76 | osd data = /tmp/cbt/mnt/osd-device-2-data
77 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
78 |
79 | [osd.9]
80 | host = burnupi38
81 | osd data = /tmp/cbt/mnt/osd-device-3-data
82 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
83 |
84 | [osd.10]
85 | host = burnupi38
86 | osd data = /tmp/cbt/mnt/osd-device-4-data
87 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
88 |
89 | [osd.11]
90 | host = burnupi38
91 | osd data = /tmp/cbt/mnt/osd-device-5-data
92 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
93 |
94 | [osd.12]
95 | host = burnupi39
96 | osd data = /tmp/cbt/mnt/osd-device-0-data
97 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
98 |
99 | [osd.13]
100 | host = burnupi39
101 | osd data = /tmp/cbt/mnt/osd-device-1-data
102 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
103 |
104 | [osd.14]
105 | host = burnupi39
106 | osd data = /tmp/cbt/mnt/osd-device-2-data
107 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
108 |
109 | [osd.15]
110 | host = burnupi39
111 | osd data = /tmp/cbt/mnt/osd-device-3-data
112 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
113 |
114 | [osd.16]
115 | host = burnupi39
116 | osd data = /tmp/cbt/mnt/osd-device-4-data
117 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
118 |
119 | [osd.17]
120 | host = burnupi39
121 | osd data = /tmp/cbt/mnt/osd-device-5-data
122 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
123 |
124 |
--------------------------------------------------------------------------------
/regression/burnupi-available/backfill/ceph.conf.low_rp:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd max backfills = 5
4 | osd recovery max active = 7
5 | osd recovery op priority = 5
6 |
7 | osd crush update on start = false
8 | osd crush chooseleaf type = 0
9 | osd pg bits = 10
10 | osd pgp bits = 10
11 | auth client required = none
12 | auth cluster required = none
13 | auth service required = none
14 | keyring = /tmp/cbt/ceph/keyring
15 | log to syslog = false
16 | log file = /tmp/cbt/ceph/log/$name.log
17 | rbd cache = true
18 | filestore merge threshold = 40
19 | filestore split multiple = 8
20 | osd op threads = 8
21 | mon pg warn max object skew = 100000
22 | mon pg warn min per osd = 0
23 | mon pg warn max per osd = 32768
24 |
25 | [mon.a]
26 | mon addr = 10.214.144.25:6789
27 | host = plana15
28 | mon data = /tmp/cbt/ceph/mon.$id
29 |
30 | [osd]
31 | public network = 10.214.148.0/20
32 | cluster network = 10.214.148.0/20
33 |
34 | [osd.0]
35 | host = burnupi37
36 | osd data = /tmp/cbt/mnt/osd-device-0-data
37 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
38 |
39 | [osd.1]
40 | host = burnupi37
41 | osd data = /tmp/cbt/mnt/osd-device-1-data
42 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
43 |
44 | [osd.2]
45 | host = burnupi37
46 | osd data = /tmp/cbt/mnt/osd-device-2-data
47 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
48 |
49 | [osd.3]
50 | host = burnupi37
51 | osd data = /tmp/cbt/mnt/osd-device-3-data
52 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
53 |
54 | [osd.4]
55 | host = burnupi37
56 | osd data = /tmp/cbt/mnt/osd-device-4-data
57 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
58 |
59 | [osd.5]
60 | host = burnupi37
61 | osd data = /tmp/cbt/mnt/osd-device-5-data
62 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
63 |
64 | [osd.6]
65 | host = burnupi38
66 | osd data = /tmp/cbt/mnt/osd-device-0-data
67 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
68 |
69 | [osd.7]
70 | host = burnupi38
71 | osd data = /tmp/cbt/mnt/osd-device-1-data
72 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
73 |
74 | [osd.8]
75 | host = burnupi38
76 | osd data = /tmp/cbt/mnt/osd-device-2-data
77 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
78 |
79 | [osd.9]
80 | host = burnupi38
81 | osd data = /tmp/cbt/mnt/osd-device-3-data
82 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
83 |
84 | [osd.10]
85 | host = burnupi38
86 | osd data = /tmp/cbt/mnt/osd-device-4-data
87 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
88 |
89 | [osd.11]
90 | host = burnupi38
91 | osd data = /tmp/cbt/mnt/osd-device-5-data
92 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
93 |
94 | [osd.12]
95 | host = burnupi39
96 | osd data = /tmp/cbt/mnt/osd-device-0-data
97 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
98 |
99 | [osd.13]
100 | host = burnupi39
101 | osd data = /tmp/cbt/mnt/osd-device-1-data
102 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
103 |
104 | [osd.14]
105 | host = burnupi39
106 | osd data = /tmp/cbt/mnt/osd-device-2-data
107 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
108 |
109 | [osd.15]
110 | host = burnupi39
111 | osd data = /tmp/cbt/mnt/osd-device-3-data
112 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
113 |
114 | [osd.16]
115 | host = burnupi39
116 | osd data = /tmp/cbt/mnt/osd-device-4-data
117 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
118 |
119 | [osd.17]
120 | host = burnupi39
121 | osd data = /tmp/cbt/mnt/osd-device-5-data
122 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
123 |
124 |
--------------------------------------------------------------------------------
/regression/burnupi-available/backfill/ceph.conf.norm_rp:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd max backfills = 10
4 | osd recovery max active = 15
5 | osd recovery op priority = 10
6 |
7 | osd crush update on start = false
8 | osd crush chooseleaf type = 0
9 | osd pg bits = 10
10 | osd pgp bits = 10
11 | auth client required = none
12 | auth cluster required = none
13 | auth service required = none
14 | keyring = /tmp/cbt/ceph/keyring
15 | log to syslog = false
16 | log file = /tmp/cbt/ceph/log/$name.log
17 | rbd cache = true
18 | filestore merge threshold = 40
19 | filestore split multiple = 8
20 | osd op threads = 8
21 | mon pg warn max object skew = 100000
22 | mon pg warn min per osd = 0
23 | mon pg warn max per osd = 32768
24 |
25 | [mon.a]
26 | mon addr = 10.214.144.25:6789
27 | host = plana15
28 | mon data = /tmp/cbt/ceph/mon.$id
29 |
30 | [osd]
31 | public network = 10.214.148.0/20
32 | cluster network = 10.214.148.0/20
33 |
34 | [osd.0]
35 | host = burnupi37
36 | osd data = /tmp/cbt/mnt/osd-device-0-data
37 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
38 |
39 | [osd.1]
40 | host = burnupi37
41 | osd data = /tmp/cbt/mnt/osd-device-1-data
42 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
43 |
44 | [osd.2]
45 | host = burnupi37
46 | osd data = /tmp/cbt/mnt/osd-device-2-data
47 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
48 |
49 | [osd.3]
50 | host = burnupi37
51 | osd data = /tmp/cbt/mnt/osd-device-3-data
52 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
53 |
54 | [osd.4]
55 | host = burnupi37
56 | osd data = /tmp/cbt/mnt/osd-device-4-data
57 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
58 |
59 | [osd.5]
60 | host = burnupi37
61 | osd data = /tmp/cbt/mnt/osd-device-5-data
62 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
63 |
64 | [osd.6]
65 | host = burnupi38
66 | osd data = /tmp/cbt/mnt/osd-device-0-data
67 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
68 |
69 | [osd.7]
70 | host = burnupi38
71 | osd data = /tmp/cbt/mnt/osd-device-1-data
72 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
73 |
74 | [osd.8]
75 | host = burnupi38
76 | osd data = /tmp/cbt/mnt/osd-device-2-data
77 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
78 |
79 | [osd.9]
80 | host = burnupi38
81 | osd data = /tmp/cbt/mnt/osd-device-3-data
82 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
83 |
84 | [osd.10]
85 | host = burnupi38
86 | osd data = /tmp/cbt/mnt/osd-device-4-data
87 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
88 |
89 | [osd.11]
90 | host = burnupi38
91 | osd data = /tmp/cbt/mnt/osd-device-5-data
92 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
93 |
94 | [osd.12]
95 | host = burnupi39
96 | osd data = /tmp/cbt/mnt/osd-device-0-data
97 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
98 |
99 | [osd.13]
100 | host = burnupi39
101 | osd data = /tmp/cbt/mnt/osd-device-1-data
102 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
103 |
104 | [osd.14]
105 | host = burnupi39
106 | osd data = /tmp/cbt/mnt/osd-device-2-data
107 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
108 |
109 | [osd.15]
110 | host = burnupi39
111 | osd data = /tmp/cbt/mnt/osd-device-3-data
112 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
113 |
114 | [osd.16]
115 | host = burnupi39
116 | osd data = /tmp/cbt/mnt/osd-device-4-data
117 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
118 |
119 | [osd.17]
120 | host = burnupi39
121 | osd data = /tmp/cbt/mnt/osd-device-5-data
122 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
123 |
124 |
--------------------------------------------------------------------------------
/regression/burnupi-available/backfill/runtests.high_rp.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'regression'
3 | head: "plana15"
4 | clients: ["plana15"]
5 | osds: ["burnupi37", "burnupi38", "burnupi39"]
6 | mons:
7 | plana15:
8 | a: "10.214.144.25:6789"
9 | osds_per_node: 6
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/regression/regression/tests-available/backfill/ceph.conf.high_rp'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | rbd:
20 | pg_size: 4096
21 | pgp_size: 4096
22 | replication: 3
23 | recovery_test:
24 | osds: [17]
25 | benchmarks:
26 | librbdfio:
27 | time: 7200
28 | vol_size: 10240
29 | mode: ['write']
30 | op_size: [4194304]
31 | concurrent_procs: [1]
32 | iodepth: [128]
33 | osd_ra: [4096]
34 | cmd_path: '/home/regression/src/fio/fio'
35 | pool_profile: 'rbd'
36 | log_avg_msec: 100
37 |
38 |
--------------------------------------------------------------------------------
/regression/burnupi-available/backfill/runtests.low_rp.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'regression'
3 | head: "plana15"
4 | clients: ["plana15"]
5 | osds: ["burnupi37", "burnupi38", "burnupi39"]
6 | mons:
7 | plana15:
8 | a: "10.214.144.25:6789"
9 | osds_per_node: 6
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/regression/regression/tests-available/backfill/ceph.conf.low_rp'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | rbd:
20 | pg_size: 4096
21 | pgp_size: 4096
22 | replication: 3
23 | recovery_test:
24 | osds: [17]
25 | benchmarks:
26 | librbdfio:
27 | time: 7200
28 | vol_size: 10240
29 | mode: ['write']
30 | op_size: [4194304]
31 | concurrent_procs: [1]
32 | iodepth: [128]
33 | osd_ra: [4096]
34 | cmd_path: '/home/regression/src/fio/fio'
35 | pool_profile: 'rbd'
36 | log_avg_msec: 100
37 |
38 |
--------------------------------------------------------------------------------
/regression/burnupi-available/backfill/runtests.norm_rp.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'regression'
3 | head: "plana15"
4 | clients: ["plana15"]
5 | osds: ["burnupi37", "burnupi38", "burnupi39"]
6 | mons:
7 | plana15:
8 | a: "10.214.144.25:6789"
9 | osds_per_node: 6
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/regression/regression/tests-available/backfill/ceph.conf.norm_rp'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | rbd:
20 | pg_size: 4096
21 | pgp_size: 4096
22 | replication: 3
23 | recovery_test:
24 | osds: [17]
25 | benchmarks:
26 | librbdfio:
27 | time: 7200
28 | vol_size: 10240
29 | mode: ['write']
30 | op_size: [4194304]
31 | concurrent_procs: [1]
32 | iodepth: [128]
33 | osd_ra: [4096]
34 | cmd_path: '/home/regression/src/fio/fio'
35 | pool_profile: 'rbd'
36 | log_avg_msec: 100
37 |
38 |
--------------------------------------------------------------------------------
/regression/burnupi-available/tiering/ceph.base.conf:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd crush update on start = false
4 | osd crush chooseleaf type = 0
5 | osd pg bits = 10
6 | osd pgp bits = 10
7 | auth client required = none
8 | auth cluster required = none
9 | auth service required = none
10 | keyring = /tmp/cbt/ceph/keyring
11 | log to syslog = false
12 | log file = /tmp/cbt/ceph/log/$name.log
13 | rbd cache = true
14 | filestore merge threshold = 40
15 | filestore split multiple = 8
16 | osd op threads = 8
17 | mon pg warn max object skew = 100000
18 | mon pg warn min per osd = 0
19 | mon pg warn max per osd = 32768
20 | [mon.a]
21 | mon addr = 10.214.144.25:6789
22 | host = plana15
23 | mon data = /tmp/cbt/ceph/mon.$id
24 |
25 | [osd]
26 | public network = 10.214.148.0/20
27 | cluster network = 10.214.148.0/20
28 |
29 | [osd.0]
30 | host = burnupi37
31 | osd data = /tmp/cbt/mnt/osd-device-0-data
32 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
33 |
34 | [osd.1]
35 | host = burnupi37
36 | osd data = /tmp/cbt/mnt/osd-device-1-data
37 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
38 |
39 | [osd.2]
40 | host = burnupi37
41 | osd data = /tmp/cbt/mnt/osd-device-2-data
42 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
43 |
44 | [osd.3]
45 | host = burnupi37
46 | osd data = /tmp/cbt/mnt/osd-device-3-data
47 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
48 |
49 | [osd.4]
50 | host = burnupi37
51 | osd data = /tmp/cbt/mnt/osd-device-4-data
52 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
53 |
54 | [osd.5]
55 | host = burnupi37
56 | osd data = /tmp/cbt/mnt/osd-device-5-data
57 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
58 |
59 | [osd.6]
60 | host = burnupi38
61 | osd data = /tmp/cbt/mnt/osd-device-0-data
62 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
63 |
64 | [osd.7]
65 | host = burnupi38
66 | osd data = /tmp/cbt/mnt/osd-device-1-data
67 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
68 |
69 | [osd.8]
70 | host = burnupi38
71 | osd data = /tmp/cbt/mnt/osd-device-2-data
72 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
73 |
74 | [osd.9]
75 | host = burnupi38
76 | osd data = /tmp/cbt/mnt/osd-device-3-data
77 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
78 |
79 | [osd.10]
80 | host = burnupi38
81 | osd data = /tmp/cbt/mnt/osd-device-4-data
82 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
83 |
84 | [osd.11]
85 | host = burnupi38
86 | osd data = /tmp/cbt/mnt/osd-device-5-data
87 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
88 |
89 | [osd.12]
90 | host = burnupi39
91 | osd data = /tmp/cbt/mnt/osd-device-0-data
92 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
93 |
94 | [osd.13]
95 | host = burnupi39
96 | osd data = /tmp/cbt/mnt/osd-device-1-data
97 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
98 |
99 | [osd.14]
100 | host = burnupi39
101 | osd data = /tmp/cbt/mnt/osd-device-2-data
102 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
103 |
104 | [osd.15]
105 | host = burnupi39
106 | osd data = /tmp/cbt/mnt/osd-device-3-data
107 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
108 |
109 | [osd.16]
110 | host = burnupi39
111 | osd data = /tmp/cbt/mnt/osd-device-4-data
112 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
113 |
114 | [osd.17]
115 | host = burnupi39
116 | osd data = /tmp/cbt/mnt/osd-device-5-data
117 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
118 |
119 |
--------------------------------------------------------------------------------
/regression/burnupi-available/tiering/ceph.cache.conf:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd crush update on start = false
4 | osd crush chooseleaf type = 0
5 | osd pg bits = 10
6 | osd pgp bits = 10
7 | auth client required = none
8 | auth cluster required = none
9 | auth service required = none
10 | keyring = /tmp/cbt/ceph/keyring
11 | log to syslog = false
12 | log file = /tmp/cbt/ceph/log/$name.log
13 | rbd cache = true
14 | filestore merge threshold = 40
15 | filestore split multiple = 8
16 | osd op threads = 8
17 | mon pg warn max object skew = 100000
18 | mon pg warn min per osd = 0
19 | mon pg warn max per osd = 32768
20 |
21 | [mon.a]
22 | mon addr = 10.214.144.25:6789
23 | host = plana15
24 | mon data = /tmp/cbt/ceph/mon.$id
25 |
26 | [osd]
27 | public network = 10.214.148.0/20
28 | cluster network = 10.214.148.0/20
29 |
30 | [osd.0]
31 | host = burnupi37
32 | osd data = /tmp/cbt/mnt/osd-cache-0-data
33 | osd journal = /dev/disk/by-partlabel/osd-cache-0-journal
34 |
35 | [osd.1]
36 | host = burnupi38
37 | osd data = /tmp/cbt/mnt/osd-cache-0-data
38 | osd journal = /dev/disk/by-partlabel/osd-cache-0-journal
39 |
40 | [osd.2]
41 | host = burnupi39
42 | osd data = /tmp/cbt/mnt/osd-cache-0-data
43 | osd journal = /dev/disk/by-partlabel/osd-cache-0-journal
44 |
45 |
--------------------------------------------------------------------------------
/regression/burnupi-available/tiering/ceph.tiered.conf:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd crush update on start = false
4 | osd crush chooseleaf type = 0
5 | osd pg bits = 10
6 | osd pgp bits = 10
7 | auth client required = none
8 | auth cluster required = none
9 | auth service required = none
10 | keyring = /tmp/cbt/ceph/keyring
11 | log to syslog = false
12 | log file = /tmp/cbt/ceph/log/$name.log
13 | rbd cache = true
14 | filestore merge threshold = 40
15 | filestore split multiple = 8
16 | osd op threads = 8
17 | mon pg warn max object skew = 100000
18 | mon pg warn min per osd = 0
19 | mon pg warn max per osd = 32768
20 |
21 | [mon.a]
22 | mon addr = 10.214.144.25:6789
23 | host = plana15
24 | mon data = /tmp/cbt/ceph/mon.$id
25 |
26 | [osd]
27 | public network = 10.214.148.0/20
28 | cluster network = 10.214.148.0/20
29 |
30 | [osd.0]
31 | host = burnupi37
32 | osd data = /tmp/cbt/mnt/osd-device-0-data
33 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
34 |
35 | [osd.1]
36 | host = burnupi37
37 | osd data = /tmp/cbt/mnt/osd-device-1-data
38 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
39 |
40 | [osd.2]
41 | host = burnupi37
42 | osd data = /tmp/cbt/mnt/osd-device-2-data
43 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
44 |
45 | [osd.3]
46 | host = burnupi37
47 | osd data = /tmp/cbt/mnt/osd-device-3-data
48 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
49 |
50 | [osd.4]
51 | host = burnupi37
52 | osd data = /tmp/cbt/mnt/osd-device-4-data
53 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
54 |
55 | [osd.5]
56 | host = burnupi37
57 | osd data = /tmp/cbt/mnt/osd-device-5-data
58 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
59 |
60 | [osd.6]
61 | host = burnupi38
62 | osd data = /tmp/cbt/mnt/osd-device-0-data
63 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
64 |
65 | [osd.7]
66 | host = burnupi38
67 | osd data = /tmp/cbt/mnt/osd-device-1-data
68 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
69 |
70 | [osd.8]
71 | host = burnupi38
72 | osd data = /tmp/cbt/mnt/osd-device-2-data
73 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
74 |
75 | [osd.9]
76 | host = burnupi38
77 | osd data = /tmp/cbt/mnt/osd-device-3-data
78 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
79 |
80 | [osd.10]
81 | host = burnupi38
82 | osd data = /tmp/cbt/mnt/osd-device-4-data
83 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
84 |
85 | [osd.11]
86 | host = burnupi38
87 | osd data = /tmp/cbt/mnt/osd-device-5-data
88 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
89 |
90 | [osd.12]
91 | host = burnupi39
92 | osd data = /tmp/cbt/mnt/osd-device-0-data
93 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
94 |
95 | [osd.13]
96 | host = burnupi39
97 | osd data = /tmp/cbt/mnt/osd-device-1-data
98 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
99 |
100 | [osd.14]
101 | host = burnupi39
102 | osd data = /tmp/cbt/mnt/osd-device-2-data
103 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
104 |
105 | [osd.15]
106 | host = burnupi39
107 | osd data = /tmp/cbt/mnt/osd-device-3-data
108 | osd journal = /dev/disk/by-partlabel/osd-device-3-journal
109 |
110 | [osd.16]
111 | host = burnupi39
112 | osd data = /tmp/cbt/mnt/osd-device-4-data
113 | osd journal = /dev/disk/by-partlabel/osd-device-4-journal
114 |
115 | [osd.17]
116 | host = burnupi39
117 | osd data = /tmp/cbt/mnt/osd-device-5-data
118 | osd journal = /dev/disk/by-partlabel/osd-device-5-journal
119 |
120 | [osd.18]
121 | host = burnupi37
122 | osd data = /tmp/cbt/mnt/osd-cache-0-data
123 | osd journal = /dev/disk/by-partlabel/osd-cache-0-journal
124 |
125 | [osd.19]
126 | host = burnupi38
127 | osd data = /tmp/cbt/mnt/osd-cache-0-data
128 | osd journal = /dev/disk/by-partlabel/osd-cache-0-journal
129 |
130 | [osd.20]
131 | host = burnupi39
132 | osd data = /tmp/cbt/mnt/osd-cache-0-data
133 | osd journal = /dev/disk/by-partlabel/osd-cache-0-journal
134 |
135 |
--------------------------------------------------------------------------------
/regression/burnupi-available/tiering/runtests.cache.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'regression'
3 | head: "plana15"
4 | clients: ["plana15"]
5 | osds: ["burnupi37", "burnupi38", "burnupi39"]
6 | mons:
7 | plana15:
8 | a: "10.214.144.25:6789"
9 | osds_per_node: 1
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/regression/regression/tests-available/tiering/ceph.cache.conf'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | cache:
20 | pg_size: 512
21 | pgp_size: 512
22 | replication: 1
23 |
24 | benchmarks:
25 | librbdfio:
26 | time: 300
27 | vol_size: 65536
28 | mode: ['read', 'write', 'rw']
29 | rwmixread: 50
30 | op_size: [4194304, 131072, 4096]
31 | concurrent_procs: [1]
32 | iodepth: [128]
33 | osd_ra: [4096]
34 | cmd_path: '/home/regression/src/fio/fio'
35 | pool_profile: 'rbd'
36 | log_avg_msec: 100
37 | librbdfio:
38 | time: 300
39 | vol_size: 65536
40 | mode: ['randread', 'randwrite', 'randrw']
41 | rwmixread: 50
42 | op_size: [4194304, 131072, 4096]
43 | concurrent_procs: [1]
44 | iodepth: [128]
45 | osd_ra: [4096]
46 | cmd_path: '/home/regression/src/fio/fio'
47 | pool_profile: 'rbd'
48 | log_avg_msec: 100
49 | librbdfio:
50 | time: 300
51 | vol_size: 65536
52 | mode: ['randread', 'randwrite', 'randrw']
53 | rwmixread: 50
54 | random_distribution: 'zipf:1.2'
55 | op_size: [4194304, 131072, 4096]
56 | concurrent_procs: [1]
57 | iodepth: [128]
58 | osd_ra: [4096]
59 | cmd_path: '/home/regression/src/fio/fio'
60 | pool_profile: 'rbd'
61 | log_avg_msec: 100
62 |
63 |
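64 | # Two caveats with this file as written: YAML mapping keys must be unique, so
65 | # only the last of the three librbdfio blocks above (the zipf run) survives
66 | # parsing, and the benchmarks point at pool_profile 'rbd' while only 'cache'
67 | # is defined under pool_profiles. Splitting the runs into separate files and
68 | # referencing the defined profile avoids both surprises.
69 | #
70 | # This config does not appear to build the cache tier itself; if it is set up
71 | # by hand, the usual commands (BASEPOOL/CACHEPOOL are placeholders) are:
72 | #   ceph osd tier add BASEPOOL CACHEPOOL
73 | #   ceph osd tier cache-mode CACHEPOOL writeback
74 | #   ceph osd tier set-overlay BASEPOOL CACHEPOOL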
--------------------------------------------------------------------------------
/regression/magna-available/3xRep/ceph.conf:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd crush update on start = false
4 | osd crush chooseleaf type = 0
5 | osd pg bits = 10
6 | osd pgp bits = 10
7 | auth client required = none
8 | auth cluster required = none
9 | auth service required = none
10 | keyring = /tmp/cbt/ceph/keyring
11 | log to syslog = false
12 | log file = /tmp/cbt/ceph/log/$name.log
13 | rbd cache = true
14 | filestore merge threshold = 40
15 | filestore split multiple = 8
16 | osd op threads = 8
17 | mon pg warn max object skew = 100000
18 | mon pg warn min per osd = 0
19 | mon pg warn max per osd = 32768
20 |
21 | [mon.a]
22 | mon addr = 10.8.128.3:6789
23 | host = magna003
24 | mon data = /tmp/cbt/ceph/mon.$id
25 |
26 | [osd.0]
27 | host = magna004
28 | osd data = /tmp/cbt/mnt/osd-device-0-data
29 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
30 |
31 | [osd.1]
32 | host = magna004
33 | osd data = /tmp/cbt/mnt/osd-device-1-data
34 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
35 |
36 | [osd.2]
37 | host = magna004
38 | osd data = /tmp/cbt/mnt/osd-device-2-data
39 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
40 |
41 | [osd.3]
42 | host = magna007
43 | osd data = /tmp/cbt/mnt/osd-device-0-data
44 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
45 |
46 | [osd.4]
47 | host = magna007
48 | osd data = /tmp/cbt/mnt/osd-device-1-data
49 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
50 |
51 | [osd.5]
52 | host = magna007
53 | osd data = /tmp/cbt/mnt/osd-device-2-data
54 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
55 |
56 | [osd.6]
57 | host = magna012
58 | osd data = /tmp/cbt/mnt/osd-device-0-data
59 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
60 |
61 | [osd.7]
62 | host = magna012
63 | osd data = /tmp/cbt/mnt/osd-device-1-data
64 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
65 |
66 | [osd.8]
67 | host = magna012
68 | osd data = /tmp/cbt/mnt/osd-device-2-data
69 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
70 |
71 | [osd.9]
72 | host = magna031
73 | osd data = /tmp/cbt/mnt/osd-device-0-data
74 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
75 |
76 | [osd.10]
77 | host = magna031
78 | osd data = /tmp/cbt/mnt/osd-device-1-data
79 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
80 |
81 | [osd.11]
82 | host = magna031
83 | osd data = /tmp/cbt/mnt/osd-device-2-data
84 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
85 |
86 |
--------------------------------------------------------------------------------
/regression/magna-available/3xRep/runtests.btrfs.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'perf'
3 | head: "magna003"
4 | clients: ["magna003"]
5 | osds: ["magna004", "magna007", "magna012", "magna031"]
6 | mons:
7 | magna003:
8 | a: "10.8.128.3:6789"
9 | osds_per_node: 3
10 | fs: btrfs
11 | mkfs_opts: -l 16k -n 16k -f
12 | mount_opts: -o noatime
13 | conf_file: '/home/perf/src/ceph-tools/regression/magna-available/3xRep/ceph.conf'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | radosbench:
20 | pg_size: 1024
21 | pgp_size: 1024
22 | replication: 3
23 | rbd:
24 | pg_size: 4096
25 | pgp_size: 4096
26 | replication: 3
27 |
28 | benchmarks:
29 | radosbench:
30 | op_size: [4194304, 131072, 4096]
31 | write_only: False
32 | time: 300
33 | concurrent_ops: [32]
34 | concurrent_procs: 4
35 | osd_ra: [4096]
36 | pool_profile: 'radosbench'
37 | # librbdfio:
38 | # time: 300
39 | # vol_size: 65536
40 | # mode: ['read', 'write', 'randread', 'randwrite', 'rw', 'randrw']
41 | # rwmixread: 50
42 | # op_size: [4194304, 131072, 4096]
43 | # concurrent_procs: [1]
44 | # iodepth: [128]
45 | # osd_ra: [4096]
46 | # cmd_path: '/home/regression/src/fio/fio'
47 | # pool_profile: 'rbd'
48 | # log_avg_msec: 100
49 | # rbdfio:
50 | # time: 300
51 | # vol_size: 65536
52 | # mode: ['read', 'write', 'randread', 'randwrite', 'rw', 'randrw']
53 | # rwmixread: 50
54 | # op_size: [4194304, 131072, 4096]
55 | # concurrent_procs: [1]
56 | # iodepth: [128]
57 | # osd_ra: [4096]
58 | # cmd_path: '/home/regression/src/fio/fio'
59 | # pool_profile: 'rbd'
60 | # log_avg_msec: 100
61 |
62 |
--------------------------------------------------------------------------------
/regression/magna-available/3xRep/runtests.xfs.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'perf'
3 | head: "magna003"
4 | clients: ["magna003"]
5 | osds: ["magna004", "magna007", "magna012", "magna031"]
6 | mons:
7 | magna003:
8 | a: "10.8.128.3:6789"
9 | osds_per_node: 3
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/perf/src/ceph-tools/regression/magna-available/3xRep/ceph.conf'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | radosbench:
20 | pg_size: 1024
21 | pgp_size: 1024
22 | replication: 3
23 | rbd:
24 | pg_size: 4096
25 | pgp_size: 4096
26 | replication: 3
27 |
28 | benchmarks:
29 | radosbench:
30 | op_size: [4194304, 131072, 4096]
31 | write_only: False
32 | time: 300
33 | concurrent_ops: [32]
34 | concurrent_procs: 4
35 | osd_ra: [4096]
36 | pool_profile: 'radosbench'
37 | librbdfio:
38 | time: 300
39 | vol_size: 65536
40 | mode: ['read', 'write', 'randread', 'randwrite', 'rw', 'randrw']
41 | rwmixread: 50
42 | op_size: [4194304, 131072, 4096]
43 | concurrent_procs: [1]
44 | iodepth: [128]
45 | osd_ra: [4096]
46 | cmd_path: '/home/perf/src/fio/fio'
47 | pool_profile: 'rbd'
48 | log_avg_msec: 100
49 | rbdfio:
50 | time: 300
51 | vol_size: 65536
52 | mode: ['read', 'write', 'randread', 'randwrite', 'rw', 'randrw']
53 | rwmixread: 50
54 | op_size: [4194304, 131072, 4096]
55 | concurrent_procs: [1]
56 | iodepth: [128]
57 | osd_ra: [4096]
58 | cmd_path: '/home/perf/src/fio/fio'
59 | pool_profile: 'rbd'
60 | log_avg_msec: 100
61 |
62 |
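63 | # librbdfio exercises the userspace librbd path (fio's rbd engine) while rbdfio
64 | # runs fio against kernel-mapped RBD devices, so this file covers both client
65 | # paths on top of the radosbench object workload; the btrfs variant of this
66 | # config keeps the fio benchmarks commented out and runs radosbench only.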
--------------------------------------------------------------------------------
/regression/magna-available/EC/ceph.conf:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd crush update on start = false
4 | osd crush chooseleaf type = 0
5 | osd pg bits = 10
6 | osd pgp bits = 10
7 | auth client required = none
8 | auth cluster required = none
9 | auth service required = none
10 | keyring = /tmp/cbt/ceph/keyring
11 | log to syslog = false
12 | log file = /tmp/cbt/ceph/log/$name.log
13 | rbd cache = true
14 | filestore merge threshold = 40
15 | filestore split multiple = 8
16 | osd op threads = 8
17 | mon pg warn max object skew = 100000
18 | mon pg warn min per osd = 0
19 | mon pg warn max per osd = 32768
20 | [mon.a]
21 | mon addr = 10.8.128.3:6789
22 | host = magna003
23 | mon data = /tmp/cbt/ceph/mon.$id
24 |
25 | [osd.0]
26 | host = magna004
27 | osd data = /tmp/cbt/mnt/osd-device-0-data
28 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
29 |
30 | [osd.1]
31 | host = magna004
32 | osd data = /tmp/cbt/mnt/osd-device-1-data
33 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
34 |
35 | [osd.2]
36 | host = magna004
37 | osd data = /tmp/cbt/mnt/osd-device-2-data
38 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
39 |
40 | [osd.3]
41 | host = magna007
42 | osd data = /tmp/cbt/mnt/osd-device-0-data
43 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
44 |
45 | [osd.4]
46 | host = magna007
47 | osd data = /tmp/cbt/mnt/osd-device-1-data
48 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
49 |
50 | [osd.5]
51 | host = magna007
52 | osd data = /tmp/cbt/mnt/osd-device-2-data
53 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
54 |
55 | [osd.6]
56 | host = magna012
57 | osd data = /tmp/cbt/mnt/osd-device-0-data
58 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
59 |
60 | [osd.7]
61 | host = magna012
62 | osd data = /tmp/cbt/mnt/osd-device-1-data
63 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
64 |
65 | [osd.8]
66 | host = magna012
67 | osd data = /tmp/cbt/mnt/osd-device-2-data
68 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
69 |
70 | [osd.9]
71 | host = magna031
72 | osd data = /tmp/cbt/mnt/osd-device-0-data
73 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
74 |
75 | [osd.10]
76 | host = magna031
77 | osd data = /tmp/cbt/mnt/osd-device-1-data
78 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
79 |
80 | [osd.11]
81 | host = magna031
82 | osd data = /tmp/cbt/mnt/osd-device-2-data
83 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
84 |
85 |
--------------------------------------------------------------------------------
/regression/magna-available/EC/runtests.EC31.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'perf'
3 | head: "magna003"
4 | clients: ["magna003"]
5 | osds: ["magna004", "magna007", "magna012", "magna031"]
6 | mons:
7 | magna003:
8 | a: "10.8.128.3:6789"
9 | osds_per_node: 3
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/perf/src/ceph-tools/regression/magna-available/EC/ceph.conf'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | radosbench:
20 | pg_size: 1024
21 | pgp_size: 1024
22 | replication: 'erasure'
23 | erasure_profile: 'ec31'
24 | erasure_profiles:
25 | ec31:
26 | erasure_k: 3
27 | erasure_m: 1
28 | benchmarks:
29 | radosbench:
30 | op_size: [4194304, 131072, 4096]
31 | write_only: False
32 | time: 300
33 | concurrent_ops: [32]
34 | concurrent_procs: 4
35 | osd_ra: [4096]
36 | pool_profile: 'radosbench'
37 |
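38 | # With replication: 'erasure' the radosbench pool is created erasure-coded from
39 | # the ec31 profile defined above; done by hand (pool name illustrative), the
40 | # equivalent would be roughly:
41 | #   ceph osd erasure-code-profile set ec31 k=3 m=1
42 | #   ceph osd pool create radosbench 1024 1024 erasure ec31
43 | # The EC62 and EC93 variants change only k and m; 8 or 12 chunks fit on the
44 | # 12 OSDs because chooseleaf type is 0 (per-OSD placement).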
--------------------------------------------------------------------------------
/regression/magna-available/EC/runtests.EC62.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'perf'
3 | head: "magna003"
4 | clients: ["magna003"]
5 | osds: ["magna004", "magna007", "magna012", "magna031"]
6 | mons:
7 | magna003:
8 | a: "10.8.128.3:6789"
9 | osds_per_node: 3
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/perf/src/ceph-tools/regression/magna-available/EC/ceph.conf'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | radosbench:
20 | pg_size: 1024
21 | pgp_size: 1024
22 | replication: 'erasure'
23 | erasure_profile: 'ec62'
24 | erasure_profiles:
25 | ec62:
26 | erasure_k: 6
27 | erasure_m: 2
28 | benchmarks:
29 | radosbench:
30 | op_size: [4194304, 131072, 4096]
31 | write_only: False
32 | time: 300
33 | concurrent_ops: [32]
34 | concurrent_procs: 4
35 | osd_ra: [4096]
36 | pool_profile: 'radosbench'
37 |
--------------------------------------------------------------------------------
/regression/magna-available/EC/runtests.EC93.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'perf'
3 | head: "magna003"
4 | clients: ["magna003"]
5 | osds: ["magna004", "magna007", "magna012", "magna031"]
6 | mons:
7 | magna003:
8 | a: "10.8.128.3:6789"
9 | osds_per_node: 3
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 |   conf_file: '/home/perf/src/ceph-tools/regression/magna-available/EC/ceph.conf'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | radosbench:
20 | pg_size: 1024
21 | pgp_size: 1024
22 | replication: 'erasure'
23 | erasure_profile: 'ec93'
24 | erasure_profiles:
25 | ec93:
26 | erasure_k: 9
27 | erasure_m: 3
28 | benchmarks:
29 | radosbench:
30 | op_size: [4194304, 131072, 4096]
31 | write_only: False
32 | time: 300
33 | concurrent_ops: [32]
34 | concurrent_procs: 4
35 | osd_ra: [4096]
36 | pool_profile: 'radosbench'
37 |
--------------------------------------------------------------------------------
/regression/magna-available/backfill/ceph.conf.high_rp:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd max backfills = 20
4 | osd recovery max active = 30
5 | osd recovery op priority = 20
6 |
7 | osd crush update on start = false
8 | osd crush chooseleaf type = 0
9 | osd pg bits = 10
10 | osd pgp bits = 10
11 | auth client required = none
12 | auth cluster required = none
13 | auth service required = none
14 | keyring = /tmp/cbt/ceph/keyring
15 | log to syslog = false
16 | log file = /tmp/cbt/ceph/log/$name.log
17 | rbd cache = true
18 | filestore merge threshold = 40
19 | filestore split multiple = 8
20 | osd op threads = 8
21 | mon pg warn max object skew = 100000
22 | mon pg warn min per osd = 0
23 | mon pg warn max per osd = 32768
24 |
25 | [mon.a]
26 | mon addr = 10.8.128.3:6789
27 | host = magna003
28 | mon data = /tmp/cbt/ceph/mon.$id
29 |
30 | [osd.0]
31 | host = magna004
32 | osd data = /tmp/cbt/mnt/osd-device-0-data
33 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
34 |
35 | [osd.1]
36 | host = magna004
37 | osd data = /tmp/cbt/mnt/osd-device-1-data
38 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
39 |
40 | [osd.2]
41 | host = magna004
42 | osd data = /tmp/cbt/mnt/osd-device-2-data
43 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
44 |
45 | [osd.3]
46 | host = magna007
47 | osd data = /tmp/cbt/mnt/osd-device-0-data
48 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
49 |
50 | [osd.4]
51 | host = magna007
52 | osd data = /tmp/cbt/mnt/osd-device-1-data
53 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
54 |
55 | [osd.5]
56 | host = magna007
57 | osd data = /tmp/cbt/mnt/osd-device-2-data
58 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
59 |
60 | [osd.6]
61 | host = magna012
62 | osd data = /tmp/cbt/mnt/osd-device-0-data
63 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
64 |
65 | [osd.7]
66 | host = magna012
67 | osd data = /tmp/cbt/mnt/osd-device-1-data
68 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
69 |
70 | [osd.8]
71 | host = magna012
72 | osd data = /tmp/cbt/mnt/osd-device-2-data
73 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
74 |
75 | [osd.9]
76 | host = magna031
77 | osd data = /tmp/cbt/mnt/osd-device-0-data
78 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
79 |
80 | [osd.10]
81 | host = magna031
82 | osd data = /tmp/cbt/mnt/osd-device-1-data
83 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
84 |
85 | [osd.11]
86 | host = magna031
87 | osd data = /tmp/cbt/mnt/osd-device-2-data
88 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
89 |
90 |
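91 | # The high_rp, low_rp and norm_rp variants of this conf are identical apart
92 | # from the first three [global] settings: osd max backfills caps concurrent
93 | # backfills per OSD, osd recovery max active caps in-flight recovery ops per
94 | # OSD, and osd recovery op priority weights recovery against client I/O.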
--------------------------------------------------------------------------------
/regression/magna-available/backfill/ceph.conf.low_rp:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd max backfills = 5
4 | osd recovery max active = 8
5 | osd recovery op priority = 5
6 |
7 | osd crush update on start = false
8 | osd crush chooseleaf type = 0
9 | osd pg bits = 10
10 | osd pgp bits = 10
11 | auth client required = none
12 | auth cluster required = none
13 | auth service required = none
14 | keyring = /tmp/cbt/ceph/keyring
15 | log to syslog = false
16 | log file = /tmp/cbt/ceph/log/$name.log
17 | rbd cache = true
18 | filestore merge threshold = 40
19 | filestore split multiple = 8
20 | osd op threads = 8
21 | mon pg warn max object skew = 100000
22 | mon pg warn min per osd = 0
23 | mon pg warn max per osd = 32768
24 |
25 | [mon.a]
26 | mon addr = 10.8.128.3:6789
27 | host = magna003
28 | mon data = /tmp/cbt/ceph/mon.$id
29 |
30 | [osd.0]
31 | host = magna004
32 | osd data = /tmp/cbt/mnt/osd-device-0-data
33 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
34 |
35 | [osd.1]
36 | host = magna004
37 | osd data = /tmp/cbt/mnt/osd-device-1-data
38 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
39 |
40 | [osd.2]
41 | host = magna004
42 | osd data = /tmp/cbt/mnt/osd-device-2-data
43 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
44 |
45 | [osd.3]
46 | host = magna007
47 | osd data = /tmp/cbt/mnt/osd-device-0-data
48 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
49 |
50 | [osd.4]
51 | host = magna007
52 | osd data = /tmp/cbt/mnt/osd-device-1-data
53 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
54 |
55 | [osd.5]
56 | host = magna007
57 | osd data = /tmp/cbt/mnt/osd-device-2-data
58 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
59 |
60 | [osd.6]
61 | host = magna012
62 | osd data = /tmp/cbt/mnt/osd-device-0-data
63 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
64 |
65 | [osd.7]
66 | host = magna012
67 | osd data = /tmp/cbt/mnt/osd-device-1-data
68 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
69 |
70 | [osd.8]
71 | host = magna012
72 | osd data = /tmp/cbt/mnt/osd-device-2-data
73 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
74 |
75 | [osd.9]
76 | host = magna031
77 | osd data = /tmp/cbt/mnt/osd-device-0-data
78 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
79 |
80 | [osd.10]
81 | host = magna031
82 | osd data = /tmp/cbt/mnt/osd-device-1-data
83 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
84 |
85 | [osd.11]
86 | host = magna031
87 | osd data = /tmp/cbt/mnt/osd-device-2-data
88 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
89 |
90 |
--------------------------------------------------------------------------------
/regression/magna-available/backfill/ceph.conf.norm_rp:
--------------------------------------------------------------------------------
1 |
2 | [global]
3 | osd max backfills = 10
4 | osd recovery max active = 15
5 | osd recovery op priority = 10
6 |
7 | osd crush update on start = false
8 | osd crush chooseleaf type = 0
9 | osd pg bits = 10
10 | osd pgp bits = 10
11 | auth client required = none
12 | auth cluster required = none
13 | auth service required = none
14 | keyring = /tmp/cbt/ceph/keyring
15 | log to syslog = false
16 | log file = /tmp/cbt/ceph/log/$name.log
17 | rbd cache = true
18 | filestore merge threshold = 40
19 | filestore split multiple = 8
20 | osd op threads = 8
21 | mon pg warn max object skew = 100000
22 | mon pg warn min per osd = 0
23 | mon pg warn max per osd = 32768
24 |
25 | [mon.a]
26 | mon addr = 10.8.128.3:6789
27 | host = magna003
28 | mon data = /tmp/cbt/ceph/mon.$id
29 |
30 | [osd.0]
31 | host = magna004
32 | osd data = /tmp/cbt/mnt/osd-device-0-data
33 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
34 |
35 | [osd.1]
36 | host = magna004
37 | osd data = /tmp/cbt/mnt/osd-device-1-data
38 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
39 |
40 | [osd.2]
41 | host = magna004
42 | osd data = /tmp/cbt/mnt/osd-device-2-data
43 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
44 |
45 | [osd.3]
46 | host = magna007
47 | osd data = /tmp/cbt/mnt/osd-device-0-data
48 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
49 |
50 | [osd.4]
51 | host = magna007
52 | osd data = /tmp/cbt/mnt/osd-device-1-data
53 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
54 |
55 | [osd.5]
56 | host = magna007
57 | osd data = /tmp/cbt/mnt/osd-device-2-data
58 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
59 |
60 | [osd.6]
61 | host = magna012
62 | osd data = /tmp/cbt/mnt/osd-device-0-data
63 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
64 |
65 | [osd.7]
66 | host = magna012
67 | osd data = /tmp/cbt/mnt/osd-device-1-data
68 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
69 |
70 | [osd.8]
71 | host = magna012
72 | osd data = /tmp/cbt/mnt/osd-device-2-data
73 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
74 |
75 | [osd.9]
76 | host = magna031
77 | osd data = /tmp/cbt/mnt/osd-device-0-data
78 | osd journal = /dev/disk/by-partlabel/osd-device-0-journal
79 |
80 | [osd.10]
81 | host = magna031
82 | osd data = /tmp/cbt/mnt/osd-device-1-data
83 | osd journal = /dev/disk/by-partlabel/osd-device-1-journal
84 |
85 | [osd.11]
86 | host = magna031
87 | osd data = /tmp/cbt/mnt/osd-device-2-data
88 | osd journal = /dev/disk/by-partlabel/osd-device-2-journal
89 |
90 |
--------------------------------------------------------------------------------
/regression/magna-available/backfill/runtests.high_rp.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'perf'
3 | head: "magna003"
4 | clients: ["magna003"]
5 | osds: ["magna004", "magna007", "magna012", "magna031"]
6 | mons:
7 | magna003:
8 | a: "10.8.128.3:6789"
9 | osds_per_node: 3
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/perf/src/ceph-tools/regression/magna-available/backfill/ceph.conf.high_rp'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | rbd:
20 | pg_size: 4096
21 | pgp_size: 4096
22 | replication: 3
23 | recovery_test:
24 | osds: [11]
25 | benchmarks:
26 | librbdfio:
27 | time: 7200
28 | vol_size: 10240
29 | mode: ['write']
30 | op_size: [4194304]
31 | concurrent_procs: [1]
32 | iodepth: [128]
33 | osd_ra: [4096]
34 | cmd_path: '/home/perf/src/fio/fio'
35 | pool_profile: 'rbd'
36 | log_avg_msec: 100
37 |
38 |
--------------------------------------------------------------------------------
/regression/magna-available/backfill/runtests.low_rp.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'perf'
3 | head: "magna003"
4 | clients: ["magna003"]
5 | osds: ["magna004", "magna007", "magna012", "magna031"]
6 | mons:
7 | magna003:
8 | a: "10.8.128.3:6789"
9 | osds_per_node: 3
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/perf/src/ceph-tools/regression/magna-available/backfill/ceph.conf.low_rp'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | rbd:
20 | pg_size: 4096
21 | pgp_size: 4096
22 | replication: 3
23 | recovery_test:
24 | osds: [11]
25 | benchmarks:
26 | librbdfio:
27 | time: 7200
28 | vol_size: 10240
29 | mode: ['write']
30 | op_size: [4194304]
31 | concurrent_procs: [1]
32 | iodepth: [128]
33 | osd_ra: [4096]
34 | cmd_path: '/home/perf/src/fio/fio'
35 | pool_profile: 'rbd'
36 | log_avg_msec: 100
37 |
38 |
--------------------------------------------------------------------------------
/regression/magna-available/backfill/runtests.norm_rp.yaml:
--------------------------------------------------------------------------------
1 | cluster:
2 | user: 'perf'
3 | head: "magna003"
4 | clients: ["magna003"]
5 | osds: ["magna004", "magna007", "magna012", "magna031"]
6 | mons:
7 | magna003:
8 | a: "10.8.128.3:6789"
9 | osds_per_node: 3
10 | fs: 'xfs'
11 | mkfs_opts: '-f -i size=2048 -n size=64k'
12 | mount_opts: '-o inode64,noatime,logbsize=256k'
13 | conf_file: '/home/perf/src/ceph-tools/regression/magna-available/backfill/ceph.conf.norm_rp'
14 | iterations: 1
15 | use_existing: False
16 | clusterid: "ceph"
17 | tmp_dir: "/tmp/cbt"
18 | pool_profiles:
19 | rbd:
20 | pg_size: 4096
21 | pgp_size: 4096
22 | replication: 3
23 | recovery_test:
24 | osds: [11]
25 | benchmarks:
26 | librbdfio:
27 | time: 7200
28 | vol_size: 10240
29 | mode: ['write']
30 | op_size: [4194304]
31 | concurrent_procs: [1]
32 | iodepth: [128]
33 | osd_ra: [4096]
34 | cmd_path: '/home/perf/src/fio/fio'
35 | pool_profile: 'rbd'
36 | log_avg_msec: 100
37 |
38 |
--------------------------------------------------------------------------------
/regression/runtests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #HOME="/home/regression"
3 | #VERSIONS=( "firefly" "giant" "master")
4 | VERSIONS=( "firefly" "master" )
5 | UPGRADE_CMD="$HOME/bin/upgrade-ceph.sh"
6 | CONFDIR=$1
7 | CBT="$HOME/src/ceph-tools/cbt/cbt.py"
8 | DATE=$(date +%Y%m%d)
9 |
10 | for VERSION in "${VERSIONS[@]}"
11 | do
12 | # Upgrade Ceph
13 | $UPGRADE_CMD reg $VERSION
14 |
15 | # Run Tests
16 | for SUITE in $( find "$CONFDIR"/*/ -type d -exec basename {} \;)
17 | do
18 | for TEST in $( find "$CONFDIR/$SUITE"/*.yaml -type f -exec basename {} \; | cut -d"." -f 2)
19 | do
20 | CBTCONF="$CONFDIR/$SUITE/runtests.$TEST.yaml"
21 | ARCHIVE="$HOME/data/$DATE/$VERSION/$SUITE/$TEST"
22 | mkdir -p "$ARCHIVE"
23 | $CBT --archive "$ARCHIVE" "$CBTCONF" 2>&1 | tee "$ARCHIVE/cbt.out"
24 | done
25 | done
26 | done
27 |
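28 | # Example invocation, assuming the layout used in this repo (a directory of
29 | # suite subdirectories, each holding runtests.<test>.yaml files):
30 | #   ./runtests.sh $HOME/src/ceph-tools/regression/magna-available
31 | # For every entry in VERSIONS the upgrade-ceph.sh helper is run first, then
32 | # each test yaml is fed to cbt.py and the results are archived under
33 | # $HOME/data/<date>/<version>/<suite>/<test>.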
--------------------------------------------------------------------------------