├── .idea
├── .name
├── dictionaries
│ ├── Amin (Case Conflict).xml
│ └── Amin.xml
├── encodings.xml
├── vcs.xml
├── misc.xml
├── codeStyleSettings.xml
├── inspectionProfiles
│ ├── profiles_settings.xml
│ └── Project_Default.xml
├── modules.xml
├── the1st.iml
├── the1st (leno's conflicted copy 2015-12-17).iml
├── the1st (leno's conflicted copy 2015-12-28).iml
├── misc (leno's conflicted copy 2015-12-17).xml
└── misc (leno's conflicted copy 2015-12-28).xml
├── db
├── __init__.py
├── files.py
├── connection.py
├── reader.py
├── definitions.py
└── writer.py
├── temp
├── a.sh
└── a.txt
├── .gitignore
├── Definitions
├── __init__.py
├── MultiWorkflow
│ ├── __init__.py
│ └── JobList.py
├── WorkflowReader
│ ├── __init__.py
│ └── reader.py
└── Graph.py
├── Scheduler
├── __init__.py
├── HEFT.py
├── BudgetPessimistic.py
├── DeadlineOptimisticAlpha.py
├── BHEFT.py
├── ICPCP.py
└── Multi_Workflow.py
├── reports
├── __init__.py
└── plot.py
├── enum
├── doc
│ └── enum.pdf
├── README
└── LICENSE
├── outputs
└── graphs
│ ├── g.ods
│ ├── diagrams.odt
│ └── diagrams.pdf
├── single_call.py
├── sample.py
├── starter.py
├── dbtest.py
├── workflows
├── S.txt
├── M.txt
└── L.txt
├── graph_check.py
├── README.md
├── planner.py
├── runAll.py
├── main1.py
└── main2.py
/.idea/.name:
--------------------------------------------------------------------------------
1 | the1st
--------------------------------------------------------------------------------
/db/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/temp/a.sh:
--------------------------------------------------------------------------------
1 | echo "ab$1c$2d '$1$2'"
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.png
3 | *.gif
4 |
--------------------------------------------------------------------------------
/.idea/dictionaries/Amin (Case Conflict).xml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/Definitions/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Amin'
2 |
--------------------------------------------------------------------------------
/Scheduler/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Amin'
2 |
--------------------------------------------------------------------------------
/reports/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Amin'
2 |
--------------------------------------------------------------------------------
/Definitions/MultiWorkflow/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Amin'
2 |
--------------------------------------------------------------------------------
/Definitions/WorkflowReader/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Amin'
2 |
--------------------------------------------------------------------------------
/enum/doc/enum.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ralthor/Scheduler/HEAD/enum/doc/enum.pdf
--------------------------------------------------------------------------------
/outputs/graphs/g.ods:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ralthor/Scheduler/HEAD/outputs/graphs/g.ods
--------------------------------------------------------------------------------
/outputs/graphs/diagrams.odt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ralthor/Scheduler/HEAD/outputs/graphs/diagrams.odt
--------------------------------------------------------------------------------
/outputs/graphs/diagrams.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ralthor/Scheduler/HEAD/outputs/graphs/diagrams.pdf
--------------------------------------------------------------------------------
/enum/README:
--------------------------------------------------------------------------------
1 | enum34 is the new Python stdlib enum module available in Python 3.4
2 | backported for previous versions of Python from 2.4 to 3.3.
3 | tested on 2.6, 2.7, and 3.3+
4 |
--------------------------------------------------------------------------------
/.idea/encodings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/codeStyleSettings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/db/files.py:
--------------------------------------------------------------------------------
1 | from os import listdir
2 | from os.path import isfile, join
3 |
4 |
def file_list(my_path, sub_str='', start=0, n=-1):
    """Return names of files in ``my_path`` whose name contains ``sub_str``.

    :param my_path: directory to list (not recursive)
    :param sub_str: substring filter on file names ('' matches everything)
    :param start: index of the first matching name to return
    :param n: maximum number of names to return; any value <= 0 means "all"
    :return: list of file names (order is whatever os.listdir yields)

    Bug fix: previously ``start != 0`` combined with the default ``n=-1``
    produced the slice ``[start:start-1]``, which is always empty; a
    non-positive ``n`` now correctly means "to the end of the list".
    """
    only_files = [f for f in listdir(my_path) if (sub_str in f) and isfile(join(my_path, f))]
    if n > 0 or start != 0:
        end = start + n if n > 0 else None
        only_files = only_files[start:end]
    return only_files
10 |
--------------------------------------------------------------------------------
/.idea/the1st.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/db/connection.py:
--------------------------------------------------------------------------------
1 | import sqlite3
2 |
3 |
class Connection:
    """Thin wrapper around a :mod:`sqlite3` connection used by the db layer."""

    def __init__(self, database_name):
        """Open (or create) the SQLite database file at ``database_name``."""
        self.connection = sqlite3.connect(database_name)

    def get_cursor(self):
        """Return a fresh cursor on the underlying connection."""
        return self.connection.cursor()

    def commit(self):
        """Commit the current transaction."""
        self.connection.commit()

    def close(self):
        """Close the underlying connection; uncommitted changes are lost."""
        self.connection.close()

    # Backward-compatible addition: allow `with Connection(name) as c: ...`
    # so callers can't leak the sqlite handle. Existing call sites that use
    # get_cursor()/commit()/close() directly are unaffected.
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Commit only on a clean exit, then always release the handle.
        if exc_type is None:
            self.connection.commit()
        self.connection.close()
        return False
16 |
--------------------------------------------------------------------------------
/.idea/the1st (leno's conflicted copy 2015-12-17).iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/the1st (leno's conflicted copy 2015-12-28).iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/dictionaries/Amin.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | amin
5 | bheft
6 | cpus
7 | cyber
8 | epigenomics
9 | icpcp
10 | inspiral
11 | makespan
12 | sipht
13 | timeslot
14 | timeslots
15 |
16 |
17 |
--------------------------------------------------------------------------------
/db/reader.py:
--------------------------------------------------------------------------------
1 | import db.connection
2 | import db.definitions
3 |
4 |
class Reader:
    """Read-only access helper over the experiments SQLite database."""

    def __init__(self, db_file):
        """Open ``db_file`` through db.connection and keep a shared cursor."""
        self.connection = db.connection.Connection(db_file)
        self.cursor = self.connection.get_cursor()

    def read_test(self, test_name):
        """Fetch the row describing ``test_name`` from the `tests` table.

        :return: the sqlite3 cursor positioned on the result row(s)
        """
        query = '''select testname, workload_len, resources,
        budget_ratio, small, medium, large, bw
        from `tests` where testname = ? '''
        return self.cursor.execute(query, (test_name,))

    def select_query(self, query, params):
        """Run an arbitrary parameterized SELECT and return the row cursor."""
        return self.cursor.execute(query, params)
19 |
--------------------------------------------------------------------------------
/.idea/misc (leno's conflicted copy 2015-12-17).xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/.idea/misc (leno's conflicted copy 2015-12-28).xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/single_call.py:
--------------------------------------------------------------------------------
# Ad-hoc driver script (Python 2 syntax: print statements, and integer
# division in the processor-list arithmetic): loads the sample workflow
# sets and runs the IC-PCP scheduler once on the small CyberShake workflow.
import Definitions
import Definitions.WorkflowReader.reader
#import db.definitions
import Definitions.Resources
import Scheduler.ICPCP

# Workflow dictionaries keyed by size class; the trailing comments give the
# approximate task counts per workflow.  NOTE(review): only S/M/L.txt appear
# in the repository; 'XXL.txt' will fail unless provided externally.
all_jobs = {'S': Definitions.WorkflowReader.reader.read_jobs('S.txt'),  # 25-30
            'M': Definitions.WorkflowReader.reader.read_jobs('M.txt'),  # 50-60
            'L': Definitions.WorkflowReader.reader.read_jobs('L.txt'),  # 100
            'XXL': Definitions.WorkflowReader.reader.read_jobs('XXL.txt')}
n = 8
# Heterogeneous processor pool: speeds 2,2,4,8,16 with counts derived from n
# (relies on Python 2 integer division).  NOTE(review): [2] * n appears
# twice - confirm the duplication is intentional.
processorList = [2] * n + [2] * n + [4] * (n * 2 / 3) + [8] * (n / 2) + [16] * (n / 3)
resources = Definitions.Resources.CostAwareResources(processorList, processorList, [60] * len(processorList), 20000)

g = all_jobs['S']['CyberShake']

# One IC-PCP run with deadline 150; the resource pool records the plan.
Scheduler.ICPCP.schedule(g, resources, 150)
print resources.makespan
print resources.plan_cost


def run(s, name, dl):
    # Schedule workflow `name` of size class `s` with deadline `dl` on the
    # shared (already partially used) resource pool and report the results.
    g1 = all_jobs[s][name]
    Scheduler.ICPCP.schedule(g1, resources, dl)
    print resources.makespan
    print resources.plan_cost
    print list(map(resources.resource_cost, range(resources.len)))
28 |
--------------------------------------------------------------------------------
/enum/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013, Ethan Furman.
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions
6 | are met:
7 |
8 | Redistributions of source code must retain the above
9 | copyright notice, this list of conditions and the
10 | following disclaimer.
11 |
12 | Redistributions in binary form must reproduce the above
13 | copyright notice, this list of conditions and the following
14 | disclaimer in the documentation and/or other materials
15 | provided with the distribution.
16 |
17 | Neither the name Ethan Furman nor the names of any
18 | contributors may be used to endorse or promote products
19 | derived from this software without specific prior written
20 | permission.
21 |
22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 | POSSIBILITY OF SUCH DAMAGE.
33 |
--------------------------------------------------------------------------------
/Definitions/WorkflowReader/reader.py:
--------------------------------------------------------------------------------
1 | import codecs
2 | import Definitions.Graph as Graph
3 |
4 |
def read_jobs(file_name):
    """
    Reads workflows as jobs from file with file_name

    File format (one workflow per line; a leading ';' marks a comment line):
        name, start_id, end_id | w, c1..ck, e1..ek | ... 
    The first '|'-component names the workflow and its entry/exit task ids;
    each following component describes one task: its weight w, then k child
    task ids followed by the k matching edge weights.  Task ids are assigned
    implicitly in order of appearance, starting at 1.

    :param file_name: path of the workflow description file
    :rtype : dict  -- maps workflow name -> Definitions.Graph.Graph
    """
    jobs = {}
    file = codecs.open(file_name, "r", "utf-8")  # NOTE: shadows the Python 2 builtin `file`
    while 1:
        lines = file.readlines(10000000)  # ~10M buffer.
        if not lines:
            break
        # Process lines.
        for line in lines:
            if len(line) == 0 or line[0] == ';':
                continue
            try:
                g = Graph.Graph()
                rest = line.split('|')
                # First component: "name, start_id, end_id".
                name_part = rest[0]
                parts = name_part.split(',')
                job_id = parts[0]
                g.set_name(job_id)
                # Type tag = workflow name + first letter of the file name
                # (its size class, e.g. 'S' / 'M' / 'L').
                g.set_type(job_id + file_name[0])
                start_id, end_id = map(int, parts[1:])
                g.set_start_end(start_id, end_id)
                task_id = 0
                for component in rest[1:]:
                    # "w, child ids..., edge weights...": the first half of the
                    # tail is the child-id list, the second half the weights.
                    children = component.split(',')
                    task_id, w, children = task_id + 1, float(children[0]), children[1:]
                    children, edge_weights = children[0:(len(children) // 2)], children[(len(children) // 2):]
                    children = list(map(int, children))
                    edge_weights = list(map(float, edge_weights))
                    g.add_task(task_id, w, children, edge_weights)
                jobs[job_id] = g
                g.set_predecessors()
                # The artificial entry/exit tasks carry no real work.
                g.tasks[start_id].dummy_task = True
                g.tasks[end_id].dummy_task = True
            except RuntimeError:
                # NOTE(review): malformed lines raise ValueError/IndexError,
                # which this clause does NOT catch - confirm the intent.
                print('Runtime Exception!')
    file.close()
    return jobs
47 |
--------------------------------------------------------------------------------
/sample.py:
--------------------------------------------------------------------------------
1 | import random
2 | import numpy as np
3 | from matplotlib import pyplot as pyplot
4 | from matplotlib.patches import Rectangle
5 | import reports.plot
6 | from db import reader
7 |
# Open the experiment database and pull the scheduled task placements of one
# multi-workflow run (jobs_id = 5).  The '--'-prefixed lines inside the SQL
# are commented-out filters kept for ad-hoc querying; the trailing empty
# tuple supplies the (currently unused) query parameters.
r = reader.Reader('/var/scratch/rezaeian/mydb_medium')
rows = r.select_query('''
select job_name, job_type, task_id, jobs_id, start_time, finish_time, resource_id,
 resource_speed
from plans p
where jobs_id = 5
--extra_params=? and job_component_id=?
--where job_name=?
''',())
#['multi','fcfs'])
# ('jxxbb', ))
19 |
def name_2_color(name):
    """Map a job name deterministically to an HTML color string "#rrggbb".

    The three channel values are products of character codes taken modulo
    255, so equal names always get equal colors.

    Bug fix: indices now wrap with ``i % len(name)`` so names shorter than
    five characters no longer raise IndexError; behavior for names of
    length >= 5 is unchanged.

    :param name: non-empty job name
    :return: color string like ``"#e1a978"``
    """
    ords = list(map(ord, name))
    k = len(ords)
    c1 = (ords[0] * ords[1 % k]) % 255
    c2 = (ords[3 % k] * ords[4 % k]) % 255
    c3 = (ords[2 % k] * ords[4 % k] * ords[0]) % 255
    return "#{:0>2x}{:0>2x}{:0>2x}".format(c1, c2, c3)
26 |
27 |
# Draw one rectangle per scheduled task, colored by job name, on a
# resource-id (y) versus time (x) Gantt-style chart.
fig = pyplot.figure()
ax = fig.add_subplot(111)
max_x = 0
max_y = 0
job_name_to_color = dict()  # NOTE(review): never populated; kept only for the commented alternative below
for row in rows:
    job_name, job_type, task_id, job_id, start_time, \
        finish_time, resource_id, resource_speed = row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7]

    # Track the plot extent across all rows.
    max_y = max(max_y, resource_id)
    max_x = max(max_x, finish_time)

    reports.plot.add_rect(resource_id, start_time, finish_time, name_2_color(job_name))
    # reports.plot.add_rect(resource_id, start_time, finish_time, job_name_to_color[job_name])

# Plot two corner points, presumably to stretch the axes over the full
# (0,0)-(max_x,1) range before showing the figure - TODO confirm.
ax.plot(max_x, 1, zorder=1)
ax.plot(0, 0, zorder=1)
pyplot.show()
46 |
47 | # colors = ['g','r','y','b','c','m','k']
48 | # fig = pyplot.figure()
49 | # ax = fig.add_subplot(111)
50 | # maxim = 0
51 | # for i in range(100):
52 | # r = random.randint(0, 9)
53 | # start = random.random() * 100
54 | # stop = start + random.random() * 10
55 | # color = random.randint(0, len(colors) - 1)
56 | # reports.plot.add_rect(r, start, stop, colors[color])
57 | # if stop > maxim:
58 | # maxim = stop
59 | # ax.plot(maxim, 1, zorder=1)
60 | # ax.plot(0, 0, zorder=1)
61 | # pyplot.show()
62 |
--------------------------------------------------------------------------------
/starter.py:
--------------------------------------------------------------------------------
# Batch launcher (Python 2 era: check_output's str return is written to a
# text-mode file): runs main1.py over a cartesian product of test names x
# scheduling methods x repetition numbers and stores each run's stdout
# under outputs/.
import sys
import subprocess

# Test-set names encode their parameters, e.g. 't3b05r40p8n3'.
# NOTE(review): presumably t/b/r/p/n encode arrival interval, budget ratio,
# resources, processors and workflow count - confirm against main1.py.
test_names = ['t3b00r20p8n3',
              't3b00r40p8n3',
              't3b00r80p8n3',
              't3b05r20p8n3',
              't3b05r40p8n3',
              't3b05r80p8n3',  # 5
              't3b10r20p8n3',
              't3b10r40p8n3',
              't3b10r80p8n3',
              't6b00r20p8n3',
              't6b00r40p8n3',  # 10
              't6b00r80p8n3',
              't6b05r20p8n3',
              't6b05r40p8n3',
              't6b05r80p8n3',
              't6b10r20p8n3',  # 15
              't6b10r40p8n3',
              't6b10r80p8n3',
              't60b00r20p8n3',  # 18
              't60b00r40p8n3',  # 19
              't60b00r80p8n3',
              't60b05r20p8n3',
              't60b05r40p8n3',  # 22
              't60b05r80p8n3',
              't60b10r20p8n3',  # 24
              't60b10r40p8n3',
              't60b10r80p8n3',  # 26

              't3b00r80p8n3',  # 27
              't3b00r120p8n3',  # 28
              't3b00r160p8n3',
              't3b05r80p8n3',  # 30
              't3b05r120p8n3',
              't3b05r160p8n3',  # 32
              't3b10r80p8n3',
              't3b10r120p8n3',  # 34
              't3b10r160p8n3',

              't1b00r100p3n2',  # 36
              't1b05r100p3n2',  # 37
              't1b10r100p3n2'  # 38
              ]
methods = ['prr', 'rr', 'fcfs', 'fair']

# Command line: i/j/k index ranges (inclusive) into test_names / methods /
# repetition numbers, followed by the database file name.
args = sys.argv[1:]
i_start = int(args[0])
j_start = int(args[1])
k_start = int(args[2])

i_end = int(args[3])
j_end = int(args[4])
k_end = int(args[5])

db = args[6]

for k in range(k_start, k_end + 1):
    for i in range(i_start, i_end + 1):
        for j in range(j_start, j_end + 1):

            test_name, method, number = test_names[i], methods[j], k
            cmd = 'python main1.py {0} /var/scratch/rezaeian/{3} ' \
                  '.p 0 {1} {2}'.format(test_name, number, method, db)

            # print cmd

            # Run the planner synchronously and save its stdout per run.
            command_list = cmd.split()
            output_string = subprocess.check_output(command_list)
            output_file = "outputs/{0}_{1:0>2}_{2}.txt".format(test_name, number, method)
            file_handle = open(output_file, 'w')
            file_handle.write(output_string)
            file_handle.close()
75 |
--------------------------------------------------------------------------------
/temp/a.txt:
--------------------------------------------------------------------------------
1 | Hostname: leno
2 | ==
3 | .............()
4 | +---+----------+--------+------+------+---------+--------+--------+------+------+
5 | |job|constraint| value |ms old|ms new|prev cost|new cost|gap-rate|c-rate|m-rate|
6 | +---+----------+--------+------+------+---------+--------+--------+------+------+
7 | | 3| Deadline | 3.864| 3.334| 3.230| 38| 51.82| 0.06250|0.7333|1.1961|
8 | | 15| Budget | 67.200| 2.332| 2.332| 56| 54.99| 0.07231|1.2220|1.0000|
9 | | 8| Budget | 115.200| 1.646| 1.646| 96| 94.34| 0.14096|1.2211|1.0000|
10 | | 14| Budget | 48.000| 3.122| 3.122| 48| 47.84| 0.06474|1.0033|1.0000|
11 | | 21| Deadline | 1.946| 1.684| 1.845| 30| 25.00| 0.17985|1.2000|1.0548|
12 | | 7| Budget | 80.000| 2.440| 2.440| 80| 79.87| 0.08671|1.0016|1.0000|
13 | | 13| Budget | 72.000| 2.758| 2.758| 72| 46.88| 0.08833|1.5360|1.0000|
14 | | 25| Deadline | 4.217| 4.195| 4.081| 36| 67.70| 0.12189|0.5317|1.0334|
15 | | 2| Deadline | 2.212| 1.912| 1.913| 30| 35.48| 0.14425|0.8455|1.1558|
16 | | 11| Deadline | 1.689| 1.682| 1.610| 25| 32.94| 0.40651|0.7590|1.0488|
17 | | 6| Deadline | 2.970| 2.905| 2.905| 36| 42.52| 0.05488|0.8467|1.0223|
18 | | 5| Deadline | 4.501| 3.883| 4.223| 38| 43.80| 0.03703|0.8677|1.0660|
19 | | 23| Budget | 72.000| 1.989| 1.989| 72| 64.07| 0.12049|1.1237|1.0000|
20 | | 12| Budget | 67.200| 1.273| 1.273| 56| 45.42| 0.20606|1.4795|1.0000|
21 | | 9| Deadline | 1.216| 1.193| 1.214| 248| 223.32| 0.50541|1.1105|1.0010|
22 | | 18| Deadline | 1.983| 1.963| 1.800| 57| 81.84| 0.17994|0.6965|1.1020|
23 | | 16| Deadline | 2.927| 2.769| 2.720| 55| 57.36| 0.10898|0.9588|1.0763|
24 | | 24| Budget | 48.000| 3.137| 3.137| 48| 47.84| 0.06406|1.0034|1.0000|
25 | | 4| Budget | 67.200| 2.319| 2.319| 56| 55.31| 0.07322|1.2149|1.0000|
26 | | 29| Budget | 230.400| 0.877| 0.877| 192| 134.36| 0.34542|1.7147|1.0000|
27 | | 22| Budget | 86.400| 1.987| 1.987| 72| 55.40| 0.14112|1.5595|1.0000|
28 | | 20| Budget | 72.000| 0.873| 0.873| 72| 44.14| 0.36166|1.6311|1.0000|
29 | | 27| Deadline | 1.666| 1.655| 1.656| 109| 109.00| 0.30140|1.0000|1.0061|
30 | | 26| Deadline | 2.956| 2.795| 2.894| 55| 59.75| 0.10710|0.9206|1.0212|
31 | | 10| Deadline | 0.699| 0.662| 0.698| 55| 39.85| 0.73637|1.3800|1.0014|
32 | | 17| Deadline | 1.084| 1.080| 0.985| 117| 91.40| 0.44696|1.2801|1.0998|
33 | | 28| Deadline | 1.941| 1.922| 1.811| 57| 84.00| 0.18751|0.6786|1.0721|
34 | | 19| Deadline | 2.473| 2.447| 2.436| 57| 83.58| 0.11094|0.6820|1.0154|
35 | | 0| Budget | 78.400| 0.554| 0.554| 56| 36.17| 0.58871|2.1676|1.0000|
36 | | 1| Deadline | 1.046| 1.028| 1.028| 36| 41.00| 0.42054|0.8780|1.0176|
37 | +---+----------+--------+------+------+---------+--------+--------+------+------+
38 | ()
39 | Overall Cloud Cost:1978.000
40 | Separate Runs Cost:2055.000
41 |
42 | Unfairness: 0.13082
43 |
--------------------------------------------------------------------------------
/Definitions/Graph.py:
--------------------------------------------------------------------------------
class Task:
    """One node of a workflow graph.

    Successor/predecessor maps go from neighbor task id to edge weight
    (communication volume).  A value of -1 in the class-level defaults
    means "not yet assigned".
    """

    # Class-level defaults, shadowed per-instance once set.
    weight = 0
    upward_rank = -1
    id = -1
    dummy_task = False
    sub_budget = -1
    sub_deadline = -1
    max_cost = -1
    max_time = -1

    def __init__(self, weight=-1, ids=None, es=None, task_id=-1, graph_object=None):
        self.asap = None
        self.successor = {}
        self.predecessor = {}
        self.weight = weight
        self.graph = graph_object
        self.id = task_id
        if es is not None:
            self.add_successors(ids, es)

    def add_successor(self, id, e):
        """
        add a successor edge like this: self.id -(e)-> id
        :type e: float
        :type id: int
        """
        self.successor[id] = e

    def add_successors(self, ids, es):
        """Record one outgoing edge per (ids[i], es[i]) pair."""
        for position in range(len(ids)):
            self.add_successor(ids[position], es[position])

    @property
    def is_budget_task(self):
        """True once a sub-budget has been assigned (-1 means unset)."""
        return self.sub_budget != -1

    @property
    def is_deadline_task(self):
        """True once a sub-deadline has been assigned (-1 means unset)."""
        return self.sub_deadline != -1
41 |
42 |
class Graph:
    """A workflow DAG of Task objects keyed by integer task id.

    startID / endID name the artificial entry and exit tasks; `type` is a
    free-form tag set by the workflow reader.
    """

    startID = -1
    endID = -1
    type = 'unknown'

    def __init__(self):
        self.tasks = {}
        self.name = ''

    def set_name(self, name):
        self.name = name

    def set_type(self, g_type):
        self.type = g_type

    def add_task(self, id, weight, ids, es):
        """Create a Task with the given children/edge-weights and register it."""
        self.tasks[id] = Task(weight, ids, es, id, self)

    def set_start_end(self, start_id, end_id):
        self.startID = start_id
        self.endID = end_id

    def upward_rank(self, node_id, average_resource_power=1, average_bandwidth=1):
        """Recursively compute the HEFT upward rank of node_id.

        Memoized in task.upward_rank (-1 marks "not computed yet"); ranks are
        expressed in time units via the average power/bandwidth divisors.
        """
        task = self.tasks[node_id]
        if task.upward_rank == -1:
            own_cost = task.weight / average_resource_power
            if not task.successor:
                # Exit task: rank is just its own (normalized) weight.
                task.upward_rank = own_cost
            else:
                best = -1
                for child_id, edge in task.successor.items():
                    candidate = self.upward_rank(child_id,
                                                 average_resource_power,
                                                 average_bandwidth) + edge / average_bandwidth
                    if candidate > best:
                        best = candidate
                task.upward_rank = own_cost + best
        return task.upward_rank

    def set_predecessors(self):
        """Invert every successor edge into the target task's predecessor map."""
        for source_id, source in self.tasks.items():
            for target_id, edge_weight in source.successor.items():
                self.tasks[target_id].predecessor[source_id] = edge_weight
88 |
--------------------------------------------------------------------------------
/Scheduler/HEFT.py:
--------------------------------------------------------------------------------
1 | import Definitions.Resources
2 |
3 |
def list_of_task_id_on_upward_rank(g):
    """
    calculates and returns an ordered list of task ids based on their decreasing rank

    Starting from g.startID, repeatedly picks the discovered-but-unlisted task
    with the highest upward rank and reveals its children; ranks must already
    be computed (see Graph.upward_rank).

    Bug fix: the "ready" pool was initialized as a dict (``{}``) and merged
    via set.union - it only worked because union accepts any iterable; it is
    now a proper set, and the redundant single-iteration seed loop is gone.

    :type g: Definitions.Graph.Graph
    :rtype : list
    """
    task_list = [g.startID]
    # Seed with the start task's children; they are the only ready tasks.
    ready_tasks = set(g.tasks[g.startID].successor.keys())
    while len(ready_tasks) != 0:
        p = max(ready_tasks, key=lambda x: g.tasks[x].upward_rank)
        task_list.append(p)
        ready_tasks.remove(p)
        # Reveal p's children as newly ready.
        ready_tasks = ready_tasks | set(g.tasks[p].successor.keys())
    return task_list
26 |
27 |
def schedule(g, resources, upward_rank_is_calculated=False, priority_list=None):
    """Run plain HEFT: place every task of g on its earliest-finish resource.

    :param g: workflow graph (Definitions.Graph.Graph)
    :param resources: resource pool offering select_resource()/schedule()
    :param upward_rank_is_calculated: skip the rank computation when True
    :param priority_list: precomputed scheduling order; derived from the
        upward ranks when omitted
    """
    if not upward_rank_is_calculated:
        g.upward_rank(g.startID, resources.average_power, resources.bandwidth)
    if priority_list is None:
        priority_list = list_of_task_id_on_upward_rank(g)

    for task_id in priority_list:
        current_task = g.tasks[task_id]
        # Ask the pool for the placement minimizing the earliest finish time.
        est, runtime_on_resource, eft, resource_id, place_id = resources.select_resource(current_task)
        placement = Definitions.Resources.TaskSchedule(current_task, est, runtime_on_resource, eft,
                                                       resource_id)
        resources.schedule(placement, place_id)
40 |
41 |
class SchedulerClass:
    """Incremental HEFT scheduler: schedule_next() places one task per call.

    ``budget`` is accepted for signature compatibility with the other
    SchedulerClass implementations (e.g. BudgetPessimistic) but plain HEFT
    is budget-unaware and ignores it.
    """

    def __init__(self, g, resources, budget, upward_rank_is_calculated=False, priority_list=None):
        if not upward_rank_is_calculated:
            g.upward_rank(g.startID, resources.average_power, resources.bandwidth)
        # Bug fix: a caller-supplied priority_list previously left
        # self.priority_list unset, crashing schedule_next().
        if priority_list is None:
            priority_list = list_of_task_id_on_upward_rank(g)
        self.priority_list = priority_list

        self.g = g
        self.resources = resources
        self.last_unscheduled_task_id = 0

    def schedule_next(self, only_test=False):
        """Schedule (or, with only_test=True, just evaluate) the next task.

        Bug fix: the previous version re-scheduled the WHOLE priority list on
        every call, never advanced last_unscheduled_task_id and ignored
        only_test; it now places exactly one task per call, mirroring
        BudgetPessimistic.SchedulerClass.

        :return: None after a real scheduling step; eft when only_test.
        """
        if self.last_unscheduled_task_id not in range(0, len(self.priority_list)):
            return
        t_id = self.priority_list[self.last_unscheduled_task_id]
        task = self.g.tasks[t_id]
        est_best, runtime_on_resource_best, eft_best, resource_id_best, place_id_best = \
            self.resources.select_resource(task)
        if not only_test:
            task_schedule = Definitions.Resources.TaskSchedule(task, est_best, runtime_on_resource_best, eft_best,
                                                               resource_id_best)
            self.resources.schedule(task_schedule, place_id_best)
            self.last_unscheduled_task_id += 1
        else:
            return eft_best

    @property
    def finished(self):
        """True once every task in the priority list has been scheduled."""
        return self.last_unscheduled_task_id >= len(self.priority_list)
69 |
--------------------------------------------------------------------------------
/dbtest.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import db.reader
3 | import db.definitions
4 |
5 |
def tests():
    """Smoke-test the db layer: load test '2' from 'mydb' and dump its resources."""
    reader = db.reader.Reader('mydb')
    row = reader.read_test('2').fetchone()
    test = db.definitions.Test(row)
    print(test.test_name)
    print(len(test.resource_array))
    for r in test.resource_array:
        print(r)  # r[0] power, r[1] price, r[2] number
15 |
16 |
def main(args):
    """Command-line helper that PRINTS shell command lines for launching test
    batches; it does not execute anything itself.

    Sub-commands (args[0]):
      make_test       - print per-iteration ssh + mystarter.sh invocations
      make_super_test - print `python dbtest.py make_test ...` lines, one per node
      planner         - print `python planner.py ...` lines for a test range
    Each sub-command prints its usage when its first argument is '-h'.
    """
    if len(args) < 1:
        print('Required command line arguments are not specified: make_test or make_super_test or planner?')
        exit()
    elif args[0] == 'make_test':
        if args[1] == '-h':
            print("params are: "
                  "test_name "
                  "node_name "
                  "test_numbers_from test_numbers_to "
                  "dbfilename filenamepart start_number number_of_test_sets policy")
        else:
            test_name = args[1]
            node_name = args[2]
            n_from = int(args[3])
            n_to = int(args[4])
            dbfilename = args[5]
            file_name_part = args[6]
            start_number = int(args[7])
            number_of_test_sets = int(args[8])
            policy = args[9]
            # One backgrounded ssh invocation per test number, each logging
            # to its own results/testNNN.txt on the remote node.
            for i in range(n_from, n_to + 1):
                print("ssh {} './mystarter.sh {} {} {} {} {} {} >./results/test{:0>3d}.txt &"
                      "'".format(node_name,
                                 test_name, dbfilename, file_name_part, start_number, number_of_test_sets, policy,
                                 i))
    elif args[0] == 'make_super_test':
        if args[1] == '-h':
            print("params are: test_name run_on_each_node "
                  "dbfilename filenamepart start_number number_of_test_sets policy"
                  " node_name1 node_name2 node_name3 ...")
        else:
            test_name = args[1]
            jump = int(args[2])  # how many consecutive test numbers each node gets
            dbfilename = args[3]
            file_name_part = args[4]
            start_number = int(args[5])
            number_of_test_sets = int(args[6])
            policy = args[7]
            start = 1
            # Partition the test-number range into `jump`-sized chunks, one
            # make_test command per node.
            for node_name in args[8:]:
                print("python dbtest.py make_test"
                      " {} {} {} {} {} {} {} {} {}"
                      "".format(test_name, node_name, start, start + jump - 1,
                                dbfilename, file_name_part, start_number, number_of_test_sets, policy))
                start += jump
    elif args[0] == 'planner':
        if args[1] == '-h':
            print("params are: test_name dbname test_numbers_from test_numbers_to")
        else:
            test_name = args[1]
            db_name = args[2]
            n_from = int(args[3])
            n_to = int(args[4])
            # print("mkdir $1")
            # One backgrounded planner.py invocation per test number.
            for i in range(n_from, n_to + 1):
                print("python planner.py {0} {1} ../plans/{0}/save{2:0>3}.p "
                      "> outputs/{0}_{2:0>3}.txt &".format(test_name, db_name, i))
75 |
76 |
77 | if __name__ == "__main__":
78 | main(sys.argv[1:])
79 |
--------------------------------------------------------------------------------
/Scheduler/BudgetPessimistic.py:
--------------------------------------------------------------------------------
1 | import math
2 | import Scheduler.HEFT
3 | import Definitions.Resources
4 |
5 |
def schedule(g, resources, budget, upward_rank_is_calculated=False, priority_list=None):
    """
    Schedules using BudgetPessimistic algorithm, it tries not to go further than the Budget, if so, it selects the
    less cost effective resource

    Each task gets a sub-budget proportional to its share of the total
    remaining weight before the cost-aware resource selection runs.

    :param priority_list: list
    :param upward_rank_is_calculated: boolean
    :type g: Graph
    :type resources: Definitions.Resources.CostAwareResources
    :type budget: float
    """
    if not upward_rank_is_calculated:
        g.upward_rank(g.startID, resources.average_power, resources.bandwidth)
    if priority_list is None:
        priority_list = Scheduler.HEFT.list_of_task_id_on_upward_rank(g)

    budget_remaining = budget
    budget_allocated = 0
    weight_remaining = math.fsum(task.weight for task in g.tasks.values())
    weight_allocated = 0

    for task_id in priority_list:
        task = g.tasks[task_id]
        # Proportional share of the remaining budget for this task.
        if weight_remaining == 0:
            task.sub_budget = 0
        else:
            task.sub_budget = task.weight / weight_remaining * budget_remaining

        # Cost-aware resource selection followed by the actual placement.
        est, runtime_on_resource, eft, resource_id, place_id, cost = resources.select_resource(task)
        placement = Definitions.Resources.TaskSchedule(task, est, runtime_on_resource, eft, resource_id)
        resources.schedule(placement, place_id)

        # Move this task's weight and cost from "remaining" to "allocated".
        weight_remaining -= task.weight
        weight_allocated += task.weight
        budget_remaining -= cost
        budget_allocated += cost
45 |
46 |
class SchedulerClass:
    """Incremental BudgetPessimistic scheduler: schedule_next() places one
    task per call, assigning each task a sub-budget proportional to its
    share of the remaining total weight.

    :param g: Definitions.Graph.Graph to schedule
    :param resources: Definitions.Resources.CostAwareResources
    :param budget: overall budget to distribute over the tasks
    :param upward_rank_is_calculated: skip the rank computation when True
    :param priority_list: precomputed scheduling order (defaults to HEFT order)
    """

    def __init__(self, g, resources, budget, upward_rank_is_calculated=False, priority_list=None):
        if not upward_rank_is_calculated:
            g.upward_rank(g.startID, resources.average_power, resources.bandwidth)
        # Bug fix: a caller-supplied priority_list previously left
        # self.priority_list unset, crashing schedule_next().
        if priority_list is None:
            priority_list = Scheduler.HEFT.list_of_task_id_on_upward_rank(g)
        self.priority_list = priority_list
        self.g = g
        self.resources = resources
        self.last_unscheduled_task_id = 0

        self.sum_budget_remaining = budget
        self.sum_budget_allocated = 0
        self.sum_weight_remaining = math.fsum(map(lambda t: t.weight, g.tasks.values()))
        self.sum_weight_allocated = 0

    def schedule_next(self, only_test=False):
        """Schedule (or, with only_test=True, just evaluate) the next task.

        :return: None after a real scheduling step; (eft, cost) when only_test.
        """
        if self.last_unscheduled_task_id not in range(0, len(self.priority_list)):
            return
        t_id = self.priority_list[self.last_unscheduled_task_id]
        task = self.g.tasks[t_id]
        # Proportional share of the remaining budget for this task.
        if self.sum_weight_remaining == 0:
            task.sub_budget = 0
        else:
            task.sub_budget = task.weight / self.sum_weight_remaining * self.sum_budget_remaining
        # resource selection:
        est, runtime_on_resource, eft, resource_id, place_id, cost = self.resources.select_resource(task)

        if not only_test:
            # scheduling:
            task_schedule = Definitions.Resources.TaskSchedule(task, est, runtime_on_resource, eft, resource_id)
            self.resources.schedule(task_schedule, place_id)

            # allocation and remaining budget and weight:
            self.sum_weight_remaining -= task.weight
            self.sum_weight_allocated += task.weight

            self.sum_budget_remaining -= cost
            self.sum_budget_allocated += cost
            self.last_unscheduled_task_id += 1
        else:
            return eft, cost

    @property
    def finished(self):
        """True once every task in the priority list has been scheduled."""
        return self.last_unscheduled_task_id >= len(self.priority_list)
92 |
--------------------------------------------------------------------------------
/workflows/S.txt:
--------------------------------------------------------------------------------
1 | Mine_10, 11, 12| 8, 4, 4000| 12, 5, 6, 5000, 2000| 6, 5, 3000| 7, 6, 7, 2000, 3000| 15, 8, 6000| 9, 9, 7000| 4, 9, 5000| 2, 9, 1000| 6, 12, 0| 5, 1, 2, 3, 7000, 5000, 3000| 0, 10, 0| 0
2 | Mine_35, 36, 37| 8, 4, 5, 4000, 6000| 12, 6, 2000| 6, 7, 8, 13, 5000, 2000, 8000| 7, 9, 35, 3000, 2000| 15, 10, 8000| 9, 11, 7000| 4, 12, 3000| 2, 14, 1000| 3, 16, 5000| 7, 11, 16, 17, 3000, 9000, 4000| 18, 18, 5000| 14, 19, 12000| 8, 19, 2000| 13, 20, 10000| 3, 21, 22, 4000, 17000| 4, 22, 6000| 9, 26, 12000| 18, 23, 7000| 10, 11, 37, 5000, 0| 3, 19, 7000| 20, 30, 2000| 3, 25, 26, 11000, 7000| 8, 24, 26, 27, 5000, 6000, 3000| 13, 27, 28, 3000, 8000| 14, 30, 7000| 10, 29, 5000| 3, 32, 5000| 7, 32, 2000| 18, 31, 8000| 8, 31, 15000| 3, 32, 5000| 12, 33, 0| 8, 37, 0| 10, 1, 2, 3, 7000, 5000, 3000| 6, 15, 5000| 0, 34, 35, 0, 0| 0
3 | CyberShake, 31, 32| 0.07, 32, 0| 0.19, 32, 0|39.06, 2, 4, 0, 24000| 0.63, 1, 0|38.49, 2, 6, 0, 24000| 0.8, 1, 0|36.27, 2, 8, 0, 24000| 0.73, 1, 0|32.29, 2, 10, 0, 24000| 0.74, 1, 0|62.25, 2, 12, 0, 24000| 1.42, 1, 0|47.44, 2, 14, 0, 24000| 1.53, 1, 0| 45.6, 2, 16, 0, 24000| 1.53, 1, 0|28.67, 2, 18, 0, 24000| 1.36, 1, 0|24.56, 2, 20, 0, 24000| 1.36, 1, 0|31.05, 2, 22, 0, 24000| 1.43, 1, 0|54.87, 2, 24, 0, 24000| 0.78, 1, 0|23.99, 2, 26, 0, 24000| 1.1, 1, 0|26.46, 2, 28, 0, 24000| 0.85, 1, 0|158.1, 3, 5, 7, 9, 11, 3.10893e+08, 0, 0, 0, 0|96.91, 13, 15, 17, 19, 21, 23, 25, 27, 2.73965e+08, 0, 0, 0, 0, 0, 0, 0| 0, 29, 30, 0, 0| 0
4 | Epigenomics, 25, 26| 7.01, 6, 4.4066e+07| 8.23, 7, 4.81423e+07| 8.15, 8, 3.85535e+07| 9.25, 9, 4.74357e+07|11.89, 10, 4.33145e+07| 2.36, 11, 2.46133e+07| 3.54, 12, 4.4754e+07| 3.26, 13, 2.49693e+07| 3.06, 14, 3.67018e+07| 5.02, 15, 3.731e+07| 4.25, 16, 6.91872e+06| 5.91, 17, 1.03233e+07| 4.03, 18, 5.95687e+06| 4.72, 19, 9.01591e+06| 5.16, 20, 9.03943e+06|2830.54, 21, 7.52035e+06|3196.4, 21, 1.1221e+07|2425.31, 21, 6.47486e+06|3623.08, 21, 9.7999e+06|4062.96, 21, 9.82547e+06|17.23, 22, 4.23034e+07| 0.05, 23, 641552|1375.22, 26, 0|103.52, 1, 2, 3, 4, 5, 8.55343e+07, 9.82622e+07, 8.06232e+07, 9.85925e+07, 8.38472e+07| 0, 24, 0| 0
5 | Inspiral, 31, 32|674.74, 8, 341444|350.25, 8, 165251|594.63, 8, 402601|519.19, 8, 228253|535.21, 8, 233774|332.09, 8, 275315|344.11, 8, 410359| 5.61, 9, 10, 11, 12, 13, 14, 15, 41274, 0, 0, 0, 0, 0, 0| 5.77, 16, 7733| 5.43, 17, 15310| 4.93, 18, 16310| 5.26, 19, 8309| 4.98, 20, 6501| 5.35, 21, 6836| 5.26, 22, 16024|626.38, 23, 455353|612.78, 23, 273971|418.39, 23, 308593|255.61, 23, 412189|305.7, 23, 278778|510.37, 23, 395715|365.16, 23, 367983| 4.85, 32, 0|17.83, 1, 992860|17.97, 2, 983877|17.62, 3, 982316|17.75, 4, 1.0095e+06|18.05, 5, 989550|17.57, 6, 995701|18.23, 7, 978215| 0, 24, 25, 26, 27, 28, 29, 30, 0, 0, 0, 0, 0, 0, 0| 0
6 | Montage, 26, 27|10.59, 10, 408404|10.59, 10, 314191|10.88, 10, 228602|10.81, 10, 176356|10.49, 10, 233476|10.51, 10, 251206|10.51, 10, 271248|10.62, 10, 313128|10.37, 10, 297231| 0.72, 11, 1889| 1.42, 12, 13, 14, 15, 16, 265, 0, 0, 0, 0|10.39, 17, 4.16308e+06|10.64, 17, 4.16183e+06|10.83, 17, 4.18232e+06|10.93, 17, 4.17086e+06|10.76, 17, 4.15765e+06| 1.39, 18, 1599| 3.03, 19, 4.65096e+07| 3.86, 20, 1.86113e+06| 0.45, 27, 0|13.83, 1, 2, 3, 5, 7, 8, 13, 4.17185e+06, 0, 0, 0, 0, 0, 0|13.39, 1, 2, 4, 12, 4.16731e+06, 0, 0, 0| 13.6, 3, 7, 9, 15, 4.174e+06, 0, 0, 0|13.36, 4, 5, 6, 14, 4.15712e+06, 0, 0, 0|13.78, 8, 9, 16, 4.15352e+06, 0, 0| 0, 21, 22, 23, 24, 25, 0, 0, 0, 0, 0| 0
7 | Sipht, 31, 32| 0.1, 8, 1.63629e+06|307.17, 3, 4, 5, 6, 7, 8, 80233, 0, 0, 0, 0, 325028| 1.47, 6, 719003| 4.7, 8, 545|1488.84, 8, 167763| 0, 8, 1.13384e+06| 6.63, 8, 970705| 2.27, 9, 277325| 0, 32, 0| 0.99, 1, 55623| 1.76, 1, 86880| 1.43, 1, 48442| 1.36, 1, 79991| 1.46, 1, 75033| 1.1, 1, 52904| 1.81, 1, 123718| 1.46, 1, 136779| 1.51, 1, 114265| 1.51, 1, 96473| 1.63, 1, 93727| 1.32, 1, 87749| 1.62, 1, 88217| 1.05, 1, 124094| 1, 1, 134298| 1.06, 1, 110071| 1.45, 1, 128024|58.21, 2, 95|1597.92, 2, 6.74374e+06|763.23, 2, 5.16589e+06|49.65, 2, 310| 0, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0| 0
8 |
--------------------------------------------------------------------------------
/db/definitions.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 |
class Result:
    """One row of the `results` table: the outcome of a single scheduling run.

    Field order everywhere (constructor, set_value, rows) is:
    head_id, test_name, method, constraint, deadline, budget, makespan_old,
    makespan_new, cost_old, cost_new, gap_rate, c_rate, m_rate, job_name,
    job_size.
    """

    def __init__(self, head_id, test_name, method, constraint, deadline, budget, makespan_old, makespan_new,
                 cost_old, cost_new, gap_rate, c_rate, m_rate, job_name, job_size):
        # Delegate to set_value so the field assignments are maintained in
        # one place (the original duplicated all 15 assignments).
        self.set_value(head_id, test_name, method, constraint, deadline, budget, makespan_old, makespan_new,
                       cost_old, cost_new, gap_rate, c_rate, m_rate, job_name, job_size)

    def set_value(self, head_id, test_name, method, constraint, deadline, budget, makespan_old, makespan_new,
                  cost_old, cost_new, gap_rate, c_rate, m_rate, job_name, job_size):
        """Assign every field of the record."""
        self.head_id = head_id
        self.test_name = test_name
        self.method = method
        self.constraint = constraint
        self.deadline = deadline
        self.budget = budget
        self.makespan_old = makespan_old
        self.makespan_new = makespan_new
        self.cost_old = cost_old
        self.cost_new = cost_new
        self.gap_rate = gap_rate
        self.c_rate = c_rate
        self.m_rate = m_rate
        self.job_name = job_name
        self.job_size = job_size

    def set_value_by_row(self, row):
        """Populate the record from a 15-element sequence in field order."""
        self.set_value(*row)

    def get_row(self):
        """Return the record as a tuple in field order (inverse of set_value_by_row)."""
        return self.head_id, self.test_name, self.method, self.constraint, self.deadline, self.budget, \
            self.makespan_old, self.makespan_new, self.cost_old, self.cost_new, self.gap_rate, self.c_rate, \
            self.m_rate, self.job_name, self.job_size
52 |
53 |
class Test:
    """One row of the `tests` table: the configuration of a scheduling experiment."""

    def __init__(self, row):
        """
        row = test_name, workload_len, resources, budget_ratio, small, medium, large, bandwidth
        workload_len is the number of workflows
        0 <= budget_ratio <= 1
        small + medium + large = 1
        resources = '{"t": timeslot length, "r": [[resource type specification], ...]}', where
        "resource type specification" is power, price and count.
        For example: '{"t": 5, "r": [[1, 1, 3], [2, 3, 1]]}' means timeslot == 5, and two types of
        resources: type 1: power 1, price 1 and 3 resources, and type 2: power 2, price 3 and 1 resource.
        """
        # Delegate to set_value so assignment and JSON parsing live in one
        # place (the original duplicated the whole body here).
        self.set_value(*row)

    def set_value(self, test_name, workload_len, resources, budget_ratio, small, medium, large, bandwidth):
        """Assign every field and re-parse the `resources` JSON specification."""
        self.test_name = test_name
        self.workload_len = workload_len
        self.resources = resources
        self.budget_ratio = budget_ratio
        self.small = small
        self.medium = medium
        self.large = large
        self.bandwidth = bandwidth

        resource_def = json.loads(self.resources)
        self.time_slot = resource_def['t']  # length of one billing time slot
        self.resource_array = resource_def['r']  # [power, price, count] per resource type

    def set_value_by_row(self, row):
        """Populate the record from a row tuple (same layout as __init__)."""
        self.set_value(*row)
96 |
--------------------------------------------------------------------------------
/reports/plot.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | import matplotlib.pyplot as pyplot
4 | from matplotlib.backends.backend_pdf import PdfPages
5 | from matplotlib.patches import Rectangle
6 |
def max_min(bars):
    """Return the (minimum, maximum) over every value in the nested series.

    `bars` is a non-empty sequence of value sequences; empty inner
    sequences are simply skipped.
    """
    smallest = bars[0][0]
    largest = bars[0][0]
    for value in (v for series in bars for v in series):
        smallest = min(smallest, value)
        largest = max(largest, value)
    return smallest, largest
17 |
def plot(x_list, y_list, xlable, ylable, legend=None, legend_place='best',
         pdf_name=None, show=True, figure_num=None, show_title=False,
         start_y_from_zero=False, do_low_high=True, new_low=None, new_high=None):
    """Draw one line per (x, y) pair on a shared axis, optionally saving to PDF.

    :param x_list: list of x-value sequences, one per curve
    :param y_list: list of y-value sequences, parallel to x_list
    :param xlable: x-axis label (parameter name kept for compatibility)
    :param ylable: y-axis label (parameter name kept for compatibility)
    :param legend: optional sequence of curve labels
    :param legend_place: matplotlib legend location string
    :param pdf_name: when given, the figure is saved to '<pdf_name>.pdf'
    :param show: call pyplot.show() at the end
    :param figure_num: forwarded to pyplot.figure(num=...)
    :param show_title: use pdf_name as the figure title when saving
    :param start_y_from_zero: force the lower y-limit to zero
    :param do_low_high: derive y-limits from the data (overrides new_low/new_high)
    :param new_low: explicit lower y-limit, used when do_low_high is False
    :param new_high: explicit upper y-limit, used when do_low_high is False
    """
    figure = pyplot.figure(num=figure_num, figsize=(6, 3.5), dpi=80)
    ax1 = figure.add_subplot(111)
    pyplot.xlabel(xlable)
    pyplot.ylabel(ylable)
    pyplot.tight_layout()
    pyplot.grid(True, axis='both')
    line_spec = ['b.-', 'rx-', 'go-', 'md-', 'b.-', 'rx-', 'go-', 'md-']

    ax1.ticklabel_format(axis='y', style='sci', useOffset=False)

    if do_low_high:
        # Pad the data range by 20% on each side.
        low, high = max_min(y_list)
        margin = 0.2 * (high - low)
        new_low = low - margin
        new_high = high + margin
        # Bug fix: the original assigned the dead local `low = 0` here, so
        # a negative lower limit was never clamped to zero.
        if new_low < 0:
            new_low = 0
    if start_y_from_zero and new_low is not None:
        # Previously accepted but ignored; force the y-axis to start at zero.
        new_low = 0

    if new_low is not None:
        pyplot.ylim([new_low, new_high])

    for i, x in enumerate(x_list):
        y = y_list[i]
        legend_label = '' if legend is None else legend[i]
        pyplot.plot(x, y, line_spec[i], label=legend_label)

    if legend is not None:
        # (leftover debug print of the legend removed)
        pyplot.legend(loc=legend_place)
    if pdf_name is not None:
        if show_title:
            pyplot.title(pdf_name)
        pdf = PdfPages(pdf_name + '.pdf')
        pdf.savefig()
        pdf.close()

    if show:
        pyplot.show()
62 | # =========== SAMPLE: ============ #
63 |
64 | # x1 = np.arange(1, 10.1, 0.1)
65 | # y1 = np.sin(x1)
66 | #
67 | # x2 = np.arange(1, 10.1, 0.1)
68 | # y2 = np.tan(x2)
69 | # plot([x1, x2], [y1, y2], 'Utilization', 'Slowdown', ('Sin', 'Tan'), 'upper left')
70 | #
71 |
def bar(bars, title, xlabel, ylabel, xticklabels, legend,
        legend_place='best', width=0.15, pdf_name=None, do_low_high=True):
    """Draw a grouped bar chart: one group per x tick, one bar series per entry of `bars`.

    :param bars: list of equal-length value sequences (one per series)
    :param title: chart title
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param xticklabels: labels for the groups along the x axis
    :param legend: sequence of series labels
    :param legend_place: matplotlib legend location string
    :param width: width of a single bar
    :param pdf_name: when given, the figure is saved to this PDF path
    :param do_low_high: derive the y-limits from the data
    """
    n = len(bars[0])

    ind = np.arange(n)  # the x locations for the groups

    fig, ax = pyplot.subplots()

    pyplot.tight_layout()
    pyplot.grid(True, axis='both')

    ax.ticklabel_format(axis='y', style='sci', useOffset=False)

    if do_low_high:
        # Pad the data range by 20% on each side.
        low, high = max_min(bars)
        margin = 0.2 * (high - low)
        new_low = low - margin
        new_high = high + margin
        # Bug fix: the original assigned the dead local `low = 0`, so the
        # intended clamp of a negative lower limit never applied.
        if new_low < 0:
            new_low = 0
        # Bug fix: ylim must only be set in this branch -- with
        # do_low_high=False the original raised NameError on the
        # undefined new_low/new_high.
        pyplot.ylim([new_low, new_high])

    colors = ['r', 'y', 'm', 'b']

    for i, single_bar in enumerate(bars):
        # Cycle the palette so more than four series no longer raises IndexError.
        ax.bar(ind + i * width, single_bar, width, color=colors[i % len(colors)])

    # add some text for labels, title and axes ticks
    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)
    ax.set_title(title)
    ax.set_xticks(ind + len(bars) / 2.0 * width)
    ax.set_xticklabels(xticklabels)

    pyplot.tight_layout()
    # Bug fix: the location must go through loc=; passed positionally it was
    # interpreted as the labels argument. The duplicate ax.legend call is gone.
    pyplot.legend(legend, loc=legend_place)

    if pdf_name is not None:
        pdf = PdfPages(pdf_name)
        pdf.savefig()
        pdf.close()

    pyplot.show()
118 |
119 |
120 | # =========== SAMPLE: ============ #
121 |
122 | # bars = [(20, 35, 30, 35, 27), (25, 32, 34, 20, 25), (23, 27, 24, 30, 15), (12, 45, 17, 9, 16)]
123 | # xticklabels = ('G1', 'G2', 'G3', 'G4', 'G5')
124 | # xlabel = 'groups'
125 | # ylabel = 'Scores'
126 | # title = 'some fancy title'
127 | # legend = ('Men', 'Women', 'Other', 'Another')
128 | #
129 | # bar(bars, title, xlabel, ylabel, xticklabels, legend)
130 |
def add_rect(resource, start_time, finish_time, color, tickness=0.1):
    """Draw one filled rectangle for a task occupying `resource` from start to finish.

    The rectangle sits in the horizontal band of height `tickness`
    belonging to `resource` on the current axes.
    """
    anchor = (start_time, resource * tickness)  # lower-left corner
    span = finish_time - start_time             # rectangle width
    patch = Rectangle(anchor, span, tickness,
                      facecolor=color,
                      edgecolor=color,
                      alpha=0.75,
                      zorder=2)
    pyplot.gca().add_patch(patch)
140 |
--------------------------------------------------------------------------------
/graph_check.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import random
3 | import sys
4 | import socket
5 | # import math
6 | import pickle
7 |
8 | import os
9 |
10 | from Definitions.MultiWorkflow.JobList import Constraint, JobItem
11 | import Definitions.Resources
12 | import copy
13 | import db.definitions
14 | import db.reader
15 | import db.writer
16 | import db.files
17 | from os.path import join
18 |
19 |
def print_list(l):
    """Print the members of `l` on one line, separated by ", ", ending with a newline.

    The original built the line member-by-member with a local variable named
    ``str`` that shadowed the builtin; a single join avoids both issues.
    """
    print(", ".join("{}".format(member) for member in l))
28 |
29 |
# Debug dump: load every workflow suite and print each workflow's sorted
# task runtimes, then stop. Keys are size classes; the trailing comments
# give the approximate task counts per class.
# NOTE(review): Definitions.WorkflowReader is not imported at the top of
# this file (only Definitions.Resources is) -- presumably the Definitions
# package __init__ makes it reachable; verify before running.
all_jobs = {'S': Definitions.WorkflowReader.reader.read_jobs('S.txt'),  # 25-30
            'M': Definitions.WorkflowReader.reader.read_jobs('M.txt'),  # 50-60
            'L': Definitions.WorkflowReader.reader.read_jobs('L.txt'),  # 100
            'XXL': Definitions.WorkflowReader.reader.read_jobs('XXL.txt')  # 1000
            }
for job_class in all_jobs.keys():
    for name in all_jobs[job_class].keys():
        output_file = name + '_' + job_class
        print(output_file, end=', ')
        g = all_jobs[job_class][name]
        tasks = g.tasks
        runtimes = list(sorted(map(lambda t: t.weight, tasks.values())))
        # [2:] skips the two smallest runtimes -- presumably the zero-weight
        # start/end dummy tasks; confirm against the workflow file format.
        print_list(runtimes[2:])


# Halt here: main() below is dead code while this debug dump is active.
exit()
46 |
47 |
def main(args):
    """Load pickled plan files for a test run and print each workflow's sorted task runtimes.

    Reads args[0..3] as: testname, filenamepart, start_number,
    number_of_test_sets (see the usage message below, which also lists the
    dbfilename/policy arguments used by the full main1.py driver).
    """
    try:
        # Bug fix: args[0]..args[3] are read below, so at least 4 arguments
        # are required (the original guard only checked for 1).
        if len(args) < 4:
            print('Required command line arguments are not specified\n'
                  ' usage: python main1.py testname dbfilename filenamepart start_number number_of_test_sets policy')
            exit()
        test_name = args[0]
        file_name_part = args[1]
        start = int(args[2])
        file_number = int(args[3])

        test_directory = join('../plans', test_name)

        file_names = db.files.file_list(test_directory, file_name_part, start, file_number)
        file_list = [join(test_directory, f) for f in file_names]

        if len(file_list) == 0:
            print("No input file")
            exit()
        # ----------------------- Retrieving Everything needed:
        numbers = []
        resources_set = []
        graph_set = []
        makespan_list = []
        cost_list = []
        constraint_values = []
        constraint = []
        job = []
        names = []
        test = 0

        # Merge every pickled plan file into the aggregate lists; `test`
        # keeps the value from the last file read.
        for dumb_file in file_list:
            # NOTE(review): pickle.load executes arbitrary code from the
            # file -- only feed trusted plan files to this script. The
            # handle is closed promptly (the original leaked it).
            with open(dumb_file, 'rb') as fh:
                from_retrieved = pickle.load(fh)

            test, numbers2, resources_set2, graph_set2, makespan_list2, cost_list2, constraint_values2,\
                constraint2, job2, names2 = from_retrieved

            numbers += numbers2
            resources_set += resources_set2
            graph_set += graph_set2
            makespan_list += makespan_list2
            cost_list += cost_list2
            constraint_values += constraint_values2
            constraint += constraint2
            job += job2
            names += names2

        # --------------
        to_do = list(range(len(names)))
        random.shuffle(to_do)
        # --------------

        host_name = socket.gethostname()

        print("Hostname: {}".format(host_name))

        test.workload_len = len(to_do)
        workload_len = test.workload_len

        def print_list(l):
            # Print the members of `l` on one line, separated by ", "
            # (avoids the builtin-shadowing `str` local of the original).
            print(", ".join("{}".format(member) for member in l))

        for i in range(workload_len):
            tasks = graph_set[i].tasks
            runtimes = list(sorted(map(lambda t: t.weight, tasks.values())))
            print_list(runtimes)

    except Exception as e:
        # Report the failure location (type, file, line), then re-raise.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        raise e
130 |
# Script entry point: run main() with the CLI arguments.
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except Exception as e:
        # Report where the failure happened (type, file, line).
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        # NOTE(review): unlike main()'s own handler, the exception is NOT
        # re-raised here, so the process still exits with status 0 on
        # failure -- confirm that is intended.
        print(exc_type, fname, exc_tb.tb_lineno)
--------------------------------------------------------------------------------
/workflows/M.txt:
--------------------------------------------------------------------------------
1 | Epigenomics, 48, 49| 5.48, 11, 2.67662e+07|11.18, 12, 3.14357e+07| 9.13, 13, 2.43896e+07| 5.82, 14, 3.24926e+07| 4.22, 15, 2.43221e+07|11.17, 16, 3.17608e+07| 8.67, 17, 3.01627e+07| 8.72, 18, 2.95032e+07|11.09, 19, 3.59853e+07| 5.68, 20, 2.73005e+07| 1.23, 21, 3.14857e+07| 1.45, 22, 1.93601e+07| 1.89, 23, 2.89001e+07| 1.71, 24, 2.04631e+07| 1.58, 25, 1.71149e+07| 2.24, 26, 2.21471e+07| 2.59, 27, 1.87186e+07| 2.4, 28, 1.64022e+07| 1.68, 29, 3.24935e+07| 1.82, 30, 3.87698e+07| 3.37, 31, 7.13214e+06| 2.54, 32, 5.61031e+06| 3.57, 33, 6.76396e+06| 3.1, 34, 4.72355e+06| 3.17, 35, 3.9511e+06| 3, 36, 5.06072e+06| 2.45, 37, 5.36554e+06| 2.36, 38, 4.26592e+06| 5.23, 39, 7.60187e+06| 5.51, 40, 9.78306e+06|4560.56, 41, 7.75233e+06|4488.47, 41, 6.09817e+06|3730.73, 41, 7.35213e+06|4700.76, 41, 5.13429e+06|4269.66, 41, 4.29467e+06|2701.07, 42, 5.50078e+06|3780.17, 42, 5.83211e+06|2322.34, 42, 4.63687e+06|6007.38, 42, 8.2629e+06|2946.16, 42, 1.06338e+07|13.12, 43, 2.88977e+07|15.02, 43, 3.28928e+07|25.53, 44, 5.8293e+07| 0.05, 45, 793749|1633.96, 49, 0|44.45, 1, 2, 3, 4, 5, 5.30135e+07, 6.05171e+07, 5.01909e+07, 6.36095e+07, 4.75609e+07| 28.3, 6, 7, 8, 9, 10, 6.45142e+07, 6.22112e+07, 5.69548e+07, 6.92984e+07, 5.53655e+07| 0, 46, 47, 0, 0| 0
2 | Inspiral, 51, 52| 378, 13, 221205|483.32, 13, 336074|285.03, 13, 321352|300.19, 13, 468598|560.33, 13, 159433|689.29, 13, 199001|662.65, 13, 202897|542.77, 13, 303901|495.09, 13, 345061|244.47, 13, 160601|470.14, 13, 182454|430.23, 13, 261596| 5.29, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 35410, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0| 5.13, 26, 10511| 5.06, 27, 13843| 4.94, 28, 18477| 4.89, 29, 10399| 5.34, 30, 13329| 4.95, 31, 10556| 4.95, 32, 13896| 5.26, 33, 15855| 5.68, 34, 11162| 5.21, 35, 6744| 5.32, 36, 12659| 4.89, 37, 7182|687.76, 38, 394336|271.31, 38, 219786|494.07, 38, 245057|636.81, 38, 371080|472.41, 38, 310860|616.34, 38, 417224|475.5, 38, 255028|641.92, 38, 414920|393.45, 38, 348073|381.65, 38, 236051|381.21, 38, 331834|477.44, 38, 443350| 5.28, 52, 0|18.06, 1, 986360|18.17, 2, 972431|17.95, 3, 998873|18.63, 4, 950797|18.27, 5, 993450|18.05, 6, 954179| 18.1, 7, 958002|17.46, 8, 974790| 18.4, 9, 987959|18.81, 10, 980842|18.29, 11, 996105|18.19, 12, 1.02819e+06| 0, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0| 0
3 | CyberShake, 51, 52| 0.24, 52, 0| 0.17, 52, 0|59.57, 2, 4, 0, 24000| 1.4, 1, 0| 39.9, 2, 6, 0, 24000| 1.01, 1, 0|64.21, 2, 8, 0, 24000| 1.32, 1, 0|61.21, 2, 10, 0, 24000| 0.99, 1, 0|49.37, 2, 12, 0, 24000| 0.88, 1, 0| 41.4, 2, 14, 0, 24000| 1.3, 1, 0|55.11, 2, 16, 0, 24000| 1.57, 1, 0|50.96, 2, 18, 0, 24000| 1.44, 1, 0|36.65, 2, 20, 0, 24000| 1.56, 1, 0|42.76, 2, 22, 0, 24000| 1.03, 1, 0|29.64, 2, 24, 0, 24000| 0.77, 1, 0| 39.3, 2, 26, 0, 24000| 1.38, 1, 0|54.24, 2, 28, 0, 24000| 1.31, 1, 0|41.72, 2, 30, 0, 24000| 0.68, 1, 0|64.06, 2, 32, 0, 24000| 1.28, 1, 0|23.13, 2, 34, 0, 24000| 0.77, 1, 0|43.73, 2, 36, 0, 24000| 1.37, 1, 0|37.83, 2, 38, 0, 24000| 1.54, 1, 0|57.97, 2, 40, 0, 24000| 1.36, 1, 0| 44.4, 2, 42, 0, 24000| 1.25, 1, 0| 39.7, 2, 44, 0, 24000| 1.29, 1, 0|46.22, 2, 46, 0, 24000| 1.21, 1, 0|73.14, 3, 5, 7, 9, 2.73969e+08, 0, 0, 0|185.98, 11, 13, 15, 17, 19, 2.58348e+08, 0, 0, 0, 0|90.56, 21, 23, 25, 27, 29, 31, 2.9595e+08, 0, 0, 0, 0, 0|124.68, 33, 35, 37, 39, 41, 43, 45, 1.46306e+08, 0, 0, 0, 0, 0, 0| 0, 47, 48, 49, 50, 0, 0, 0, 0| 0
4 | Sipht, 61, 62| 0.05, 8, 1.47896e+06|221.3, 3, 4, 5, 6, 7, 8, 89340, 0, 0, 0, 0, 303157| 1.33, 6, 715862| 5.17, 8, 544|1263.92, 8, 185410| 0, 8, 691501| 5.15, 8, 754021| 2.2, 9, 294715| 0, 62, 0| 0.05, 8, 17, 1.58334e+06, 0|245.43, 3, 8, 12, 13, 14, 15, 16, 17, 81291, 364871, 0, 0, 0, 0, 0, 0| 1.46, 6, 15, 876666, 0| 4.08, 8, 17, 544, 0|1285.05, 8, 17, 169882, 0| 0, 8, 17, 1.36494e+06, 0| 6.37, 8, 17, 658444, 0| 1.38, 9, 18, 294715, 0| 0, 9, 62, 294715, 0| 0.89, 1, 85232| 1.67, 1, 93665| 1.31, 1, 77176| 0.75, 1, 90999| 1.05, 1, 71862| 1.3, 1, 116996| 0.88, 1, 56899| 1.17, 1, 92450| 1.49, 1, 77945| 1.18, 1, 99450| 1.59, 1, 101435| 1.34, 1, 51299| 1.66, 1, 90880| 1.07, 1, 81462| 1.13, 1, 69995| 0.98, 1, 78039| 1.43, 1, 50862| 1.13, 1, 92317| 45.9, 2, 310|67.18, 2, 96|3000.99, 2, 4.65506e+06|987.53, 2, 6.45994e+06| 1.17, 10, 122885| 1.21, 10, 47106| 0.95, 10, 126151| 1.17, 10, 51626| 1.52, 10, 49205| 1.24, 10, 119097| 1.59, 10, 76257| 1.41, 10, 114426| 1.41, 10, 119876| 0.7, 10, 129191| 1.21, 10, 99890| 1.46, 10, 91742| 1.26, 10, 136299| 1.3, 10, 89818| 1.18, 10, 118384| 1.12, 10, 91386|2074.14, 2, 11, 5.50432e+06, 0|32.99, 2, 11, 96, 0|1065.08, 2, 11, 4.8875e+06, 0|29.63, 2, 11, 310, 0| 0, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0| 0
5 | Montage, 51, 52|10.65, 29, 425541|10.68, 29, 330000|10.69, 29, 351987|10.78, 29, 378266|10.56, 29, 267120|10.59, 29, 190570| 10.7, 29, 345692|10.54, 29, 157793|10.51, 29, 332486|10.71, 29, 405131|10.48, 29, 287937|10.31, 29, 366476|10.63, 29, 390046|10.36, 29, 302010| 10.6, 29, 338777|10.56, 29, 386009|10.28, 29, 296484|10.51, 29, 218054|10.41, 29, 328551|10.54, 29, 386935|10.64, 29, 314756|10.62, 29, 277759|10.65, 29, 419684|10.68, 29, 342607|10.69, 29, 147754| 10.6, 29, 298295|10.56, 29, 168183|10.56, 29, 172213| 2.24, 30, 5878| 4.2, 31, 32, 33, 34, 35, 36, 37, 38, 425, 0, 0, 0, 0, 0, 0, 0|10.99, 39, 4.16247e+06|10.66, 39, 4.17252e+06|10.47, 39, 4.15523e+06|10.53, 39, 4.15466e+06|10.74, 39, 4.16547e+06|10.49, 39, 4.14291e+06|10.54, 39, 4.15333e+06|10.42, 39, 4.14612e+06| 3.2, 40, 3558| 4.75, 41, 6.96933e+07| 4.88, 42, 2.78885e+06| 1.22, 52, 0|13.08, 1, 2, 3, 4, 5, 6, 17, 20, 31, 4.16124e+06, 0, 0, 0, 0, 0, 0, 0, 0|13.48, 2, 6, 7, 8, 9, 10, 13, 21, 32, 4.15433e+06, 0, 0, 0, 0, 0, 0, 0, 0|13.55, 3, 8, 11, 12, 25, 33, 4.14652e+06, 0, 0, 0, 0, 0|13.31, 4, 12, 13, 14, 15, 16, 18, 22, 26, 34, 4.16962e+06, 0, 0, 0, 0, 0, 0, 0, 0, 0| 13.6, 5, 16, 20, 21, 22, 23, 27, 36, 4.16552e+06, 0, 0, 0, 0, 0, 0, 0|13.43, 9, 15, 17, 18, 19, 24, 35, 4.16855e+06, 0, 0, 0, 0, 0, 0|13.17, 10, 25, 26, 27, 28, 38, 4.16008e+06, 0, 0, 0, 0, 0| 13.6, 23, 24, 37, 4.15828e+06, 0, 0| 0, 43, 44, 45, 46, 47, 48, 49, 50, 0, 0, 0, 0, 0, 0, 0, 0| 0
6 |
--------------------------------------------------------------------------------
/db/writer.py:
--------------------------------------------------------------------------------
1 | import db.connection
2 | import db.definitions
3 |
4 |
class Writer:
    """Creates and populates the SQLite tables (plans, tests, tasks, results,
    plan_head, result_head) used to store scheduling experiments."""

    def __init__(self, db_file, buffer_size=1000):
        """
        :param db_file: path of the SQLite database file
        :param buffer_size: number of buffered writes before an automatic commit
        """
        self.connection = db.connection.Connection(db_file)
        self.cursor = self.connection.get_cursor()
        self.internal_counter = 0  # writes since the last buffered commit
        self.buffer_size = buffer_size

    def close(self):
        """Commit outstanding work and close the underlying connection."""
        self.connection.commit()
        self.connection.close()

    def create_plan(self, recreate=False):
        """Create the `plans` table; drop and recreate it when `recreate` is True."""
        if recreate:
            self.cursor.execute('''DROP Table If Exists plans ''')

        self.cursor.execute('''
        Create Table If Not Exists plans
        (`id` integer primary key autoincrement,
        `job_name` text,
        `job_type` text,
        `task_id` integer,
        `jobs_id` integer,
        `start_time` real,
        `finish_time` real,
        `resource_id` integer,
        `resource_speed` integer,
        `job_component_id` text default NULL,
        `extra_params` text default NULL
        )
        ''')
        self.connection.commit()

    def create_plan_head(self, recreate=False):
        """Create the `plan_head` table; drop and recreate it when `recreate` is True."""
        if recreate:
            self.cursor.execute('''DROP Table If Exists plan_head ''')

        self.cursor.execute('''
        Create Table If Not Exists plan_head
        (`id` integer primary key autoincrement,
        `testname` text,
        `method` text,
        `job_count` integer,
        `job_component_id` text default NULL,
        `extra_params` text default NULL
        )
        ''')
        self.connection.commit()

    def create_tests(self):
        """Create the `tests` table (fails if it already exists)."""
        self.cursor.execute('''CREATE TABLE tests ("testname" TEXT NOT NULL DEFAULT (1),"workload_len" INTEGER NOT NULL
        DEFAULT (20),"resources" TEXT NOT NULL DEFAULT ('{"t":3, "r":[[1, 1, 10], [2, 3, 10], [3, 8, 10]]}'),
        "budget_ratio" REAL NOT NULL DEFAULT (0.5),"small" REAL NOT NULL DEFAULT (0.5),"medium" REAL NOT NULL
        DEFAULT (0.3),"large" REAL NOT NULL DEFAULT (0.2),"BW" INTEGER NOT NULL DEFAULT (1000))''')
        self.connection.commit()

    def create_tasks(self):
        """Create the `tasks` table (fails if it already exists)."""
        self.cursor.execute('''CREATE TABLE "tasks" ("testname" TEXT NOT NULL,"job_id" INTEGER NOT NULL,
        "task_id" INTEGER NOT NULL,"resource_number" INTEGER NOT NULL,"start" REAL NOT NULL,"end" REAL NOT NULL,
        "id" INTEGER PRIMARY KEY NOT NULL)''')
        self.connection.commit()

    def create_results(self):
        """Create the `results` table (fails if it already exists).

        Bug fix: the original SQL closed the column list right after
        "m_rate", leaving "job_name"/"job_size" outside the parentheses --
        a syntax error that made this method always fail.
        """
        self.cursor.execute('''CREATE TABLE "results" (
        "id" INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
        "head_id" INTEGER NOT NULL,
        "testname" TEXT NOT NULL,
        "method" TEXT NOT NULL,
        "constraint" TEXT NOT NULL DEFAULT ('Budget'),
        "deadline" REAL NOT NULL DEFAULT (-1),
        "budget" REAL NOT NULL DEFAULT (-1),
        "makespan_old" REAL NOT NULL,
        "makespan_new" REAL NOT NULL,
        "cost_old" REAL NOT NULL,
        "cost_new" REAL NOT NULL,
        "gap_rate" REAL NOT NULL,
        "c_rate" REAL NOT NULL,
        "m_rate" REAL NOT NULL,
        "job_name" TEXT NOT NULL DEFAULT ('Not Specified'),
        "job_size" INTEGER NOT NULL DEFAULT(0))''')
        self.connection.commit()

    def write_results(self, results, commit_now=True):
        """Insert one db.definitions.Result row (via results.get_row())."""
        self.cursor.execute('''INSERT INTO results (head_id, testname, method, `constraint`, deadline, budget,
        makespan_old, makespan_new, cost_old, cost_new, gap_rate, c_rate, m_rate, job_name, job_size)
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', results.get_row())
        if commit_now:
            self.connection.commit()

    def commit(self):
        """Commit the current transaction."""
        self.connection.commit()

    def create_result_head(self):
        """Create the `result_head` table (fails if it already exists)."""
        self.cursor.execute('''CREATE TABLE "result_head" (
        "id" INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
        "testname" TEXT NOT NULL,
        "separate_cost" INTEGER NOT NULL DEFAULT (-1),
        "U" REAL NOT NULL DEFAULT(-1),
        "gap_rate" REAL NOT NULL DEFAULT(-1),
        "workload_len" INTEGER NOT NULL DEFAULT(0))''')
        self.connection.commit()

    def write_result_head(self, test_name):
        """Insert a result_head row for `test_name` and return its new id."""
        self.cursor.execute('''INSERT INTO result_head (id, testname) VALUES (NULL, ?)''', (test_name,))
        row_id = self.cursor.lastrowid  # id of the row just inserted
        self.connection.commit()
        return row_id

    def write_plan_head(self, test_name, method='single', job_count=1):
        """Insert a plan_head row and return its new id."""
        self.cursor.execute('''
        INSERT INTO plan_head (id, testname, method, job_count)
        VALUES (NULL, ?, ?, ?)
        ''', (test_name, method, job_count))
        row_id = self.cursor.lastrowid
        self.connection.commit()
        return row_id

    def change_result_head(self, id, test_name, separate_cost, u, workload_len, final_gap_rate, t, c):
        """Update an existing result_head row identified by `id`.

        NOTE(review): this updates columns `t` and `c`, which
        create_result_head() does not define -- presumably the deployed
        schema was extended; verify before relying on this method.
        """
        self.cursor.execute('''UPDATE result_head
                      SET testname=?, separate_cost=?, U=?, workload_len=?,
                      gap_rate=?, t=?, c=?
                      WHERE id=?''',
                            (test_name, separate_cost, u,
                             workload_len, final_gap_rate, t, c, id))
        self.connection.commit()

    def write_plan(self, job_name, job_type, task_id, jobs_id, start_time, finish_time, resource_id,
                   resource_speed, job_component_id='', extra_params=''):
        """Insert one plans row; commits lazily through buffer_based_commit()."""
        self.cursor.execute('''
        INSERT INTO plans (id, job_name, job_type, task_id, jobs_id, start_time, finish_time, resource_id,
        resource_speed, job_component_id, extra_params)
        VALUES (NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
                            (job_name, job_type, task_id, jobs_id, start_time, finish_time, resource_id,
                             resource_speed, job_component_id, extra_params))
        self.buffer_based_commit()

    def buffer_based_commit(self):
        """Commit only after `buffer_size` buffered writes, then reset the counter."""
        self.internal_counter += 1
        if self.internal_counter > self.buffer_size:
            self.commit()
            self.internal_counter = 0
145 |
146 | # writing and getting the row_id:
147 | # g.db.execute('INSERT INTO downloads (name, owner, mimetype) VALUES (?, ?, ?)', [name, owner, mimetype])
148 | # file_entry = query_db('SELECT last_insert_rowid()')
149 | # g.db.commit()
150 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Multi-workflow Scheduler
2 | This is an implementation for a multi-workflow scheduler on cloud.
3 |
4 | ```
5 | +------------+
6 | | Schedulers |
7 | workflow | - HEFT |
8 | files | - BHEFT |
9 | | | - ICPCP |
10 | | +------------+
11 | v |
12 | +------------------------------+ |
13 | |WorkflowReader.reader.read_job| | +----------------------+
14 | +------------------------------+ v | Resources |
15 | | +--------------------------+ | +------------------+ |
16 | | | Multi-workflow Scheduler | | |CostAwareResources| |
17 | v +-------->| Policies: |<------>| +------------------+ |
18 | Jobs | | - RR | +----------------------+
19 | +--------+ | | - PRR | |
20 | | Graphs |----------+ | - FCFS | v
21 | +--------+ | - Fair | Schedule Plans
22 | +--------------------------+
23 | ```
24 |
25 | ## Workflows
Planning is carried out on the workflows. They are stored in the S.txt, M.txt, L.txt, and XXL.txt files, whose names indicate the size of the workflows (S for Small, M for Medium, etc.).
27 |
28 | Workflows are read from those files by `Definitions.WorkflowReader.reader.read_jobs` function. The read instance is a list of `Definitions.Graph` instances. A `Graph` class provides required information and operation on a set of `Task` instances (implemented in the same file).
29 |
30 | I have generated several scientific workflows (SIPHT, CyberShake, LIGO, Montage, and Epigenomics) using Pegasus, and stored them in a straightforward (but not standard) text format. I have also developed a minimal javascript tool to visualize those workflows. I'll add that to the project soon.
31 |
32 | ## Resources
33 | `Resources` class is implemented in `Definitions` folder. It provides the behavior we expect from the resources on a computing cluster. Since the proposed method is designed for computing clouds, where cost of the resources is important, `Resources` class is inherited by `CostAwareResources` class, considering the costs of the resources. Resource classes are responsible for assigning, querying, scheduling (on specific resource), showing, and saving the resources.
34 |
35 | ## workflow schedulers
36 | There are several schedulers implemented in this project.
37 |
38 | ### multi-workflow schedulers
39 | The main task is to implement a multi-workflow scheduler. The whole multi-workflow scheduling is separated among different files of the project. There are four policies implemented to do so:
40 | - FCFS
41 | - Round-Robin
42 | - Priority Round-Robin
43 | - Fair
44 |
These policies are implemented in a function in `main1.py`, and they are called like this:
46 | ```py
47 | if policy == 'prr':
48 | prr_policy()
49 | elif policy == 'rr':
50 | rr_policy()
51 | elif policy == 'fcfs':
52 | fcfs_policy()
53 | elif policy == 'fair':
54 | fair_policy()
55 | ```
56 | Above functions are called based on the selected policy in the arguments of the `main1.py`. In each policy, a set of workflows stored in `jobs` array are scheduled. Each entry of `jobs` array is an instance of `JobItem` class in `Definitions\MultiWorkflow` folder.
57 |
58 | The difference between policies is the way they select the next workflow to schedule from. Each job has a `scheduler` instance (the class is in `Scheduler` folder), which schedules its next task on an instance of `Resources` class (the class is in `Definitions` folder). Since we are doing multi-workflow scheduling, the resources are the same for all of the workflows.
59 |
60 | ### HEFT workflow scheduler
61 | It is implemented in `Scheduler/HEFT.py` file. This file contains a `SchedulerClass` which accepts a `Resources` instance and schedules the given workflow (stored in a `Graph` instance `g`) on them.
62 |
63 | It's an implementation for the Heterogeneous Earliest-Finish-Time (HEFT) algorithm presented [here 2002](https://ieeexplore.ieee.org/document/993206).
64 |
65 | ### BHEFT workflow scheduler
66 | It is implemented in `Scheduler/BHEFT.py` file. This file contains a `SchedulerClass` which accepts a `CostAwareResources` instance and schedules the given workflow on them. There are several call samples in the python file.
67 |
It's an implementation of the Budget-constrained Heterogeneous Earliest Finish Time (BHEFT) algorithm presented [here 2012](https://link.springer.com/chapter/10.1007/978-3-642-28675-9_8).
69 |
70 | ### IC-PCP workflow scheduler
71 | It is implemented in `Scheduler/ICPCP.py` file. This file contains a `SchedulerClass` which accepts a `CostAwareResources` instance and schedules the given workflow on them. There is a small guide at the end of that python file.
72 |
73 | It is an implementation for IaaS Cloud Partial Critical Paths (IC-PCP) algorithm presented [in this paper 2013](https://www.sciencedirect.com/science/article/pii/S0167739X12001008).
74 |
75 |
76 | ## How to run?
77 | There are three main runners:
78 |
79 | - test creator,
80 | - test runner,
81 | - query.
82 |
83 | In addition, since I have implemented several other schedulers (for single workflows), those also can be run on separate workflows. I have described it on scheduler part.
84 |
85 | ## test creator
`planner.py` takes the `testname`, `db-filename`, `outputfile` and an optional `write_plan` argument, and prepares a set of tests.
87 |
88 | ## test runner
89 | This task is done by `main1.py` file. It gets the `testname`, `db-filename`, `filenamepart`, `start_number`, `number_of_test_sets`, and `policy`.
90 |
91 | The `testname` argument is the master key for the table of tests. It helps to distinguish between different test cases, for example, it helps to query on different tests which have something in common in their name.
92 |
As shown in `main1.py`, `db-filename` is used to retrieve the aspects of the test and to store the results. I used SQLite for the db, because I wanted to share the database file via NTFS, so different instances of runners can connect to it using its mounted location.
94 |
`outputfile` is the file to store some results that are not going to be saved in the database. These data may be used to debug or to explain weird results.
96 |
97 | After the runner is finished, results can be queried in the next running process.
98 |
99 | ## query
`report_plots.py` is responsible for querying and plotting the results. There are several functions in this file which use SQL queries to retrieve the stored results in the database and use `matplotlib.pyplot` to plot the diagrams.
101 |
--------------------------------------------------------------------------------
/planner.py:
--------------------------------------------------------------------------------
1 | #
2 | #
3 | # Important notice:
4 | #
5 | # call this file via 'dbtest.py' output
6 | #
7 |
8 | import sys
9 | import socket
10 | import pickle
11 | from Definitions.MultiWorkflow.JobList import Constraint
12 | import Scheduler.ICPCP
13 | import Scheduler.HEFT
14 | import Scheduler.BHEFT
15 | import Definitions.Resources
16 | import Scheduler.BudgetPessimistic
17 | import Scheduler.DeadlineOptimisticAlpha
18 | import copy
19 | import Scheduler.Multi_Workflow
20 | import db.definitions
21 | import db.reader
22 | import db.writer
23 |
24 |
def main(args):
    """
    Plan a workload for a test stored in the database.

    Expected command line arguments (create tests via 'dbtest.py' first):
        args[0]  testname   -- key of the test in the tests table
        args[1]  dbfilename -- SQLite database file
        args[2]  outputfile -- pickle file that receives the prepared plan
        args[3]  optional literal 'write_plan' -- also store schedules in the db

    Every job of the workload is scheduled on a private copy of the
    reference resources (HEFT first, then BHEFT or IC-PCP depending on the
    job's constraint type) to derive a feasible budget/deadline value, and
    finally everything the runner needs is pickled to `outputfile`.
    """
    if len(args) < 3:
        # Bug fix: this used to test `len(args) < 1`, although args[0..2]
        # are all accessed unconditionally below.
        print('Required command line arguments are not specified\n'
              ' usage: python planner.py testname dbfilename outputfile ')
        sys.exit(1)
    # testname represents a test in the database (it is the key in the tests table)
    test_name = args[0]
    database_file = args[1]
    dumb_file = args[2]
    write_plan = False
    if len(args) > 3:
        if args[3] == 'write_plan':
            write_plan = True
    reader = db.reader.Reader(database_file)
    rows = reader.read_test(test_name)  # reading the test from the table
    row = rows.fetchone()
    test = db.definitions.Test(row)
    timeslot = test.time_slot
    bandwidth = test.bandwidth
    workload_len = test.workload_len
    # Per-resource-type (power, price, count) triples:
    powers = []
    prices = []
    numbers = []
    for r in test.resource_array:
        powers.append(r[0])
        prices.append(r[1])
        numbers.append(r[2])

    host_name = socket.gethostname()

    print("Hostname: {}".format(host_name))

    # Expand the per-type triples into flat per-instance lists:
    power_list, price_list, timeslot_list = [], [], []
    for i in range(len(test.resource_array)):
        power_list += [powers[i]] * numbers[i]
        price_list += [prices[i]] * numbers[i]
        # TODO: Tests must be changed, but it works for now (in case of change: both planner and main):
        timeslot_list += [60 * timeslot] * numbers[i]

    resource_spec = (power_list, price_list, timeslot_list)

    main_resources = Definitions.Resources.CostAwareResources(resource_spec[0], resource_spec[1], resource_spec[2],
                                                              bandwidth)

    # ----------Workload Generation:
    job, constraint, names, sizes = Scheduler.Multi_Workflow.make_workload(test)

    # ---------Schedule Jobs one by one on the reference system
    # ---------(it includes finding a good constraint for them and some measurements)
    makespan_list = []
    cost_list = []
    resources_set = []
    graph_set = []
    constraint_values = []

    for i in range(workload_len):
        # HEFT run gives the baseline cost/makespan of the job.
        resources = copy.deepcopy(main_resources)
        g = copy.deepcopy(job[i])
        Scheduler.HEFT.schedule(g, resources)
        cost = resources.plan_cost
        makespan = resources.makespan

        if constraint[i] is Constraint.budget:
            c = 'Budget'
            # Grow the budget in 20%-of-HEFT-cost steps until BHEFT
            # produces a plan that fits in it.
            budget_factor = 0.2
            while True:
                resources = copy.deepcopy(main_resources)
                g = copy.deepcopy(job[i])
                Scheduler.BHEFT.schedule(g, resources, cost * budget_factor)
                if cost * budget_factor >= resources.plan_cost:
                    break
                budget_factor += 0.2

            constraint_factor = budget_factor
            constraint_value = cost * budget_factor
        else:
            c = 'Deadline'
            deadline_factor = 1.2

            # Hand-tuned starting factors for the XXL workflows (IC-PCP
            # needs a much looser deadline there to find a feasible plan).
            if sizes[i] == 'XXL':
                if names[i] == 'Montage':
                    deadline_factor = 27
                elif names[i] == 'Sipht':
                    deadline_factor = 13
                elif names[i] == 'Inspiral':
                    deadline_factor = 4

            # Loosen the deadline until IC-PCP meets it; the first time it
            # is met the factor is doubled once more (first_computation),
            # then grown by 20% steps until it is met again.
            first_computation = True
            while True:
                resources = copy.deepcopy(main_resources)
                g = copy.deepcopy(job[i])
                Scheduler.ICPCP.schedule(g, resources, makespan * deadline_factor)
                if makespan * deadline_factor >= resources.makespan:
                    if not first_computation:
                        break
                    else:
                        deadline_factor *= 2
                        first_computation = False
                        continue
                deadline_factor *= 1.2
            constraint_factor = deadline_factor
            constraint_value = makespan * deadline_factor
        print("heft cost:{0:5.1f} | cost:{1:5.1f} | heft ms:{2:5.2f} | ms:{3:5.2f} "
              "| Nodes:{4:4d} | {5:>8} | factor: {6:5.2f}".format(cost, resources.plan_cost, makespan,
                                                                  resources.makespan, len(g.tasks) - 2, c,
                                                                  constraint_factor))

        # ---Store results for next use:
        makespan_list.append(resources.makespan)
        cost_list.append(resources.plan_cost)
        resources_set.append(copy.deepcopy(resources))
        graph_set.append(g)
        constraint_values.append(constraint_value)

        if write_plan:
            resources.write_schedule(database_file, test_name)

    # ---------- End of workload generation

    # ---------Starting Method:

    # Copy each scheduled task's EST/EFT back onto the task objects of the
    # budget-constrained jobs (the deadline-constrained ones already carry
    # est & lft from IC-PCP).
    for i in range(workload_len):
        if constraint[i] is Constraint.deadline:
            continue
        resources = resources_set[i]
        for tor in resources.tasksOfResource:
            for sch in tor:
                sch.task.est = sch.EST
                sch.task.eft = sch.EFT

    # ---------- End of sub-budget and sub-deadline assignments

    # ----------------------- storing everything needed with pickle:
    to_store = [test, numbers, resources_set, graph_set, makespan_list, cost_list,
                constraint_values, constraint, job, names]
    # Bug fix: the file object used to be left open (pickle.dump(..., open(...)));
    # close it deterministically so the dump is flushed even on interpreter exit.
    with open(dumb_file, 'wb') as out_file:
        pickle.dump(to_store, out_file)
177 |
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except Exception:
        # Bug fix: a bare `except:` also swallowed the SystemExit raised by
        # the usage check inside main() (and Ctrl-C); only genuine errors
        # are caught and reported now.
        print(" === ERROR :")
        e = sys.exc_info()
        for m in e:
            print(m)
186 |
--------------------------------------------------------------------------------
/Scheduler/DeadlineOptimisticAlpha.py:
--------------------------------------------------------------------------------
1 | import Definitions.Resources
2 | import Scheduler.HEFT
3 |
4 |
def schedule(g, resources, deadline, alpha=0.5, upward_rank_is_calculated=False, priority_list=None):
    """
    DeadlineOptimisticAlpha list scheduler.

    Walks the tasks in upward-rank priority order; before placing each task
    it stretches the task's sub-deadline by a factor that shrinks linearly
    as the sub-deadline approaches half of the workflow deadline, then asks
    the resource pool for the best placement.

    :type g: Graph
    :type resources: Definitions.Resources.CostAwareResources
    :type deadline: float
    :type alpha: float
    :param upward_rank_is_calculated: when True, the ranks already stored
        in ``g`` are reused instead of being recomputed.
    :param priority_list: optional pre-computed task ordering; derived from
        the upward ranks when None.
    """
    if not upward_rank_is_calculated:
        g.upward_rank(g.startID, resources.average_power, resources.bandwidth)
    ordering = priority_list
    if ordering is None:
        ordering = Scheduler.HEFT.list_of_task_id_on_upward_rank(g)

    limit = deadline / 2

    for current_id in ordering:
        current_task = g.tasks[current_id]
        # Sub-deadline relaxation factor; linear in the task's sub-deadline.
        beta = - alpha / limit * current_task.sub_deadline + alpha
        current_task.sub_deadline *= (1 + beta)
        # Ask the resource pool where this task runs best:
        est, runtime_on_resource, eft, resource_id, place_id, _ = \
            resources.select_resource(current_task)
        # Commit the placement.
        placement = Definitions.Resources.TaskSchedule(
            current_task, est, runtime_on_resource, eft, resource_id)
        resources.schedule(placement, place_id)
31 |
32 |
class SchedulerClass:
    """
    Incremental DeadlineOptimisticAlpha scheduler: places one task per call
    to schedule_next(), so a multi-workflow driver can interleave several
    jobs on shared resources.  The per-task sub-deadline relaxation is the
    same formula as in the module-level schedule() function.
    """

    def __init__(self, g, resources, deadline, alpha=0.0, upward_rank_is_calculated=False, priority_list=None):
        """
        :type g: Graph
        :type resources: Definitions.Resources.CostAwareResources
        :type deadline: float
        :type alpha: float
        :param upward_rank_is_calculated: when True, ranks in ``g`` are reused.
        :param priority_list: optional pre-computed task ordering.
        """
        if not upward_rank_is_calculated:
            g.upward_rank(g.startID, resources.average_power, resources.bandwidth)
        if priority_list is None:
            self.priority_list = Scheduler.HEFT.list_of_task_id_on_upward_rank(g)
        else:
            # Bug fix: a caller-supplied priority list was previously
            # ignored, leaving self.priority_list unset.
            self.priority_list = priority_list
        self.g = g
        self.resources = resources
        self.last_unscheduled_task_id = 0  # index into priority_list

        self.limit = deadline / 2
        self.alpha = alpha
        self.remaining_budget = 0
        self.total_budget = 0
        self.sum_share = 0  # sum of all tasks' sub_budget shares

    def set_budget(self, budget):
        """
        Record the job's overall budget.
        Must be called after setting task.sub_budgets.
        :param budget:
        :return:
        """
        self.remaining_budget = budget
        self.total_budget = budget
        self.sum_share = sum(map(lambda t: t.sub_budget, self.g.tasks.values()))

    def scheduled_task_ids(self):
        """Ids of the tasks that have already been placed, in priority order."""
        if self.finished:
            return self.priority_list[:]
        task_id_in_p_list = self.last_unscheduled_task_id
        if task_id_in_p_list == 0:
            return []
        else:
            # NOTE(review): this returns one element fewer than
            # priority_list[:last_unscheduled_task_id], which is the slice
            # used by next_ready_task()/next_event() -- confirm the
            # off-by-one is intentional.
            task_id_in_p_list -= 1
            scheduled = self.priority_list[:task_id_in_p_list]
            return scheduled

    def recalculate_sub_budget(self):
        """Rescale the next task's sub_budget against the remaining budget."""
        if self.finished:
            return
        task_id_in_p_list = self.last_unscheduled_task_id
        t_id = self.priority_list[task_id_in_p_list]
        task = self.g.tasks[t_id]
        unscheduled = self.priority_list[task_id_in_p_list:]
        sum_unscheduled = sum(map(lambda u: self.g.tasks[u].sub_budget, unscheduled))
        if sum_unscheduled == 0:
            return
        task.sub_budget = self.remaining_budget * task.sub_budget / sum_unscheduled

    def next_ready_task(self, arrival_time, verbose=0):
        """
        Finds the ready task with the highest upward rank at the given time.
        There may be several unscheduled tasks, but it is important that
        their predecessors are finished before the given time.

        Returns -1 if no task is found
        """
        if self.last_unscheduled_task_id == 0:
            # Nothing scheduled yet: the first task of the list is ready.
            return self.priority_list[self.last_unscheduled_task_id]

        candidates = self.priority_list[self.last_unscheduled_task_id:]
        scheduled_tasks = self.priority_list[:self.last_unscheduled_task_id]
        gname = self.g.name
        if verbose:
            print(f' candidates: {candidates}\n scheduleds: {scheduled_tasks}\n gname: {gname}')

        ready_tasks = []
        for t_id in candidates:
            task = self.g.tasks[t_id]
            unscheduled_predecessors = [p for p in task.predecessor if p not in scheduled_tasks]

            if verbose:
                print(f'task id: {t_id}\n unscheduled_predecessors: {unscheduled_predecessors}')

            if unscheduled_predecessors:
                continue
            predecessors_finished_after_current_time = [p for p in task.predecessor
                                                        if self.resources.job_task_schedule[gname][p].EFT > arrival_time]

            if verbose:
                print(f'task id: {t_id}\n predecessors_finished_after_current_time: {predecessors_finished_after_current_time}')

            if predecessors_finished_after_current_time:
                continue
            ready_tasks.append(t_id)
        if not ready_tasks:
            return -1
        # Among the ready tasks, pick the one earliest in the priority list.
        ready_tasks_with_index = [(self.priority_list.index(x), x) for x in ready_tasks]
        return sorted(ready_tasks_with_index)[0][1]

    def next_event(self, arrival_time, verbose=0):
        """
        Earliest EFT among already-scheduled tasks finishing after
        arrival_time, or None when there is no such task.
        """
        scheduled_tasks = self.priority_list[:self.last_unscheduled_task_id]
        gname = self.g.name

        if verbose:
            print(f'scheduled_tasks: {scheduled_tasks}')
            print(f'gname: {gname}')

        min_eft = None
        for t_id in scheduled_tasks:
            task_eft = self.resources.job_task_schedule[gname][t_id].EFT
            if verbose:
                print(f'task id:{t_id}, task EFT: {task_eft} ')
            if task_eft > arrival_time and (min_eft is None or task_eft < min_eft):
                min_eft = task_eft
        return min_eft

    def schedule_next(self, only_test=False, do_head_nodes=False, calc_resource_cost_change=False, arrival_time=0):
        """
        Place (or, with only_test=True, just evaluate) the next ready task.

        Returns (eft, cost, resource_id), or (eft, cost, cost_change) when
        only_test and calc_resource_cost_change are both True; returns None
        once the job is finished.
        """
        if self.finished:
            return
        t_id = self.next_ready_task(arrival_time)
        # NOTE(review): next_ready_task() may return -1 (no ready task);
        # that value is not handled here and would index the last task --
        # confirm callers only invoke this when a ready task exists.
        if t_id != self.priority_list[self.last_unscheduled_task_id] and not only_test:
            # Move the chosen task to the front of the unscheduled region.
            self.priority_list = self.priority_list[:self.last_unscheduled_task_id] + [t_id] + [x for x in self.priority_list[self.last_unscheduled_task_id:] if x != t_id]

        task = self.g.tasks[t_id]

        # Sub-deadline relaxation (same formula as the module-level schedule()).
        beta = - self.alpha / self.limit * task.sub_deadline + self.alpha
        task.sub_deadline *= (1 + beta)
        # resource selection:
        est, runtime_on_resource, eft, resource_id, place_id, cost = self.resources.select_resource(
            task, test=only_test, arrival_time=arrival_time)

        if not only_test:
            # scheduling:
            task_schedule = Definitions.Resources.TaskSchedule(task, est, runtime_on_resource, eft, resource_id)
            self.resources.schedule(task_schedule, place_id, do_head_nodes)
            self.last_unscheduled_task_id += 1
            return eft, cost, resource_id
        else:
            if calc_resource_cost_change:
                change = self.resources.calculate_share_cost_change(resource_id, est, eft, task.graph.name)
                return eft, cost, change
            return eft, cost, resource_id

    @property
    def finished(self):
        # True when every task of the priority list has been placed.
        return self.last_unscheduled_task_id >= len(self.priority_list)
174 |
--------------------------------------------------------------------------------
/Scheduler/BHEFT.py:
--------------------------------------------------------------------------------
1 | import math
2 | import Scheduler.HEFT
3 | import Definitions.Resources
4 |
5 |
def schedule(g, resources, budget, upward_rank_is_calculated=False, priority_list=None):
    """
    Schedules using BHEFT algorithm (Budget-constrained HEFT).

    Tasks are visited in upward-rank order; each task gets a "current task
    budget" share (ctb_k) of the overall budget, and among the resources it
    can afford the one with the earliest finish time is chosen (falling
    back to fastest/cheapest when nothing affordable exists).

    :type g: Graph
    :type resources: Definitions.Resources.CostAwareResources
    :type budget: float
    :param upward_rank_is_calculated: when True, ranks in ``g`` are reused.
    :param priority_list: optional pre-computed task ordering.
    """
    if not upward_rank_is_calculated:
        g.upward_rank(g.startID, resources.average_power, resources.bandwidth)
    if priority_list is None:
        priority_list = Scheduler.HEFT.list_of_task_id_on_upward_rank(g)

    # Average unit price of computation across all resources.
    sum_budget_remaining = 0
    for i in range(0, resources.len):
        sum_budget_remaining += resources.price[i] / (resources.timeslot[i] * resources.power[i])
    sum_budget_allocated = 0
    average_price_of_computation = sum_budget_remaining / resources.len

    # Expected cost of the whole (still unscheduled) workflow.
    sum_weight = math.fsum(map(lambda t: t.weight, g.tasks.values()))
    sum_budget_remaining = average_price_of_computation * sum_weight
    for tId in priority_list:
        # -1 acts as a "no best candidate yet" sentinel for all six values.
        est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \
            -1, -1, -1, -1, -1, -1
        # sab_k: spare budget (total budget minus what is already spent and
        # what the remaining tasks are expected to need).
        sab_k = budget - sum_budget_allocated - sum_budget_remaining
        # ctb_k: this task's budget share, enlarged when spare budget exists.
        ctb_k = g.tasks[tId].weight * average_price_of_computation
        if sab_k >= 0 and sum_budget_remaining != 0:
            ctb_k += sab_k + ctb_k * sab_k / sum_budget_remaining
        affordable_found = False
        for r in range(0, resources.len):
            start_time, eft, runtime_on_resource, place_id, cost = resources.calculate_eft_and_cost(
                g.tasks[tId], r)
            # Accept r when it is: affordable and none found yet; affordable
            # and faster than the current affordable best; or, while nothing
            # affordable exists, the fastest (spare budget available) or the
            # cheapest (over budget) candidate so far.
            if (not affordable_found and cost <= ctb_k) or (
                    affordable_found and cost <= ctb_k and eft < eft_best) or (
                    not affordable_found and sab_k >= 0 and (eft < eft_best or eft_best == -1)) or(
                    not affordable_found and sab_k < 0 and (cost < cost_best or cost_best == -1)
            ):
                if cost <= ctb_k:
                    affordable_found = True
                est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \
                    start_time, eft, runtime_on_resource, place_id, r, cost
            continue  # no-op: last statement of the loop body

        # Commit the chosen placement and update the budget bookkeeping.
        task_schedule = Definitions.Resources.TaskSchedule(g.tasks[tId], est_best, runtime_on_resource_best, eft_best,
                                                           resource_id_best)
        sum_budget_remaining -= g.tasks[tId].weight * average_price_of_computation
        sum_budget_allocated += cost_best
        resources.schedule(task_schedule, place_id_best)
53 |
54 | # sample call series for this (above) algorithm:
55 | # min_resources = Definitions.Resources.CostAwareResources([1], [1], [5], BW)
56 | # Scheduler.HEFT.schedule(g, min_resources)
57 | # cost_min = min_resources.plan_cost
58 | #
59 | # max_resources = Definitions.Resources.CostAwareResources([1, 2], [1, 3], [5, 5], BW)
60 | # Scheduler.HEFT.schedule(g, max_resources)
61 | # cost_max = max_resources.plan_cost
62 | #
63 | # for alpha in range(3, 10, 2):
64 | # budget = cost_min + alpha / 10 * (cost_max - cost_min)
65 | # resources = Definitions.Resources.CostAwareResources([1, 2], [1, 3], [5, 5], BW)
66 | # Scheduler.BHEFT.schedule(g, resources, budget)
67 | # # resources.show_schedule(g.name)
68 | # # print('--')
69 | # cost = resources.plan_cost
70 | # print(budget, cost, resources.makespan)
71 | # print('--')
72 |
73 |
class SchedulerClass:
    """
    Incremental BHEFT scheduler: keeps the same budget bookkeeping as the
    module-level schedule() function but places one task per call to
    schedule_next(), so a multi-workflow driver can interleave several
    jobs on shared resources.
    """

    def __init__(self, g, resources, budget, upward_rank_is_calculated=False, priority_list=None):
        """
        :type g: Graph
        :type resources: Definitions.Resources.CostAwareResources
        :type budget: float
        :param upward_rank_is_calculated: when True, ranks in ``g`` are reused.
        :param priority_list: optional pre-computed task ordering.
        """
        if not upward_rank_is_calculated:
            g.upward_rank(g.startID, resources.average_power, resources.bandwidth)
        if priority_list is None:
            self.priority_list = Scheduler.HEFT.list_of_task_id_on_upward_rank(g)
        else:
            # Bug fix: a caller-supplied priority list was previously
            # ignored, leaving self.priority_list unset.
            self.priority_list = priority_list
        self.g = g
        self.resources = resources
        self.last_unscheduled_task_id = 0  # index into priority_list

        # Average unit price of computation across all resources.
        self.sum_budget_remaining = 0
        for i in range(0, resources.len):
            self.sum_budget_remaining += resources.price[i] / (resources.timeslot[i] * resources.power[i])
        self.sum_budget_allocated = 0
        self.average_price_of_computation = self.sum_budget_remaining / resources.len

        # Expected cost of the whole (still unscheduled) workflow.
        sum_weight = math.fsum(map(lambda t: t.weight, g.tasks.values()))
        self.sum_budget_remaining = self.average_price_of_computation * sum_weight
        self.budget = budget

    def schedule_next(self, only_test=False):
        """
        Place the next task of the priority list.

        When only_test is True nothing is committed and the would-be
        (eft_best, cost_best) pair is returned instead.
        """
        if self.last_unscheduled_task_id not in range(0, len(self.priority_list)):
            return
        t_id = self.priority_list[self.last_unscheduled_task_id]
        task = self.g.tasks[t_id]
        # -1 acts as a "no best candidate yet" sentinel for all six values.
        est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \
            -1, -1, -1, -1, -1, -1
        # Spare budget and this task's budget share (see the module-level
        # schedule() for the same computation).
        sab_k = self.budget - self.sum_budget_allocated - self.sum_budget_remaining
        ctb_k = task.weight * self.average_price_of_computation
        if sab_k >= 0 and self.sum_budget_remaining != 0:
            ctb_k += sab_k + ctb_k * sab_k / self.sum_budget_remaining
        affordable_found = False
        for r in range(0, self.resources.len):
            start_time, eft, runtime_on_resource, place_id, cost = self.resources.calculate_eft_and_cost(task, r)
            # Affordable-first selection, then earliest finish; fall back to
            # fastest (spare budget) or cheapest (over budget).
            if (not affordable_found and cost <= ctb_k) or (
                    affordable_found and cost <= ctb_k and eft < eft_best) or (
                    not affordable_found and sab_k >= 0 and (eft < eft_best or eft_best == -1)) or(
                    not affordable_found and sab_k < 0 and (cost < cost_best or cost_best == -1)
            ):
                if cost <= ctb_k:
                    affordable_found = True
                est_best, eft_best, runtime_on_resource_best, place_id_best, resource_id_best, cost_best = \
                    start_time, eft, runtime_on_resource, place_id, r, cost
            continue

        if not only_test:
            task_schedule = Definitions.Resources.TaskSchedule(task, est_best, runtime_on_resource_best, eft_best,
                                                               resource_id_best)
            self.sum_budget_remaining -= task.weight * self.average_price_of_computation
            self.sum_budget_allocated += cost_best
            self.resources.schedule(task_schedule, place_id_best)
            self.last_unscheduled_task_id += 1
        else:
            return eft_best, cost_best

    @property
    def finished(self):
        # True when every task of the priority list has been placed.
        return self.last_unscheduled_task_id >= len(self.priority_list)
132 |
133 | # sample call series for this (above) algorithm:
134 | # min_resources = Definitions.Resources.CostAwareResources([1], [1], [5], BW)
135 | # Scheduler.HEFT.schedule(g, min_resources)
136 | # cost_min = min_resources.plan_cost
137 | #
138 | # max_resources = Definitions.Resources.CostAwareResources([1, 2], [1, 3], [5, 5], BW)
139 | # Scheduler.HEFT.schedule(g, max_resources)
140 | # cost_max = max_resources.plan_cost
141 | #
142 | # for alpha in range(3, 10, 2):
143 | # budget = cost_min + alpha / 10 * (cost_max - cost_min)
144 | # resources = Definitions.Resources.CostAwareResources([1, 2], [1, 3], [5, 5], BW)
145 | # bheft = Scheduler.BHEFT.SchedulerClass(g, resources, budget)
# while not bheft.finished:
147 | # bheft.schedule_next()
148 | # # resources.show_schedule(g.name)
149 | # # print('--')
150 | # cost = resources.plan_cost
151 | # print(budget, cost, resources.makespan)
152 | # print('--')
153 |
--------------------------------------------------------------------------------
/Definitions/MultiWorkflow/JobList.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import copy
4 | from Definitions.Graph import Graph
5 | from Definitions.Resources import CostAwareResources, Constraint
6 | import Scheduler.BHEFT
7 | import Scheduler.ICPCP
8 | import Scheduler.DeadlineOptimisticAlpha
9 | from Scheduler import Multi_Workflow
10 |
11 |
class JobItem:
    """
    A single workflow ("job") in the multi-workflow scheduler's job list.

    A JobItem keeps two copies of the workflow graph: `self.g`, the graph
    that is scheduled incrementally on the shared resources, and
    `self.reference_graph`, a reference plan produced by BHEFT (budget
    constraint) or IC-PCP (deadline constraint).  The reference plan
    supplies the per-task sub-deadlines/sub-budgets and feeds the
    `critical`/`critical_now` measures used by the multi-workflow policies
    to pick the next job.
    """

    def __init__(self, g=None, constraint_type=Constraint.none, constraint=0,
                 resources=None, reference_graph=None, reference_resources=None):
        """
        :param g: workflow graph to schedule (a fresh empty Graph when None).
        :param constraint_type: Constraint.budget or Constraint.deadline.
        :param constraint: the budget or deadline value (must be > 0).
        :param resources: shared CostAwareResources (empty set when None).
        :param reference_graph: pre-computed reference plan; built here when None.
        :param reference_resources: resources holding the reference plan.
        """
        # Bug fix: the defaults used to be `g=Graph()` and
        # `resources=CostAwareResources([], [], [], 0)`.  Default values are
        # evaluated once at class-definition time, so every call shared (and
        # could mutate) the same Graph/CostAwareResources instances.  Use
        # None sentinels and build fresh objects per call instead.
        if g is None:
            g = Graph()
        if resources is None:
            resources = CostAwareResources([], [], [], 0)
        if constraint_type is Constraint.none:
            raise Exception('Constraint type must be defined')
        elif constraint <= 0:
            raise Exception('Constraint must be a non-zero positive value')

        self.g = copy.deepcopy(g)
        self.type = constraint_type
        self.constraint = constraint
        self.reference_graph = reference_graph

        if constraint_type is Constraint.budget:
            if self.reference_graph is None:
                # Build the reference plan with BHEFT on a private resource copy.
                self.reference_graph = copy.deepcopy(g)
                resources_copy = copy.deepcopy(resources)
                Scheduler.BHEFT.schedule(self.reference_graph, resources_copy, constraint)
                self.reference_graph.resources = resources_copy
                self.reference_graph.cost = resources_copy.plan_cost
            self.reference_graph.budget = constraint
            self.reference_graph.critical_first = self.reference_graph.budget / self.reference_graph.cost
            self.critical_first = self.reference_graph.critical_first
            # NOTE(review): the lines below read the *parameters*
            # `reference_graph`/`reference_resources`, not the (possibly just
            # built) `self.reference_graph`; they fail when those parameters
            # are None -- confirm callers always pass both for budget jobs.
            for t in reference_graph.tasks.values():
                t.lft = reference_resources.job_task_schedule[reference_graph.name][t.id].EFT
            Multi_Workflow.assign_sub_deadlines(self.reference_graph, self.g, asap=False)
            Multi_Workflow.assign_sub_budgets(self.reference_graph, self.g, reference_resources)
            self.scheduler = Scheduler.DeadlineOptimisticAlpha.SchedulerClass(
                self.g, resources, reference_graph.makespan)
        elif constraint_type is Constraint.deadline:
            if self.reference_graph is None:
                # Build the reference plan with IC-PCP on a private resource copy.
                self.reference_graph = copy.deepcopy(g)
                resources_copy = copy.deepcopy(resources)
                Scheduler.ICPCP.schedule(self.reference_graph, resources_copy, constraint)
                self.reference_graph.resources = resources_copy
                self.reference_graph.makespan = resources_copy.makespan
            self.reference_graph.deadline = constraint
            self.reference_graph.critical_first = self.reference_graph.deadline / self.reference_graph.makespan
            self.critical_first = self.reference_graph.critical_first
            Multi_Workflow.assign_sub_deadlines(self.reference_graph, self.g, asap=True)
            Multi_Workflow.assign_sub_budgets(self.reference_graph, self.g, reference_resources)
            self.scheduler = Scheduler.DeadlineOptimisticAlpha.SchedulerClass(self.g, resources, constraint)
        else:
            raise Exception("Define a constraint!")
        Multi_Workflow.assign_cost_for_each_task(self.reference_graph, reference_resources)
        self.sum_w = sum(map(lambda task: task.weight, self.reference_graph.tasks.values()))
        self.overall_budget = reference_resources.plan_cost
        self.scheduler.set_budget(reference_resources.plan_cost)

    @property
    def critical_now(self):
        """
        Criticality of the job from its already-scheduled head tasks: the
        average ratio of reference finish time to current finish time, plus
        an analogous cost ratio.  Finished jobs return a huge value so they
        are never selected again; late deadline jobs return a negative
        (urgent) value.
        """
        if self.scheduler.finished:
            return 1e10

        if self.g.name in self.scheduler.resources.head_nodes:
            head_nodes = self.scheduler.resources.head_nodes[self.g.name]
        else:
            head_nodes = set()

        def calc_cr(task_id):
            # Ratio of the reference plan's finish time to the current one.
            current_finish = self.scheduler.resources.job_task_schedule[self.g.name][task_id].EFT
            # NOTE(review): when the reference task lacks an 'eft' attribute,
            # reference_finish stays unbound and the return below raises
            # NameError -- confirm every head node carries an eft.
            if hasattr(self.reference_graph.tasks[task_id], 'eft'):
                reference_finish = self.reference_graph.tasks[task_id].eft
            if current_finish == 0:
                return 0
            return float(reference_finish) / current_finish

        if len(head_nodes) == 0:
            average = 0
        else:
            average = sum(map(calc_cr, head_nodes)) / len(head_nodes)

        time_critical = average

        # A deadline job already running late gets a negative (urgent) value.
        if time_critical < 1 and self.type is Constraint.deadline:
            return -2 * time_critical

        # Cost computations:
        scheduled_tasks = self.scheduler.scheduled_task_ids()
        reference_cost = sum(map(lambda t: self.reference_graph.tasks[t].sub_budget, scheduled_tasks))

        if self.g.name in self.scheduler.resources.costs:
            current_cost = self.scheduler.resources.costs[self.g.name]
        else:
            current_cost = 0

        if current_cost == 0:
            cost_critical = self.critical_first
        else:
            cost_critical = float(reference_cost) / current_cost

        return time_critical + cost_critical

    @property
    def critical(self):
        """
        Criticality of the job if its next ready task were scheduled right
        now (a dry run via schedule_next(only_test=True)).  Currently only
        the cost ratio is returned; the time-based measure is computed but
        deliberately unused (see the commented return at the end).
        """
        if self.scheduler.finished:
            return 1e10

        r, c, changes = self.scheduler.schedule_next(only_test=True, calc_resource_cost_change=True)
        rank_number = self.scheduler.last_unscheduled_task_id
        t_id = self.scheduler.priority_list[rank_number]
        if not hasattr(self.reference_graph.tasks[t_id], 'cost'):
            return self.critical_first

        # Head nodes: current frontier minus this task's predecessors, plus
        # the task itself.
        predecessors = self.g.tasks[t_id].predecessor.keys()
        if len(predecessors) == 0:
            head_nodes = set()
        else:
            head_nodes = self.scheduler.resources.head_nodes[self.g.name].difference(predecessors)
        head_nodes.add(t_id)

        def calc_cr(task_id):
            if task_id == t_id:
                current_finish = r  # the dry-run EFT of the candidate task
            else:
                current_finish = self.scheduler.resources.job_task_schedule[self.g.name][task_id].EFT
            # TODO: reference finish must become the deadline (in case of deadline job)?!
            # NOTE(review): the print() below only marks the missing-'eft'
            # case; the following attribute access still raises then.
            if not hasattr(self.reference_graph.tasks[task_id], 'eft'):
                print()
            reference_finish = self.reference_graph.tasks[task_id].eft
            if current_finish == 0:
                return 0
            return float(reference_finish) / current_finish

        average = sum(map(calc_cr, head_nodes)) / len(head_nodes)
        if self.g.name in self.scheduler.resources.sum_weight_scheduled:
            sum_weight_scheduled = self.scheduler.resources.sum_weight_scheduled[self.g.name]
        else:
            sum_weight_scheduled = 0
        sum_weight_unscheduled = self.sum_w - sum_weight_scheduled
        time_critical = (sum_weight_scheduled * average + sum_weight_unscheduled * self.critical_first) / self.sum_w

        if self.g.name in self.scheduler.resources.costs:
            current_cost = self.scheduler.resources.costs[self.g.name]
        else:
            current_cost = 0
        reference_cost = float(self.overall_budget) * sum_weight_scheduled / self.sum_w

        if current_cost == 0:
            cost_critical = self.critical_first
        else:
            cost_critical = float(reference_cost) / current_cost

        return cost_critical
        # return time_critical + cost_critical
186 |
--------------------------------------------------------------------------------
/Scheduler/ICPCP.py:
--------------------------------------------------------------------------------
1 | import math
2 | import Scheduler.HEFT
3 | import Definitions.Resources
4 |
5 |
class CPU:
    """Task-to-CPU placement bookkeeping with per-CPU EST/EFT tracking."""

    def __init__(self, number_of_cpus):
        self.len = number_of_cpus
        # one set of task ids per cpu, plus a reverse index task_id -> cpu_id
        self.cpu = [set() for _ in range(number_of_cpus)]
        self.tasks = {}
        self.eft = [0] * number_of_cpus
        self.est = list(self.eft)
        self.backup_eft = []
        self.backup_est = []

    def add_task(self, task_id, cpu_id):
        """Place task_id on cpu_id and record the reverse mapping."""
        self.tasks[task_id] = cpu_id
        self.cpu[cpu_id].add(task_id)

    def add_tasks(self, task_list, cpu_id):
        """Place every task of task_list on the same cpu."""
        for task_id in task_list:
            self.add_task(task_id, cpu_id)

    def cpu_task(self, task_id):
        """Return the cpu hosting task_id, or -1 when it is unassigned."""
        return self.tasks.get(task_id, -1)

    def task_cpu(self, cpu_id):
        """Return the set of task ids currently assigned to cpu_id."""
        return self.cpu[cpu_id]

    def remove_task(self, task_id):
        """Drop task_id from both indexes; no-op when it is not assigned."""
        if task_id in self.tasks:
            self.cpu[self.tasks.pop(task_id)].remove(task_id)

    def remove_tasks(self, task_list):
        """Remove every task of task_list from the placement."""
        for task_id in task_list:
            self.remove_task(task_id)

    def backup(self):
        """Snapshot EST/EFT, then reset the live arrays to zero."""
        self.backup_eft = list(self.eft)
        self.backup_est = list(self.est)
        self.eft = [0] * self.len
        self.est = list(self.eft)

    def restore(self):
        """Bring back the EST/EFT snapshot taken by backup()."""
        self.eft = list(self.backup_eft)
        self.est = list(self.backup_est)

    def cost_resource(self, price, timeslot, resource_id):
        """Cost of one resource: its busy span rounded up to whole timeslots."""
        busy_span = self.eft[resource_id] - self.est[resource_id]
        return math.ceil(busy_span / timeslot) * price

    def cost_all(self, prices, timeslots):
        """Total cost over every resource."""
        return sum(self.cost_resource(prices[r], timeslots[r], r)
                   for r in range(self.len))
59 |
60 |
def calc_est(g, priority_list, resources, cpu_s=None, task_id=-1):
    """
    Computes the earliest start time (EST) of each task, writing it to
    g.tasks[t].est, walking priority_list forward from task_id (or from the
    beginning when task_id == -1). Also advances the per-resource EST/EFT
    bookkeeping in cpu_s for tasks that already have a resource assignment.

    :param g: workflow graph; tasks expose .weight and .predecessor (dict
              parent_id -> transferred data size)
    :param priority_list: task ids in scheduling priority order
    :param resources: provides .power (list) and .bandwidth
    :param cpu_s: CPU placement bookkeeping; a fresh CPU(1) when omitted
    :param task_id: first task to (re)compute, -1 for the whole list
    """
    if cpu_s is None:
        # The old default `cpu_s=CPU(1)` was a single instance created at
        # import time and silently shared by every call (mutable default
        # argument); build a fresh one per call instead.
        cpu_s = CPU(1)
    power_max = max(resources.power)

    start_index = 0 if task_id == -1 else priority_list.index(task_id)

    for i in range(start_index, len(priority_list)):
        t_id = priority_list[i]
        task = g.tasks[t_id]
        resource_id = cpu_s.cpu_task(t_id)
        if resource_id == -1:
            co_located_tasks = []
        else:
            co_located_tasks = cpu_s.task_cpu(resource_id)

        # EST is the latest "parent finish + transfer" over all parents.
        max_est = 0
        for parent, data_size in task.predecessor.items():
            network_latency = data_size / resources.bandwidth
            parent_resource_id = cpu_s.cpu_task(parent)
            if parent_resource_id == -1:
                # unplaced parent: assume the fastest resource
                parent_runtime = g.tasks[parent].weight / power_max
            else:
                if parent in co_located_tasks:
                    # same resource: no transfer cost
                    network_latency = 0
                parent_runtime = g.tasks[parent].weight / resources.power[parent_resource_id]
            if not hasattr(g.tasks[parent], 'est'):
                # parent not yet computed; skip it (matches previous behavior)
                continue
            est = g.tasks[parent].est + parent_runtime + network_latency
            # (the old `... if max_est != -1 else est` guard was dead code:
            # max_est starts at 0 and is never set to -1)
            max_est = max(est, max_est)

        if resource_id != -1:
            # Cannot start before the resource becomes free.
            if max_est < cpu_s.eft[resource_id]:
                max_est = cpu_s.eft[resource_id]
            runtime = task.weight / resources.power[resource_id]
            if max_est + runtime > cpu_s.eft[resource_id]:
                if cpu_s.eft[resource_id] == 0:  # task is the resource's 1st task
                    cpu_s.est[resource_id] = max_est
                cpu_s.eft[resource_id] = max_est + runtime
        task.est = max_est
101 |
102 |
def calc_lft(g, deadline, priority_list, resources, cpu_s=None, task_id=-1):
    """
    Computes the latest finish time (LFT) of each task, writing it to
    g.tasks[t].lft, walking priority_list backward from task_id (or from the
    end when task_id == -1). A task with no children gets the deadline.

    :param g: workflow graph; tasks expose .weight and .successor (dict
              child_id -> transferred data size)
    :param deadline: overall workflow deadline (upper bound for every LFT)
    :param priority_list: task ids in scheduling priority order
    :param resources: provides .power (list) and .bandwidth
    :param cpu_s: CPU placement bookkeeping; a fresh CPU(1) when omitted
    :param task_id: last task to (re)compute, -1 for the whole list
    """
    if cpu_s is None:
        # The old default `cpu_s=CPU(1)` was one shared module-level instance
        # (mutable default argument); build a fresh one per call instead.
        cpu_s = CPU(1)
    power_max = max(resources.power)

    if task_id == -1:
        start_index = len(priority_list) - 1
    else:
        start_index = priority_list.index(task_id)

    for i in range(start_index, -1, -1):
        t_id = priority_list[i]
        task = g.tasks[t_id]
        resource_id = cpu_s.cpu_task(t_id)
        if resource_id == -1:
            co_located_tasks = []
        else:
            co_located_tasks = cpu_s.task_cpu(resource_id)

        # LFT is the tightest "child LFT - child runtime - transfer" bound,
        # capped by the deadline.
        min_lft = deadline
        for child, data_size in task.successor.items():
            network_latency = data_size / resources.bandwidth
            child_resource_id = cpu_s.cpu_task(child)
            if child_resource_id == -1:
                # unplaced child: assume the fastest resource
                child_runtime = g.tasks[child].weight / power_max
            else:
                if child in co_located_tasks:
                    # same resource: no transfer cost
                    network_latency = 0
                child_runtime = g.tasks[child].weight / resources.power[child_resource_id]

            lft = g.tasks[child].lft - child_runtime - network_latency
            min_lft = min(lft, min_lft)
        task.lft = min_lft
133 |
134 |
def put_path_on_resource(pcp, g, resources, resource_id, cpu_s=None, just_check=True):
    """
    Tries placing a partial critical path on one resource and reports the
    feasibility and cost impact of that placement.

    :param pcp: list of task ids forming the path (execution order)
    :param g: Definitions.Graph
    :param resources: cost-aware resources (price, timeslot, priority_list)
    :param resource_id: index of the candidate resource
    :param cpu_s: CPU placement bookkeeping; a fresh CPU(1) when omitted
    :param just_check: when True the trial placement is rolled back afterwards;
                       when False the path stays assigned to the resource
    :return: (feasible, cost_delta, min_flexibility) — feasible means no
             task's window (lft - est) went negative; cost_delta is the total
             cost change caused by the placement
    """
    if cpu_s is None:
        # fresh per call; the old `cpu_s=CPU(1)` default was a single shared
        # module-level instance (mutable default argument)
        cpu_s = CPU(1)

    cost_before = cpu_s.cost_all(resources.price, resources.timeslot)

    # Snapshot EST/EFT bookkeeping so a trial can be undone.
    cpu_s.backup()

    cpu_s.add_tasks(pcp, resource_id)
    calc_est(g, resources.priority_list, resources, cpu_s)  # , pcp[0])
    calc_lft(g, g.tasks[g.endID].lft, resources.priority_list, resources, cpu_s, pcp[-1])
    cost_after = cpu_s.cost_all(resources.price, resources.timeslot)
    # The tightest remaining slack over ALL tasks; negative means infeasible.
    min_flexibility = min(g.tasks[x].lft - g.tasks[x].est for x in g.tasks.keys())

    if just_check:
        cpu_s.remove_tasks(pcp)
        cpu_s.restore()

    return min_flexibility >= 0, cost_after - cost_before, min_flexibility
160 |
161 |
def assign_path(pcp, g, resources, cpu_s=None):
    """
    Assigns the whole path `pcp` to the cheapest feasible resource (ties
    broken by larger scheduling flexibility) and marks its tasks scheduled.
    When no resource can satisfy every time window, falls back to the
    cheapest empty resource instead of failing.
    """
    if cpu_s is None:
        # fresh per call; the old `cpu_s=CPU(1)` default was shared across calls
        cpu_s = CPU(1)
    min_cost = -1
    selected_resource = -1
    max_flexibility = -1
    for resource_id in range(resources.len):
        possible, cost, flexibility = put_path_on_resource(pcp, g, resources, resource_id, cpu_s, just_check=True)
        if not possible:
            continue
        if min_cost == -1 or cost < min_cost or (cost == min_cost and flexibility > max_flexibility):
            selected_resource = resource_id
            min_cost = cost
            max_flexibility = flexibility
    if selected_resource == -1:
        # no feasible resource: best-effort fallback instead of raising
        selected_resource = resources.get_cheapest_empty_resource()
    put_path_on_resource(pcp, g, resources, selected_resource, cpu_s, just_check=False)
    for t in pcp:
        g.tasks[t].scheduled = True
180 |
181 |
def assign_parents(t_id, g, resources, cpu_s=None):
    """
    Recursively schedules every unscheduled ancestor of `t_id` (IC-PCP
    style): repeatedly extracts the partial critical path (PCP) of the most
    time-constrained unscheduled parents, assigns it to a resource, then
    recurses into the newly scheduled tasks' own parents.
    """
    if cpu_s is None:
        # fresh per call; the old `cpu_s=CPU(1)` default was a single shared
        # module-level instance (mutable default argument)
        cpu_s = CPU(1)

    def has_unscheduled(task_list):
        # True when at least one listed task is still unscheduled.
        # (Replaces a no-break for/else that inspected the same condition.)
        return any(not g.tasks[t].scheduled for t in task_list)

    def critical_parent_id(task_id):
        # The unscheduled parent with the tightest window (minimum lft - est).
        min_slack = -1
        chosen = -1
        for p in g.tasks[task_id].predecessor.keys():
            if g.tasks[p].scheduled:
                continue
            slack = g.tasks[p].lft - g.tasks[p].est
            if min_slack == -1 or slack < min_slack:
                min_slack = slack
                chosen = p
        return chosen

    while has_unscheduled(g.tasks[t_id].predecessor.keys()):
        # Build the PCP by walking up through the most critical parents.
        pcp = []
        ti = t_id
        while has_unscheduled(g.tasks[ti].predecessor.keys()):
            critical_parent = critical_parent_id(ti)
            pcp.insert(0, critical_parent)
            ti = critical_parent
        assign_path(pcp, g, resources, cpu_s)

        # EST/LFT values are refreshed inside assign_path.

        for ti in pcp:
            assign_parents(ti, g, resources, cpu_s)
218 |
219 |
def schedule(g, resources, deadline, upward_rank_is_calculated=False, priority_list=None, no_change_on_resources=False):
    """
    Schedules graph `g` with a variation of the IC-PCP algorithm; tasks are
    ordered on resources by their upward rank.
    :type resources: Definitions.Resources.CostAwareResources
    :type deadline: float
    """
    if not upward_rank_is_calculated:
        g.upward_rank(g.startID, resources.average_power, resources.bandwidth)
    if priority_list is None:
        priority_list = Scheduler.HEFT.list_of_task_id_on_upward_rank(g)

    # Reset per-task scheduling state; only the dummy entry/exit tasks
    # start out as already scheduled.
    for task in g.tasks.values():
        task.scheduled = False
        task.delay = 0
    g.tasks[g.startID].scheduled = True
    g.tasks[g.endID].scheduled = True

    cpu_s = CPU(resources.len)
    resources.job_task_schedule[g.name] = {}
    calc_est(g, priority_list, resources, cpu_s)
    calc_lft(g, deadline, priority_list, resources, cpu_s)
    resources.priority_list = priority_list
    assign_parents(g.endID, g, resources, cpu_s)

    # Materialize the computed placement as TaskSchedule records.
    for t_id in resources.priority_list:
        task = g.tasks[t_id]
        resource_id = cpu_s.cpu_task(t_id)
        runtime_on_resource = task.weight / resources.power[resource_id]
        task.eft = task.est + runtime_on_resource
        record = Definitions.Resources.TaskSchedule(task, task.est, runtime_on_resource, task.eft, resource_id)
        if not no_change_on_resources:
            resources.schedule(record)

    if no_change_on_resources:
        del resources.priority_list
259 |
260 | # HOW to USE:
261 | # resources = Definitions.Resources.CostAwareResources([1] * 2 + [2] * 2, [1] * 2 + [3] * 2, [5] * 4, BW)
262 | # Scheduler.ICPCP.schedule(g, resources, deadline=38)
263 |
--------------------------------------------------------------------------------
/workflows/L.txt:
--------------------------------------------------------------------------------
1 | Epigenomics, 101, 102| 1.1, 25, 5.02905e+06| 1.41, 26, 4.69792e+06| 1.4, 27, 5.75466e+06| 0.94, 28, 4.78347e+06| 0.83, 29, 4.10445e+06| 1.06, 30, 5.94542e+06| 0.51, 31, 2.88627e+06| 1.37, 32, 4.93463e+06| 1.9, 33, 6.89952e+06| 0.84, 34, 3.93838e+06| 1.06, 35, 5.98335e+06| 1.45, 36, 5.13288e+06| 0.53, 37, 2.0781e+06| 1, 38, 3.60047e+06| 2.36, 39, 1.08095e+07| 1.02, 40, 4.88687e+06| 0.45, 41, 2.09412e+06| 1.23, 42, 7.60661e+06| 0.41, 43, 2.08658e+06| 1.08, 44, 6.336e+06| 3.26, 45, 8.51164e+06| 0.5, 46, 2.74233e+06| 0.95, 47, 4.5618e+06| 0.57, 48, 3.00943e+06| 0.57, 49, 4.27401e+06| 0.26, 50, 4.13538e+06| 0.4, 51, 3.44773e+06| 0.42, 52, 5.02546e+06| 0.27, 53, 2.58195e+06| 0.38, 54, 3.41752e+06| 0.13, 55, 2.26488e+06| 0.34, 56, 3.68286e+06| 0.46, 57, 6.62616e+06| 0.3, 58, 4.21561e+06| 0.43, 59, 4.44574e+06| 0.36, 60, 3.15817e+06| 0.18, 61, 1.37867e+06| 0.24, 62, 2.18255e+06| 0.73, 63, 1.00514e+07| 0.35, 64, 3.4064e+06| 0.24, 65, 1.46661e+06| 0.38, 66, 4.29424e+06| 0.13, 67, 1.76305e+06| 0.69, 68, 4.464e+06| 0.53, 69, 1.1559e+07| 0.15, 70, 2.35618e+06| 0.33, 71, 4.48146e+06| 0.22, 72, 2.23923e+06| 0.88, 73, 1.0266e+06| 0.55, 74, 1.03059e+06| 0.42, 75, 830621| 0.96, 76, 996426| 0.26, 77, 684915| 0.48, 78, 816684| 0.42, 79, 691546| 0.6, 80, 914862| 1.02, 81, 1.42621e+06| 0.8, 82, 988955| 0.67, 83, 1.02776e+06| 0.41, 84, 725432| 0.2, 85, 288629| 0.41, 86, 406202| 1.29, 87, 2.24412e+06| 0.43, 88, 830606| 0.21, 89, 365008| 0.57, 90, 1.11137e+06| 0.28, 91, 417622| 0.82, 92, 1.07565e+06| 1.99, 93, 3.22314e+06| 0.3, 94, 509670| 0.74, 95, 1.0849e+06| 0.25, 96, 545816|13132.4, 97, 1.11587e+06|16543.1, 97, 1.12021e+06|19949.8, 97, 902848|18058, 97, 1.08307e+06|20347.2, 97, 744472|14667.5, 97, 887700|23571.5, 97, 751680|18115.7, 97, 994415|13362.1, 97, 1.55022e+06|19854.1, 97, 1.07495e+06|13508.8, 97, 1.11713e+06|11553.9, 97, 788513|14189, 97, 313727|19348.2, 97, 441523|13683.9, 97, 2.43927e+06|10238.6, 97, 902832|21339.4, 97, 396747|14142.3, 97, 1.20802e+06|17499.2, 97, 
453936|16134.2, 97, 1.16919e+06|15513.1, 97, 3.50341e+06|23456.1, 97, 553989|15840.6, 97, 1.17924e+06|13000.3, 97, 593278|10.92, 98, 2.38549e+07| 0.02, 99, 422973|6255.01, 102, 0|34.75, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 1.00002e+07, 9.83917e+06, 1.0978e+07, 9.15746e+06, 8.30139e+06, 1.15016e+07, 5.77889e+06, 9.80872e+06, 1.37622e+07, 8.11915e+06, 1.17796e+07, 9.9176e+06, 3.99279e+06, 7.37744e+06, 2.13378e+07, 9.58036e+06, 4.04359e+06, 1.54863e+07, 3.98052e+06, 1.23432e+07, 1.7893e+07, 5.2405e+06, 8.84322e+06, 5.90523e+06| 0, 100, 0| 0
2 | Inspiral, 101, 102|472.07, 24, 340334|322.5, 24, 279653|564.17, 24, 432813|443.95, 24, 248290|344.22, 24, 294114|237.67, 24, 261711|234.71, 24, 177226|482.75, 24, 377039|246.94, 24, 328713|425.01, 24, 461090|297.13, 24, 283529|343.57, 25, 195890|338.02, 25, 431124|546.64, 25, 324818|525.94, 25, 436496|356.63, 25, 352593|670.45, 25, 296519|342.18, 25, 325831|450.21, 25, 412154|552.33, 25, 26, 466512, 0|545.41, 26, 402892|418.11, 26, 316636|516.87, 26, 371471| 5.46, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 17378, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0| 5.68, 38, 39, 40, 41, 42, 43, 44, 45, 46, 27408, 0, 0, 0, 0, 0, 0, 0, 0| 4.61, 47, 48, 49, 50, 29796, 0, 0, 0| 4.92, 51, 11776| 5.82, 52, 12445| 5.38, 53, 15297| 5.21, 54, 13656| 4.52, 55, 9428| 4.66, 56, 13258| 5.06, 57, 9638| 5.44, 58, 10103| 5.25, 59, 16857| 4.25, 60, 7681| 5.27, 61, 10352| 4.91, 62, 16701| 4.27, 63, 11283| 4.35, 64, 8347| 5.34, 65, 12088| 4.71, 66, 14657| 5.16, 67, 14945| 4.67, 68, 9398| 5.14, 69, 18870| 5.65, 70, 7563| 5.48, 71, 7943| 5.2, 72, 17623| 5.14, 73, 18614| 5.32, 74, 11549|451.46, 75, 413145|587.36, 75, 303583|616.79, 75, 262489|476.75, 75, 368061|424.61, 75, 309371|500.55, 75, 253407|297.36, 75, 223154|280.43, 75, 369500|531.85, 75, 287393|369.43, 75, 213927|421.58, 75, 257884|501.11, 76, 313844|628.17, 76, 248892|614.49, 76, 162445|451.08, 76, 436956|325.35, 76, 282000|436.36, 76, 235298|509.46, 76, 190974|307.28, 76, 175908|341.89, 76, 387986|502.1, 77, 416786|441.36, 77, 366809|256.49, 77, 211933|498.93, 77, 233564| 5.21, 102, 0| 5.46, 102, 0| 4.89, 102, 0|17.72, 1, 1.00412e+06|18.41, 2, 976415|18.29, 3, 968090|18.36, 4, 998458|18.73, 5, 963553|17.99, 6, 986203|18.06, 7, 973847|18.96, 8, 1.00881e+06|18.18, 9, 994951|18.24, 10, 983246|18.84, 11, 985623|17.34, 12, 995582|17.91, 13, 999506| 18, 14, 1.01206e+06|19.09, 15, 964333|18.68, 16, 978312|18.73, 17, 998548| 18.1, 18, 974827|17.85, 19, 996995|18.35, 20, 988084|18.35, 21, 977696|19.17, 22, 981285|18.46, 23, 952738| 0, 78, 79, 80, 81, 
82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0| 0
3 | CyberShake, 101, 102| 0.34, 102, 0| 0.8, 102, 0|54.89, 2, 4, 0, 24000| 1.1, 1, 0|54.78, 2, 6, 0, 24000| 0.58, 1, 0|48.95, 2, 8, 0, 24000| 0.66, 1, 0|51.92, 2, 10, 0, 24000| 0.76, 1, 0|25.71, 2, 12, 0, 24000| 0.69, 1, 0|25.37, 2, 14, 0, 24000| 1.56, 1, 0| 28.8, 2, 16, 0, 24000| 1.43, 1, 0|37.34, 2, 18, 0, 24000| 1.5, 1, 0|40.33, 2, 20, 0, 24000| 1.09, 1, 0| 44.4, 2, 22, 0, 24000| 1.57, 1, 0| 42.2, 2, 24, 0, 24000| 1.35, 1, 0|34.04, 2, 26, 0, 24000| 1.19, 1, 0|55.21, 2, 28, 0, 24000| 0.79, 1, 0|42.25, 2, 30, 0, 24000| 1.48, 1, 0|39.64, 2, 32, 0, 24000| 1, 1, 0|38.81, 2, 34, 0, 24000| 1.15, 1, 0|39.25, 2, 36, 0, 24000| 0.65, 1, 0|22.23, 2, 38, 0, 24000| 1.03, 1, 0|34.97, 2, 40, 0, 24000| 1.35, 1, 0|25.13, 2, 42, 0, 24000| 0.66, 1, 0|26.62, 2, 44, 0, 24000| 0.97, 1, 0|38.14, 2, 46, 0, 24000| 0.74, 1, 0|60.35, 2, 48, 0, 24000| 1.3, 1, 0|40.74, 2, 50, 0, 24000| 1.17, 1, 0|35.14, 2, 52, 0, 24000| 1.25, 1, 0|22.29, 2, 54, 0, 24000| 0.96, 1, 0|40.02, 2, 56, 0, 24000| 1.35, 1, 0|49.95, 2, 58, 0, 24000| 1.32, 1, 0|58.23, 2, 60, 0, 24000| 1.42, 1, 0|52.32, 2, 62, 0, 24000| 0.95, 1, 0|25.73, 2, 64, 0, 24000| 1, 1, 0|29.46, 2, 66, 0, 24000| 1.32, 1, 0|45.99, 2, 68, 0, 24000| 1.09, 1, 0|42.55, 2, 70, 0, 24000| 0.83, 1, 0|51.79, 2, 72, 0, 24000| 1.18, 1, 0|52.41, 2, 74, 0, 24000| 1, 1, 0|34.01, 2, 76, 0, 24000| 1.21, 1, 0| 58.2, 2, 78, 0, 24000| 0.84, 1, 0| 59.5, 2, 80, 0, 24000| 1.34, 1, 0| 63.8, 2, 82, 0, 24000| 1.16, 1, 0|63.54, 2, 84, 0, 24000| 1.17, 1, 0|55.84, 2, 86, 0, 24000| 0.7, 1, 0|48.48, 2, 88, 0, 24000| 0.74, 1, 0| 56.9, 2, 90, 0, 24000| 0.98, 1, 0|49.77, 2, 92, 0, 24000| 1.03, 1, 0|154.45, 3, 5, 7, 9, 2.51777e+08, 0, 0, 0|123.15, 11, 13, 15, 17, 19, 1.52944e+08, 0, 0, 0, 0|110.2, 21, 23, 25, 27, 29, 3.13806e+08, 0, 0, 0, 0|83.96, 31, 33, 35, 37, 39, 2.88487e+08, 0, 0, 0, 0|199.03, 41, 43, 45, 47, 49, 51, 2.81388e+08, 0, 0, 0, 0, 0|171.91, 53, 55, 57, 59, 61, 63, 1.60126e+08, 0, 0, 0, 0, 0|203.78, 65, 67, 69, 71, 73, 75, 77, 2.34866e+08, 0, 0, 0, 0, 0, 0|171.53, 
79, 81, 83, 85, 87, 89, 91, 1.25549e+08, 0, 0, 0, 0, 0, 0| 0, 93, 94, 95, 96, 97, 98, 99, 100, 0, 0, 0, 0, 0, 0, 0, 0| 0
4 | Sipht, 101, 102| 0.07, 8, 1.99253e+06|537.24, 3, 4, 5, 6, 7, 8, 83316, 0, 0, 0, 0, 333710| 1.44, 6, 690730| 6.42, 8, 545|1590.74, 8, 178472| 0, 8, 1.44707e+06| 5.4, 8, 966715| 1.12, 9, 311376| 0, 102, 0| 0.1, 8, 17, 1.67554e+06, 0|386.09, 3, 8, 12, 13, 14, 15, 16, 17, 81851, 350399, 0, 0, 0, 0, 0, 0| 1.71, 6, 15, 911211, 0| 4.76, 8, 17, 543, 0|1516.96, 8, 17, 190349, 0| 0, 8, 17, 1.13884e+06, 0| 5.02, 8, 17, 626834, 0| 1.87, 9, 18, 311376, 0| 0, 9, 102, 311376, 0| 0.06, 8, 26, 2.29095e+06, 0|374.51, 3, 8, 21, 22, 23, 24, 25, 26, 82801, 324886, 0, 0, 0, 0, 0, 0| 1.65, 6, 24, 1.00666e+06, 0| 5.1, 8, 26, 544, 0|1482.2, 8, 26, 175147, 0| 0, 8, 26, 630405, 0| 4.56, 8, 26, 828855, 0| 1.32, 9, 27, 311376, 0| 0, 9, 102, 311376, 0| 0.88, 1, 88968| 1.38, 1, 136561| 0.95, 1, 73630| 1.17, 1, 90621| 0.98, 1, 65459| 1.23, 1, 97888| 1.47, 1, 97456| 1.41, 1, 122923| 1.37, 1, 109548| 1.5, 1, 103970| 1.34, 1, 83875| 1.17, 1, 70456| 1.64, 1, 137115| 1.61, 1, 71999| 1.77, 1, 123053| 1.51, 1, 71306| 1.37, 1, 97446| 1.11, 1, 46477| 1.65, 1, 89827| 1.2, 1, 85721| 1.48, 1, 128233|958.03, 2, 3.67118e+06|30.51, 2, 310|76.72, 2, 95|1921.44, 2, 6.72179e+06| 1.31, 10, 112973| 1.51, 10, 103875| 1.61, 10, 108166| 1.8, 10, 70979| 1.3, 10, 70633| 1.49, 10, 113860| 1.18, 10, 105628| 1.3, 10, 124402| 1.72, 10, 126005| 1.74, 10, 75812| 1.47, 10, 85560| 1.2, 10, 118121| 0.92, 10, 46619| 1.39, 10, 105380| 1.42, 10, 114089| 1.13, 10, 106645| 1.14, 10, 86796| 24.8, 2, 11, 310, 0|883.87, 2, 11, 6.15912e+06, 0|1499.47, 2, 11, 3.54916e+06, 0|35.53, 2, 11, 96, 0| 1.2, 19, 118300| 0.99, 19, 132352| 0.92, 19, 75852| 1.49, 19, 123847| 1.29, 19, 68395| 1.16, 19, 103969| 1.22, 19, 135932| 1.26, 19, 112564| 1.28, 19, 121691| 1.25, 19, 70211| 1.64, 19, 83483| 1.65, 19, 65533| 1.56, 19, 135723| 1.34, 19, 114624| 1.62, 19, 97263| 1.03, 19, 81326| 1.34, 19, 118809| 0.97, 19, 69062| 0.83, 19, 133709| 1.14, 19, 127957| 1.44, 19, 85226| 1.22, 19, 67318| 1.2, 19, 47807|1152.49, 2, 20, 7.98116e+06, 0|37.99, 2, 20, 310, 
0|2142.51, 2, 20, 7.65522e+06, 0| 37.9, 2, 20, 96, 0| 0, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0| 0
5 | Montage, 101, 102|10.57, 63, 178931| 10.6, 63, 156550|10.58, 63, 399793|10.64, 63, 307002|10.45, 63, 368185|10.64, 63, 337755|10.58, 63, 198353|10.74, 63, 230511|10.61, 63, 400569|10.58, 63, 282709|10.67, 63, 351800|10.67, 63, 312503|10.57, 63, 373385|10.74, 63, 249020|10.91, 63, 378333|10.55, 63, 386166|10.49, 63, 236443|10.61, 63, 350882|10.53, 63, 221385|10.53, 63, 344835|10.67, 63, 358810|10.54, 63, 349453|10.41, 63, 183983|10.58, 63, 227424|10.51, 63, 345235|10.77, 63, 356906|10.46, 63, 411551| 10.4, 63, 249626|10.54, 63, 336954|10.56, 63, 211113|10.62, 63, 227822|10.59, 63, 321779|10.48, 63, 385592|10.69, 63, 193627|10.65, 63, 334486|10.55, 63, 418121|10.66, 63, 158263|10.55, 63, 360905|10.56, 63, 371340|10.47, 63, 143886|10.48, 63, 353366|10.51, 63, 321395|10.59, 63, 225744|10.72, 63, 352586|10.49, 63, 198205|10.64, 63, 168036|10.46, 63, 344267|10.35, 63, 213184|10.64, 63, 368991|10.59, 63, 157385|10.72, 63, 316446|10.57, 63, 274392|10.61, 63, 251338|10.66, 63, 259107|10.77, 63, 420860|10.43, 63, 165358|10.79, 63, 254590|10.65, 63, 308378|10.49, 63, 377305|10.61, 63, 286292|10.47, 63, 161693|10.65, 63, 309680| 4.96, 64, 13017| 5.34, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 850, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0|10.88, 81, 4.18157e+06|10.96, 81, 4.14507e+06|10.61, 81, 4.14806e+06|10.73, 81, 4.16893e+06|10.66, 81, 4.17505e+06|10.49, 81, 4.16825e+06| 10.9, 81, 4.15056e+06|10.75, 81, 4.16758e+06|10.72, 81, 4.16929e+06|10.86, 81, 4.16356e+06|10.74, 81, 4.15019e+06|10.69, 81, 4.17138e+06|10.74, 81, 4.16697e+06|10.69, 81, 4.17616e+06|10.73, 81, 4.17557e+06|10.78, 81, 4.16549e+06| 7.63, 82, 5734| 9.6, 83, 6.94322e+07| 6.91, 84, 2.7784e+06| 0.83, 102, 0|13.11, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13, 23, 24, 40, 53, 66, 4.18317e+06, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0|13.85, 1, 2, 3, 4, 5, 30, 47, 65, 4.15772e+06, 0, 0, 0, 0, 0, 0, 0| 13.7, 2, 10, 27, 39, 74, 4.16221e+06, 0, 0, 0, 0|13.81, 3, 33, 38, 47, 48, 49, 61, 76, 4.16432e+06, 
0, 0, 0, 0, 0, 0, 0|13.36, 4, 12, 18, 22, 45, 53, 54, 55, 56, 57, 78, 4.158e+06, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0| 13.3, 6, 14, 16, 17, 18, 19, 32, 41, 59, 68, 4.16048e+06, 0, 0, 0, 0, 0, 0, 0, 0, 0|13.25, 7, 15, 24, 25, 26, 27, 28, 29, 71, 4.14972e+06, 0, 0, 0, 0, 0, 0, 0, 0|13.65, 8, 16, 30, 31, 32, 33, 35, 43, 55, 72, 4.15144e+06, 0, 0, 0, 0, 0, 0, 0, 0, 0|13.63, 9, 26, 34, 35, 36, 37, 38, 48, 60, 73, 4.15807e+06, 0, 0, 0, 0, 0, 0, 0, 0, 0|13.62, 11, 17, 21, 29, 50, 51, 52, 56, 77, 4.16569e+06, 0, 0, 0, 0, 0, 0, 0, 0|13.58, 13, 19, 52, 58, 59, 60, 61, 62, 80, 4.15962e+06, 0, 0, 0, 0, 0, 0, 0, 0|13.42, 14, 15, 20, 31, 58, 67, 4.1631e+06, 0, 0, 0, 0, 0|13.16, 20, 21, 22, 25, 34, 50, 69, 4.14631e+06, 0, 0, 0, 0, 0, 0|13.57, 23, 42, 54, 70, 4.15738e+06, 0, 0, 0|13.55, 28, 37, 40, 41, 42, 43, 44, 45, 46, 75, 4.15669e+06, 0, 0, 0, 0, 0, 0, 0, 0, 0|13.17, 46, 49, 62, 79, 4.16032e+06, 0, 0, 0| 0, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0| 0
6 |
--------------------------------------------------------------------------------
/Scheduler/Multi_Workflow.py:
--------------------------------------------------------------------------------
1 | import math
2 | import random
3 | import copy
4 | import string
5 |
6 | from Definitions.Resources import Constraint
7 | import Definitions.WorkflowReader.reader
8 | import Definitions
9 | from Definitions import Graph
10 | from Definitions.Resources import CostAwareResources
11 |
12 |
def assign_sub_deadlines(reference_graph, target_graph, asap=None):
    """Copy each reference task's LFT onto the matching target task as its
    sub-deadline; optionally tag every target task with the given asap flag."""
    for task_id in reference_graph.tasks.keys():
        target_task = target_graph.tasks[task_id]
        target_task.sub_deadline = reference_graph.tasks[task_id].lft
        if asap is not None:
            target_task.asap = asap
19 |
20 |
def assign_sub_budgets(reference_graph, target_graph, reference_resources):
    """
    Distributes each resource's cost over the tasks scheduled on it,
    proportionally to task runtime, storing each task's share in
    target_graph.tasks[t].sub_budget.

    :param reference_graph: unused; kept for interface compatibility
    :param target_graph: graph whose tasks receive .sub_budget
    :param reference_resources: resources holding the reference schedule
    """
    resources = reference_resources  # reference_graph.resources
    for r in range(resources.len):
        resource_cost = resources.resource_cost(r)
        if resource_cost == 0:
            continue
        task_in_resource = resources.tasksOfResource[r]
        sum_runtime = sum(sch.runtime for sch in task_in_resource)
        for sch in task_in_resource:
            if sum_runtime == 0:
                # Every scheduled task has zero runtime: there is no sensible
                # proportion, so give a zero share instead of dividing by zero
                # (restores the guard the author left commented out).
                target_graph.tasks[sch.task.id].sub_budget = 0
            else:
                target_graph.tasks[sch.task.id].sub_budget = float(sch.runtime) / sum_runtime * resource_cost
36 |
37 |
def assign_cost_for_each_task(g, resources=None):
    """
    Splits each resource's cost over its scheduled tasks proportionally to
    task weight, writing .cost and .eft on the matching tasks of `g`.

    :param g: graph whose tasks receive .cost and .eft
    :param resources: CostAwareResources-like object; when omitted there is
                      nothing to assign and the call is a no-op (the old
                      default built a throw-away CostAwareResources([], [],
                      [], 0) at import time — a shared mutable default)
    """
    if resources is None:
        return
    for r in range(resources.len):
        resource_cost = resources.resource_cost(r)
        if resource_cost == 0:
            continue
        scheduled_task_in_resource = resources.tasksOfResource[r]
        sum_weight = sum(sch.task.weight for sch in scheduled_task_in_resource)
        for sch in scheduled_task_in_resource:
            g.tasks[sch.task.id].cost = resource_cost * sch.task.weight / sum_weight
            g.tasks[sch.task.id].eft = sch.EFT
48 |
49 |
def normalize_dag(g=None, w_coefficient=1, e_coefficient=1):
    """
    Normalizes the graph in place: task weights are scaled so they sum to
    w_coefficient, and edge weights so they sum to e_coefficient (both
    successor and the mirrored predecessor entries are updated).

    :param g: graph to normalize; when omitted this is a no-op (the old
              default built a def-time Graph.Graph(), an empty graph, whose
              normalization did nothing anyway)
    :param w_coefficient: target sum of all task weights
    :param e_coefficient: target sum of all edge weights
    """
    if g is None:
        return
    sum_w = sum(t.weight for t in g.tasks.values())
    edges = map(lambda t: list(t.successor.values()), g.tasks.values())
    sum_e = sum(item for sub_list in edges for item in sub_list)
    for task in g.tasks.values():
        task.weight /= sum_w
        task.weight *= w_coefficient

        for c in task.successor:
            # scale the edge and keep the child's predecessor entry in sync
            task.successor[c] /= sum_e / e_coefficient
            g.tasks[c].predecessor[task.id] /= sum_e / e_coefficient
61 |
62 |
def randomize_dag(g=None, w_coefficient=(0.3, 1.7), e_coefficient=(0.3, 1.7)):
    """
    Randomly perturbs the graph in place: every task weight is multiplied by
    a uniform factor from [w_coefficient[0], w_coefficient[1]) and every edge
    weight by a factor from [e_coefficient[0], e_coefficient[1]); the child's
    predecessor entry is kept in sync with the scaled successor weight.

    :param g: graph to perturb; when omitted this is a no-op (the old default
              built a def-time Graph.Graph() — a shared mutable default)
    :param w_coefficient: (low, high) factor range for task weights
                          (tuples now; lists still work, same indexing)
    :param e_coefficient: (low, high) factor range for edge weights
    """
    if g is None:
        return
    precision = 10000  # factors are drawn as integers then scaled back down
    w_start = int(w_coefficient[0] * precision)
    w_end = int(w_coefficient[1] * precision)
    e_start = int(e_coefficient[0] * precision)
    e_end = int(e_coefficient[1] * precision)
    for task in g.tasks.values():
        task.weight *= float(random.randrange(w_start, w_end)) / precision
        for c in task.successor:
            task.successor[c] *= float(random.randrange(e_start, e_end)) / precision
            # mirror the new edge weight on the child's side
            g.tasks[c].predecessor[task.id] = task.successor[c]
74 |
75 |
def compute_gap_rate(resources=CostAwareResources([], [], [], 0), r=-1, t=-1, list_is_required=False):
    """
    Computes what fraction of the timeslot containing time `t` on resource
    `r` is idle (gaps between consecutive scheduled tasks).

    :param resources: cost-aware resources (timeslot lengths, task schedules)
    :param r: resource index
    :param t: a time instant; the half-timeslot shift below centers it in a slot
    :param list_is_required: when True, also return the list of (start, end)
                             gap intervals inside the timeslot
    :return: (gap_rate, slot_start, slot_end[, gap_list]); (0, -1, -1[, []])
             when the resource has no real tasks or t falls before them
    NOTE(review): the default argument builds a CostAwareResources at import
    time (shared mutable default) — callers should always pass `resources`.
    """
    timeslot_len = resources.timeslot[r]
    # shift t to the middle of its timeslot before locating the slot
    t += float(timeslot_len / 2)
    tasks_in_resource = resources.tasksOfResource[r]
    length_r = len(tasks_in_resource)
    try:
        # skip dummy tasks at both ends; LookupError means no real task exists
        start_task_index = 0
        while tasks_in_resource[start_task_index].task.dummy_task:
            start_task_index += 1
        end_task_index = length_r - 1
        while tasks_in_resource[end_task_index].task.dummy_task:
            end_task_index -= 1
        if end_task_index < start_task_index:
            if list_is_required:
                return 0, -1, -1, []
            else:
                return 0, -1, -1
    except LookupError:
        if list_is_required:
            return 0, -1, -1, []
        else:
            return 0, -1, -1
    # locate the timeslot [slot_start, slot_end) containing t, anchored at the
    # first real task's start time t0
    t0 = tasks_in_resource[start_task_index].EST
    x = float(t - t0) / timeslot_len
    if x < 0:
        # t falls before the resource's first task: no slot to inspect
        if list_is_required:
            return 0, -1, -1, []
        else:
            return 0, -1, -1
    start_time_of_timeslot = math.floor(x) * timeslot_len + t0
    end_time_of_timeslot = math.ceil(x) * timeslot_len + t0
    if start_time_of_timeslot == end_time_of_timeslot:
        # t sits exactly on a slot boundary: take the following slot
        end_time_of_timeslot += timeslot_len

    # sum the idle time between consecutive tasks, clipped to the timeslot
    sum_gap = 0
    gap_list = []
    for i in range(start_task_index + 1, end_task_index + 1):
        # find the gap between "i"th and "i-1"th tasks:
        gap_start = tasks_in_resource[i - 1].EFT
        gap_end = tasks_in_resource[i].EST
        if end_time_of_timeslot <= gap_start:
            # gaps are ordered; everything further is past the slot
            break
        if gap_end <= start_time_of_timeslot:
            continue
        # clip the gap to the timeslot boundaries
        if gap_start < start_time_of_timeslot:
            gap_start = start_time_of_timeslot
        if end_time_of_timeslot < gap_end:
            gap_end = end_time_of_timeslot
        sum_gap += gap_end - gap_start
        if not list_is_required or gap_end == gap_start:
            continue
        gap_list.append((gap_start, gap_end))
    # trailing idle time after the last real task, if it ends inside the slot
    if start_time_of_timeslot < tasks_in_resource[end_task_index].EFT < end_time_of_timeslot:
        sum_gap += end_time_of_timeslot - tasks_in_resource[end_task_index].EFT
        if list_is_required:
            gap_list.append((tasks_in_resource[end_task_index].EFT, end_time_of_timeslot))
    if list_is_required:
        return float(sum_gap) / timeslot_len, start_time_of_timeslot, end_time_of_timeslot, gap_list
    else:
        return float(sum_gap) / timeslot_len, start_time_of_timeslot, end_time_of_timeslot
136 |
137 |
def tasks_in_interval(resources=None, r=-1, t=-1):
    """
    Returns the ids of tasks on resource `r` that (at least partially) fall
    inside the timeslot containing time `t`.

    :param resources: cost-aware resources (timeslot lengths, task schedules);
                      when omitted the result is [] (the old default built a
                      def-time CostAwareResources([], [], [], 0) — a shared
                      mutable default — and could never produce tasks anyway)
    :param r: resource index
    :param t: a time instant; shifted by half a slot to center it, as in
              compute_gap_rate
    :return: list of task ids overlapping the timeslot (possibly empty)
    """
    if resources is None:
        return []
    timeslot_len = resources.timeslot[r]
    t += float(timeslot_len / 2)
    tasks_in_resource = resources.tasksOfResource[r]
    if len(tasks_in_resource) == 0:
        return []

    # locate the timeslot [slot_start, slot_end) containing t, anchored at
    # the first task's start time
    t0 = tasks_in_resource[0].EST
    x = float(t - t0) / timeslot_len
    if x < 0:
        # t falls before the resource's first task
        return []
    start_time_of_timeslot = math.floor(x) * timeslot_len + t0
    end_time_of_timeslot = math.ceil(x) * timeslot_len + t0
    if start_time_of_timeslot == end_time_of_timeslot:
        # t sits exactly on a slot boundary: take the following slot
        end_time_of_timeslot += timeslot_len

    task_list = []
    for sch in tasks_in_resource:
        # a task overlaps the slot when either endpoint lies inside it
        if start_time_of_timeslot <= sch.EFT <= end_time_of_timeslot or \
                start_time_of_timeslot <= sch.EST <= end_time_of_timeslot:
            task_list.append(sch.task.id)
    return task_list
167 |
168 |
def generate_gammavariate_list(n, alpha, beta):
    """Return n gamma-distributed samples with shape alpha and scale beta."""
    return [random.gammavariate(alpha, beta) for _ in range(n)]
175 |
176 |
def generate_hyper_gammavariate_list(n=list(), alpha=list(), beta=list()):
    """Draw n[i] samples from gamma(alpha[i], beta[i]) for every i and
    return all of them as one ascending-sorted list."""
    samples = []
    for i in range(len(n)):
        samples.extend(generate_gammavariate_list(n[i], alpha[i], beta[i]))
    return sorted(samples)
    # sample: generate_hyper_gammavariate_list([300, 700], [5, 45], [501.266, 136.709])
185 |
186 |
def make_workload(test, desired_average_runtime=30):
    """
    Builds a randomized workload of workflow jobs for one simulation run.

    :param test: db.definitions.Test — supplies workload_len, bandwidth, the
                 small/medium/large size ratios and budget_ratio
    :param desired_average_runtime: target mean job runtime (in time units)
    :return: (job, constraint, names, sizes) — parallel lists with one entry
             per generated job
    """
    # ----------Read Jobs:
    all_jobs = {'S': Definitions.WorkflowReader.reader.read_jobs('workflows/S.txt'), # 25-30
                'M': Definitions.WorkflowReader.reader.read_jobs('workflows/M.txt'), # 50-60
                'L': Definitions.WorkflowReader.reader.read_jobs('workflows/L.txt'), # 100
                'XXL': Definitions.WorkflowReader.reader.read_jobs('workflows/XXL.txt')} # 1000

    workload_len = test.workload_len
    bandwidth = test.bandwidth

    # Job sizes drawn from a hyper-gamma mixture (30% / 70% split between
    # the two component distributions), then rescaled to the desired mean.
    job_size = generate_hyper_gammavariate_list([int(0.3 * workload_len), int(0.7 * workload_len)],
                                                [5, 45], [501.266, 136.709])

    current_average_runtime = sum(job_size) / workload_len

    # 60 is multiplied in order to map workflow sizes to 60 timeslots. (they were set before for timeslot = 1)
    job_size = list(map(lambda s: 60 * s * desired_average_runtime / current_average_runtime, job_size))
    small_ratio = test.small
    medium_ratio = test.medium
    large_ratio = test.large
    budget_constraint_ratio = test.budget_ratio
    # small_ratio = 0.5
    # medium_ratio = 0.3
    # large_ratio = 0.2
    # budget_constraint_ratio = 0.5

    workflow_names = ['Montage', 'CyberShake', 'Epigenomics', 'Inspiral'] # 'Sipht',

    # NOTE(review): these counters can be non-integer floats when the ratios
    # are not exact multiples of 1/workload_len; the `!= 0` tests below then
    # never hit zero after repeated `-= 1` — confirm intended.
    small_size = small_ratio * workload_len
    medium_size = medium_ratio * workload_len
    large_size = large_ratio * workload_len
    job = []
    constraint = []
    sizes = []
    names = []
    for i in range(workload_len):
        # pick a workflow type uniformly at random
        randomly_selected_workflow = random.randint(0, len(workflow_names) - 1)
        name = workflow_names[randomly_selected_workflow]

        # consume the size quotas in order: small first, then medium, then large
        if small_size != 0:
            selected_size = 'S'
            small_size -= 1
        elif medium_size != 0:
            mediums = ['M', 'L']
            selected_size = mediums[random.randint(0, 1)]
            medium_size -= 1
        elif large_size != 0:
            selected_size = 'XXL'
            large_size -= 1
        else:
            break
        # deep-copy the template graph so each job can be mutated independently
        g = copy.deepcopy(all_jobs[selected_size][name])
        sizes.append(selected_size)
        names.append(name)

        # a job is budget-constrained with probability budget_constraint_ratio
        c = 'Deadline'
        if random.random() >= 1 - budget_constraint_ratio:
            constraint.append(Constraint.budget)
            c = 'Budget'
        else:
            constraint.append(Constraint.deadline)
        print("{0: >3}.{1:12} | Constraint: {4:>8} | "
              "Nodes:{2:4d} | Size:{3:5.2f}".format(selected_size, name, len(g.tasks) - 2, job_size[i], c))
        # random 5-letter name so concurrently scheduled jobs stay distinct
        g.set_name(''.join(random.choice(string.ascii_lowercase) for _ in range(5)))
        # Scheduler.Multi_Workflow.randomize_dag(g)
        normalize_dag(g, job_size[i], bandwidth / 10)
        job.append(g)

    return job, constraint, names, sizes
262 |
263 |
def make_static_workload(BW):
    """
    Build a fixed, reproducible 10-job workload whose constraints alternate
    between budget and deadline (budget first).

    :param BW: bandwidth; passed to normalize_dag as BW / 10.
    :return: (job, constraint) -- normalized graph copies and their
        Constraint members.
    """
    # ----------Read Jobs:
    all_jobs = {'S': Definitions.WorkflowReader.reader.read_jobs('S.txt'),  # 25-30
                'M': Definitions.WorkflowReader.reader.read_jobs('M.txt'),  # 50-60
                'L': Definitions.WorkflowReader.reader.read_jobs('L.txt'),  # 100
                'XXL': Definitions.WorkflowReader.reader.read_jobs('XXL.txt')}  # 1000

    job_size = [8, 10, 28, 29, 30, 32, 36, 40, 41, 43]

    workload_len = 10
    desired_average_runtime = 30  # on time-unit
    mean_size = sum(job_size) / workload_len
    # Rescale so the average job size equals desired_average_runtime.
    job_size = [sz * desired_average_runtime / mean_size for sz in job_size]

    workflow_names = ['Montage', 'CyberShake', 'Epigenomics', 'Sipht', 'Inspiral']

    workflow_indices = [2, 1, 0, 4, 1, 1, 2, 0, 2, 4]
    sizes = ['L'] * 10

    job = []
    constraint = []
    for i, (wf_index, selected_size) in enumerate(zip(workflow_indices, sizes)):
        name = workflow_names[wf_index]
        g = copy.deepcopy(all_jobs[selected_size][name])

        # Even positions are budget-constrained, odd ones deadline-constrained
        # (the same alternation the original toggle variable produced).
        if i % 2 == 0:
            c = 'Budget'
            constraint.append(Constraint.budget)
        else:
            c = 'Deadline'
            constraint.append(Constraint.deadline)

        print("{0: >3}.{1:12} | Constraint: {4:>8} | "
              "Nodes:{2:4d} | Size:{3:5.2f}".format(selected_size, name, len(g.tasks) - 2, job_size[i], c))
        g.set_name(i)
        # Scheduler.Multi_Workflow.randomize_dag(g)
        normalize_dag(g, job_size[i], BW / 10)
        job.append(g)

    return job, constraint
310 |
311 |
def make_static_workload_2(BW):
    """
    Build a fixed, reproducible 20-job workload (every entry of the 10-job
    static workload duplicated) with constraints alternating budget/deadline,
    budget first.

    :param BW: bandwidth; passed to normalize_dag as BW / 10.
    :return: (job, constraint) -- normalized graph copies and their
        Constraint members.
    """
    # ----------Read Jobs:
    all_jobs = {'S': Definitions.WorkflowReader.reader.read_jobs('S.txt'),  # 25-30
                'M': Definitions.WorkflowReader.reader.read_jobs('M.txt'),  # 50-60
                'L': Definitions.WorkflowReader.reader.read_jobs('L.txt'),  # 100
                'XXL': Definitions.WorkflowReader.reader.read_jobs('XXL.txt')}  # 1000

    job_size = [8, 8, 10, 10, 28, 28, 29, 29, 30, 30, 32, 32, 36, 36, 40, 40, 41, 41, 43, 43]

    workload_len = 20
    desired_average_runtime = 30  # on time-unit
    mean_size = sum(job_size) / workload_len
    # Rescale so the average job size equals desired_average_runtime.
    job_size = [sz * desired_average_runtime / mean_size for sz in job_size]

    workflow_names = ['Montage', 'CyberShake', 'Epigenomics', 'Sipht', 'Inspiral']

    workflow_indices = [2, 2, 1, 1, 0, 0, 4, 4, 1, 1, 1, 1, 2, 2, 0, 0, 2, 2, 4, 4]
    sizes = ['L'] * 20

    job = []
    constraint = []
    for i, (wf_index, selected_size) in enumerate(zip(workflow_indices, sizes)):
        name = workflow_names[wf_index]
        g = copy.deepcopy(all_jobs[selected_size][name])

        # Even positions are budget-constrained, odd ones deadline-constrained
        # (the same alternation the original toggle variable produced).
        if i % 2 == 0:
            c = 'Budget'
            constraint.append(Constraint.budget)
        else:
            c = 'Deadline'
            constraint.append(Constraint.deadline)

        print("{0: >3}.{1:12} | Constraint: {4:>8} | "
              "Nodes:{2:4d} | Size:{3:5.2f}".format(selected_size, name, len(g.tasks) - 2, job_size[i], c))
        g.set_name(i)
        # Scheduler.Multi_Workflow.randomize_dag(g)
        normalize_dag(g, job_size[i], BW / 10)
        job.append(g)

    return job, constraint
358 |
--------------------------------------------------------------------------------
/runAll.py:
--------------------------------------------------------------------------------
1 | from Definitions.MultiWorkflow.JobList import Constraint, JobItem
2 |
3 | from matplotlib import pyplot as plt
4 | import numpy as np
5 | import random
6 |
7 | import Scheduler.ICPCP
8 | import Scheduler.HEFT
9 | import Scheduler.BHEFT
10 | import Definitions.Resources
11 | import Scheduler.BudgetPessimistic
12 | import Scheduler.DeadlineOptimisticAlpha
13 | import copy
14 | import Scheduler.Multi_Workflow
15 |
16 | from db.definitions import Test
17 | import pickle
18 | import Definitions
19 | from IPython.display import clear_output
20 |
def run_all(verbose=False):
    """
    End-to-end driver: build a pickled workload, compute per-job reference
    plans, schedule all workflows together on a shared cloud, and report
    whether every job met its constraint.

    :param verbose: when True, print per-job results and plot the schedule.
    :return: True iff the combined schedule satisfied every constraint and
        cost no more than the sum of the separate reference runs.
    """
    create_pickle()

    row, test, job, constraint, names, sizes = read_jobs()
    test.time_slot = 1
    # r[1] is the per-type price column; scaled 12x -- presumably to offset
    # the time_slot=1 override above, TODO confirm.
    for r in test.resource_array:
        r[1] *= 12
    # NOTE(review): timeslot_list is deliberately unpacked twice here because
    # create_resources returns it in two positions of its 11-tuple.
    bandwidth, workload_len, timeslot_list, powers, prices, numbers, power_list, price_list, timeslot_list, resource_spec, main_resources = create_resources(test)
    makespan_list, cost_list, resources_set, graph_set, constraint_values = calculate_reference_plans(workload_len, main_resources, job, constraint, verbose=verbose)

    # Round-trip all intermediate state through a pickle so a later failure
    # can be replayed without recomputing the reference plans.
    to_write = (bandwidth, workload_len, timeslot_list, powers, prices, numbers, power_list,
                price_list, timeslot_list, resource_spec, main_resources,
                makespan_list, cost_list, resources_set, graph_set, constraint_values)

    pickle.dump(to_write, open('refrence_plan2.pickle', 'wb'))

    read_object = pickle.load(open('refrence_plan2.pickle', 'rb'))

    bandwidth, workload_len, timeslot_list, powers, prices, numbers, power_list, price_list, timeslot_list, resource_spec, main_resources, makespan_list, cost_list, resources_set, graph_set, constraint_values = read_object

    # Build the shared pool and the JobItems, then schedule them together.
    jobs, cloud_resources = create_multi_workflow_resouces(test, resources_set, constraint, cost_list,
                                                           makespan_list, job, graph_set, constraint_values)
    fair_policy(workload_len, jobs, cloud_resources)
    successful_sched = is_successful(cloud_resources, cost_list, makespan_list, constraint,
                                     constraint_values, jobs, graph_set, names, workload_len)
    if verbose:
        show_results(cloud_resources, cost_list, makespan_list, constraint,
                     constraint_values, jobs, graph_set, names, workload_len, resources_set)
        show_schedule(cloud_resources)
    if successful_sched:
        print('successful scheduling')
    else:
        print('not successful')
    return successful_sched
55 |
56 |
def create_pickle():
    """
    Build a random workload from a hard-coded test row, give every job a
    unique human-readable name ("<type>:<n>"), and pickle everything to
    dump.pickle for read_jobs().
    """
    # Hard-coded test row; the field order is interpreted by
    # db.definitions.Test (name, workload_len, resource json, ratios, ...).
    row = 'test1', 10, '{"t": 1, "r": [[1, 1, 6], [2, 3, 3]]}', 0.5, 1, 0, 0, 1e50

    test = Test(row)
    test.c_resource = 0.8
    job, constraint, names, sizes = Scheduler.Multi_Workflow.make_workload(test)

    # Re-derive unique display names as "<workflow type>:<counter>".
    names = []
    for j in job:
        i = 1
        while f'{j.type[:-1]}:{i}' in names:
            i += 1
        j.name = f'{j.type[:-1]}:{i}'
        names.append(f'{j.type[:-1]}:{i}')

    # (removed a dead no-op expression `[j.name for j in job]` that built and
    # discarded a list)
    # Constraints are pickled as plain booleans ("is budget?") so the dump
    # does not depend on the Constraint enum's identity.
    cnstr = [c is Constraint.budget for c in constraint]
    variable = (row, test, job, cnstr, names, sizes)
    # Close the file deterministically instead of relying on the GC.
    with open('dump.pickle', 'wb') as f:
        pickle.dump(variable, f)
76 |
77 |
def read_jobs():
    """
    Load the workload previously written by create_pickle().

    :return: (row, test, job, constraint, names, sizes), with the pickled
        boolean flags converted back into Constraint enum members.
    """
    # FIX: use a context manager so the file handle is closed deterministically
    # (the original `pickle.load(open(...))` left closing to the GC).
    with open('dump.pickle', 'rb') as f:
        row, test, job, cnstr, names, sizes = pickle.load(f)
    constraint = [Constraint.budget if b else Constraint.deadline for b in cnstr]
    return row, test, job, constraint, names, sizes
82 |
83 |
def create_resources(test):
    """
    Expand the test's resource table into flat per-instance lists and build
    the main CostAwareResources pool.

    :param test: db.definitions.Test with resource_array rows of
        [power, price, count].
    :return: an 11-tuple (bandwidth, workload_len, timeslot_list, powers,
        prices, numbers, power_list, price_list, timeslot_list, resource_spec,
        main_resources).  NOTE: timeslot_list appears twice on purpose -- the
        historical unpacking order used by callers expects 11 values.
    """
    bandwidth = test.bandwidth
    workload_len = test.workload_len

    # Per-type columns of the resource table.  (The original also initialized
    # timeslot_list here and then threw the value away; that dead init is gone.)
    powers = [r[0] for r in test.resource_array]
    prices = [r[1] for r in test.resource_array]
    numbers = [r[2] for r in test.resource_array]

    # Flatten to one entry per resource instance.
    power_list, price_list, timeslot_list = [], [], []
    for i in range(len(test.resource_array)):
        power_list += [powers[i]] * numbers[i]
        price_list += [prices[i]] * numbers[i]
        # 60x matches the 60-timeslot scaling applied when workloads are built.
        timeslot_list += [60 * test.time_slot] * numbers[i]

    resource_spec = (power_list, price_list, timeslot_list)

    main_resources = Definitions.Resources.CostAwareResources(resource_spec[0], resource_spec[1], resource_spec[2],
                                                              bandwidth)

    return bandwidth, workload_len, timeslot_list, powers, prices, numbers, power_list, price_list, timeslot_list, resource_spec, main_resources
107 |
108 |
def calculate_reference_plans(workload_len, main_resources, job, constraint, verbose=True):
    """
    Schedule every job ALONE on a fresh copy of the resource pool to obtain
    per-job reference plans and randomly drawn constraint values.

    Each job is first scheduled with HEFT to get a baseline cost/makespan.
    Budget-constrained jobs are then re-planned with BHEFT under a drawn
    budget (HEFT cost * factor); deadline-constrained jobs with IC-PCP under
    a drawn deadline (HEFT makespan * factor).  Up to 3 factors are tried
    until the planner meets the drawn constraint.

    :return: (makespan_list, cost_list, resources_set, graph_set,
        constraint_values), one entry per job.
    """
    makespan_list = []
    cost_list = []
    resources_set = []
    graph_set = []
    constraint_values = []

    for i in range(workload_len):
        # Baseline: HEFT on private copies of the graph and resource pool.
        resources = copy.deepcopy(main_resources)
        g = copy.deepcopy(job[i])
        Scheduler.HEFT.schedule(g, resources)
        g_heft = g
        cost = resources.plan_cost
        makespan = resources.makespan

        heft_resources = resources

        if constraint[i] is Constraint.budget:
            c = 'Budget'
            # Mostly generous budgets (mean 8x HEFT cost), sometimes tight (mean 2x).
            budget_factor = np.random.normal(8, 3) if random.random() >= 0.2 else np.random.normal(2, 1.4)
            attempts = 3
            while attempts > 0:
                attempts -= 1
                resources = copy.deepcopy(main_resources)
                g = copy.deepcopy(job[i])
                Scheduler.BHEFT.schedule(g, resources, cost * budget_factor)
                if cost * budget_factor >= resources.plan_cost:
                    break  # BHEFT met the drawn budget; keep this plan
                budget_factor = np.random.normal(8, 3) if random.random() >= 0.2 else np.random.normal(2, 1.4)

            constraint_factor = budget_factor
            constraint_value = cost * budget_factor
        else:
            c = 'Deadline'
            deadline_factor = np.random.normal(8, 1.4) if random.random() >= 0.2 else np.random.normal(2, 1.4)

            resources = heft_resources
            attempts = 3
            while attempts > 0:
                attempts -= 1
                resources = copy.deepcopy(main_resources)
                g = copy.deepcopy(job[i])
                Scheduler.ICPCP.schedule(g, resources, makespan * deadline_factor)
                if makespan * deadline_factor >= resources.makespan:
                    break  # IC-PCP met the drawn deadline; keep this plan
                else:
                    deadline_factor = np.random.normal(8, 1.4) if random.random() >= 0.2 else np.random.normal(2, 1.4)
            constraint_factor = deadline_factor
            constraint_value = makespan * deadline_factor
        if verbose:
            print("heft cost:{0:5.1f} | cost:{1:5.1f} | heft ms:{2:5.2f} | ms:{3:5.2f} "
                  "| Nodes:{4:4d} | {5:>8} | factor: {6:5.2f}".format(cost, resources.plan_cost, makespan,
                                                                      resources.makespan, len(g.tasks) - 2, c,
                                                                      constraint_factor))

        # ---Store results for next use:
        makespan_list.append(resources.makespan)
        cost_list.append(resources.plan_cost)
        resources_set.append(copy.deepcopy(resources))
        graph_set.append(g)
        constraint_values.append(constraint_value)
    return makespan_list, cost_list, resources_set, graph_set, constraint_values
171 |
172 |
def show_schedule(resources, save_number=None, current_time=None):
    """
    Plot the resource pool's schedule as horizontal task bars, one row per
    resource, colored per workflow.

    :param resources: pool exposing show_schedule() -> [(names, ESTs, EFTs)].
    :param save_number: when not None, save to images/<save_number> and close
        the figure instead of showing it interactively.
    :param current_time: optional x-position for a vertical red "now" marker.
    """
    sched = resources.show_schedule()

    num_plots = sum([len(item[0]) for item in sched])
    figure_number = random.randint(1, 10000)  # avoid clashing with open figures
    fig = plt.figure(figure_number, figsize=[10, 10])
    colormap = plt.cm.gist_ncar
    plt.gca().set_prop_cycle('color', [colormap(i) for i in np.linspace(0, 0.9, num_plots)])
    colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]
    my_label = []
    k = 0

    for i, entry in enumerate(sched):
        name = entry[0]
        est = entry[1]
        eft = entry[2]
        for j in range(len(est)):
            k += 1
            graph_name = name[j].split('-')[0]
            # Attach a legend label only the first time a workflow appears.
            first_visit = False
            if graph_name not in my_label:  # idiom fix: was `not graph_name in`
                my_label.append(graph_name)
                first_visit = True

            my_color = my_label.index(graph_name)
            if first_visit:
                plt.plot([est[j], eft[j]], [i / 5, i / 5], linewidth=2, label=graph_name,
                         color=colors[my_color])
            else:
                plt.plot([est[j], eft[j]], [i / 5, i / 5], linewidth=2,
                         color=colors[my_color])

    if current_time:
        plt.plot([current_time, current_time], [0, len(sched) / 5], linewidth=1, color='red')

    # FIX: the original called plt.legend(loc='center') and then plt.legend()
    # unconditionally -- the second call replaced the first, so one call
    # suffices and the final rendering is unchanged.
    plt.legend()
    plt.title(f'total cost: {resources.plan_cost}')
    if save_number is not None:  # idiom fix: was `not save_number is None`
        plt.savefig(f'images/{save_number}', bbox_inches='tight')
        plt.close(figure_number)
    else:
        plt.show()
217 |
def create_multi_workflow_resouces(test, resources_set, constraint, cost_list,
                                   makespan_list, job, graph_set, constraint_values):
    """
    Size a shared cloud pool from the per-job reference plans and wrap every
    job in a JobItem bound to that pool.

    The instance count of each resource type is the number of instances the
    reference plans actually paid for, shrunk by test.c_resource to force
    resource sharing between workflows.

    :return: (jobs, cloud_resources)
    """
    timeslot = test.time_slot
    bandwidth = test.bandwidth
    powers = []
    prices = []
    numbers = []
    for r in test.resource_array:
        powers.append(r[0])
        prices.append(r[1])
        numbers.append(r[2])

    workload_len = test.workload_len

    # ----------------------- End of loading needed things.

    # Preparing the resources in the cloud:

    def type_of_resource(r_id):
        # Map a flat resource index to its type index in `numbers`.
        limit = 0
        for p in range(len(numbers)):
            limit += numbers[p]
            if r_id < limit:
                return p
        # BUG FIX: the original had `return -1` in an else-branch INSIDE the
        # loop, so any id beyond the first type's range returned -1 on the
        # very first iteration and was miscounted into n[-1].  -1 (unknown)
        # is only correct after every type has been checked.
        return -1

    # Count, per type, how many instances the reference plans actually used
    # (non-zero cost), then shrink by c_resource.
    n = [0] * len(numbers)
    for i in range(workload_len):
        resources = resources_set[i]
        for r in range(0, resources.len):
            if resources.resource_cost(r) != 0:
                n[type_of_resource(r)] += 1

    c_resource = test.c_resource  # the best was 0.8 in the tests # 0.4..1.2
    for i in range(len(n)):
        n[i] = int(n[i] * c_resource)

    power_list, price_list, timeslot_list = [], [], []
    for i in range(len(test.resource_array)):
        power_list += [powers[i]] * n[i]
        price_list += [prices[i]] * n[i]
        # TODO: Tests must be changed, but it works for now (in case of change: both planner and main):
        timeslot_list += [60 * timeslot] * n[i]

    resource_spec = (power_list, price_list, timeslot_list)

    cloud_resources = Definitions.Resources.CostAwareResources(resource_spec[0], resource_spec[1], resource_spec[2],
                                                               bandwidth)

    # -------- Making a multi-workflow list, which contains all workflows (they will schedule together)
    jobs = []
    for i in range(workload_len):
        if constraint[i] is Constraint.deadline:
            graph_set[i].makespan = makespan_list[i]  # resources_set[i].makespan
        else:
            graph_set[i].cost = cost_list[i]  # resources_set[i].plan_cost
            graph_set[i].makespan = makespan_list[i]  # resources_set[i].makespan

        prev_resources = resources_set[i]

        job_item = JobItem(copy.deepcopy(job[i]), constraint[i],
                           constraint_values[i], cloud_resources, graph_set[i], prev_resources)
        jobs.append(job_item)

    # NOTE: the original computed per-job gap rates (gap_rate, s, iterator,
    # ref_s) here and discarded them; that dead bookkeeping was removed.
    return jobs, cloud_resources
303 |
def fair_policy(workload_len, jobs, cloud_resources,
                show_online_schedule=False, arrivals=False):
    """
    Online multi-workflow scheduling loop: at each step pick the most
    critical ready job and let its scheduler place its next task.

    :param workload_len: number of jobs in *jobs*.
    :param jobs: list of JobItem, each exposing .scheduler and .critical_now.
    :param cloud_resources: shared resource pool all jobs schedule onto.
    :param show_online_schedule: plot the partial schedule after every step.
    :param arrivals: unused in this function body; kept for signature parity
        with the other policies.
    """
    # scheduling dummy tasks (get rid of them!):
    for i in range(workload_len):
        jobs[i].scheduler.schedule_next(do_head_nodes=True)
        cloud_resources.costs = cloud_resources.price_of_each_graph()
        # current_critical[i] = jobs[i].critical_now

    figure_number = 1

    # MAIN WHILE of Scheduler:
    current_time = 0
    while any([not job.scheduler.finished for job in jobs]):
        # Refresh each job's remaining budget from the accumulated cloud cost.
        cloud_resources.costs = cloud_resources.price_of_each_graph()
        for i in range(len(jobs)):
            job = jobs[i]
            consumed_cost = cloud_resources.costs[job.g.name]
            job.scheduler.remaining_budget = job.scheduler.total_budget - consumed_cost
            job.scheduler.recalculate_sub_budget()

        # If no job has a ready task now, advance the clock to the earliest
        # next event across all jobs.
        while all([job.scheduler.next_ready_task(current_time)==-1 for job in jobs]):
            current_time = min([x for x in [job.scheduler.next_event(current_time) for job in jobs] if not x is None])
        ready_list = [i for i, job in enumerate(jobs) if job.scheduler.next_ready_task(current_time) != -1]

        # The ready job with the highest criticality wins this round.
        most_critical = max([(jobs[ii].critical_now, ii) for ii in ready_list])[1]

        if show_online_schedule:
            show_schedule(cloud_resources, figure_number, current_time)
            figure_number += 1

        job = jobs[most_critical]

        job.scheduler.schedule_next(do_head_nodes=True, arrival_time=current_time)
337 |
338 |
def fair_policy_old(show_online_schedule=False, arrivals=False):
    """
    Legacy rotating-ready-list policy: pick the job with the LOWEST
    criticality value each round and schedule its next task.

    NOTE(review): `workload_len`, `jobs` and `cloud_resources` are read as
    free names this module never defines, so calling this function raises
    NameError as-is.  It is retained for reference only; fair_policy() is
    the maintained variant (which selects by HIGHEST criticality).
    """
    # BUG FIX: the except-handler below used `sys` and `os`, neither of
    # which is imported at module level in this file, so the handler itself
    # raised NameError.  Import them locally so error reporting works.
    import os
    import sys
    try:
        # scheduling dummy tasks (get rid of them!):
        for i in range(workload_len):
            jobs[i].scheduler.schedule_next(do_head_nodes=True)
            cloud_resources.costs = cloud_resources.price_of_each_graph()
            # current_critical[i] = jobs[i].critical_now

        figure_number = 1
        ready_list = list(range(workload_len))

        # MAIN WHILE of Scheduler:
        arrival_time = 0
        while ready_list:
            # arrival_time += 10 # -------------------------------------------
            # Refresh remaining budgets from the accumulated cloud cost.
            cloud_resources.costs = cloud_resources.price_of_each_graph()
            for i in range(len(jobs)):
                job = jobs[i]
                consumed_cost = cloud_resources.costs[job.g.name]
                job.scheduler.remaining_budget = job.scheduler.total_budget - consumed_cost
                job.scheduler.recalculate_sub_budget()

            # Linear scan for the ready job with the smallest criticality.
            most_critical = -1
            criticality = 100
            ready_list_index = -1
            for index, ii in enumerate(ready_list):
                job = jobs[ii]
                current_critical = job.critical_now
                if current_critical < criticality:
                    criticality = current_critical
                    most_critical = ii
                    ready_list_index = index

            if show_online_schedule:
                show_schedule(cloud_resources, figure_number)
                figure_number += 1
            job_index = most_critical  # ready_list[most_critical]
            job = jobs[job_index]

            del ready_list[ready_list_index]

            job.scheduler.schedule_next(do_head_nodes=True, arrival_time=arrival_time)

            # Unfinished jobs rotate back into the ready list.
            if job.scheduler.finished:
                continue
            else:
                ready_list.append(job_index)
        return
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        raise e
392 |
def show_results(cloud_resources, cost_list, makespan_list, constraint,
                 constraint_values, jobs, graph_set, names, workload_len, resources_set):
    """
    Print a per-job comparison table (reference plan vs shared-cloud run)
    followed by summary metrics: overall cloud cost, separate-run cost,
    unfairness U and the cloud gap ratio.
    """
    # ------------ printing the result of scheduling:
    print()
    costs = cloud_resources.price_of_each_graph()
    sum_separate = 0
    s_e = []  # per-job rate of the UNconstrained metric; feeds the unfairness U
    print('+---+----------+--------+--------+--------+---------+--------+--------+------+------+')
    print('|job|constraint| value | ms old | ms new |prev cost|new cost|gap-rate|c-rate|m-rate|')
    print('+---+----------+--------+--------+--------+---------+--------+--------+------+------+')
    for i in range(len(jobs)):
        prev_makespan = makespan_list[i]  # resources_set[i].makespan
        # A job whose exit task never got scheduled has no makespan to report.
        if graph_set[i].endID not in cloud_resources.job_task_schedule[graph_set[i].name]:
            print("|{:3d}|problem!".format(i))
            continue
        cloud_makespan = cloud_resources.job_task_schedule[graph_set[i].name][graph_set[i].endID].EFT
        prev_cost = cost_list[i]  # resources_set[i].plan_cost
        cloud_cost = costs[graph_set[i].name]
        m_rate = prev_makespan / cloud_makespan
        c_rate = prev_cost / cloud_cost
        # For the constrained metric, report constraint_value / achieved
        # instead of the reference-plan ratio.
        if constraint[i] is Constraint.deadline:
            c = ' Deadline '
            m_rate = constraint_values[i] / cloud_makespan
            s_e.append(c_rate)
        else:
            c = ' Budget '
            c_rate = constraint_values[i] / cloud_cost
            s_e.append(m_rate)
        print('|{:3d}|{}|{:8.3f}|{:8.3f}|{:8.3f}'
              '|{:9.0f}|{:8.2f}|{:8.5f}|{:6.4f}|{:6.4f}|'
              ''.format(i, c, constraint_values[i], prev_makespan, cloud_makespan,
                        prev_cost, cloud_cost, resources_set[i].gap_rate,
                        c_rate, m_rate))
        # (removed dead locals deadline/budget/job_name/job_size that the
        # original assigned every iteration and never used)
        sum_separate += cost_list[i]  # resources_set[i].plan_cost
    print('+---+----------+--------+--------+--------+---------+--------+--------+------+------+')

    # Unfairness U: mean absolute deviation of the unconstrained-metric rates.
    A = sum(s_e) / workload_len
    sigma_u = 0
    for se in s_e:
        sigma_u += abs(se - A)
    U = sigma_u / workload_len
    print()
    print("Overall Cloud Cost:{:6.3f}".format(cloud_resources.plan_cost))
    print("Separate Runs Cost:{:6.3f}".format(sum_separate))
    print("\nUnfairness:{:8.5f}".format(U))

    cloud_resources_gap_rate = cloud_resources.gap_rate
    print("\nCloud gap-ratio:{:8.5f}".format(cloud_resources_gap_rate))
451 |
def is_successful(cloud_resources, cost_list, makespan_list, constraint,
                  constraint_values, jobs, graph_set, names, workload_len):
    """
    Check whether the shared-cloud schedule satisfied every job's constraint
    and cost no more than the sum of the separate reference runs.

    Dead bookkeeping the original carried over from show_results (s_e, A,
    sigma_u, U, deadline, budget, job_name, job_size and the unused top-level
    rate computations) has been removed; the printed diagnostics and the
    return value are unchanged.

    :return: True on success; False as soon as any constraint is violated or
        the combined cloud cost exceeds the separate total.
    """
    costs = cloud_resources.price_of_each_graph()
    sum_separate = 0
    for i in range(len(jobs)):
        # A job whose exit task never got scheduled cannot be evaluated.
        if graph_set[i].endID not in cloud_resources.job_task_schedule[graph_set[i].name]:
            print("|{:3d}|problem!".format(i))
            continue
        cloud_makespan = cloud_resources.job_task_schedule[graph_set[i].name][graph_set[i].endID].EFT
        cloud_cost = costs[graph_set[i].name]
        if constraint[i] is Constraint.deadline:
            c = ' Deadline '
            m_rate = constraint_values[i] / cloud_makespan
            if m_rate < 1:
                print(f'it is {c} constrained, m_rate is {m_rate} -- constraint_values: {constraint_values[i]}, cloud_makespan: {cloud_makespan}')
                return False
        else:
            c = ' Budget '
            c_rate = constraint_values[i] / cloud_cost
            if c_rate < 1:
                print(f'it is {c} constrained, c_rate is {c_rate} -- constraint_values: {constraint_values[i]}, cloud_cost: {cloud_cost}')
                return False

        sum_separate += cost_list[i]  # resources_set[i].plan_cost

    # Success also requires that sharing the cloud was no more expensive than
    # running every job on its own reference plan.
    if cloud_resources.plan_cost > sum_separate:
        return False
    return True
--------------------------------------------------------------------------------
/main1.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import random
3 | import sys
4 | import socket
5 |
6 | # import math
7 | import pickle
8 |
9 | import os
10 |
11 | from Definitions.MultiWorkflow.JobList import Constraint, JobItem
12 | import Definitions.Resources
13 | import copy
14 | import db.definitions
15 | import db.reader
16 | import db.writer
17 | import db.files
18 | from os.path import join
19 |
20 |
21 | def main(args):
22 | try:
23 | if len(args) < 1:
24 | print('Required command line arguments are not specified\n'
25 | ' usage: python main.py testname dbfilename filenamepart start_number number_of_test_sets policy')
26 | exit()
27 | test_name = args[0]
28 | database_file = args[1]
29 | file_name_part = args[2]
30 | start = int(args[3])
31 | file_number = int(args[4])
32 | policy = args[5]
33 |
34 | # test directory name comes from the test_name
35 | test_directory = join('../plans', test_name)
36 |
37 | file_names = db.files.file_list(test_directory, file_name_part, start, file_number)
38 | file_list = []
39 | for f in file_names:
40 | file_list.append(join(test_directory, f))
41 |
42 | if len(file_list) == 0:
43 | print("No input file")
44 | exit()
45 | # ----------------------- Retrieving Everything needed:
46 | numbers = []
47 | resources_set = []
48 | graph_set = []
49 | makespan_list = []
50 | cost_list = []
51 | constraint_values = []
52 | constraint = []
53 | job = []
54 | names = []
55 | test = 0
56 |
57 | for dumb_file in file_list:
58 | from_retrieved = pickle.load(open(dumb_file, 'rb'))
59 |
60 | # numbers: number of resources (numbers2: number of resources for this schedule)
61 | test, numbers2, resources_set2, graph_set2, makespan_list2, cost_list2, constraint_values2,\
62 | constraint2, job2, names2 = from_retrieved
63 |
64 | numbers += numbers2
65 | resources_set += resources_set2
66 | graph_set += graph_set2
67 | makespan_list += makespan_list2
68 | cost_list += cost_list2
69 | constraint_values += constraint_values2
70 | constraint += constraint2
71 | job += job2
72 | names += names2
73 |
74 | # --------------
75 | to_do = list(range(len(names)))
76 | random.shuffle(to_do)
77 | # --------------
78 |
79 | reader = db.reader.Reader(database_file)
80 | rows = reader.read_test(test.test_name)
81 | row = rows.fetchone()
82 | test = db.definitions.Test(row)
83 | timeslot = test.time_slot
84 | bandwidth = test.bandwidth
85 | # workload_len = test.workload_len
86 | powers = []
87 | prices = []
88 | numbers = []
89 | for r in test.resource_array:
90 | powers.append(r[0])
91 | prices.append(r[1])
92 | numbers.append(r[2])
93 | reader.connection.close()
94 |
95 | host_name = socket.gethostname()
96 |
97 | print("Hostname: {}".format(host_name))
98 |
99 | test.workload_len = len(to_do)
100 | workload_len = test.workload_len
101 |
102 | # ----------------------- End of loading needed things.
103 |
104 | # Preparing the resources in the cloud:
105 |
106 | def type_of_resource(r_id):
107 | limit = 0
108 | for p in range(len(numbers)):
109 | limit += numbers[p]
110 | if r_id < limit:
111 | return p
112 | else:
113 | return -1
114 |
115 | n = [0] * len(numbers)
116 | for jj in range(workload_len):
117 | i = to_do[jj]
118 | resources = resources_set[i]
119 | for r in range(0, resources.len):
120 | if resources.resource_cost(r) != 0:
121 | n[type_of_resource(r)] += 1
122 |
123 | # TODO: decreasing resources, to force efficient use of resources!
124 | c_resource = 0.8 # 0.4..1.2
125 | for i in range(len(n)):
126 | n[i] = int(n[i] * c_resource)
127 |
128 | power_list, price_list, timeslot_list = [], [], []
129 | for i in range(len(test.resource_array)):
130 | power_list += [powers[i]] * n[i]
131 | price_list += [prices[i]] * n[i]
132 | # TODO: Tests must be changed, but it works for now (in case of change: both planner and main):
133 | timeslot_list += [60 * timeslot] * n[i]
134 |
135 | resource_spec = (power_list, price_list, timeslot_list)
136 |
137 | # resource_spec = ([power1] * n[0] + [power2] * n[1] + [power3] * n[2],
138 | # [price1] * n[0] + [price2] * n[1] + [price3] * n[2],
139 | # [timeslot] * (n[0] + n[1] + n[2]))
140 |
141 | cloud_resources = Definitions.Resources.CostAwareResources(resource_spec[0], resource_spec[1], resource_spec[2],
142 | bandwidth)
143 |
144 | # -------- Making a multi-workflow list, which contains all workflows (they will schedule together)
145 | jobs = []
146 | for jj in range(workload_len):
147 | i = to_do[jj]
148 | if constraint[i] is Constraint.deadline:
149 | graph_set[i].makespan = makespan_list[i] # resources_set[i].makespan
150 | else:
151 | graph_set[i].cost = cost_list[i] # resources_set[i].plan_cost
152 | graph_set[i].makespan = makespan_list[i] # resources_set[i].makespan
153 |
154 | prev_resources = resources_set[i]
155 |
156 | job_item = JobItem(copy.deepcopy(job[i]), constraint[i],
157 | constraint_values[i], cloud_resources, graph_set[i], prev_resources)
158 | jobs.append(job_item)
159 |
160 | print("==")
161 | # prev_cloud_cost = 0
162 | # previously_scheduled_graph = -1
163 |
164 | current_critical = [0] * workload_len
165 |
166 | # gap-rate calculation:
167 | gap_rate = [0] * workload_len
168 | s = gap_rate[:]
169 | sum_task_number = sum(map(lambda graph: len(graph.tasks), graph_set))
170 | for jj in range(workload_len):
171 | i = to_do[jj]
172 | gap_rate[jj] = resources_set[i].gap_rate
173 | s[jj] = len(graph_set[i].tasks) / (gap_rate[jj] * sum_task_number)
174 | iterator = min(s)
175 | ref_s = s[:]
176 |
177 | # end of gap-rate calculation
178 |
179 | # ===============================================================================================================
180 | # ===============================================================================================================
181 | # ===============================================================================================================
182 | # ----------------------- START THE MAIN PHASE: (with different policies as functions):
183 | def prr_policy():
184 | try:
185 | # scheduling dummy tasks (get rid of them!):
186 | for i in range(workload_len):
187 | jobs[i].scheduler.schedule_next(do_head_nodes=True)
188 | cloud_resources.costs = cloud_resources.price_of_each_graph()
189 | current_critical[i] = jobs[i].critical_now
190 |
191 | # MAIN WHILE of Scheduler:
192 | while True:
193 | cloud_resources.costs = cloud_resources.price_of_each_graph()
194 | for i in range(len(jobs)):
195 | job = jobs[i]
196 | consumed_cost = cloud_resources.costs[job.g.name]
197 | job.scheduler.remaining_budget = job.scheduler.total_budget - consumed_cost
198 | job.scheduler.recalculate_sub_budget()
199 |
200 | max_s = max(s)
201 | if max_s <= 0:
202 | if max_s == -1e50:
203 | break
204 | for j in range(workload_len):
205 | if ref_s[j] != -1e50:
206 | s[j] += ref_s[j]
207 | else:
208 | s[j] = ref_s[j]
209 | epsilon = 0.00000002
210 | for k in range(workload_len):
211 | if abs(max_s - s[k]) < epsilon:
212 | j = k
213 | break
214 |
215 | s[j] -= iterator
216 |
217 | critical_job = jobs[j]
218 |
219 | first_task_in_round = True
220 | previous_resource = -1
221 | while True:
222 | if critical_job.scheduler.finished:
223 | s[j] = -1e50
224 | ref_s[j] = -1e50
225 | break
226 |
227 | eft, cost, resource_id = critical_job.scheduler.schedule_next(do_head_nodes=True)
228 | if first_task_in_round:
229 | if cost == 0:
230 | break
231 | first_task_in_round = False
232 | previous_resource = resource_id
233 | elif previous_resource == resource_id and cost == 0:
234 | if s[j] > 0:
235 | s[j] -= iterator
236 | continue
237 | else:
238 | break
239 | except Exception as e:
240 | exc_type, exc_obj, exc_tb = sys.exc_info()
241 | fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
242 | print(exc_type, fname, exc_tb.tb_lineno)
243 | raise e
244 |
245 | # ===============================================================================================================
        def rr_policy():
            """Round-robin policy: each unfinished workflow schedules one
            round of tasks, then is rotated to the back of the queue.

            Closes over `jobs`, `workload_len`, `current_critical` and
            `cloud_resources` from the enclosing scope.
            """
            try:
                # scheduling dummy tasks (get rid of them!):
                for i in range(workload_len):
                    jobs[i].scheduler.schedule_next(do_head_nodes=True)
                    cloud_resources.costs = cloud_resources.price_of_each_graph()
                    current_critical[i] = jobs[i].critical_now

                # MAIN WHILE of Scheduler:
                # pop() serves from the back, so start reversed to serve
                # workflows in ascending index order.
                ready_list = list(reversed(range(workload_len)))
                while ready_list:
                    # NOTE(review): leftover progress/debug output.
                    print(".")
                    j = ready_list.pop()
                    critical_job = jobs[j]
                    if critical_job.scheduler.finished:
                        continue
                    else:
                        # Not done yet: re-queue at the front (end of the
                        # service order) — this is the round-robin rotation.
                        ready_list.insert(0, j)

                    # Refresh every job's remaining budget from what it has
                    # already spent on the shared pool.
                    cloud_resources.costs = cloud_resources.price_of_each_graph()
                    for i in range(len(jobs)):
                        job = jobs[i]
                        consumed_cost = cloud_resources.costs[job.g.name]
                        job.scheduler.remaining_budget = job.scheduler.total_budget - consumed_cost
                        job.scheduler.recalculate_sub_budget()

                    # One round: ends immediately if the first task is free,
                    # otherwise keeps scheduling while subsequent tasks stay
                    # free on the same resource.
                    first_task_in_round = True
                    previous_resource = -1
                    while not critical_job.scheduler.finished:
                        eft, cost, resource_id = critical_job.scheduler.schedule_next(do_head_nodes=True)
                        if first_task_in_round:
                            if cost == 0:
                                break
                            first_task_in_round = False
                            previous_resource = resource_id
                        elif previous_resource == resource_id and cost == 0:
                            continue
                        else:
                            break
            except Exception as e:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                print(exc_type, fname, exc_tb.tb_lineno)
                raise e
290 | # ===============================================================================================================
291 | def fcfs_policy():
292 | try:
293 | # scheduling dummy tasks (get rid of them!):
294 | for i in range(workload_len):
295 | jobs[i].scheduler.schedule_next(do_head_nodes=True)
296 | cloud_resources.costs = cloud_resources.price_of_each_graph()
297 | current_critical[i] = jobs[i].critical_now
298 |
299 | # MAIN WHILE of Scheduler:
300 | ready_list = list(reversed(range(workload_len)))
301 | while len(ready_list) > 0:
302 | j = ready_list.pop()
303 | critical_job = jobs[j]
304 | if critical_job.scheduler.finished:
305 | continue
306 | else:
307 | ready_list.append(j)
308 |
309 | cloud_resources.costs = cloud_resources.price_of_each_graph()
310 | for i in range(len(jobs)):
311 | job = jobs[i]
312 | consumed_cost = cloud_resources.costs[job.g.name]
313 | job.scheduler.remaining_budget = job.scheduler.total_budget - consumed_cost
314 | job.scheduler.recalculate_sub_budget()
315 |
316 | first_task_in_round = True
317 | previous_resource = -1
318 | while not critical_job.scheduler.finished:
319 | eft, cost, resource_id = critical_job.scheduler.schedule_next(do_head_nodes=True)
320 | if first_task_in_round:
321 | if cost == 0:
322 | break
323 | first_task_in_round = False
324 | previous_resource = resource_id
325 | elif previous_resource == resource_id and cost == 0:
326 | continue
327 | else:
328 | break
329 | except Exception as e:
330 | exc_type, exc_obj, exc_tb = sys.exc_info()
331 | fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
332 | print(exc_type, fname, exc_tb.tb_lineno)
333 | raise e
334 | # ===============================================================================================================
335 | def fair_policy():
336 | try:
337 | # scheduling dummy tasks (get rid of them!):
338 | for i in range(workload_len):
339 | jobs[i].scheduler.schedule_next(do_head_nodes=True)
340 | cloud_resources.costs = cloud_resources.price_of_each_graph()
341 | # current_critical[i] = jobs[i].critical_now
342 |
343 | ready_list = list(range(workload_len))
344 | # MAIN WHILE of Scheduler:
345 | while ready_list:
346 | cloud_resources.costs = cloud_resources.price_of_each_graph()
347 | for i in range(len(jobs)):
348 | job = jobs[i]
349 | consumed_cost = cloud_resources.costs[job.g.name]
350 | job.scheduler.remaining_budget = job.scheduler.total_budget - consumed_cost
351 | job.scheduler.recalculate_sub_budget()
352 |
353 | most_critical = -1
354 | criticality = 100
355 | ready_list_index = -1
356 | for index, ii in enumerate(ready_list):
357 | job = jobs[ii]
358 | current_critical = job.critical_now
359 | if current_critical < criticality:
360 | criticality = current_critical
361 | most_critical = ii
362 | ready_list_index = index
363 |
364 | job_index = most_critical # ready_list[most_critical]
365 | job = jobs[job_index]
366 | del ready_list[ready_list_index]
367 |
368 | job.scheduler.schedule_next(do_head_nodes=True)
369 |
370 | if job.scheduler.finished:
371 | continue
372 | else:
373 | ready_list.append(job_index)
374 | return
375 | except Exception as e:
376 | exc_type, exc_obj, exc_tb = sys.exc_info()
377 | fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
378 | print(exc_type, fname, exc_tb.tb_lineno)
379 | raise e
380 | # ===============================================================================================================
381 | if policy == 'prr':
382 | prr_policy()
383 | elif policy == 'rr':
384 | rr_policy()
385 | elif policy == 'fcfs':
386 | fcfs_policy()
387 | elif policy == 'fair':
388 | fair_policy()
389 | elif policy in ['zhao', 'interleaving']:
390 | print('Policy {} has not yet been implemented...'.format(policy))
391 | exit()
392 | else:
393 | print("Policy must be in {'prr', 'rr', 'fcfs', 'fair', 'zhao', 'interleaving'}")
394 | exit()
395 | # ===============================================================================================================
396 |
397 | writer = db.writer.Writer(database_file)
398 | result_id = writer.write_result_head(test.test_name)
399 |
400 | # ------------ printing the result of scheduling:
401 | print()
402 | costs = cloud_resources.price_of_each_graph()
403 | sum_separate = 0
404 | s_e = []
405 | print('+---+----------+--------+--------+--------+---------+--------+--------+------+------+')
406 | print('|job|constraint| value | ms old | ms new |prev cost|new cost|gap-rate|c-rate|m-rate|')
407 | print('+---+----------+--------+--------+--------+---------+--------+--------+------+------+')
408 | for jj in range(len(jobs)):
409 | i = to_do[jj]
410 | prev_makespan = makespan_list[i] # resources_set[i].makespan
411 | if graph_set[i].endID not in cloud_resources.job_task_schedule[graph_set[i].name]:
412 | print("|{:3d}|problem!".format(i))
413 | continue
414 | cloud_makespan = cloud_resources.job_task_schedule[graph_set[i].name][graph_set[i].endID].EFT
415 | prev_cost = cost_list[i] # resources_set[i].plan_cost
416 | cloud_cost = costs[graph_set[i].name]
417 | m_rate = prev_makespan / cloud_makespan
418 | c_rate = prev_cost / cloud_cost
419 | if constraint[i] is Constraint.deadline:
420 | c = ' Deadline '
421 | m_rate = constraint_values[i] / cloud_makespan
422 | s_e.append(c_rate)
423 | else:
424 | c = ' Budget '
425 | c_rate = constraint_values[i] / cloud_cost
426 | s_e.append(m_rate)
427 | print('|{:3d}|{}|{:8.3f}|{:8.3f}|{:8.3f}'
428 | '|{:9.0f}|{:8.2f}|{:8.5f}|{:6.4f}|{:6.4f}|'
429 | ''.format(i, c, constraint_values[i], prev_makespan, cloud_makespan,
430 | prev_cost, cloud_cost, resources_set[i].gap_rate,
431 | c_rate, m_rate))
432 | deadline = -1
433 | budget = -1
434 | if constraint[i] is Constraint.deadline:
435 | deadline = constraint_values[i]
436 | else:
437 | budget = constraint_values[i]
438 |
439 | job_name = names[i]
440 | job_size = len(graph_set[i].tasks) - 2
441 | result_object = db.definitions.Result(result_id, test.test_name, policy, c.strip(),
442 | deadline, budget, prev_makespan, cloud_makespan, prev_cost, cloud_cost,
443 | resources_set[i].gap_rate, c_rate, m_rate, job_name, job_size)
444 | writer.write_results(result_object, False)
445 |
446 | sum_separate += cost_list[i] # resources_set[i].plan_cost
447 | writer.commit()
448 | print('+---+----------+--------+--------+--------+---------+--------+--------+------+------+')
449 |
450 | A = sum(s_e) / workload_len
451 | sigma_u = 0
452 | for se in s_e:
453 | sigma_u += abs(se - A)
454 | U = sigma_u / workload_len
455 | print()
456 | print("Overall Cloud Cost:{:6.3f}".format(cloud_resources.plan_cost))
457 | print("Separate Runs Cost:{:6.3f}".format(sum_separate))
458 | print("\nUnfairness:{:8.5f}".format(U))
459 |
460 | cloud_resources_gap_rate = cloud_resources.gap_rate
461 | print("\nCloud gap-ratio:{:8.5f}".format(cloud_resources_gap_rate))
462 |
463 | writer.change_result_head(result_id, test.test_name, sum_separate,
464 | U, workload_len, cloud_resources_gap_rate, timeslot_list[0], c_resource)
465 |
466 | # cloud_resources.write_schedule(database_file, test.test_name, 'multi', policy, workload_len)
467 | except Exception as e:
468 | exc_type, exc_obj, exc_tb = sys.exc_info()
469 | fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
470 | print(exc_type, fname, exc_tb.tb_lineno)
471 | raise e
472 |
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except Exception:
        # Top-level boundary: report the failing file and line, then fall
        # through (no re-raise) so the process exits without a traceback.
        err_type, _, err_tb = sys.exc_info()
        failing_file = os.path.split(err_tb.tb_frame.f_code.co_filename)[1]
        print(err_type, failing_file, err_tb.tb_lineno)
--------------------------------------------------------------------------------
/main2.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import random
3 | import sys
4 | import socket
5 |
6 | # import math
7 | import pickle
8 |
9 | import os
10 |
11 | from Definitions.MultiWorkflow.JobList import Constraint, JobItem
12 | import Definitions.Resources
13 | import copy
14 | import db.definitions
15 | import db.reader
16 | import db.writer
17 | import db.files
18 | from os.path import join
19 |
20 |
21 | def main(args):
22 | try:
23 | if len(args) < 1:
24 | print('Required command line arguments are not specified\n'
25 | ' usage: python main1.py testname dbfilename filenamepart start_number number_of_test_sets policy')
26 | exit()
27 | test_name = args[0]
28 | database_file = args[1]
29 | file_name_part = args[2]
30 | start = int(args[3])
31 | file_number = int(args[4])
32 | policy = args[5]
33 |
34 | # test directory name comes from the test_name
35 | test_directory = join('../plans', test_name)
36 |
37 | file_names = db.files.file_list(test_directory, file_name_part, start, file_number)
38 | file_list = []
39 | for f in file_names:
40 | file_list.append(join(test_directory, f))
41 |
42 | if len(file_list) == 0:
43 | print("No input file")
44 | exit()
45 | # ----------------------- Retrieving Everything needed:
46 | numbers = []
47 | resources_set = []
48 | graph_set = []
49 | makespan_list = []
50 | cost_list = []
51 | constraint_values = []
52 | constraint = []
53 | job = []
54 | names = []
55 | test = 0
56 |
57 | for dumb_file in file_list:
58 | from_retrieved = pickle.load(open(dumb_file, 'rb'))
59 |
60 | # test, numbers, resources_set, graph_set, makespan_list, cost_list, constraint_values,\
61 | # constraint, job, names = from_retrieved
62 | test, numbers2, resources_set2, graph_set2, makespan_list2, cost_list2, constraint_values2,\
63 | constraint2, job2, names2 = from_retrieved
64 |
65 | numbers += numbers2
66 | resources_set += resources_set2
67 | graph_set += graph_set2
68 | makespan_list += makespan_list2
69 | cost_list += cost_list2
70 | constraint_values += constraint_values2
71 | constraint += constraint2
72 | job += job2
73 | names += names2
74 |
75 | # --------------
76 | to_do = list(range(len(names)))
77 | random.shuffle(to_do)
78 | # --------------
79 |
80 | reader = db.reader.Reader(database_file)
81 | rows = reader.read_test(test.test_name)
82 | row = rows.fetchone()
83 | test = db.definitions.Test(row)
84 | timeslot = test.time_slot
85 | bandwidth = test.bandwidth
86 | # workload_len = test.workload_len
87 | powers = []
88 | prices = []
89 | numbers = []
90 | for r in test.resource_array:
91 | powers.append(r[0])
92 | prices.append(r[1])
93 | numbers.append(r[2])
94 | reader.connection.close()
95 |
96 | host_name = socket.gethostname()
97 |
98 | print("Hostname: {}".format(host_name))
99 |
100 | test.workload_len = len(to_do)
101 | workload_len = test.workload_len
102 |
103 | # ----------------------- End of loading needed things.
104 |
105 | # Preparing the resources in the cloud:
106 |
107 | def type_of_resource(r_id):
108 | limit = 0
109 | for p in range(len(numbers)):
110 | limit += numbers[p]
111 | if r_id < limit:
112 | return p
113 | else:
114 | return -1
115 |
        # Count, per resource type, how many instances the individual plans
        # actually paid for; that sizes the shared pool.
        n = [0] * len(numbers)
        for jj in range(workload_len):
            i = to_do[jj]
            resources = resources_set[i]
            for r in range(0, resources.len):
                if resources.resource_cost(r) != 0:
                    n[type_of_resource(r)] += 1

        # TODO: decreasing resources, to force efficient use of resources!
        # Pool scale factor: 1.2x the summed per-plan instance counts.
        c_resource = 1.2
        for i in range(len(n)):
            n[i] = int(n[i] * c_resource)

        # Expand per-type counts into flat per-instance spec lists.
        power_list, price_list, timeslot_list = [], [], []
        for i in range(len(test.resource_array)):
            power_list += [powers[i]] * n[i]
            price_list += [prices[i]] * n[i]
            # TODO: Tests must be changed, but it works for now (in case of change: both planner and main):
            timeslot_list += [60 * timeslot] * n[i]

        resource_spec = (power_list, price_list, timeslot_list)

        # resource_spec = ([power1] * n[0] + [power2] * n[1] + [power3] * n[2],
        #                  [price1] * n[0] + [price2] * n[1] + [price3] * n[2],
        #                  [timeslot] * (n[0] + n[1] + n[2]))

        cloud_resources = Definitions.Resources.CostAwareResources(resource_spec[0], resource_spec[1], resource_spec[2],
                                                                   bandwidth)

        # -------- Making a multi-workflow list, which contains all workflows (they will schedule together)
        jobs = []
        for jj in range(workload_len):
            i = to_do[jj]
            # Seed each graph with the makespan/cost its stand-alone plan
            # achieved, so the shared run can be compared against it.
            if constraint[i] is Constraint.deadline:
                graph_set[i].makespan = makespan_list[i]  # resources_set[i].makespan
            else:
                graph_set[i].cost = cost_list[i]  # resources_set[i].plan_cost
                graph_set[i].makespan = makespan_list[i]  # resources_set[i].makespan

            prev_resources = resources_set[i]

            # deepcopy: the pickled job must not be mutated by this run.
            job_item = JobItem(copy.deepcopy(job[i]), constraint[i],
                               constraint_values[i], cloud_resources, graph_set[i], prev_resources)
            jobs.append(job_item)

        print("==")
        # prev_cloud_cost = 0
        # previously_scheduled_graph = -1

        # Per-workflow criticality snapshot, filled in by the policies below.
        current_critical = [0] * workload_len

        # gap-rate calculation:
        # s[jj] is workflow jj's scheduling share: proportional to its task
        # count, inversely proportional to its planned gap-rate.
        gap_rate = [0] * workload_len
        s = gap_rate[:]
        sum_task_number = sum(map(lambda graph: len(graph.tasks), graph_set))
        for jj in range(workload_len):
            i = to_do[jj]
            gap_rate[jj] = resources_set[i].gap_rate
            s[jj] = len(graph_set[i].tasks) / (gap_rate[jj] * sum_task_number)
        # One "unit" of share spent per scheduled task (the smallest share).
        iterator = min(s)
        ref_s = s[:]

        # end of gap-rate calculation

        # ===============================================================================================================
        # ===============================================================================================================
        # ===============================================================================================================
        # ----------------------- START THE MAIN PHASE: (with different policies as functions):
        def prr_policy():
            """Proportional round-robin policy.

            Each workflow j owns a share s[j] (computed in the enclosing
            scope: proportional to its task count, inversely proportional to
            its planned gap-rate).  The workflow holding the largest
            remaining share schedules next; a share of -1e50 marks a
            finished workflow.  Closes over `jobs`, `workload_len`, `s`,
            `ref_s`, `iterator`, `current_critical` and `cloud_resources`.
            """
            try:
                # scheduling dummy tasks (get rid of them!):
                for i in range(workload_len):
                    jobs[i].scheduler.schedule_next(do_head_nodes=True)
                    cloud_resources.costs = cloud_resources.price_of_each_graph()
                    current_critical[i] = jobs[i].critical_now

                # MAIN WHILE of Scheduler:
                while True:
                    # Refresh every job's remaining budget from what it has
                    # already spent on the shared pool.
                    cloud_resources.costs = cloud_resources.price_of_each_graph()
                    for i in range(len(jobs)):
                        job = jobs[i]
                        consumed_cost = cloud_resources.costs[job.g.name]
                        job.scheduler.remaining_budget = job.scheduler.total_budget - consumed_cost
                        job.scheduler.recalculate_sub_budget()

                    max_s = max(s)
                    if max_s <= 0:
                        # Shares exhausted: stop when every job is finished
                        # (all shares are -1e50), otherwise replenish the
                        # still-running jobs from their reference shares.
                        if max_s == -1e50:
                            break
                        for j in range(workload_len):
                            if ref_s[j] != -1e50:
                                s[j] += ref_s[j]
                            else:
                                s[j] = ref_s[j]
                    # Pick the job whose share equals max_s (float-tolerant
                    # comparison).  NOTE(review): max_s was computed before
                    # the replenish branch above — confirm a match is always
                    # found after replenishment.
                    epsilon = 0.00000002
                    for k in range(workload_len):
                        if abs(max_s - s[k]) < epsilon:
                            j = k
                            break

                    # Scheduling one task costs the job one `iterator` unit.
                    s[j] -= iterator

                    critical_job = jobs[j]

                    first_task_in_round = True
                    previous_resource = -1
                    while True:
                        if critical_job.scheduler.finished:
                            # Mark finished so this job is never picked again.
                            s[j] = -1e50
                            ref_s[j] = -1e50
                            break

                        eft, cost, resource_id = critical_job.scheduler.schedule_next(do_head_nodes=True)
                        if first_task_in_round:
                            # Round ends immediately if the first task is free.
                            if cost == 0:
                                break
                            first_task_in_round = False
                            previous_resource = resource_id
                        elif previous_resource == resource_id and cost == 0:
                            # Free slot on the same resource: keep scheduling,
                            # paying out of this job's share while it lasts.
                            if s[j] > 0:
                                s[j] -= iterator
                            continue
                        else:
                            break
            except Exception as e:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                print(exc_type, fname, exc_tb.tb_lineno)
                raise e
245 |
246 | # ===============================================================================================================
247 | def rr_policy():
248 | try:
249 | # scheduling dummy tasks (get rid of them!):
250 | for i in range(workload_len):
251 | jobs[i].scheduler.schedule_next(do_head_nodes=True)
252 | cloud_resources.costs = cloud_resources.price_of_each_graph()
253 | current_critical[i] = jobs[i].critical_now
254 |
255 | # MAIN WHILE of Scheduler:
256 | ready_list = list(reversed(range(workload_len)))
257 | while ready_list:
258 | j = ready_list.pop()
259 | critical_job = jobs[j]
260 | if critical_job.scheduler.finished:
261 | continue
262 | else:
263 | ready_list.insert(0, j)
264 |
265 | cloud_resources.costs = cloud_resources.price_of_each_graph()
266 | for i in range(len(jobs)):
267 | job = jobs[i]
268 | consumed_cost = cloud_resources.costs[job.g.name]
269 | job.scheduler.remaining_budget = job.scheduler.total_budget - consumed_cost
270 | job.scheduler.recalculate_sub_budget()
271 |
272 | first_task_in_round = True
273 | previous_resource = -1
274 | while not critical_job.scheduler.finished:
275 | eft, cost, resource_id = critical_job.scheduler.schedule_next(do_head_nodes=True)
276 | if first_task_in_round:
277 | if cost == 0:
278 | break
279 | first_task_in_round = False
280 | previous_resource = resource_id
281 | elif previous_resource == resource_id and cost == 0:
282 | continue
283 | else:
284 | break
285 | except Exception as e:
286 | exc_type, exc_obj, exc_tb = sys.exc_info()
287 | fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
288 | print(exc_type, fname, exc_tb.tb_lineno)
289 | raise e
290 | # ===============================================================================================================
291 | def fcfs_policy():
292 | try:
293 | # scheduling dummy tasks (get rid of them!):
294 | for i in range(workload_len):
295 | jobs[i].scheduler.schedule_next(do_head_nodes=True)
296 | cloud_resources.costs = cloud_resources.price_of_each_graph()
297 | current_critical[i] = jobs[i].critical_now
298 |
299 | # MAIN WHILE of Scheduler:
300 | ready_list = list(reversed(range(workload_len)))
301 | while len(ready_list) > 0:
302 | j = ready_list.pop()
303 | critical_job = jobs[j]
304 | if critical_job.scheduler.finished:
305 | continue
306 | else:
307 | ready_list.append(j)
308 |
309 | cloud_resources.costs = cloud_resources.price_of_each_graph()
310 | for i in range(len(jobs)):
311 | job = jobs[i]
312 | consumed_cost = cloud_resources.costs[job.g.name]
313 | job.scheduler.remaining_budget = job.scheduler.total_budget - consumed_cost
314 | job.scheduler.recalculate_sub_budget()
315 |
316 | first_task_in_round = True
317 | previous_resource = -1
318 | while not critical_job.scheduler.finished:
319 | eft, cost, resource_id = critical_job.scheduler.schedule_next(do_head_nodes=True)
320 | if first_task_in_round:
321 | if cost == 0:
322 | break
323 | first_task_in_round = False
324 | previous_resource = resource_id
325 | elif previous_resource == resource_id and cost == 0:
326 | continue
327 | else:
328 | break
329 | except Exception as e:
330 | exc_type, exc_obj, exc_tb = sys.exc_info()
331 | fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
332 | print(exc_type, fname, exc_tb.tb_lineno)
333 | raise e
334 | # ===============================================================================================================
335 | def fair_policy():
336 | try:
337 | # scheduling dummy tasks (get rid of them!):
338 | for i in range(workload_len):
339 | jobs[i].scheduler.schedule_next(do_head_nodes=True)
340 | cloud_resources.costs = cloud_resources.price_of_each_graph()
341 | # current_critical[i] = jobs[i].critical_now
342 |
343 | ready_list = list(range(workload_len))
344 | # MAIN WHILE of Scheduler:
345 | while ready_list:
346 | cloud_resources.costs = cloud_resources.price_of_each_graph()
347 | for i in range(len(jobs)):
348 | job = jobs[i]
349 | consumed_cost = cloud_resources.costs[job.g.name]
350 | job.scheduler.remaining_budget = job.scheduler.total_budget - consumed_cost
351 | job.scheduler.recalculate_sub_budget()
352 |
353 | most_critical = -1
354 | criticality = 100
355 | ready_list_index = -1
356 | for index, ii in enumerate(ready_list):
357 | job = jobs[ii]
358 | current_critical = job.critical_now
359 | if current_critical < criticality:
360 | criticality = current_critical
361 | most_critical = ii
362 | ready_list_index = index
363 |
364 | job_index = most_critical # ready_list[most_critical]
365 | job = jobs[job_index]
366 | del ready_list[ready_list_index]
367 |
368 | job.scheduler.schedule_next(do_head_nodes=True)
369 |
370 | if job.scheduler.finished:
371 | continue
372 | else:
373 | ready_list.append(job_index)
374 | return
375 | except Exception as e:
376 | exc_type, exc_obj, exc_tb = sys.exc_info()
377 | fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
378 | print(exc_type, fname, exc_tb.tb_lineno)
379 | raise e
380 | # ===============================================================================================================
381 | if policy == 'prr':
382 | prr_policy()
383 | elif policy == 'rr':
384 | rr_policy()
385 | elif policy == 'fcfs':
386 | fcfs_policy()
387 | elif policy == 'fair':
388 | fair_policy()
389 | elif policy in ['zhao', 'interleaving']:
390 | print('Policy {} has not yet been implemented...'.format(policy))
391 | exit()
392 | else:
393 | print("Policy must be in {'prr', 'rr', 'fcfs', 'fair', 'zhao', 'interleaving'}")
394 | exit()
395 | # ===============================================================================================================
396 |
397 | writer = db.writer.Writer(database_file)
398 | result_id = writer.write_result_head(test.test_name)
399 |
400 | # ------------ printing the result of scheduling:
401 | print()
402 | costs = cloud_resources.price_of_each_graph()
403 | sum_separate = 0
404 | s_e = []
405 | print('+---+----------+--------+--------+--------+---------+--------+--------+------+------+')
406 | print('|job|constraint| value | ms old | ms new |prev cost|new cost|gap-rate|c-rate|m-rate|')
407 | print('+---+----------+--------+--------+--------+---------+--------+--------+------+------+')
408 | for jj in range(len(jobs)):
409 | i = to_do[jj]
410 | prev_makespan = makespan_list[i] # resources_set[i].makespan
411 | if graph_set[i].endID not in cloud_resources.job_task_schedule[graph_set[i].name]:
412 | print("|{:3d}|problem!".format(i))
413 | continue
414 | cloud_makespan = cloud_resources.job_task_schedule[graph_set[i].name][graph_set[i].endID].EFT
415 | prev_cost = cost_list[i] # resources_set[i].plan_cost
416 | cloud_cost = costs[graph_set[i].name]
417 | m_rate = prev_makespan / cloud_makespan
418 | c_rate = prev_cost / cloud_cost
419 | if constraint[i] is Constraint.deadline:
420 | c = ' Deadline '
421 | m_rate = constraint_values[i] / cloud_makespan
422 | s_e.append(c_rate)
423 | else:
424 | c = ' Budget '
425 | c_rate = constraint_values[i] / cloud_cost
426 | s_e.append(m_rate)
427 | print('|{:3d}|{}|{:8.3f}|{:8.3f}|{:8.3f}'
428 | '|{:9.0f}|{:8.2f}|{:8.5f}|{:6.4f}|{:6.4f}|'
429 | ''.format(i, c, constraint_values[i], prev_makespan, cloud_makespan,
430 | prev_cost, cloud_cost, resources_set[i].gap_rate,
431 | c_rate, m_rate))
432 | deadline = -1
433 | budget = -1
434 | if constraint[i] is Constraint.deadline:
435 | deadline = constraint_values[i]
436 | else:
437 | budget = constraint_values[i]
438 |
439 | job_name = names[i]
440 | job_size = len(graph_set[i].tasks) - 2
441 | result_object = db.definitions.Result(result_id, test.test_name, policy, c.strip(),
442 | deadline, budget, prev_makespan, cloud_makespan, prev_cost, cloud_cost,
443 | resources_set[i].gap_rate, c_rate, m_rate, job_name, job_size)
444 | writer.write_results(result_object, False)
445 |
446 | sum_separate += cost_list[i] # resources_set[i].plan_cost
447 | writer.commit()
448 | print('+---+----------+--------+--------+--------+---------+--------+--------+------+------+')
449 |
450 | A = sum(s_e) / workload_len
451 | sigma_u = 0
452 | for se in s_e:
453 | sigma_u += abs(se - A)
454 | U = sigma_u / workload_len
455 | print()
456 | print("Overall Cloud Cost:{:6.3f}".format(cloud_resources.plan_cost))
457 | print("Separate Runs Cost:{:6.3f}".format(sum_separate))
458 | print("\nUnfairness:{:8.5f}".format(U))
459 |
460 | cloud_resources_gap_rate = cloud_resources.gap_rate
461 | print("\nCloud gap-ratio:{:8.5f}".format(cloud_resources_gap_rate))
462 |
463 | writer.change_result_head(result_id, test.test_name, sum_separate,
464 | U, workload_len, cloud_resources_gap_rate, timeslot_list[0], c_resource)
465 |
466 | # cloud_resources.write_schedule(database_file, test.test_name, 'multi', policy, workload_len)
467 | except Exception as e:
468 | exc_type, exc_obj, exc_tb = sys.exc_info()
469 | fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
470 | print(exc_type, fname, exc_tb.tb_lineno)
471 | raise e
472 |
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    except Exception:
        # Top-level boundary: report the failing file and line, then fall
        # through (no re-raise) so the process exits without a traceback.
        err_type, _, err_tb = sys.exc_info()
        failing_file = os.path.split(err_tb.tb_frame.f_code.co_filename)[1]
        print(err_type, failing_file, err_tb.tb_lineno)
--------------------------------------------------------------------------------