├── tests
├── __init__.py
├── sim.py
├── __main__.py
├── pfair.py
├── generator.py
├── example_end_to_end.py
├── util.py
├── sched.py
├── pedf_spinlocks.py
└── quanta.py
├── example
├── __init__.py
├── schedcat.odp
├── pmo_host=ludwig_background=load_stat=avg.csv
├── __main__.py
├── mapping.py
├── nolock_example_1
├── oh_host=ludwig_scheduler=C-FL-L2-RM_stat=avg.csv
├── overheads.py
├── lock_example_1
├── generator.py
├── driver.py
├── oh_host=ludwig_scheduler=C-FL-L2-RM_locks=MX-Q_stat=avg.csv
├── locking.py
├── lock_example_2
└── nolock_example_2
├── schedcat
├── locking
│ ├── __init__.py
│ ├── linprog
│ │ └── __init__.py
│ └── partition.py
├── model
│ └── __init__.py
├── generator
│ ├── __init__.py
│ └── tasksets.py
├── overheads
│ ├── __init__.py
│ ├── pfair.py
│ ├── fp.py
│ ├── quanta.py
│ └── jlfp.py
├── sched
│ ├── canbus
│ │ ├── __init__.py
│ │ ├── prio_assign.py
│ │ └── broster.py
│ ├── edf
│ │ ├── gfb.py
│ │ ├── bak.py
│ │ ├── bcl.py
│ │ ├── bcl_iterative.py
│ │ ├── da.py
│ │ ├── bar.py
│ │ └── rta.py
│ ├── fp
│ │ └── __init__.py
│ ├── pfair.py
│ ├── __init__.py
│ └── run.py
├── cansim
│ ├── __init__.py
│ └── canbus.py
├── util
│ ├── __init__.py
│ ├── time.py
│ ├── storage.py
│ ├── iter.py
│ └── csv.py
├── __init__.py
├── mapping
│ └── __init__.py
└── sim
│ ├── __init__.py
│ └── edf.py
├── native
├── src
│ ├── canbus
│ │ ├── msgs.cpp
│ │ └── job_completion_stats.cpp
│ ├── edf
│ │ ├── gfb.cpp
│ │ ├── load.cpp
│ │ ├── baker.cpp
│ │ ├── gedf.cpp
│ │ ├── bcl.cpp
│ │ ├── qpa_msrp.cpp
│ │ ├── sim.cpp
│ │ └── bcl_iterative.cpp
│ ├── linprog
│ │ └── varmapperbase.cpp
│ ├── blocking
│ │ ├── linprog
│ │ │ ├── lp_no_progress_fifo.cpp
│ │ │ ├── lp_no_progress_priority.cpp
│ │ │ ├── lp_global_pip.cpp
│ │ │ ├── lp_global_fifo.cpp
│ │ │ ├── lp_sa_gfmlp.cpp
│ │ │ ├── lp_prsb.cpp
│ │ │ └── lp_global_no.cpp
│ │ ├── global-fmlp.cpp
│ │ ├── global-omlp.cpp
│ │ ├── part-omlp.cpp
│ │ ├── nested_cs.cpp
│ │ └── msrp-holistic.cpp
│ ├── schedule_sim.cpp
│ └── cpu_time.cpp
├── include
│ ├── task_io.h
│ ├── stl-hashmap.h
│ ├── linprog
│ │ ├── glpk.h
│ │ ├── cplex.h
│ │ ├── solver.h
│ │ ├── io.h
│ │ └── varmapperbase.h
│ ├── res_io.h
│ ├── schedulability.h
│ ├── edf
│ │ ├── gfb.h
│ │ ├── load.h
│ │ ├── baker.h
│ │ ├── bcl.h
│ │ ├── bcl_iterative.h
│ │ ├── qpa.h
│ │ ├── ffdbf.h
│ │ ├── baruah.h
│ │ ├── gedf.h
│ │ ├── qpa_msrp.h
│ │ ├── sim.h
│ │ ├── rta.h
│ │ ├── la.h
│ │ └── gel_pl.h
│ ├── time-types.h
│ ├── mpcp.h
│ ├── canbus
│ │ ├── can_sim_ifs.h
│ │ ├── job_completion_stats.h
│ │ └── tardiness_stats.h
│ ├── global-pip.h
│ ├── event.h
│ ├── rw-blocking.h
│ ├── math-helper.h
│ ├── cpu_time.h
│ ├── apa_feas.h
│ ├── stl-helper.h
│ ├── stl-io-helper.h
│ ├── lp_pedf_analysis.h
│ ├── sharedres.h
│ ├── nested_cs.h
│ ├── blocking.h
│ └── lp_pedf_lockfree_common.h
└── interface
│ ├── sharedres_types.i
│ ├── sim.i
│ ├── locking.i
│ ├── cansim.i
│ ├── lp_analysis.i
│ └── sched.i
├── .gitignore
├── config.mk.example
└── Makefile
/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /example/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /schedcat/locking/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /schedcat/model/__init__.py: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /schedcat/generator/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /schedcat/overheads/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /schedcat/sched/canbus/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /schedcat/locking/linprog/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /native/src/canbus/msgs.cpp: -------------------------------------------------------------------------------- 1 | #include "canbus/msgs.h" 2 | -------------------------------------------------------------------------------- /schedcat/cansim/__init__.py: -------------------------------------------------------------------------------- 1 | from .native import TaskSet 2 | -------------------------------------------------------------------------------- /schedcat/util/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | schedcat.util: misc. helpers 3 | """ 4 | -------------------------------------------------------------------------------- /schedcat/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | SchedCAT: Schedulability test Collection And Tools 3 | """ 4 | -------------------------------------------------------------------------------- /example/schedcat.odp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brandenburg/schedcat/HEAD/example/schedcat.odp -------------------------------------------------------------------------------- /schedcat/mapping/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | SchedCAT: Schedulability test Collection And Tools 3 | """ 4 | -------------------------------------------------------------------------------- /native/include/task_io.h: -------------------------------------------------------------------------------- 1 | #ifndef TASK_IO_H 2 | #define TASK_IO_H 3 | 4 | std::ostream& operator<<(std::ostream &os, const Task &t); 5 | 6 | #endif 7 | -------------------------------------------------------------------------------- /native/include/stl-hashmap.h: -------------------------------------------------------------------------------- 1 | #ifndef STL_HASHMAP_H_ 2 | #define STL_HASHMAP_H_ 3 | 4 | #include 5 | #include 6 | #define hashmap std::unordered_map 7 | #define hashset std::unordered_set 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /native/include/linprog/glpk.h: -------------------------------------------------------------------------------- 1 | #ifndef LINPROG_GLPK_H 2 | #define LINPROG_GLPK_H 3 | 4 | #include "linprog/model.h" 5 | 6 | class Solution; 7 | 8 | Solution *glpk_solve(const LinearProgram& lp, unsigned int max_num_vars); 9 | 10 | #include "linprog/solver.h" 11 | 12 | #endif 13 | 
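glpk_solve above returns a Solution object whose accessors are declared in linprog/solver.h further down in this listing. A minimal usage sketch follows; it assumes the LinearProgram has already been populated through the API in linprog/model.h (which is not part of this listing), and both the helper name and the NULL-on-failure check are assumptions rather than documented behavior.

/* Sketch only: the LinearProgram construction API lives in linprog/model.h,
 * which is not shown here, so the LP is assumed to be built elsewhere. */
#include "linprog/glpk.h"

double solve_and_read_first_var(const LinearProgram &lp, unsigned int num_vars)
{
	Solution *sol = glpk_solve(lp, num_vars);

	if (!sol)
		return -1.0; /* assumption: a null pointer signals solver failure */

	/* Solution::get_value() is declared in linprog/solver.h below. */
	double x0 = sol->get_value(0);

	delete sol; /* Solution has a virtual destructor */
	return x0;
}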
-------------------------------------------------------------------------------- /native/include/res_io.h: -------------------------------------------------------------------------------- 1 | #ifndef RES_IO_H 2 | #define RES_IO_H 3 | 4 | std::ostream& operator<<(std::ostream &os, const RequestBound &rb); 5 | std::ostream& operator<<(std::ostream &os, const TaskInfo &ti); 6 | std::ostream& operator<<(std::ostream &os, const ResourceSharingInfo &rsi); 7 | 8 | #endif 9 | -------------------------------------------------------------------------------- /schedcat/sim/__init__.py: -------------------------------------------------------------------------------- 1 | from .native import TaskSet 2 | 3 | def get_native_taskset(tasks): 4 | ts = TaskSet() 5 | for t in tasks: 6 | if t.implicit_deadline(): 7 | ts.add_task(t.cost, t.period) 8 | else: 9 | ts.add_task(t.cost, t.period, t.deadline) 10 | return ts 11 | -------------------------------------------------------------------------------- /native/include/schedulability.h: -------------------------------------------------------------------------------- 1 | #ifndef SCHEDULABILITY_H 2 | #define SCHEDULABILITY_H 3 | 4 | class SchedulabilityTest 5 | { 6 | public: 7 | virtual bool is_schedulable(const TaskSet &ts, 8 | bool check_preconditions = true) = 0; 9 | 10 | virtual ~SchedulabilityTest() {}; 11 | }; 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /native/include/edf/gfb.h: -------------------------------------------------------------------------------- 1 | #ifndef GFB_H 2 | #define GFB_H 3 | 4 | class GFBGedf : public SchedulabilityTest 5 | { 6 | private: 7 | unsigned int m; 8 | 9 | public: 10 | GFBGedf(unsigned int num_processors) : m(num_processors) {}; 11 | 12 | bool is_schedulable(const TaskSet &ts, bool check_preconditions = true); 13 | 14 | }; 15 | 16 | #endif 17 | -------------------------------------------------------------------------------- /native/include/time-types.h: -------------------------------------------------------------------------------- 1 | #ifndef TIME_TYPES_H 2 | #define TIME_TYPES_H 3 | 4 | /* include string.h for gmpxx.h */ 5 | #include 6 | #include 7 | 8 | typedef mpz_class integral_t; 9 | typedef mpq_class fractional_t; 10 | 11 | static inline void truncate_fraction(fractional_t &val) 12 | { 13 | val.get_num() -= val.get_num() % val.get_den(); 14 | val.canonicalize(); 15 | } 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /schedcat/sched/edf/gfb.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | # The G-EDF density test. 4 | def is_schedulable(no_cpus, tasks): 5 | """Is the system schedulable according to the GFB test? 6 | Also known as the "density test." 
7 | """ 8 | if not tasks: 9 | return True 10 | dmax = max([t.density() for t in tasks]) 11 | bound = no_cpus - (no_cpus - 1) * dmax 12 | return tasks.density() <= bound 13 | -------------------------------------------------------------------------------- /native/include/edf/load.h: -------------------------------------------------------------------------------- 1 | #ifndef LOAD_H 2 | #define LOAD_H 3 | 4 | class LoadGedf : public SchedulabilityTest 5 | { 6 | private: 7 | unsigned int m; 8 | fractional_t epsilon; 9 | 10 | public: 11 | LoadGedf(unsigned int num_processors, 12 | unsigned int milli_epsilon = 100 13 | ) : m(num_processors), epsilon(milli_epsilon, 1000) {}; 14 | 15 | bool is_schedulable(const TaskSet &ts, bool check_preconditions = true); 16 | 17 | }; 18 | 19 | #endif 20 | -------------------------------------------------------------------------------- /native/include/linprog/cplex.h: -------------------------------------------------------------------------------- 1 | #ifndef LINPROG_CPLEX_H 2 | #define LINPROG_CPLEX_H 3 | 4 | #include "linprog/model.h" 5 | 6 | class Solution; 7 | 8 | // solve with CPLEX connected via the "Concert Technology" API 9 | Solution *cplex_solve(const LinearProgram& lp, unsigned int max_num_vars); 10 | 11 | // solve with CPLEX connected via the plain, old C API 12 | Solution *cpx_solve(const LinearProgram& lp, unsigned int max_num_vars); 13 | 14 | #include "linprog/solver.h" 15 | 16 | #endif 17 | -------------------------------------------------------------------------------- /native/interface/sharedres_types.i: -------------------------------------------------------------------------------- 1 | %ignore Interference; 2 | %ignore RequestBound; 3 | %ignore TaskInfo; 4 | 5 | %ignore ResourceSharingInfo::get_tasks; 6 | 7 | %ignore BlockingBounds::raise_request_span; 8 | %ignore BlockingBounds::get_max_request_span; 9 | %ignore BlockingBounds::operator[](unsigned int); 10 | %ignore BlockingBounds::operator[](unsigned int) const; 11 | 12 | %ignore ResourceLocality::operator[](unsigned int) const; 13 | %ignore ReplicaInfo::operator[](unsigned int) const; 14 | -------------------------------------------------------------------------------- /schedcat/util/time.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | from __future__ import absolute_import 3 | 4 | from math import ceil, floor 5 | 6 | # various time-related helpers 7 | 8 | def us2ms(us): 9 | return us / 1000 10 | 11 | def ms2us(ms): 12 | return ms * 1000 13 | 14 | def sec2us(sec): 15 | return sec * 1000000 16 | 17 | def ms2us_ru(ms): 18 | "Convert and round up." 19 | return int(ceil(ms * 1000)) 20 | 21 | def ms2us_rd(ms): 22 | "Convert and round down." 
23 | return int(floor(ms * 1000)) 24 | -------------------------------------------------------------------------------- /example/pmo_host=ludwig_background=load_stat=avg.csv: -------------------------------------------------------------------------------- 1 | WSS,L1,L2,L3,MEM 2 | 4,5.24,5.37,5.66,5.77 3 | 8,9.14,9.24,9.93,10.09 4 | 16,17.02,17.05,18.58,18.78 5 | 32,32.88,32.93,35.82,35.99 6 | 64,65.05,65.33,70.72,70.50 7 | 128,128.64,127.09,137.35,141.05 8 | 256,248.81,246.34,267.73,272.56 9 | 512,478.45,476.95,507.27,509.18 10 | 1024,739.20,733.27,772.68,810.37 11 | 2048,740.10,773.22,837.53,853.27 12 | 3072,355.76,400.88,377.96,483.20 13 | 4096,247.88,291.93,274.51,350.07 14 | 8192,212.90,374.45,436.19,282.28 15 | 12288,201.20,333.80,467.50,274.23 -------------------------------------------------------------------------------- /native/include/mpcp.h: -------------------------------------------------------------------------------- 1 | #ifndef _MPCP_H_ 2 | #define _MPCP_H_ 3 | 4 | typedef std::vector ResponseTimes; 5 | typedef std::vector TaskResponseTimes; 6 | typedef std::vector ClusterResponseTimes; 7 | 8 | typedef std::vector MPCPCeilings; 9 | 10 | void determine_gcs_response_times(const Clusters& clusters, 11 | const MPCPCeilings& ceilings, 12 | ClusterResponseTimes& times); 13 | 14 | MPCPCeilings get_mpcp_ceilings(const ResourceSharingInfo& info); 15 | 16 | #endif 17 | -------------------------------------------------------------------------------- /example/__main__.py: -------------------------------------------------------------------------------- 1 | #Necessary includes and stuff 2 | 3 | from example.driver import nolock_example, lock_example, \ 4 | generate_random_nolock_sets, \ 5 | generate_random_lock_sets, print_bounds 6 | 7 | if __name__ == '__main__': 8 | #Actually run examples when this script is executed 9 | print "Running non-lock example" 10 | print_bounds(nolock_example(generate_random_nolock_sets())) 11 | print "Running lock example" 12 | print_bounds(lock_example(generate_random_lock_sets())) 13 | -------------------------------------------------------------------------------- /native/include/edf/baker.h: -------------------------------------------------------------------------------- 1 | #ifndef BAKER_H 2 | #define BAKER_H 3 | 4 | class BakerGedf : public SchedulabilityTest 5 | { 6 | 7 | private: 8 | unsigned int m; 9 | 10 | private: 11 | void beta(const Task &t_i, const Task &t_k, const fractional_t &lambda_k, 12 | fractional_t &beta_i); 13 | bool is_task_schedulable(unsigned int k, const TaskSet &ts); 14 | 15 | public: 16 | BakerGedf(unsigned int num_processors) : m(num_processors) {}; 17 | 18 | bool is_schedulable(const TaskSet &ts, bool check_preconditions = true); 19 | }; 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /native/include/edf/bcl.h: -------------------------------------------------------------------------------- 1 | #ifndef BCL_H 2 | #define BCL_H 3 | 4 | class BCLGedf : public SchedulabilityTest 5 | { 6 | 7 | private: 8 | unsigned int m; 9 | 10 | private: 11 | unsigned long max_jobs_contained(const Task &t_i, const Task &t_k); 12 | void beta(const Task &t_i, const Task &t_k, fractional_t &beta_i); 13 | bool is_task_schedulable(unsigned int k, const TaskSet &ts); 14 | 15 | public: 16 | BCLGedf(unsigned int num_processors) : m(num_processors) {}; 17 | 18 | bool is_schedulable(const TaskSet &ts, bool check_preconditions = true); 19 | }; 20 | 21 | #endif 22 | 
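Each of the edf/*.h analyses in this directory implements the SchedulabilityTest interface from schedulability.h: construct the test with the processor count, then pass a TaskSet to is_schedulable(). A hypothetical sketch for the BCL test declared above; the TaskSet::add_task(wcet, period) call and the function name are assumptions inferred from the SWIG-exposed Python wrapper in schedcat/sim/__init__.py earlier in this listing, not a documented C++ signature.

/* Hypothetical sketch: assumes tasks.h exposes TaskSet::add_task(wcet, period)
 * as suggested by the Python wrapper in schedcat/sim/__init__.py. */
#include "tasks.h"
#include "schedulability.h"
#include "edf/bcl.h"

static bool bcl_demo(void)
{
	TaskSet ts;
	ts.add_task(2, 10);	/* WCET = 2, period = implicit deadline = 10 */
	ts.add_task(5, 20);
	ts.add_task(9, 30);

	BCLGedf test(2);	/* G-EDF on two processors */
	return test.is_schedulable(ts);
}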
-------------------------------------------------------------------------------- /native/include/edf/bcl_iterative.h: -------------------------------------------------------------------------------- 1 | #ifndef BCL_ITERATIVE_H 2 | #define BCL_ITERATIVE_H 3 | 4 | class BCLIterativeGedf : public SchedulabilityTest 5 | { 6 | 7 | private: 8 | unsigned int m; 9 | unsigned int max_rounds; 10 | 11 | bool slack_update(unsigned int k, const TaskSet &ts, 12 | unsigned long *slack, bool &ok); 13 | 14 | public: 15 | BCLIterativeGedf(unsigned int num_processors, unsigned int max_rounds = 0) 16 | : m(num_processors), max_rounds(max_rounds) {}; 17 | 18 | bool is_schedulable(const TaskSet &ts, bool check_preconditions = true); 19 | }; 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /native/include/edf/qpa.h: -------------------------------------------------------------------------------- 1 | #ifndef QPA_H 2 | #define QPA_H 3 | 4 | class QPATest : public SchedulabilityTest 5 | { 6 | public: 7 | QPATest(unsigned int num_processors); 8 | 9 | bool is_schedulable(const TaskSet &ts, bool check_preconditions = true); 10 | 11 | virtual integral_t get_demand(integral_t interval, const TaskSet &ts); 12 | virtual integral_t get_max_interval(const TaskSet &ts, const fractional_t& util); 13 | }; 14 | 15 | // support for C=D semi-partitioning assignment heuristic 16 | unsigned long qpa_get_max_C_equal_D_cost( 17 | const TaskSet &ts, 18 | unsigned long wcet, 19 | unsigned long period); 20 | 21 | 22 | #endif 23 | -------------------------------------------------------------------------------- /schedcat/sim/edf.py: -------------------------------------------------------------------------------- 1 | 2 | import schedcat.sim as sim 3 | import schedcat.sim.native as cpp 4 | 5 | from schedcat.util.time import sec2us 6 | 7 | 8 | def is_deadline_missed(no_cpus, tasks, simulation_length=60, preemptive=True): 9 | ts = sim.get_native_taskset(tasks) 10 | return cpp.edf_misses_deadline(no_cpus, ts, int(sec2us(simulation_length))) 11 | 12 | def time_of_first_miss(no_cpus, tasks, simulation_length=60, preemptive=True): 13 | ts = sim.get_native_taskset(tasks) 14 | return cpp.edf_first_violation(no_cpus, ts, int(sec2us(simulation_length))) 15 | 16 | def no_counter_example(*args, **kargs): 17 | return not is_deadline_missed(*args, **kargs) 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | *~ 3 | *.pyc 4 | *.pyo 5 | ts 6 | \#* 7 | .\#* 8 | *.zapped 9 | *.sedbak* 10 | *.o 11 | *.os 12 | *.egg-info 13 | *.vscode 14 | .coverage 15 | native/testmain 16 | native/interface/*_wrap.cc 17 | native/*.py 18 | native/*.so 19 | native/.config 20 | schedcat/locking/_locking.so 21 | schedcat/locking/native.py 22 | schedcat/sched/_sched.so 23 | schedcat/sched/native.py 24 | schedcat/sim/_sim.so 25 | schedcat/sim/native.py 26 | schedcat/cansim/native.py 27 | schedcat/cansim/_cansim.so 28 | tmp/ 29 | TAGS 30 | schedcat/locking/linprog/_lp_analysis.so 31 | schedcat/locking/linprog/native.py 32 | schedcat/cansim/_cansim.so 33 | schedcat/cansim/native.py 34 | config.mk 35 | -------------------------------------------------------------------------------- /native/interface/sim.i: -------------------------------------------------------------------------------- 1 | %module sim 2 | %{ 3 | #define SWIG_FILE_WITH_INIT 4 | #include "tasks.h" 5 | #include "edf/sim.h" 6 
| %} 7 | 8 | %ignore Task::get_utilization(fractional_t &util) const; 9 | %ignore Task::get_density(fractional_t &density) const; 10 | %ignore Task::bound_demand(const integral_t &time, integral_t &demand) const; 11 | %ignore Task::bound_load const; 12 | %ignore Task::approx_demand const; 13 | 14 | %ignore TaskSet::operator[](int); 15 | %ignore TaskSet::operator[](int) const; 16 | %ignore TaskSet::get_utilization const; 17 | %ignore TaskSet::get_density const; 18 | %ignore TaskSet::get_max_density const; 19 | %ignore TaskSet::approx_load const; 20 | 21 | #include "tasks.h" 22 | #include "edf/sim.h" 23 | -------------------------------------------------------------------------------- /native/interface/locking.i: -------------------------------------------------------------------------------- 1 | %module locking 2 | %{ 3 | #define SWIG_FILE_WITH_INIT 4 | #include "sharedres.h" 5 | %} 6 | 7 | %newobject task_fair_mutex_bounds; 8 | %newobject task_fair_rw_bounds; 9 | %newobject phase_fair_rw_bounds; 10 | %newobject msrp_bounds_holistic; 11 | 12 | %newobject global_omlp_bounds; 13 | %newobject global_fmlp_bounds; 14 | %newobject part_omlp_bounds; 15 | %newobject clustered_omlp_bounds; 16 | %newobject clustered_rw_omlp_bounds; 17 | 18 | %newobject part_fmlp_bounds; 19 | %newobject mpcp_bounds; 20 | %newobject dpcp_bounds; 21 | %newobject msrp_bounds; 22 | 23 | %newobject global_pip_bounds; 24 | %newobject ppcp_bounds; 25 | 26 | %include "sharedres_types.i" 27 | 28 | #include "sharedres.h" 29 | 30 | -------------------------------------------------------------------------------- /native/src/edf/gfb.cpp: -------------------------------------------------------------------------------- 1 | #include "tasks.h" 2 | #include "schedulability.h" 3 | 4 | #include "edf/gfb.h" 5 | 6 | bool GFBGedf::is_schedulable(const TaskSet &ts, bool check_preconditions) 7 | { 8 | if (check_preconditions) 9 | { 10 | if (!(ts.has_only_feasible_tasks() 11 | && ts.is_not_overutilized(m) 12 | && ts.has_only_constrained_deadlines() 13 | && ts.has_no_self_suspending_tasks())) 14 | return false; 15 | } 16 | 17 | fractional_t total_density, max_density, bound; 18 | 19 | ts.get_density(total_density); 20 | ts.get_max_density(max_density); 21 | 22 | bound = m - (m - 1) * max_density; 23 | 24 | return total_density <= bound; 25 | } 26 | -------------------------------------------------------------------------------- /native/include/canbus/can_sim_ifs.h: -------------------------------------------------------------------------------- 1 | #ifndef CANBUS_SIM_IFS_H 2 | #define CANBUS_SIM_IFS_H 3 | 4 | /* Methods invoked by Python through the Swig interface. 
*/ 5 | 6 | void simulate_for_tardiness_stats(CANTaskSet &ts, 7 | unsigned long end_of_simulation, 8 | unsigned long boot_time_ms, 9 | unsigned int iterations); 10 | 11 | unsigned long get_job_completion_time(CANTaskSet &ts, 12 | unsigned long end_of_simulation, 13 | unsigned long taskid, 14 | unsigned long priority, 15 | unsigned long seqno); 16 | 17 | #endif 18 | -------------------------------------------------------------------------------- /native/interface/cansim.i: -------------------------------------------------------------------------------- 1 | %module cansim 2 | %{ 3 | #define SWIG_FILE_WITH_INIT 4 | #include "tasks.h" 5 | #include "canbus/msgs.h" 6 | #include "canbus/can_sim_ifs.h" 7 | %} 8 | 9 | %ignore Task::get_utilization(fractional_t &util) const; 10 | %ignore Task::get_density(fractional_t &density) const; 11 | %ignore Task::bound_demand(const integral_t &time, integral_t &demand) const; 12 | %ignore Task::bound_load const; 13 | %ignore Task::approx_demand const; 14 | 15 | %ignore TaskSet::operator[](int); 16 | %ignore TaskSet::operator[](int) const; 17 | %ignore TaskSet::get_utilization const; 18 | %ignore TaskSet::get_density const; 19 | %ignore TaskSet::get_max_density const; 20 | %ignore TaskSet::approx_load const; 21 | 22 | #include "tasks.h" 23 | #include "canbus/msgs.h" 24 | #include "canbus/can_sim_ifs.h" 25 | -------------------------------------------------------------------------------- /schedcat/sched/fp/__init__.py: -------------------------------------------------------------------------------- 1 | """Fixed-priority schedulability tests. 2 | """ 3 | 4 | from __future__ import division 5 | 6 | from .rta import bound_response_times as uni_bound_response_times 7 | from .rta import is_schedulable as uni_is_schedulable 8 | 9 | from .guan import bound_response_times as global_bound_response_times 10 | from .guan import is_schedulable as global_is_schedulable 11 | 12 | 13 | def is_schedulable(num_cpus, taskset): 14 | if num_cpus == 1: 15 | return uni_is_schedulable(num_cpus, taskset) 16 | else: 17 | return global_is_schedulable(num_cpus, taskset) 18 | 19 | def bound_response_times(num_cpus, taskset): 20 | if num_cpus == 1: 21 | return uni_bound_response_times(num_cpus, taskset) 22 | else: 23 | return global_bound_response_times(num_cpus, taskset) 24 | -------------------------------------------------------------------------------- /native/src/linprog/varmapperbase.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | 7 | #include "linprog/varmapperbase.h" 8 | 9 | 10 | std::string VarMapperBase::var2str(unsigned int var) const 11 | { 12 | uint64_t key; 13 | 14 | if (search_key_for_var(var, key)) 15 | { 16 | return key2str(key, var); 17 | } 18 | else 19 | return ""; 20 | } 21 | 22 | std::string VarMapperBase::key2str(uint64_t key, unsigned int var) const 23 | { 24 | std::ostringstream buf; 25 | buf << "X" << var; 26 | return buf.str(); 27 | } 28 | 29 | hashmap VarMapperBase::get_translation_table() const 30 | { 31 | hashmap table; 32 | 33 | foreach(map, kv) 34 | { 35 | table[kv->second] = key2str(kv->first, kv->second); 36 | } 37 | 38 | return table; 39 | } 40 | -------------------------------------------------------------------------------- /native/include/global-pip.h: -------------------------------------------------------------------------------- 1 | #ifndef _GLOBAL_PIP_H_ 2 | #define _GLOBAL_PIP_H_ 3 | 4 | unsigned long Ilp_i( 5 | const ResourceSharingInfo& info, 6 | 
const TaskInfo &tsk, 7 | unsigned int number_of_cpus); 8 | 9 | unsigned long lower_priority_with_higher_ceiling_time( 10 | const ResourceSharingInfo& info, 11 | const TaskInfo &tsk, 12 | const TaskInfo &tx, 13 | const PriorityCeilings &prio_ceilings); 14 | 15 | unsigned long common_sr_time( 16 | const ResourceSharingInfo& info, 17 | const TaskInfo* tsk, 18 | const TaskInfo &tx); 19 | 20 | unsigned long Ihp_i_dsr( 21 | const ResourceSharingInfo& info, 22 | const TaskInfo* tsk); 23 | 24 | unsigned long W_l_tx( 25 | const ResourceSharingInfo& info, 26 | unsigned long t, 27 | const TaskInfo &task, 28 | unsigned long x); 29 | 30 | unsigned long DB_i( 31 | const ResourceSharingInfo& info, 32 | const TaskInfo &tsk); 33 | 34 | #endif 35 | -------------------------------------------------------------------------------- /schedcat/sched/edf/bak.py: -------------------------------------------------------------------------------- 1 | """Implements the BAK G-EDF schedulability test. 2 | """ 3 | 4 | from __future__ import division 5 | 6 | from fractions import Fraction 7 | 8 | ONE = Fraction(1) 9 | 10 | def beta(t_i, t_k, l): 11 | # assumes integral time 12 | u_i = t_i.utilization_q() 13 | part1 = u_i * (ONE + Fraction(t_i.period - t_i.deadline, t_k.deadline)) 14 | if l < u_i: 15 | part2 = (t_i.cost - l * t_i.period) / Fraction(t_k.deadline) 16 | return part1 + part2 17 | else: 18 | return part1 19 | 20 | def task_schedulable(T, t_k, m): 21 | l = t_k.density_q() # lambda 22 | if l > ONE: 23 | return False 24 | beta_sum = sum([min(ONE, beta(t_i, t_k, l)) for t_i in T]) 25 | return beta_sum <= m - (m - 1) * l 26 | 27 | def is_schedulable(no_cpus, tasks): 28 | return all(task_schedulable(tasks, t_k, no_cpus) for t_k in tasks) 29 | -------------------------------------------------------------------------------- /native/include/edf/ffdbf.h: -------------------------------------------------------------------------------- 1 | #ifndef FFDBF_H 2 | #define FFDBF_H 3 | 4 | class FFDBFGedf : public SchedulabilityTest 5 | { 6 | private: 7 | const unsigned int m; 8 | const unsigned long epsilon_denom; 9 | const fractional_t sigma_step; 10 | 11 | private: 12 | bool witness_condition(const TaskSet &ts, 13 | const integral_t q[], const fractional_t r[], 14 | const fractional_t &time, const fractional_t &speed); 15 | 16 | public: 17 | FFDBFGedf(unsigned int num_processors, 18 | unsigned long epsilon_denom = 10, 19 | unsigned long sigma_granularity = 50) 20 | : m(num_processors), 21 | epsilon_denom(epsilon_denom), 22 | sigma_step(1, sigma_granularity) 23 | {}; 24 | 25 | bool is_schedulable(const TaskSet &ts, bool check_preconditions = true); 26 | }; 27 | 28 | #endif 29 | -------------------------------------------------------------------------------- /native/include/edf/baruah.h: -------------------------------------------------------------------------------- 1 | #ifndef BARUAH_H 2 | #define BARUAH_H 3 | 4 | class BaruahGedf : public SchedulabilityTest 5 | { 6 | 7 | private: 8 | unsigned int m; 9 | 10 | bool is_task_schedulable(unsigned int k, 11 | const TaskSet &ts, 12 | const integral_t &ilen, 13 | integral_t &i1, 14 | integral_t &sum, 15 | integral_t *idiff, 16 | integral_t **ptr); 17 | 18 | void get_max_test_points(const TaskSet &ts, fractional_t& m_minus_u, 19 | integral_t* maxp); 20 | 21 | public: 22 | BaruahGedf(unsigned int num_processors) : m(num_processors) {}; 23 | 24 | bool is_schedulable(const TaskSet &ts, bool check_preconditions = true); 25 | 26 | static const double MAX_RUNTIME; 27 | }; 28 | 29 | #endif 30 
| -------------------------------------------------------------------------------- /native/src/canbus/job_completion_stats.cpp: -------------------------------------------------------------------------------- 1 | #include "canbus/job_completion_stats.h" 2 | #include "canbus/can_sim.h" 3 | 4 | void CANBusJobCompletionStats::job_completed(int proc, CANJob * job) 5 | { 6 | if (job->get_task().get_taskid() == taskid && 7 | job->get_task().get_priority() == priority && 8 | job->get_seqno() == seqno) 9 | { 10 | completion_time = current_time; 11 | abort(); 12 | } 13 | } 14 | 15 | unsigned long get_job_completion_time(CANTaskSet &ts, 16 | simtime_t end_of_simulation, 17 | unsigned long taskid, 18 | unsigned long priority, 19 | unsigned long seqno) 20 | { 21 | CANBusJobCompletionStats sim(taskid, priority, seqno); 22 | run_periodic_simulation(sim, ts, end_of_simulation); 23 | return sim.get_completion_time(); 24 | } 25 | -------------------------------------------------------------------------------- /schedcat/sched/edf/bcl.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | from math import floor 4 | from fractions import Fraction 5 | 6 | ONE = Fraction(1) 7 | 8 | def N(t_k, t_i): 9 | # assumes integral time 10 | return int(floor((t_k.deadline - t_i.deadline) / t_i.period)) + 1 11 | 12 | def beta(t_k, t_i): 13 | N_i = N(t_k, t_i) 14 | C_i = t_i.cost 15 | T_i = t_i.period 16 | D_k = t_k.deadline 17 | return Fraction(N_i * C_i + min(C_i, max(0, D_k - N_i * T_i)) , D_k) 18 | 19 | def task_schedulable(T, t_k, m): 20 | l_k = t_k.density_q() 21 | cap = m * (ONE - l_k) 22 | all_beta = [beta(t_k, t_i) for t_i in T if t_i != t_k] 23 | beta_sum = sum([min(b, ONE - l_k) for b in all_beta]) 24 | return beta_sum < cap or \ 25 | (beta_sum == cap and any([0 < b <= ONE - l_k for b in all_beta])) 26 | 27 | def is_schedulable(no_cpus, tasks): 28 | return all(task_schedulable(tasks, t_k, no_cpus) for t_k in tasks) 29 | -------------------------------------------------------------------------------- /native/include/edf/gedf.h: -------------------------------------------------------------------------------- 1 | #ifndef GEDF_H 2 | #define GEDF_H 3 | 4 | class GlobalEDF : public SchedulabilityTest 5 | { 6 | 7 | private: 8 | unsigned int m; 9 | unsigned long rta_step; 10 | bool want_ffdbf; 11 | bool want_load; 12 | bool want_baruah; 13 | bool want_rta; 14 | bool want_la; 15 | 16 | public: 17 | GlobalEDF(unsigned int num_processors, 18 | unsigned long rta_min_step = 1, 19 | bool want_baruah = true, 20 | bool want_rta = true, 21 | bool want_ffdbf = false, 22 | bool want_load = false, 23 | bool want_la = true) 24 | : m(num_processors), rta_step(rta_min_step), 25 | want_ffdbf(want_ffdbf), 26 | want_load(want_load), 27 | want_baruah(want_baruah), 28 | want_rta(want_rta), 29 | want_la(want_la) {}; 30 | 31 | bool is_schedulable(const TaskSet &ts, bool check_preconditions = true); 32 | }; 33 | 34 | 35 | #endif 36 | -------------------------------------------------------------------------------- /native/include/edf/qpa_msrp.h: -------------------------------------------------------------------------------- 1 | #ifndef QPA_MSRP_H 2 | #define QPA_MSRP_H 3 | 4 | #include "time-types.h" 5 | #include "sharedres.h" 6 | #include "sharedres_types.h" 7 | 8 | #include "tasks.h" 9 | #include "schedulability.h" 10 | 11 | #include "qpa.h" 12 | 13 | class QPA_MSRPTest : public QPATest 14 | { 15 | 16 | private: 17 | 18 | unsigned long max_relative_deadline; 19 | 
unsigned int num_cpus; 20 | unsigned int cpu_id; 21 | 22 | const ResourceSharingInfo& info; 23 | 24 | public: 25 | 26 | QPA_MSRPTest(unsigned int num_processors, const ResourceSharingInfo& _info, 27 | unsigned int _num_cpus, unsigned int _cpu_id); // Needed by msrp_bounds 28 | 29 | integral_t get_demand(integral_t interval, const TaskSet &ts); 30 | integral_t get_max_interval(const TaskSet &ts, const fractional_t& util); 31 | 32 | 33 | void set_max_relative_deadline(unsigned long d) 34 | {max_relative_deadline = d;} 35 | 36 | }; 37 | 38 | #endif 39 | -------------------------------------------------------------------------------- /native/include/event.h: -------------------------------------------------------------------------------- 1 | #ifndef EVENT_H 2 | #define EVENT_H 3 | 4 | #include 5 | 6 | template 7 | class Event 8 | { 9 | public: 10 | virtual void fire(const time_t &cur_time) {}; /* callback */ 11 | }; 12 | 13 | 14 | template 15 | class Timeout 16 | { 17 | private: 18 | time_t fire_time; 19 | Event *handler; 20 | 21 | public: 22 | Timeout(time_t when, Event *what) 23 | : fire_time(when), handler(what) {} 24 | 25 | const time_t& time() const 26 | { 27 | return fire_time; 28 | } 29 | 30 | Event& event() const 31 | { 32 | return *handler; 33 | } 34 | 35 | bool operator<(const Timeout &that) const 36 | { 37 | return this->time() < that.time(); 38 | } 39 | 40 | bool operator>(const Timeout &that) const 41 | { 42 | return this->time() > that.time(); 43 | } 44 | }; 45 | 46 | 47 | #endif 48 | -------------------------------------------------------------------------------- /native/include/edf/sim.h: -------------------------------------------------------------------------------- 1 | #ifndef EDF_SIM_H 2 | #define EDF_SIM_H 3 | 4 | struct Stats 5 | { 6 | unsigned long num_tardy_jobs; 7 | unsigned long num_ok_jobs; 8 | unsigned long total_tardiness; 9 | unsigned long max_tardiness; 10 | unsigned long first_miss; 11 | }; 12 | 13 | bool edf_misses_deadline(unsigned int num_procs, 14 | TaskSet &ts, 15 | unsigned long end_of_simulation, 16 | bool preemptive = true); 17 | 18 | unsigned long edf_first_violation(unsigned int num_procs, 19 | TaskSet &ts, 20 | unsigned long end_of_simulation, 21 | bool preemptive = true); 22 | 23 | Stats edf_observe_tardiness(unsigned int num_procs, 24 | TaskSet &ts, 25 | unsigned long end_of_simulation, 26 | bool preemptive = true); 27 | 28 | #endif 29 | -------------------------------------------------------------------------------- /native/include/rw-blocking.h: -------------------------------------------------------------------------------- 1 | #ifndef RW_BLOCKING_H 2 | #define RW_BLOCKING_H 3 | 4 | void split_by_type(const ContentionSet& requests, 5 | ContentionSet& reads, 6 | ContentionSet& writes); 7 | void split_by_type(const Resources& resources, 8 | Resources &reads, 9 | Resources &writes); 10 | void split_by_type(const ClusterResources& per_cluster, 11 | ClusterResources &reads); 12 | void split_by_type(const ClusterResources& per_cluster, 13 | ClusterResources &reads, 14 | ClusterResources &writes); 15 | 16 | struct RWCount { 17 | unsigned int res_id; 18 | unsigned int num_reads; 19 | unsigned int num_writes; 20 | unsigned int rlength; 21 | unsigned int wlength; 22 | 23 | RWCount(unsigned int id) : res_id(id), 24 | num_reads(0), 25 | num_writes(0), 26 | rlength(0), 27 | wlength(0) 28 | {} 29 | }; 30 | 31 | typedef std::vector RWCounts; 32 | 33 | void merge_rw_requests(const TaskInfo &tsk, RWCounts &counts); 34 | 35 | #endif 36 | 
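The Event and Timeout templates in event.h above (whose angle-bracketed template parameters did not survive in this listing) pair a fire() callback with a firing time, and the operator< / operator> overloads exist so that pending timeouts can sit in an earliest-first priority queue. A self-contained sketch of that pattern, assuming both templates are parameterized by the time type; the class and function names are made up for the example.

/* Sketch of the Timeout/Event pattern from event.h, assuming both class
 * templates take the time type as their parameter (the angle-bracket
 * contents are missing from this listing). */
#include <functional>
#include <iostream>
#include <queue>
#include <vector>

#include "event.h"

class PrintEvent : public Event<unsigned long>
{
public:
	void fire(const unsigned long &cur_time)
	{
		std::cout << "fired at " << cur_time << std::endl;
	}
};

static void drain_timeouts(void)
{
	PrintEvent e1, e2;

	/* min-heap on the firing time, relying on Timeout::operator> */
	std::priority_queue<Timeout<unsigned long>,
	                    std::vector<Timeout<unsigned long> >,
	                    std::greater<Timeout<unsigned long> > > pending;

	pending.push(Timeout<unsigned long>(30, &e2));
	pending.push(Timeout<unsigned long>(10, &e1));

	while (!pending.empty())
	{
		Timeout<unsigned long> next = pending.top();
		pending.pop();
		next.event().fire(next.time()); /* earliest timeout fires first */
	}
}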
-------------------------------------------------------------------------------- /tests/sim.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import unittest 4 | 5 | import schedcat.sim.edf as edf 6 | import schedcat.model.tasks as tasks 7 | 8 | from schedcat.util.math import is_integral 9 | 10 | class EDFSimulator(unittest.TestCase): 11 | def setUp(self): 12 | self.ts = tasks.TaskSystem([ 13 | tasks.SporadicTask(2, 3), 14 | tasks.SporadicTask(2, 3), 15 | tasks.SporadicTask(2, 3), 16 | ]) 17 | 18 | def test_deadline_miss(self): 19 | self.assertTrue(edf.is_deadline_missed(1, self.ts)) 20 | self.assertTrue(edf.is_deadline_missed(2, self.ts)) 21 | self.assertFalse(edf.is_deadline_missed(3, self.ts, simulation_length=1)) 22 | 23 | def test_deadline_miss_time(self): 24 | self.assertEqual(edf.time_of_first_miss(1, self.ts), 3) 25 | self.assertEqual(edf.time_of_first_miss(2, self.ts), 3) 26 | self.assertEqual(edf.time_of_first_miss(3, self.ts, simulation_length=1), 0) 27 | -------------------------------------------------------------------------------- /native/interface/lp_analysis.i: -------------------------------------------------------------------------------- 1 | %module lp_analysis 2 | %{ 3 | #define SWIG_FILE_WITH_INIT 4 | #include "lp_analysis.h" 5 | #include "nested_cs.h" 6 | %} 7 | 8 | %newobject lp_dpcp_bounds; 9 | %newobject lp_dflp_bounds; 10 | 11 | %newobject lp_msrp_bounds; 12 | %newobject lp_pfp_preemptive_fifo_spinlock_bounds; 13 | 14 | %newobject lp_pfp_unordered_spinlock_bounds; 15 | 16 | %newobject lp_pfp_prio_spinlock_bounds; 17 | 18 | %newobject lp_pfp_prio_fifo_spinlock_bounds; 19 | 20 | %newobject lp_pfp_baseline_spinlock_bounds; 21 | 22 | %newobject lp_global_pip_bounds; 23 | %newobject lp_ppcp_bounds; 24 | %newobject lp_global_fmlpp_bounds; 25 | %newobject lp_sa_gfmlp_bounds; 26 | %newobject lp_prsb_bounds; 27 | %newobject lb_no_progress_fifo_bounds; 28 | %newobject lb_no_progress_priority_bounds; 29 | 30 | %newobject dummy_bounds; 31 | 32 | %include "sharedres_types.i" 33 | 34 | %include "lp_analysis.h" 35 | 36 | %ignore CriticalSectionsOfTaskset::get_transitive_nesting_relationship; 37 | 38 | %include "nested_cs.h" 39 | -------------------------------------------------------------------------------- /native/include/math-helper.h: -------------------------------------------------------------------------------- 1 | #ifndef MATH_HELPER_H 2 | #define MATH_HELPER_H 3 | 4 | #include "time-types.h" 5 | 6 | static inline unsigned long divide_with_ceil(unsigned long numer, 7 | unsigned long denom) 8 | { 9 | if (numer % denom == 0) 10 | return numer / denom; 11 | else 12 | /* integer division computes implicit floor */ 13 | return (numer / denom) + 1; 14 | } 15 | 16 | static inline unsigned long divide_with_floor(unsigned long numer, 17 | unsigned long denom) 18 | { 19 | /* integer division computes implicit floor */ 20 | return (numer / denom); 21 | } 22 | 23 | static inline integral_t divide_with_ceil(const integral_t &numer, 24 | const integral_t &denom) 25 | { 26 | integral_t result; 27 | mpz_cdiv_q(result.get_mpz_t(), numer.get_mpz_t(), denom.get_mpz_t()); 28 | return result; 29 | } 30 | 31 | 32 | static inline integral_t round_up(const fractional_t &f) 33 | { 34 | integral_t result; 35 | mpz_cdiv_q(result.get_mpz_t(), f.get_num_mpz_t(), f.get_den_mpz_t()); 36 | return result; 37 | } 38 | 39 | 40 | #endif 41 | -------------------------------------------------------------------------------- 
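The ceiling and floor helpers in math-helper.h above exist because demand-bound arguments repeatedly need the number of job releases of a sporadic task that can fall into an interval, which is at most ceil(interval / period). A self-contained illustration of the same integer trick used by divide_with_ceil(), with made-up numbers and without the schedcat headers:

/* Self-contained illustration of integer ceiling division; the values are
 * made up for the example and the schedcat headers are not required. */
#include <cassert>

static unsigned long div_ceil(unsigned long numer, unsigned long denom)
{
	/* integer division truncates, so round up iff there is a remainder */
	return numer / denom + (numer % denom != 0);
}

int main()
{
	/* a sporadic task with period 30 releases at most ceil(70/30) = 3
	   jobs in any half-open interval of length 70 */
	assert(div_ceil(70, 30) == 3);
	assert(div_ceil(60, 30) == 2);	/* exact multiples are not rounded up */
	return 0;
}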
/native/include/canbus/job_completion_stats.h: -------------------------------------------------------------------------------- 1 | #ifndef CANBUS_JOB_COMPLETION_STATS 2 | #define CANBUS_JOB_COMPLETION_STATS 3 | 4 | #include "canbus/can_sim.h" 5 | 6 | /* Wrapper around the CANBusScheduler simulator class to collect specific 7 | stats just for testing purposes. */ 8 | class CANBusJobCompletionStats: public CANBusScheduler 9 | { 10 | private: 11 | unsigned long taskid; 12 | unsigned long priority; 13 | unsigned long seqno; // job id 14 | simtime_t completion_time; 15 | 16 | public: 17 | CANBusJobCompletionStats(unsigned long taskid, 18 | unsigned long priority, 19 | unsigned long seqno) : 20 | CANBusScheduler (), 21 | taskid (taskid), 22 | priority (priority), 23 | seqno (seqno) {} 24 | 25 | unsigned long get_completion_time() { return completion_time; } 26 | virtual void job_completed(int proc, CANJob *job); 27 | }; 28 | 29 | #endif 30 | -------------------------------------------------------------------------------- /native/include/edf/rta.h: -------------------------------------------------------------------------------- 1 | #ifndef RTA_H 2 | #define RTA_H 3 | 4 | class RTAGedf : public SchedulabilityTest 5 | { 6 | 7 | private: 8 | unsigned int m; 9 | unsigned int max_rounds; 10 | unsigned int min_delta; 11 | 12 | bool response_estimate(unsigned int k, 13 | const TaskSet &ts, 14 | unsigned long const *slack, 15 | unsigned long response, 16 | unsigned long &new_response); 17 | 18 | bool rta_fixpoint(unsigned int k, 19 | const TaskSet &ts, 20 | unsigned long const *slack, 21 | unsigned long &response); 22 | 23 | public: 24 | RTAGedf(unsigned int num_processors, 25 | unsigned int min_fixpoint_step = 0, 26 | unsigned int max_rounds = 25) 27 | : m(num_processors), max_rounds(max_rounds), 28 | min_delta(min_fixpoint_step) {}; 29 | 30 | bool is_schedulable(const TaskSet &ts, bool check_preconditions = true); 31 | }; 32 | 33 | #endif 34 | -------------------------------------------------------------------------------- /schedcat/util/storage.py: -------------------------------------------------------------------------------- 1 | 2 | # from web.py (public domain code) 3 | class Storage(dict): 4 | """ 5 | A Storage object is like a dictionary except `obj.foo` can be used 6 | in addition to `obj['foo']`. 7 | 8 | >>> o = storage(a=1) 9 | >>> o.a 10 | 1 11 | >>> o['a'] 12 | 1 13 | >>> o.a = 2 14 | >>> o['a'] 15 | 2 16 | >>> del o.a 17 | >>> o.a 18 | Traceback (most recent call last): 19 | ... 
20 | AttributeError: 'a' 21 | 22 | """ 23 | def __getattr__(self, key): 24 | try: 25 | return self[key] 26 | except KeyError, k: 27 | raise AttributeError, k 28 | 29 | def __setattr__(self, key, value): 30 | self[key] = value 31 | 32 | def __delattr__(self, key): 33 | try: 34 | del self[key] 35 | except KeyError, k: 36 | raise AttributeError, k 37 | 38 | def __repr__(self): 39 | return '' 40 | 41 | storage = Storage 42 | -------------------------------------------------------------------------------- /schedcat/cansim/canbus.py: -------------------------------------------------------------------------------- 1 | import schedcat.cansim as sim 2 | import schedcat.cansim.native as cpp 3 | from schedcat.cansim.native import CANTaskSet 4 | 5 | def get_native_canbus_msgset(msgs): 6 | ts = CANTaskSet() 7 | for msg in msgs: 8 | assert msg.implicit_deadline() 9 | ts.add_canbus_task(msg.max_framesize, msg.period * msgs.busrate, \ 10 | msg.id, msg.tid) 11 | ts.set_busrate(msgs.busrate) 12 | ts.add_fault_params(msgs.po, msgs.mfr) 13 | ts.mark_critical_tasks(msgs[0].tid) # assume ts[0] is replicated 14 | ts.set_rprime(msgs.rprime) 15 | return ts 16 | 17 | def completion_time(msgs, sim_len_ms, taskid, priority, seqno): 18 | sim_len_bit_time = sim_len_ms * msgs.busrate 19 | ts = get_native_canbus_msgset(msgs) 20 | return cpp.get_job_completion_time(ts, sim_len_bit_time, taskid, priority, seqno) 21 | 22 | def observe_tardiness(msgs, sim_len_ms, boot_time_ms, iterations): 23 | ts = get_native_canbus_msgset(msgs) 24 | cpp.simulate_for_tardiness_stats(ts, sim_len_ms, boot_time_ms, iterations) 25 | -------------------------------------------------------------------------------- /schedcat/sched/pfair.py: -------------------------------------------------------------------------------- 1 | def is_schedulable(no_cpus, tasks): 2 | """Simple utilization bound: tasks.utilization() <= no_cpus. 3 | Assumption: all parameters are quantum multiples and deadlines 4 | are not constrained. 5 | """ 6 | return tasks.utilization() <= no_cpus and \ 7 | all(t.deadline >= t.period >= t.cost for t in tasks) 8 | 9 | def has_bounded_tardiness(no_cpus, tasks): 10 | """Simple utilization bound: tasks.utilization() <= no_cpus. 11 | This is also true for constrained-deadline tasks. 12 | """ 13 | return tasks.utilization() <= no_cpus and \ 14 | all(t.period >= t.cost for t in tasks) 15 | 16 | def bound_response_times(no_cpus, tasks): 17 | """Upper bound the response time of each task. 18 | This assumes that all task parameters are quantum multiples, and 19 | that effects such as quantum staggering have already been accounted for. 
20 | """ 21 | if has_bounded_tardiness(no_cpus, tasks): 22 | for t in tasks: 23 | t.response_time = t.period 24 | return True 25 | else: 26 | return False 27 | -------------------------------------------------------------------------------- /native/include/linprog/solver.h: -------------------------------------------------------------------------------- 1 | #ifndef LINPROG_SOLVER_H 2 | #define LINPROG_SOLVER_H 3 | 4 | #include "linprog/model.h" 5 | 6 | class Solution 7 | { 8 | 9 | public: 10 | virtual ~Solution() {}; 11 | 12 | virtual double get_value(unsigned int variable_index) const = 0; 13 | 14 | virtual double evaluate(const LinearExpression &exp) const 15 | { 16 | double sum = 0; 17 | foreach(exp.get_terms(), term) 18 | { 19 | double coeff = term->first; 20 | unsigned int var = term->second; 21 | sum += coeff * get_value(var); 22 | } 23 | return sum; 24 | } 25 | }; 26 | 27 | #if defined(CONFIG_HAVE_GLPK) 28 | #include "linprog/glpk.h" 29 | #elif defined(CONFIG_HAVE_CPLEX) 30 | #include "linprog/cplex.h" 31 | #else 32 | #warning No LP solver available. 33 | #endif 34 | 35 | 36 | static inline Solution *linprog_solve( 37 | const LinearProgram& lp, 38 | unsigned int max_num_vars) 39 | { 40 | 41 | #if defined(CONFIG_HAVE_GLPK) 42 | return glpk_solve(lp, max_num_vars); 43 | #elif defined(CONFIG_HAVE_CPLEX) 44 | return cpx_solve(lp, max_num_vars); 45 | #else 46 | assert(0); 47 | return NULL; 48 | #endif 49 | } 50 | 51 | #endif 52 | -------------------------------------------------------------------------------- /native/include/edf/la.h: -------------------------------------------------------------------------------- 1 | #ifndef LA_H 2 | #define LA_H 3 | 4 | class LAGedf : public SchedulabilityTest 5 | { 6 | 7 | private: 8 | unsigned int m; 9 | 10 | bool is_task_schedulable_for_interval( 11 | const TaskSet &ts, 12 | unsigned int l, 13 | unsigned long suspend, 14 | const integral_t &ilen, /* interval length is xi_l - d_l */ 15 | integral_t &i1, 16 | integral_t &sum, 17 | integral_t *idiff, 18 | integral_t **ptr); 19 | 20 | bool is_task_schedulable_for_suspension_length( 21 | const TaskSet &ts, 22 | unsigned int l, 23 | unsigned long suspend, 24 | const fractional_t &m_minus_u, 25 | const fractional_t &test_point_sum, 26 | const fractional_t &usum); 27 | 28 | integral_t get_max_test_point( 29 | const TaskSet &ts, 30 | unsigned int l, 31 | const fractional_t &m_minus_u, 32 | const fractional_t &test_point_sum, 33 | const fractional_t &usum, 34 | unsigned long suspension); 35 | 36 | public: 37 | LAGedf(unsigned int num_processors) : m(num_processors) {}; 38 | 39 | bool is_schedulable(const TaskSet &ts, bool check_preconditions = true); 40 | 41 | static const double MAX_RUNTIME; 42 | }; 43 | 44 | #endif 45 | -------------------------------------------------------------------------------- /schedcat/util/iter.py: -------------------------------------------------------------------------------- 1 | # assorted sequence helpers 2 | 3 | from heapq import heapify, heappop, heappush 4 | 5 | class PrioObj(object): 6 | def __init__(self, val, le): 7 | self.val = val 8 | self.le = le 9 | 10 | def __str__(self): 11 | return str(self.val) 12 | 13 | def __le__(self, other): 14 | return self.le(self.val, other.val) 15 | 16 | 17 | def imerge(le, *iters): 18 | nxtheap = [] 19 | _le = lambda a, b: le(a[0], b[0]) 20 | for i in iters: 21 | try: 22 | it = iter(i) 23 | nxtheap.append(PrioObj((it.next(), it), _le)) 24 | except StopIteration: 25 | pass 26 | heapify(nxtheap) 27 | while nxtheap: 28 | wrapper = 
heappop(nxtheap) 29 | x, it = wrapper.val 30 | yield x 31 | try: 32 | wrapper.val = (it.next(), it) 33 | heappush(nxtheap, wrapper) 34 | except StopIteration: 35 | pass 36 | 37 | def uniq(seq): 38 | it = iter(seq) 39 | last = it.next() 40 | yield last 41 | for x in it: 42 | if x != last: 43 | last = x 44 | yield x 45 | -------------------------------------------------------------------------------- /example/mapping.py: -------------------------------------------------------------------------------- 1 | #Necessary includes and stuff 2 | 3 | from schedcat.mapping.rollback import Bin, WorstFit 4 | from schedcat.model.tasks import SporadicTask, TaskSystem 5 | 6 | def partition_tasks(cluster_size, clusters, dedicated_irq, 7 | taskset): 8 | first_cap = cluster_size - 1 if dedicated_irq \ 9 | else cluster_size 10 | first_bin = Bin(size=SporadicTask.utilization, 11 | capacity=first_cap) 12 | other_bins = [Bin(size=SporadicTask.utilization, 13 | capacity=cluster_size) 14 | for _ in xrange(1, clusters)] 15 | heuristic = WorstFit(initial_bins=[first_bin] + other_bins) 16 | heuristic.binpack(taskset) 17 | if not (heuristic.misfits): 18 | clusts = [TaskSystem(b.items) for b in heuristic.bins] 19 | for i, c in enumerate(clusts): 20 | if i == 0 and dedicated_irq: 21 | c.cpus = cluster_size - 1 22 | else: 23 | c.cpus = cluster_size 24 | for task in c: 25 | task.partition = i 26 | return [c for c in clusts if len(c) > 0] 27 | else: 28 | return False 29 | -------------------------------------------------------------------------------- /tests/__main__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import unittest 4 | 5 | import tests.model 6 | import tests.util 7 | import tests.generator 8 | import tests.quanta 9 | import tests.pfair 10 | import tests.edf 11 | import tests.fp 12 | import tests.fp_blocking 13 | import tests.binpack 14 | import tests.locking 15 | import tests.global_locking_analysis 16 | import tests.sim 17 | import tests.overheads 18 | import tests.canbus 19 | import tests.example_end_to_end 20 | import tests.apa 21 | import tests.sched 22 | 23 | suite = unittest.TestSuite( 24 | [unittest.defaultTestLoader.loadTestsFromModule(x) for x in 25 | [tests.model, 26 | tests.util, 27 | tests.generator, 28 | tests.quanta, 29 | tests.pfair, 30 | tests.edf, 31 | tests.fp, 32 | tests.fp_blocking, 33 | tests.binpack, 34 | tests.locking, 35 | tests.global_locking_analysis, 36 | tests.sim, 37 | tests.overheads, 38 | tests.canbus, 39 | tests.apa, 40 | tests.example_end_to_end, 41 | tests.sched] 42 | ]) 43 | 44 | def run_all_tests(): 45 | unittest.TextTestRunner(verbosity=2).run(suite) 46 | 47 | if __name__ == '__main__': 48 | run_all_tests() 49 | -------------------------------------------------------------------------------- /native/include/linprog/io.h: -------------------------------------------------------------------------------- 1 | #ifndef LINPROG_IO_H 2 | #define LINPROG_IO_H 3 | 4 | #include 5 | 6 | #include "linprog/model.h" 7 | #include "linprog/solver.h" 8 | #include "lp_common.h" 9 | 10 | std::ostream& pretty_print_linear_expression( 11 | std::ostream &os, 12 | const LinearExpression &exp, 13 | hashmap &var_names, 14 | const Solution *solution, /* may be NULL */ 15 | bool skip_zero_vars); 16 | 17 | std::ostream& pretty_print_linear_program( 18 | std::ostream &os, 19 | const LinearProgram &lp, 20 | hashmap &var_names, 21 | const Solution *solution = NULL, /* may be NULL */ 22 | bool skip_zero_vars = false); 23 | 24 | 
std::ostream& operator<<(std::ostream &os, const LinearExpression &exp); 25 | std::ostream& operator<<(std::ostream &os, const LinearProgram &lp); 26 | 27 | void dump_lp_solution( 28 | VarMapper& vars, 29 | const ResourceSharingInfo& info, 30 | const TaskInfo& ti, 31 | const Solution& solution, 32 | std::ostream& out = std::cout, 33 | bool show_zeros = false); 34 | 35 | void explain_objective_value( 36 | hashmap &var_names, 37 | const LinearProgram &lp, 38 | const Solution& solution, 39 | std::ostream& out = std::cout); 40 | 41 | 42 | #endif 43 | -------------------------------------------------------------------------------- /example/nolock_example_1: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /native/src/blocking/linprog/lp_no_progress_fifo.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "linprog/model.h" 7 | #include "linprog/varmapperbase.h" 8 | #include "linprog/solver.h" 9 | 10 | #include "sharedres_types.h" 11 | 12 | #include "iter-helper.h" 13 | #include "stl-helper.h" 14 | #include "stl-io-helper.h" 15 | 16 | #include 17 | #include 18 | #include "res_io.h" 19 | #include "linprog/io.h" 20 | 21 | #include "lp_global.h" 22 | 23 | class GlobalFIFONoProgressAnalysis 24 | : public GlobalNoProgressMechanismLP, public GlobalFIFOQueuesLP 25 | { 26 | 27 | public: 28 | GlobalFIFONoProgressAnalysis(const ResourceSharingInfo& info, 29 | unsigned int i, 30 | unsigned int number_of_cpus) 31 | : GlobalSuspensionAwareLP(info, i, number_of_cpus), 32 | GlobalNoProgressMechanismLP(info, i, number_of_cpus), 33 | GlobalFIFOQueuesLP(info, i, number_of_cpus) 34 | { 35 | } 36 | }; 37 | 38 | BlockingBounds* lp_no_progress_fifo_bounds( 39 | const ResourceSharingInfo& info, 40 | unsigned int number_of_cpus) 41 | { 42 | BlockingBounds* results = new BlockingBounds(info); 43 | 44 | for (unsigned int i = 0; i < info.get_tasks().size(); i++) 45 | { 46 | GlobalFIFONoProgressAnalysis lp(info, i, number_of_cpus); 47 | (*results)[i] = lp.solve(); 48 | } 49 | 50 | return results; 51 | } 52 | -------------------------------------------------------------------------------- /native/src/blocking/global-fmlp.cpp: -------------------------------------------------------------------------------- 1 | #include "sharedres.h" 2 | #include "blocking.h" 3 | 4 | #include "stl-helper.h" 5 | 6 | 7 | BlockingBounds* global_fmlp_bounds(const ResourceSharingInfo& info) 8 | { 9 | // split every thing by resources, sort, and then start counting. 
10 | Resources resources; 11 | 12 | split_by_resource(info, resources); 13 | sort_by_request_length(resources); 14 | 15 | 16 | unsigned int i; 17 | BlockingBounds* _results = new BlockingBounds(info); 18 | BlockingBounds& results = *_results; 19 | 20 | unsigned int num_tasks = info.get_tasks().size(); 21 | 22 | for (i = 0; i < info.get_tasks().size(); i++) 23 | { 24 | const TaskInfo& tsk = info.get_tasks()[i]; 25 | Interference bterm; 26 | 27 | 28 | foreach(tsk.get_requests(), jt) 29 | { 30 | const RequestBound& req = *jt; 31 | const ContentionSet& cs = 32 | resources[req.get_resource_id()]; 33 | 34 | unsigned long interval = tsk.get_response(); 35 | unsigned long issued = req.get_num_requests(); 36 | 37 | // every other task may block once per request 38 | unsigned int total_limit = (num_tasks - 1) * issued; 39 | unsigned int per_src_limit = issued; 40 | 41 | bterm += bound_blocking(cs, 42 | interval, 43 | total_limit, 44 | per_src_limit, 45 | &tsk); 46 | } 47 | 48 | results[i] = bterm; 49 | } 50 | 51 | return _results; 52 | } 53 | 54 | -------------------------------------------------------------------------------- /schedcat/locking/partition.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | 3 | from schedcat.model.tasks import TaskSystem 4 | 5 | def find_connected_components(taskset): 6 | """Determine sets of tasks that do not share any resources.""" 7 | by_res = defaultdict(set) 8 | by_task = {} 9 | for t in taskset: 10 | accessed = [res_id for res_id in t.resmodel 11 | if t.resmodel[res_id].max_requests > 0] 12 | if accessed: 13 | res_id = accessed[0] 14 | by_res[res_id].add(t) 15 | # merge all others, if they are different 16 | for other in accessed[1:]: 17 | by_res[res_id].update(by_res[other]) 18 | by_res[other] = by_res[res_id] 19 | else: 20 | # independent task -> singleton set 21 | by_task[t] = set([t]) 22 | 23 | for c in by_res.values(): 24 | for t in c: 25 | if t in by_task: 26 | break 27 | by_task[t] = c 28 | 29 | return by_task, by_res 30 | 31 | def find_independent_tasksubsets(taskset): 32 | by_task, by_res = find_connected_components(taskset) 33 | done = set() 34 | subsets = [] 35 | 36 | for t in by_task: 37 | if not t in done: 38 | subsets.append(TaskSystem(by_task[t])) 39 | done.update(by_task[t]) 40 | 41 | return subsets 42 | -------------------------------------------------------------------------------- /native/src/blocking/linprog/lp_no_progress_priority.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "linprog/model.h" 7 | #include "linprog/varmapperbase.h" 8 | #include "linprog/solver.h" 9 | 10 | #include "sharedres_types.h" 11 | 12 | #include "iter-helper.h" 13 | #include "stl-helper.h" 14 | #include "stl-io-helper.h" 15 | 16 | #include 17 | #include 18 | #include "res_io.h" 19 | #include "linprog/io.h" 20 | 21 | #include "lp_global.h" 22 | 23 | 24 | class GlobalPrioNoProgressAnalysis 25 | : public GlobalNoProgressMechanismLP, public GlobalPriorityQueuesLP 26 | { 27 | 28 | public: 29 | GlobalPrioNoProgressAnalysis(const ResourceSharingInfo& info, 30 | unsigned int i, 31 | unsigned int number_of_cpus) 32 | : GlobalSuspensionAwareLP(info, i, number_of_cpus), 33 | GlobalNoProgressMechanismLP(info, i, number_of_cpus), 34 | GlobalPriorityQueuesLP(info, i, number_of_cpus) 35 | { 36 | } 37 | }; 38 | 39 | BlockingBounds* lp_no_progress_priority_bounds( 40 | const ResourceSharingInfo& info, 
41 | unsigned int number_of_cpus) 42 | { 43 | BlockingBounds* results = new BlockingBounds(info); 44 | 45 | for (unsigned int i = 0; i < info.get_tasks().size(); i++) 46 | { 47 | GlobalPrioNoProgressAnalysis lp(info, i, number_of_cpus); 48 | (*results)[i] = lp.solve(); 49 | } 50 | 51 | return results; 52 | } 53 | -------------------------------------------------------------------------------- /native/src/edf/load.cpp: -------------------------------------------------------------------------------- 1 | #include "tasks.h" 2 | #include "schedulability.h" 3 | 4 | #include "edf/load.h" 5 | 6 | #include 7 | #include 8 | 9 | /* This implements the LOAD test presented in: 10 | * 11 | * Baker & Baruah (2009), An analysis of global EDF schedulability for 12 | * arbitrary-deadline sporadic task systems, Real-Time Systems, volume 43, 13 | * pages 3-24. 14 | */ 15 | 16 | bool LoadGedf::is_schedulable(const TaskSet &ts, bool check_preconditions) 17 | { 18 | if (check_preconditions) 19 | { 20 | if (!(ts.has_only_feasible_tasks() 21 | && ts.is_not_overutilized(m) 22 | && ts.has_no_self_suspending_tasks())) 23 | return false; 24 | } 25 | 26 | fractional_t load, max_density, mu, bound, cond1, cond2; 27 | integral_t mu_ceil; 28 | 29 | // get the load of the task set 30 | ts.approx_load(load, epsilon); 31 | 32 | // compute bound (corollary 2) 33 | ts.get_max_density(max_density); 34 | 35 | mu = m - (m - 1) * max_density; 36 | 37 | mu_ceil = mu.get_num(); 38 | // divide with ceiling 39 | mpz_cdiv_q(mu_ceil.get_mpz_t(), 40 | mu.get_num().get_mpz_t(), 41 | mu.get_den().get_mpz_t()); 42 | 43 | cond1 = mu - (mu_ceil - 1) * max_density; 44 | cond2 = (mu_ceil - 1) - (mu_ceil - 2) * max_density; 45 | 46 | bound = std::max(cond1, cond2); 47 | 48 | return load <= bound; 49 | } 50 | -------------------------------------------------------------------------------- /native/include/cpu_time.h: -------------------------------------------------------------------------------- 1 | #ifndef CPU_TIME_H 2 | #define CPU_TIME_H 3 | 4 | #include 5 | 6 | // How much CPU time used (in seconds)? 7 | double get_cpu_usage(void); 8 | 9 | class CPUClock 10 | { 11 | private: 12 | const char *name; 13 | const char *func; 14 | 15 | unsigned int count; 16 | 17 | double start_time; 18 | double last; 19 | double total; 20 | 21 | public: 22 | CPUClock(const char *_name = 0, const char *_func = 0) 23 | : name(_name), func(_func), 24 | count(0), start_time(0), last(0), total(0) 25 | {} 26 | 27 | void start() 28 | { 29 | start_time = get_cpu_usage(); 30 | } 31 | 32 | void stop() 33 | { 34 | last = get_cpu_usage() - start_time; 35 | total += last; 36 | count++; 37 | } 38 | 39 | double get_total() const 40 | { 41 | return total; 42 | } 43 | 44 | double get_last() const 45 | { 46 | return last; 47 | } 48 | 49 | double get_count() const 50 | { 51 | return count; 52 | } 53 | 54 | double get_average() const 55 | { 56 | return total / ( count ? 
count : 1); 57 | } 58 | 59 | const char *get_name() const 60 | { 61 | return name; 62 | } 63 | 64 | const char *get_function() const 65 | { 66 | return func; 67 | } 68 | }; 69 | 70 | std::ostream& operator<<(std::ostream &os, const CPUClock &clock); 71 | 72 | char* strip_types(const char* pretty_func); 73 | 74 | #define DEFINE_CPU_CLOCK(var) CPUClock var = CPUClock(#var, strip_types(__PRETTY_FUNCTION__)) 75 | 76 | #endif 77 | -------------------------------------------------------------------------------- /native/src/blocking/linprog/lp_global_pip.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "linprog/model.h" 7 | #include "linprog/varmapperbase.h" 8 | #include "linprog/solver.h" 9 | 10 | #include "sharedres_types.h" 11 | 12 | #include "iter-helper.h" 13 | #include "stl-helper.h" 14 | #include "stl-io-helper.h" 15 | 16 | #include 17 | #include 18 | #include "res_io.h" 19 | #include "linprog/io.h" 20 | 21 | #include "lp_global.h" 22 | 23 | 24 | class GlobalPIPAnalysis : public GlobalPrioInheritanceLP, public GlobalPriorityQueuesLP 25 | { 26 | 27 | public: 28 | GlobalPIPAnalysis(const ResourceSharingInfo& info, 29 | unsigned int i, 30 | unsigned int number_of_cpus) 31 | : GlobalSuspensionAwareLP(info, i, number_of_cpus), 32 | GlobalPrioInheritanceLP(info, i, number_of_cpus), 33 | GlobalPriorityQueuesLP(info, i, number_of_cpus) 34 | { 35 | // Protocol-specific constraints 36 | 37 | // Constraint 11 38 | add_pip_fmlp_no_stalling_interference(); 39 | // Constraint 12 40 | add_pip_ppcp_indirect_preemption_constraints(); 41 | } 42 | }; 43 | 44 | 45 | BlockingBounds* lp_global_pip_bounds( 46 | const ResourceSharingInfo& info, 47 | unsigned int number_of_cpus) 48 | { 49 | BlockingBounds* results = new BlockingBounds(info); 50 | 51 | for (unsigned int i = 0; i < info.get_tasks().size(); i++) 52 | { 53 | GlobalPIPAnalysis lp(info, i, number_of_cpus); 54 | (*results)[i] = lp.solve(); 55 | } 56 | 57 | return results; 58 | } 59 | -------------------------------------------------------------------------------- /native/src/blocking/linprog/lp_global_fifo.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "linprog/varmapperbase.h" 7 | #include "linprog/solver.h" 8 | 9 | #include "sharedres_types.h" 10 | 11 | #include "iter-helper.h" 12 | #include "stl-helper.h" 13 | #include "stl-io-helper.h" 14 | #include "math-helper.h" 15 | 16 | #include 17 | #include 18 | #include "res_io.h" 19 | #include "linprog/io.h" 20 | 21 | #include "lp_global.h" 22 | 23 | GlobalFIFOQueuesLP::GlobalFIFOQueuesLP( 24 | const ResourceSharingInfo& info, 25 | unsigned int task_index, 26 | unsigned int number_of_cpus) 27 | : GlobalSuspensionAwareLP(info, task_index, number_of_cpus) 28 | { 29 | // Constraint 8 30 | add_fifo_direct_constraints(); 31 | } 32 | 33 | // Constraint 8: each other task directly delays Ji for at most once 34 | //under FIFO-based protocols. 
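// Concretely: for each resource q and each other task Tx, the sum of the
// direct-blocking variables over all of Tx's request instances for q is
// bounded by the number of requests that the analyzed job itself issues
// for q, i.e., at most one direct delay per own request.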
35 | void GlobalFIFOQueuesLP::add_fifo_direct_constraints() 36 | { 37 | foreach(all_resources, res_id) 38 | { 39 | const unsigned int num_of_requests = ti.get_num_requests(*res_id); 40 | 41 | foreach_task_except(taskset, ti, tx) 42 | { 43 | const unsigned int x = tx->get_id(); 44 | const unsigned int q = *res_id; 45 | 46 | foreach_request_for(tx->get_requests(), q, request) 47 | { 48 | LinearExpression *exp = new LinearExpression(); 49 | 50 | foreach_request_instance(*request, ti, v) 51 | exp->add_var(vars.direct(x, q, v)); 52 | 53 | add_inequality(exp, num_of_requests); 54 | } 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /schedcat/sched/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | # Python model to C++ model conversion code. 3 | 4 | 5 | try: 6 | from .native import TaskSet 7 | 8 | using_native = True 9 | 10 | def get_native_taskset(tasks, with_suspensions=False): 11 | ts = TaskSet() 12 | if with_suspensions: 13 | for t in tasks: 14 | if (hasattr(t, 'prio_pt')): 15 | ts.add_task(t.cost, t.period, t.deadline, t.prio_pt, t.suspended) 16 | else: 17 | ts.add_task(t.cost, t.period, t.deadline, 0, t.suspended) 18 | else: 19 | for t in tasks: 20 | if (hasattr(t, 'prio_pt')): 21 | ts.add_task(t.cost, t.period, t.deadline, t.prio_pt) 22 | else: 23 | ts.add_task(t.cost, t.period, t.deadline) 24 | return ts 25 | 26 | except ImportError: 27 | # Nope, C++ impl. not available. Use Python implementation. 28 | using_native = False 29 | using_linprog = False 30 | def get_native_taskset(tasks): 31 | assert False # C++ implementation not available 32 | 33 | if using_native: 34 | try: 35 | from .native import AffinityRestrictions 36 | 37 | using_linprog = True 38 | 39 | def get_native_affinities(tasks): 40 | afs = AffinityRestrictions() 41 | 42 | for i, t in enumerate(tasks): 43 | for cpu in t.affinity: 44 | afs.add_cpu(i, cpu) 45 | 46 | return afs 47 | 48 | except ImportError: 49 | using_linprog = False 50 | -------------------------------------------------------------------------------- /tests/pfair.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import unittest 4 | 5 | from fractions import Fraction 6 | 7 | import schedcat.sched.pfair as p 8 | import schedcat.model.tasks as tasks 9 | 10 | class Pfair(unittest.TestCase): 11 | def setUp(self): 12 | self.ts = tasks.TaskSystem([ 13 | tasks.SporadicTask(80, 100), 14 | tasks.SporadicTask(33, 66), 15 | tasks.SporadicTask(7, 10), 16 | ]) 17 | 18 | def test_bound(self): 19 | self.assertTrue(p.is_schedulable(2, self.ts)) 20 | self.assertFalse(p.is_schedulable(1, self.ts)) 21 | 22 | def test_deadlines(self): 23 | self.ts[0].deadline = 300 24 | self.ts[2].deadline = 11 25 | 26 | self.assertTrue(p.is_schedulable(2, self.ts)) 27 | self.assertFalse(p.is_schedulable(1, self.ts)) 28 | 29 | self.ts[1].deadline = 50 30 | 31 | self.assertFalse(p.is_schedulable(2, self.ts)) 32 | self.assertFalse(p.is_schedulable(1, self.ts)) 33 | 34 | self.assertTrue(p.has_bounded_tardiness(2, self.ts)) 35 | self.assertFalse(p.has_bounded_tardiness(1, self.ts)) 36 | 37 | def test_tardiness(self): 38 | self.ts[0].deadline = 300 39 | self.ts[1].deadline = 50 40 | self.ts[2].deadline = 11 41 | 42 | self.assertTrue(p.bound_response_times(2, self.ts)) 43 | 44 | self.assertEqual(self.ts[0].tardiness(), 0) 45 | self.assertEqual(self.ts[1].tardiness(), 16) 46 | self.assertEqual(self.ts[2].tardiness(), 0) 
47 | 48 | self.assertFalse(p.bound_response_times(1, self.ts)) 49 | -------------------------------------------------------------------------------- /native/src/schedule_sim.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include "tasks.h" 3 | #include "schedule_sim.h" 4 | 5 | Job::Job(const Task &tsk, 6 | unsigned long relt, 7 | unsigned long sequence_no, 8 | unsigned long cst) 9 | : task(tsk), release(relt), allocation(0), seqno(sequence_no) 10 | { 11 | if (!cst) 12 | cost = task.get_wcet(); 13 | else 14 | cost = cst; 15 | } 16 | 17 | void Job::init_next(simtime_t cost, 18 | simtime_t inter_arrival_time) 19 | { 20 | allocation = 0; 21 | /* if cost == 0, then we keep the last cost */ 22 | if (cost != 0) 23 | this->cost = cost; 24 | release += task.get_period() + inter_arrival_time; 25 | seqno++; 26 | } 27 | 28 | template<> 29 | void PeriodicJobSequence::completed(simtime_t when, int proc) 30 | { 31 | init_next(); 32 | get_sim()->add_release(this); 33 | } 34 | 35 | 36 | void run_periodic_simulation(ScheduleSimulation& sim, 37 | TaskSet& ts, 38 | simtime_t end_of_simulation) 39 | { 40 | PeriodicJobSequence** jobs; 41 | 42 | jobs = new PeriodicJobSequence*[ts.get_task_count()]; 43 | for (unsigned int i = 0; i < ts.get_task_count(); i++) 44 | { 45 | jobs[i] = new PeriodicJobSequence(ts[i]); 46 | jobs[i]->set_simulation(&sim); 47 | sim.add_release(jobs[i]); 48 | } 49 | 50 | sim.simulate_until(end_of_simulation); 51 | 52 | for (unsigned int i = 0; i < ts.get_task_count(); i++) 53 | delete jobs[i]; 54 | delete [] jobs; 55 | } 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /example/oh_host=ludwig_scheduler=C-FL-L2-RM_stat=avg.csv: -------------------------------------------------------------------------------- 1 | TASK-COUNT, CXS, RELEASE-LATENCY, RELEASE, SCHEDULE, IPI-LATENCY, TICK 2 | 2, 6.72978, 29.72057, 9.97856, 14.59542, 4.26253, 1.49529 3 | 4, 5.50067, 37.68176, 9.54492, 11.59883, 3.45727, 1.61648 4 | 6, 4.66079, 40.31913, 11.52656, 11.37082, 3.43112, 1.79663 5 | 8, 4.93466, 46.52168, 12.02322, 12.24551, 3.74841, 2.34484 6 | 10, 4.81129, 58.24624, 14.5638, 12.2199, 4.00761, 2.46287 7 | 12, 4.36334, 60.08797, 11.03959, 12.10727, 3.86881, 2.51114 8 | 14, 4.02139, 64.88597, 12.56629, 11.80076, 3.91408, 2.50994 9 | 16, 3.95173, 104.25153, 15.86581, 10.87721, 3.92055, 2.52389 10 | 18, 3.99141, 107.68098, 16.30253, 9.09979, 3.91341, 2.52411 11 | 20, 3.96936, 119.31053, 18.05341, 8.95408, 3.786, 2.46374 12 | 22, 4.35578, 123.65897, 19.04576, 8.5018, 3.82484, 2.50819 13 | 24, 3.94454, 123.59384, 19.86306, 8.36399, 3.81661, 2.49741 14 | 26, 4.20989, 120.2916, 19.31057, 8.15249, 3.85725, 2.52141 15 | 28, 3.95358, 132.32261, 20.64414, 7.9894, 3.81458, 2.531 16 | 30, 4.55631, 127.72966, 19.42275, 7.8974, 3.88953, 2.41506 17 | 32, 4.81653, 124.99143, 19.50481, 7.93598, 3.88261, 2.49613 18 | 34, 4.11259, 125.48338, 19.85067, 7.85752, 3.87543, 2.59852 19 | 36, 4.22586, 122.76203, 19.85806, 7.85018, 3.86897, 2.55698 20 | 38, 4.8531, 114.62965, 19.0961, 7.84962, 3.91242, 2.55948 21 | 40, 4.19305, 131.81302, 20.62952, 7.81311, 3.89458, 2.60879 22 | -------------------------------------------------------------------------------- /example/overheads.py: -------------------------------------------------------------------------------- 1 | #Necessary includes and stuff 2 | 3 | from schedcat.overheads.model import Overheads, CacheDelay 4 | from 
schedcat.overheads.jlfp import charge_scheduling_overheads, \ 5 | quantize_params 6 | from schedcat.sched.edf.gel_pl import bound_gfl_response_times, \ 7 | has_bounded_tardiness 8 | 9 | def copy_lock_overheads(oh, lock_oh): 10 | oh.lock = lock_oh.lock 11 | oh.unlock = lock_oh.unlock 12 | oh.read_lock = lock_oh.read_lock 13 | oh.read_unlock = lock_oh.read_unlock 14 | oh.syscall_in = lock_oh.syscall_in 15 | oh.syscall_out = lock_oh.syscall_out 16 | 17 | def get_oh_object(basic_oh, lock_oh, cache_oh, cache_level): 18 | oh = Overheads.from_file(basic_oh) 19 | oh.initial_cache_loss = \ 20 | CacheDelay.from_file(cache_oh).__dict__[cache_level] 21 | oh.cache_affinity_loss = \ 22 | CacheDelay.from_file(cache_oh).__dict__[cache_level] 23 | if lock_oh is not None: 24 | lock_oh = Overheads.from_file(lock_oh) 25 | copy_lock_overheads(oh, lock_oh) 26 | return oh 27 | 28 | #Assumes absence of locking 29 | def bound_cfl_with_oh(oheads, dedicated_irq, clusts): 30 | for clust in clusts: 31 | success = charge_scheduling_overheads(oheads, clust.cpus, 32 | dedicated_irq, 33 | clust) 34 | quantize_params(clust) 35 | if (success and has_bounded_tardiness(clust.cpus, clust)): 36 | bound_gfl_response_times(clust.cpus, clust, 15) 37 | else: 38 | return False 39 | return True 40 | -------------------------------------------------------------------------------- /native/src/cpu_time.cpp: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | 8 | #include "cpu_time.h" 9 | 10 | 11 | #if _POSIX_C_SOURCE >= 199309L 12 | 13 | // use clock_xxx() API 14 | 15 | double get_cpu_usage(void) 16 | { 17 | struct timespec ts; 18 | 19 | if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0) 20 | { 21 | return ts.tv_sec + ts.tv_nsec / 1E9; 22 | } 23 | else 24 | return 0.0; 25 | } 26 | 27 | 28 | #else 29 | 30 | // fall back to getrusage() 31 | 32 | #ifdef RUSAGE_THREAD 33 | // This is a Linuxism... 34 | #define ACCOUNTING_SCOPE RUSAGE_THREAD 35 | #else 36 | // This is POSIX. 37 | #define ACCOUNTING_SCOPE RUSAGE_SELF 38 | #endif 39 | 40 | double get_cpu_usage(void) 41 | { 42 | struct rusage u; 43 | if (getrusage(ACCOUNTING_SCOPE, &u) == 0) 44 | { 45 | return u.ru_utime.tv_sec + u.ru_utime.tv_usec / 1E6; 46 | } 47 | else 48 | return 0.0; 49 | } 50 | 51 | #endif 52 | 53 | 54 | std::ostream& operator<<(std::ostream &os, const CPUClock &clock) 55 | { 56 | if (clock.get_function()) 57 | os << clock.get_function() << "::"; 58 | os << clock.get_name() 59 | << ": total=" << clock.get_total() * 1000 << "ms " 60 | << "last=" << clock.get_last() * 1000 << "ms " 61 | << "average=" << clock.get_average() * 1000 << "ms " 62 | << "count=" << clock.get_count(); 63 | return os; 64 | } 65 | 66 | char *strip_types(const char* pretty_func) 67 | { 68 | char *copy = strdup(pretty_func); 69 | 70 | char *start = strchr(copy, ' '); 71 | char *end = strchr(copy, '('); 72 | 73 | if (start) 74 | copy = start + 1; 75 | if (end) 76 | *end = '\0'; 77 | 78 | return copy; 79 | } 80 | -------------------------------------------------------------------------------- /config.mk.example: -------------------------------------------------------------------------------- 1 | # Example Make configuration file 2 | # To use, rename this file to config.mk. 
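# Editor's note (not part of the original file): every setting below is
# optional; a working config.mk can be as small as a single assignment,
# e.g.
#
#    GMP_PATH=/usr/local
#
# and all paths shown in the examples are placeholders for local installs.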
3 | 4 | ############ Dependencies ############ 5 | 6 | ### GMP_PATH ### 7 | # set GMP_PATH to find dependency in non-standard location 8 | # Example: 9 | # 10 | # GMP_PATH=/usr/local 11 | 12 | ### SWIG ### 13 | # set SWIG to find swig binary if not in current $PATH 14 | # Example: 15 | # 16 | # SWIG=/usr/local/latest-swig/bin/swig 17 | 18 | ### PYTHON_INC and PYTHON_LIB ### 19 | # Point the Makefile to where the Python headers can be found. 20 | # Usually, this option is not required, unless you have multiple Python 21 | # implementations (e.g., on macOS + homebrew). 22 | # Note: need to include the -I flag for gcc/clang. 23 | # Example linking against Homebrew Python on macOS Sierra: 24 | # 25 | # PYTHON_INC= -I/usr/local/Cellar/python/2.7.11/Frameworks/Python.framework/Headers 26 | # PYTHON_LIB= -F/usr/local/Cellar/python/2.7.11/Frameworks/ -framework Python 27 | 28 | ### CPLEX_PATH ### 29 | # Where to find the local CPLEX installation 30 | # Note: set to the empty string to prevent linking against CPLEX. 31 | # Examples: 32 | # 33 | # CPLEX_PATH=/opt/ibm/ILOG/CPLEX_Studio1263 34 | # 35 | # CPLEX_PATH= # This disables CPLEX even if it auto-discovered by the Makefile. 36 | 37 | ### GLPK_PATH ### 38 | # Where to find the local GLPK installation 39 | # Note: set to the empty string to prevent linking against GLPK. 40 | # Examples: 41 | # 42 | # GLPK_PATH=/usr/local 43 | # 44 | # GLPK_PATH= # This disables GLPK even if it auto-discovered by the Makefile. 45 | 46 | 47 | 48 | ############ Other Options ############ 49 | 50 | ### DEBUG ### 51 | # set DEBUG=y to disable optimizations and enable debug info 52 | # DEBUG=y 53 | 54 | -------------------------------------------------------------------------------- /native/src/blocking/global-omlp.cpp: -------------------------------------------------------------------------------- 1 | #include "sharedres.h" 2 | #include "blocking.h" 3 | 4 | #include "stl-helper.h" 5 | 6 | BlockingBounds* global_omlp_bounds(const ResourceSharingInfo& info, 7 | unsigned int num_procs) 8 | { 9 | // split every thing by resources, sort, and then start counting. 10 | Resources resources; 11 | 12 | split_by_resource(info, resources); 13 | sort_by_request_length(resources); 14 | 15 | unsigned int i; 16 | BlockingBounds* _results = new BlockingBounds(info); 17 | BlockingBounds& results = *_results; 18 | 19 | for (i = 0; i < info.get_tasks().size(); i++) 20 | { 21 | const TaskInfo& tsk = info.get_tasks()[i]; 22 | Interference bterm; 23 | 24 | foreach(tsk.get_requests(), jt) 25 | { 26 | const RequestBound& req = *jt; 27 | const ContentionSet& cs = 28 | resources[req.get_resource_id()]; 29 | 30 | unsigned int num_sources = cs.size(); 31 | unsigned long interval = tsk.get_response(); 32 | unsigned long issued = req.get_num_requests(); 33 | 34 | 35 | unsigned int total_limit = (2 * num_procs - 1) * issued; 36 | // Derived in the dissertation: at most twice per request. 37 | unsigned int per_src_limit = 2 * issued; 38 | 39 | if (num_sources <= num_procs + 1) { 40 | // FIFO case: no job is ever skipped in the 41 | // priority queue (since at most one job is in 42 | // PQ at any time). 43 | // Lemma 15 in RTSS'10: at most one blocking 44 | // request per source per issued request. 
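// Hence the per-source limit drops from 2 * issued to issued, and only
// the (num_sources - 1) other contenders can contribute blocking at all.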
45 | per_src_limit = issued; 46 | total_limit = (num_sources - 1) * issued; 47 | } 48 | 49 | bterm += bound_blocking(cs, 50 | interval, 51 | total_limit, 52 | per_src_limit, 53 | &tsk); 54 | } 55 | 56 | results[i] = bterm; 57 | } 58 | 59 | return _results; 60 | } 61 | 62 | -------------------------------------------------------------------------------- /schedcat/util/csv.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import csv 4 | 5 | from .storage import storage 6 | 7 | def load_columns(fname, 8 | convert=lambda x: x, 9 | expect_uniform=True): 10 | """Load a file of CSV data. The first row is assumed 11 | to contain column labels. These labels can then be used to 12 | reference individual columns. 13 | 14 | x = load_column_csv(...) 15 | x.by_name -> columns by name 16 | x.by_idx -> columns by index in the file 17 | x.columns -> all columns 18 | """ 19 | if isinstance(fname, str): 20 | f = open(fname) 21 | else: 22 | # assume we got a file object 23 | f = fname 24 | d = list(csv.reader(f)) 25 | if fname != f: 26 | f.close() 27 | 28 | # infer column labels 29 | col_idx = {} 30 | for i, key in enumerate(d[0]): 31 | col_idx[key.strip()] = i 32 | 33 | max_idx = i 34 | 35 | data = d[1:] 36 | 37 | if expect_uniform: 38 | for row in data: 39 | if len(row) != max_idx + 1: 40 | print len(row), max_idx 41 | msg = "expected uniform row length (%s:%d)" % \ 42 | (fname, data.index(row) + 1) 43 | raise IOError, msg # bad row length 44 | 45 | # column iterator 46 | def col(i): 47 | for row in data: 48 | if row: 49 | yield convert(row[i]) 50 | 51 | by_col_name = {} 52 | by_col_idx = [0] * (max_idx + 1) 53 | 54 | for key in col_idx: 55 | by_col_name[key] = list(col(col_idx[key])) 56 | by_col_idx[col_idx[key]] = by_col_name[key] 57 | 58 | return storage(name=fname, columns=col_idx, 59 | by_name=by_col_name, by_idx=by_col_idx) 60 | -------------------------------------------------------------------------------- /native/include/linprog/varmapperbase.h: -------------------------------------------------------------------------------- 1 | #ifndef VARMAPPERBASE_H 2 | #define VARMAPPERBASE_H 3 | 4 | #include 5 | #include 6 | 7 | #include "stl-helper.h" 8 | #include "stl-hashmap.h" 9 | 10 | class VarMapperBase { 11 | 12 | private: 13 | hashmap map; 14 | unsigned int next_var; 15 | bool sealed; 16 | 17 | protected: 18 | void insert(uint64_t key) 19 | { 20 | assert(next_var < UINT_MAX); 21 | assert(!sealed); 22 | 23 | unsigned int idx = next_var++; 24 | map[key] = idx; 25 | } 26 | 27 | bool exists(uint64_t key) const 28 | { 29 | return map.count(key) > 0; 30 | } 31 | 32 | unsigned int get(uint64_t key) 33 | { 34 | return map[key]; 35 | } 36 | 37 | unsigned int var_for_key(uint64_t key) 38 | { 39 | if (!exists(key)) 40 | insert(key); 41 | return get(key); 42 | } 43 | 44 | bool search_key_for_var(unsigned int var, uint64_t &key) const 45 | { 46 | foreach(map, it) 47 | { 48 | if (it->second == var) 49 | { 50 | key = it->first; 51 | return true; 52 | } 53 | } 54 | return false; 55 | } 56 | 57 | public: 58 | 59 | VarMapperBase(unsigned int start_var = 0) 60 | : next_var(start_var), sealed(false) 61 | {} 62 | 63 | 64 | // stop new IDs from being generated 65 | void seal() 66 | { 67 | sealed = true; 68 | } 69 | 70 | unsigned int get_num_vars() const 71 | { 72 | return map.size(); 73 | } 74 | 75 | unsigned int get_next_var() const 76 | { 77 | return next_var; 78 | } 79 | 80 | 81 | // debugging support 82 | 83 | std::string 
var2str(unsigned int var) const; 84 | 85 | // should be overridden by children 86 | virtual std::string key2str(uint64_t key, unsigned int var) const; 87 | 88 | hashmap get_translation_table() const; 89 | }; 90 | 91 | 92 | #endif 93 | -------------------------------------------------------------------------------- /native/include/apa_feas.h: -------------------------------------------------------------------------------- 1 | #ifndef APA_FEAS_H 2 | #define APA_FEAS_H 3 | 4 | #ifndef SWIG 5 | 6 | #include 7 | 8 | #include "stl-hashmap.h" 9 | 10 | #endif 11 | 12 | 13 | typedef hashset Affinity; 14 | typedef std::vector Affinities; 15 | 16 | class AffinityRestrictions 17 | { 18 | 19 | private: 20 | // set of allowed CPUs for each task 21 | Affinities affinities; 22 | 23 | public: 24 | 25 | const Affinities & get_affinities() const 26 | { 27 | return affinities; 28 | } 29 | 30 | void add_cpu(unsigned int task_id, unsigned int allowed_cpu) 31 | { 32 | while (affinities.size() <= task_id) 33 | affinities.push_back(Affinity()); 34 | 35 | affinities[task_id].insert(allowed_cpu); 36 | } 37 | 38 | unsigned int get_task_count() const 39 | { 40 | return affinities.size(); 41 | } 42 | 43 | Affinity get_all_cpus() const; 44 | }; 45 | 46 | class APAFeasibleSolution 47 | { 48 | 49 | private: 50 | // for each task, for each CPU 51 | std::vector< std::vector > allocation; 52 | 53 | public: 54 | 55 | double get_fraction(unsigned int task_id, unsigned int on_cpu) const 56 | { 57 | if (allocation.size() <= task_id) 58 | return 0; 59 | 60 | if (allocation[task_id].size() <= on_cpu) 61 | return 0; 62 | 63 | return allocation[task_id][on_cpu]; 64 | } 65 | 66 | void set_fraction(unsigned int task_id, unsigned int on_cpu, double frac) 67 | { 68 | while (allocation.size() <= task_id) 69 | allocation.push_back(std::vector()); 70 | 71 | while (allocation[task_id].size() <= on_cpu) 72 | allocation[task_id].push_back(0); 73 | 74 | allocation[task_id][on_cpu] = frac; 75 | } 76 | 77 | }; 78 | 79 | 80 | APAFeasibleSolution* apa_implicit_deadline_feasible( 81 | const TaskSet &ts, const AffinityRestrictions &taskset_affinities); 82 | 83 | #endif 84 | -------------------------------------------------------------------------------- /native/interface/sched.i: -------------------------------------------------------------------------------- 1 | %module sched 2 | %{ 3 | #define SWIG_FILE_WITH_INIT 4 | #include "tasks.h" 5 | #include "schedulability.h" 6 | #include "edf/baker.h" 7 | #include "edf/gfb.h" 8 | #include "edf/baruah.h" 9 | #include "edf/bcl.h" 10 | #include "edf/bcl_iterative.h" 11 | #include "edf/rta.h" 12 | #include "edf/ffdbf.h" 13 | #include "edf/load.h" 14 | #include "edf/gedf.h" 15 | #include "edf/gel_pl.h" 16 | #include "edf/qpa.h" 17 | #include "edf/la.h" 18 | 19 | #ifdef CONFIG_HAVE_LP 20 | #include "apa_feas.h" 21 | #endif 22 | 23 | %} 24 | 25 | %ignore Task::get_utilization(fractional_t &util) const; 26 | %ignore Task::get_density(fractional_t &density) const; 27 | %ignore Task::bound_demand(const integral_t &time, integral_t &demand) const; 28 | %ignore Task::bound_load const; 29 | %ignore Task::approx_demand const; 30 | %ignore Task::dbf; 31 | 32 | %ignore TaskSet::operator[](int); 33 | %ignore TaskSet::operator[](int) const; 34 | %ignore TaskSet::get_utilization const; 35 | %ignore TaskSet::get_density const; 36 | %ignore TaskSet::get_max_density const; 37 | %ignore TaskSet::approx_load const; 38 | 39 | %ignore QPATest::get_demand(integral_t interval, const TaskSet &ts); 40 | %ignore 
QPATest::get_max_interval(const TaskSet &ts, const fractional_t& util); 41 | 42 | #include "tasks.h" 43 | #include "schedulability.h" 44 | #include "edf/baker.h" 45 | #include "edf/gfb.h" 46 | #include "edf/baruah.h" 47 | #include "edf/bcl.h" 48 | #include "edf/bcl_iterative.h" 49 | #include "edf/rta.h" 50 | #include "edf/ffdbf.h" 51 | #include "edf/load.h" 52 | #include "edf/gedf.h" 53 | #include "edf/gel_pl.h" 54 | #include "edf/qpa.h" 55 | #include "edf/la.h" 56 | 57 | #ifdef CONFIG_HAVE_LP 58 | %ignore APAFeasibleSolution::set_fraction; 59 | %ignore AffinityRestrictions::get_affinities; 60 | 61 | %newobject apa_implicit_deadline_feasible; 62 | 63 | #include "apa_feas.h" 64 | 65 | #endif -------------------------------------------------------------------------------- /native/include/stl-helper.h: -------------------------------------------------------------------------------- 1 | #ifndef STL_HELPER_H 2 | #define STL_HELPER_H 3 | 4 | #include 5 | 6 | #define foreach(collection, it) \ 7 | for (auto it = (collection).begin(); \ 8 | it != (collection).end(); \ 9 | it++) 10 | 11 | #define enumerate(collection, it, i) \ 12 | for (auto it = ({i = 0; (collection).begin();}); \ 13 | it != (collection).end(); \ 14 | it++, i++) 15 | 16 | #define apply_foreach(collection, fun, ...) \ 17 | foreach(collection, __apply_it_ ## collection) { \ 18 | fun(*__apply_it_ ## collection, ## __VA_ARGS__); \ 19 | } 20 | 21 | #define map_ref(from, to, init, fun, ...) \ 22 | { \ 23 | (to).clear(); \ 24 | (to).reserve((from).size()); \ 25 | foreach(from, __map_ref_it) { \ 26 | (to).push_back(init()); \ 27 | fun(*__map_ref_it, (to).back(), \ 28 | ## __VA_ARGS__); \ 29 | } \ 30 | } 31 | 32 | // From: http://stackoverflow.com/questions/1964150/c-test-if-2-sets-are-disjoint 33 | template 34 | bool is_disjoint(const Set1 &set1, const Set2 &set2) 35 | { 36 | if(set1.empty() || set2.empty()) return true; 37 | 38 | typename Set1::const_iterator 39 | it1 = set1.begin(), 40 | it1End = set1.end(); 41 | typename Set2::const_iterator 42 | it2 = set2.begin(), 43 | it2End = set2.end(); 44 | 45 | if(*it1 > *set2.rbegin() || *it2 > *set1.rbegin()) return true; 46 | 47 | while(it1 != it1End && it2 != it2End) 48 | { 49 | if(*it1 == *it2) return false; 50 | if(*it1 < *it2) { it1++; } 51 | else { it2++; } 52 | } 53 | 54 | return true; 55 | } 56 | 57 | template 58 | bool is_subset_of(const Set1 &set1, const Set2 &set2) 59 | { 60 | return std::includes(set2.begin(), set2.end(), 61 | set1.begin(), set1.end()); 62 | } 63 | 64 | 65 | #endif 66 | -------------------------------------------------------------------------------- /native/src/edf/baker.cpp: -------------------------------------------------------------------------------- 1 | #include // for min 2 | 3 | #include "tasks.h" 4 | #include "schedulability.h" 5 | 6 | #include "edf/baker.h" 7 | 8 | using namespace std; 9 | 10 | void BakerGedf::beta(const Task &t_i, const Task &t_k, 11 | const fractional_t &lambda_k, 12 | fractional_t &beta_i) 13 | { 14 | fractional_t u_i; 15 | 16 | // XXX: possible improvement would be to pre-compute u_i 17 | // instead of incurring quadratic u_i computations. 
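	// In closed form, the statements below compute
	//     beta_i = u_i * (1 + (p_i - d_i) / d_k)
	// and, if lambda_k < u_i, additionally add (c_i - lambda_k * p_i) / d_k.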
18 | t_i.get_utilization(u_i); 19 | 20 | beta_i = t_i.get_period() - t_i.get_deadline(); 21 | beta_i /= t_k.get_deadline(); 22 | beta_i += 1; 23 | beta_i *= u_i; 24 | 25 | if (lambda_k < u_i) 26 | { 27 | fractional_t tmp = t_i.get_wcet(); 28 | tmp -= lambda_k * t_i.get_period(); 29 | tmp /= t_k.get_deadline(); 30 | beta_i += tmp; 31 | } 32 | } 33 | 34 | bool BakerGedf::is_task_schedulable(unsigned int k, const TaskSet &ts) 35 | { 36 | fractional_t lambda, bound, beta_i, beta_sum = 0; 37 | fractional_t one = 1; 38 | 39 | ts[k].get_density(lambda); 40 | 41 | bound = m * (1 - lambda) + lambda; 42 | 43 | for (unsigned int i = 0; i < ts.get_task_count() && beta_sum <= bound; i++) 44 | { 45 | beta(ts[i], ts[k], lambda, beta_i); 46 | beta_sum += min(beta_i, one); 47 | } 48 | 49 | return beta_sum <= bound; 50 | } 51 | 52 | bool BakerGedf::is_schedulable(const TaskSet &ts, 53 | bool check_preconditions) 54 | { 55 | if (check_preconditions) 56 | { 57 | if (!(ts.has_only_feasible_tasks() && 58 | ts.is_not_overutilized(m) && 59 | ts.has_no_self_suspending_tasks())) 60 | return false; 61 | } 62 | 63 | for (unsigned int k = 0; k < ts.get_task_count(); k++) 64 | if (!is_task_schedulable(k, ts)) 65 | return false; 66 | 67 | return true; 68 | } 69 | 70 | -------------------------------------------------------------------------------- /native/src/blocking/part-omlp.cpp: -------------------------------------------------------------------------------- 1 | #include "sharedres.h" 2 | #include "blocking.h" 3 | 4 | #include "stl-helper.h" 5 | 6 | BlockingBounds* part_omlp_bounds(const ResourceSharingInfo& info) 7 | { 8 | // split everything by partition 9 | Clusters clusters; 10 | 11 | split_by_cluster(info, clusters); 12 | 13 | // split each partition by resource 14 | ClusterResources resources; 15 | 16 | split_by_resource(clusters, resources); 17 | 18 | // sort each contention set by request length 19 | sort_by_request_length(resources); 20 | 21 | // We need for each task the maximum request span. We also need the 22 | // maximum direct blocking from remote partitions for each request. We 23 | // can determine both in one pass. 24 | 25 | unsigned int i; 26 | 27 | // direct blocking results 28 | BlockingBounds* _results = new BlockingBounds(info); 29 | BlockingBounds& results = *_results; 30 | 31 | for (i = 0; i < info.get_tasks().size(); i++) 32 | { 33 | const TaskInfo& tsk = info.get_tasks()[i]; 34 | Interference bterm; 35 | 36 | foreach(tsk.get_requests(), jt) 37 | { 38 | const RequestBound& req = *jt; 39 | 40 | Interference blocking; 41 | 42 | blocking = np_fifo_per_resource( 43 | tsk, resources, 1, 44 | req.get_resource_id(), req.get_num_requests()); 45 | 46 | // add in blocking term 47 | bterm += blocking; 48 | 49 | // Keep track of maximum request span. 50 | // Is this already a single-issue request? 51 | if (req.get_num_requests() != 1) 52 | // nope, need to recompute 53 | blocking = np_fifo_per_resource( 54 | tsk, resources, 1, 55 | req.get_resource_id(), 1); 56 | 57 | // The span includes our own request. 58 | blocking.total_length += req.get_request_length(); 59 | blocking.count += 1; 60 | 61 | // Update max. request span. 
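			// raise_request_span() records the span in 'results' so that
			// arrival blocking can be charged once all tasks have been
			// processed (see charge_arrival_blocking() below).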
62 | results.raise_request_span(i, blocking); 63 | } 64 | 65 | results[i] = bterm; 66 | } 67 | 68 | charge_arrival_blocking(info, results); 69 | 70 | return _results; 71 | } 72 | -------------------------------------------------------------------------------- /example/lock_example_1: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /native/src/edf/gedf.cpp: -------------------------------------------------------------------------------- 1 | #include "tasks.h" 2 | #include "schedulability.h" 3 | 4 | #include "edf/baker.h" 5 | #include "edf/baruah.h" 6 | #include "edf/gfb.h" 7 | #include "edf/bcl.h" 8 | #include "edf/bcl_iterative.h" 9 | #include "edf/rta.h" 10 | #include "edf/ffdbf.h" 11 | #include "edf/load.h" 12 | #include "edf/la.h" 13 | #include "edf/gedf.h" 14 | 15 | bool GlobalEDF::is_schedulable(const TaskSet &ts, 16 | bool check) 17 | { 18 | if (check) 19 | { 20 | if (!(ts.has_only_feasible_tasks() && ts.is_not_overutilized(m))) 21 | return false; 22 | 23 | if (ts.get_task_count() == 0) 24 | return true; 25 | } 26 | 27 | if (!ts.has_no_self_suspending_tasks()) 28 | return want_la && LAGedf(m).is_schedulable(ts, false); 29 | 30 | // density bound on a uniprocessor. 31 | if (m == 1) 32 | { 33 | fractional_t density; 34 | ts.get_density(density); 35 | if (density <= 1) 36 | return true; 37 | } 38 | 39 | // Baker's test can deal with arbitrary deadlines. 40 | // It's cheap, so do it first. 41 | if (BakerGedf(m).is_schedulable(ts, false)) 42 | return true; 43 | 44 | // Baruah's test and the BCL and GFB tests assume constrained deadlines. 45 | if (ts.has_only_constrained_deadlines()) 46 | if (GFBGedf(m).is_schedulable(ts, false) 47 | || (want_rta && RTAGedf(m, rta_step).is_schedulable(ts, false)) 48 | // The RTA test generalizes the BCL and BCLIterative tests. 49 | || (want_baruah && BaruahGedf(m).is_schedulable(ts, false)) 50 | || (want_ffdbf && FFDBFGedf(m).is_schedulable(ts, false))) 51 | return true; 52 | 53 | // LA test can handle arbitrary deadlines 54 | if (want_la && LAGedf(m).is_schedulable(ts, false)) 55 | return true; 56 | 57 | // Load-based test can handle arbitrary deadlines. 
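	// LOAD requires (approximately) computing the task set's load, so it is
	// attempted only after all of the tests above have failed.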
58 | if (want_load && LoadGedf(m).is_schedulable(ts, false)) 59 | return true; 60 | 61 | return false; 62 | } 63 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | # default tools 3 | PYTHON ?= python2.7 4 | SCONS ?= scons 5 | ETAGS ?= etags 6 | 7 | OS := $(shell uname) 8 | ifneq (,$(findstring CYGWIN,$(OS))) 9 | # Cygwin 10 | DLIB_EXT = dll 11 | else 12 | #Default Linux && MacOS 13 | DLIB_EXT = so 14 | endif 15 | 16 | .PHONY: all cpp clean clean-links test links 17 | 18 | all: links 19 | 20 | cpp: 21 | $(MAKE) -C native -j 22 | 23 | links: clean-links cpp 24 | cd schedcat/sched; ln -s ../../native/_sched.so _sched.$(DLIB_EXT); ln -s ../../native/sched.py native.py 25 | cd schedcat/locking; ln -s ../../native/_locking.so _locking.$(DLIB_EXT); ln -s ../../native/locking.py native.py 26 | cd schedcat/locking/linprog; ln -s ../../../native/_lp_analysis.so _lp_analysis.$(DLIB_EXT); ln -s ../../../native/lp_analysis.py native.py 27 | cd schedcat/sim; ln -s ../../native/_sim.so _sim.$(DLIB_EXT); ln -s ../../native/sim.py native.py 28 | cd schedcat/cansim; ln -s ../../native/_cansim.so _cansim.$(DLIB_EXT); ln -s ../../native/cansim.py native.py 29 | 30 | clean-links: 31 | cd schedcat/sched; rm -f _sched.$(DLIB_EXT) native.py 32 | cd schedcat/locking; rm -f _locking.$(DLIB_EXT) native.py 33 | cd schedcat/locking/linprog; rm -f _lp_analysis.$(DLIB_EXT) native.py 34 | cd schedcat/sim; rm -f _sim.$(DLIB_EXT) native.py; 35 | cd schedcat/cansim; rm -f _cansim.$(DLIB_EXT) native.py; 36 | 37 | clean: clean-links 38 | find . -iname '*.py[oc]' -exec rm '{}' ';' 39 | rm -rf TAGS tags native/config.log 40 | $(MAKE) -C native clean 41 | 42 | # run unit test suite 43 | test: 44 | @echo "=== Running unit tests" 45 | $(PYTHON) -m tests 46 | 47 | # Emacs Tags 48 | TAGS: 49 | find . -type f -and -iname '*.py' | xargs ${ETAGS} 50 | find native/include -type f -and -iname '*.h' | xargs ${ETAGS} -a 51 | find native/src -type f -and -iname '*.cpp' | xargs ${ETAGS} -a 52 | ${ETAGS} -l python -a run_exp 53 | 54 | # Vim Tags 55 | tags: 56 | find . 
-type f -and -iname '*.py' | xargs ctags 57 | find native/include -type f -and -iname '*.h' | xargs ctags -a 58 | find native/src -type f -and -iname '*.cpp' | xargs ctags -a 59 | ctags --language-force=Python -a run_exp 60 | -------------------------------------------------------------------------------- /native/include/stl-io-helper.h: -------------------------------------------------------------------------------- 1 | #ifndef STL_IO_HELPER_H 2 | #define STL_IO_HELPER_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include "stl-hashmap.h" 10 | #include "stl-helper.h" 11 | 12 | template 13 | std::ostream& operator<<(std::ostream &os, const std::set &s) 14 | { 15 | bool first = true; 16 | os << "{"; 17 | foreach(s, e) 18 | { 19 | if (!first) 20 | os << ", "; 21 | os << *e; 22 | first = false; 23 | } 24 | os << "}"; 25 | 26 | return os; 27 | } 28 | 29 | template 30 | std::ostream& operator<<(std::ostream &os, const std::map &s) 31 | { 32 | bool first_elem = true; 33 | os << "{"; 34 | foreach(s, e) 35 | { 36 | if (!first_elem) 37 | os << ", "; 38 | os << e->first; 39 | os << ": "; 40 | os << e->second; 41 | first_elem = false; 42 | } 43 | os << "}"; 44 | 45 | return os; 46 | } 47 | 48 | template 49 | std::ostream& operator<<(std::ostream &os, const hashset &s) 50 | { 51 | std::set t; 52 | 53 | t.insert(s.begin(), s.end()); 54 | return os << t; 55 | } 56 | 57 | template 58 | std::ostream& operator<<(std::ostream &os, const hashmap &s) 59 | { 60 | bool first_elem = true; 61 | os << "{"; 62 | foreach(s, e) 63 | { 64 | if (!first_elem) 65 | os << ", "; 66 | os << e->first; 67 | os << ": "; 68 | os << e->second; 69 | first_elem = false; 70 | } 71 | os << "}"; 72 | 73 | return os; 74 | } 75 | 76 | template 77 | std::ostream& operator<<(std::ostream &os, const std::vector &s) 78 | { 79 | bool first = true; 80 | os << "["; 81 | foreach(s, e) 82 | { 83 | if (!first) 84 | os << ", "; 85 | os << *e; 86 | first = false; 87 | } 88 | os << "]"; 89 | 90 | return os; 91 | } 92 | 93 | 94 | #define NYI() \ 95 | { \ 96 | std::cerr << std::endl \ 97 | << __FUNCTION__ << " in " << __FILE__ \ 98 | << ": NOT YET IMPLEMENTED!" 
<< std::endl; \ 99 | abort(); \ 100 | } 101 | 102 | #endif 103 | -------------------------------------------------------------------------------- /example/generator.py: -------------------------------------------------------------------------------- 1 | #Necessary includes and stuff 2 | 3 | from schedcat.model.serialize import write 4 | from schedcat.generator.tasksets import mkgen, \ 5 | NAMED_UTILIZATIONS, \ 6 | NAMED_PERIODS 7 | from schedcat.util.time import ms2us 8 | import schedcat.model.resources as resources 9 | import os 10 | import random 11 | 12 | CSLENGTH = { 'short' : lambda: random.randint(1, 15), 13 | 'medium' : lambda: random.randint(1, 100), 14 | 'long' : lambda: random.randint(5, 1280), } 15 | 16 | def generate_taskset_files(util_name, period_name, cap, number): 17 | generator = mkgen(NAMED_UTILIZATIONS[util_name], 18 | NAMED_PERIODS[period_name]) 19 | generated_sets = [] 20 | for i in range(number): 21 | taskset = generator(max_util=cap, time_conversion=ms2us) 22 | filename = "{0}_{1}_{2}_{3}".format(util_name, 23 | period_name, cap, i) 24 | write(taskset, filename) 25 | generated_sets.append(filename) 26 | return generated_sets 27 | 28 | def generate_lock_taskset_files(util_name, period_name, cap, 29 | cslength, nres, pacc, number): 30 | generator = mkgen(NAMED_UTILIZATIONS[util_name], 31 | NAMED_PERIODS[period_name]) 32 | generated_sets = [] 33 | for i in range(number): 34 | taskset = generator(max_util=cap, time_conversion=ms2us) 35 | resources.initialize_resource_model(taskset) 36 | for task in taskset: 37 | for res_id in range(nres): 38 | if random.random() < pacc: 39 | nreqs = random.randint(1, 5) 40 | length = CSLENGTH[cslength] 41 | for j in range(nreqs): 42 | task.resmodel[res_id].add_request(length()) 43 | filename = "{0}_{1}_{2}_{3}_{4}_{5}_{6}".format( 44 | util_name, period_name, cap, cslength, nres, pacc, 45 | i) 46 | write(taskset, filename) 47 | generated_sets.append(filename) 48 | return generated_sets 49 | -------------------------------------------------------------------------------- /schedcat/overheads/pfair.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | from .quanta import quantize_wcet, quantize_period, account_for_delayed_release, stagger_latency 4 | 5 | def charge_scheduling_overheads(oheads, num_cpus, dedicated_irq, taskset, 6 | staggered=False, total_cpus=None, 7 | aligned_periodic_releases=False): 8 | if not oheads or not taskset: 9 | return taskset 10 | 11 | qlen = oheads.quantum_length 12 | ev_lat = oheads.release_latency(taskset) 13 | rel_oh = oheads.release(taskset) 14 | 15 | # account for reduced effective quantum length 16 | qeff = qlen \ 17 | - ev_lat \ 18 | - oheads.tick(taskset) \ 19 | - oheads.schedule(taskset) \ 20 | - oheads.ctx_switch(taskset) \ 21 | - oheads.cache_affinity_loss(taskset) 22 | 23 | if not dedicated_irq: 24 | # account for release interrupts 25 | qeff -= (len(taskset) - 1) * rel_oh 26 | 27 | # Is any useful time left in the quantum? With short quanta and high 28 | # overheads, this may not be the case (in the analyzed worst case). 29 | if qeff <= 0: 30 | return False 31 | 32 | # apply reduction 33 | taskset = quantize_wcet(qlen, taskset, qeff) 34 | if not taskset: 35 | return False 36 | 37 | # Account for release delay. 38 | if not aligned_periodic_releases: 39 | # Default sporadic mode: job releases are triggered sporadically, 40 | # but newly released jobs are not considered for scheduling until 41 | # the next quantum boundary. 
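        # Worst case: the release event occurs just after a quantum boundary,
        # so up to a full quantum plus the event latency and the release
        # overhead can elapse before the job is considered for scheduling.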
42 | release_delay = qlen + ev_lat + rel_oh 43 | else: 44 | # "Polling" mode. Periodic job releases are triggered 45 | # at each quantum boundary without any delays. 46 | release_delay = 0 47 | 48 | # shortcut: we roll staggering into release delay 49 | if staggered: 50 | if total_cpus is None: 51 | total_cpus = num_cpus; 52 | release_delay += stagger_latency(total_cpus, qlen) 53 | 54 | taskset = account_for_delayed_release(release_delay, taskset) 55 | if not taskset: 56 | return False 57 | 58 | return quantize_period(qlen, taskset, deadline=True) 59 | -------------------------------------------------------------------------------- /schedcat/overheads/fp.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | from math import ceil, floor 4 | 5 | from schedcat.model.tasks import SporadicTask, TaskSystem 6 | 7 | def charge_scheduling_overheads(oheads, num_cpus, dedicated_irq, taskset): 8 | if not oheads or not taskset: 9 | return TaskSystem(taskset) 10 | 11 | event_latency = oheads.release_latency(taskset) 12 | 13 | # pseudo-task representing the tick interrupt 14 | tck = oheads.tick(taskset) 15 | if tck > 0: 16 | tick_isr = SporadicTask(tck, oheads.quantum_length) 17 | tick_isr.jitter = event_latency 18 | tick_tasks = [tick_isr] 19 | else: 20 | tick_tasks = [] 21 | 22 | # pseudo-tasks representing release interrupts 23 | rel_cost = oheads.release(taskset) 24 | if not dedicated_irq and rel_cost > 0: 25 | release_tasks = [SporadicTask(rel_cost, t.period) for t in taskset] 26 | for isr in release_tasks: 27 | isr.jitter = event_latency 28 | else: 29 | release_tasks = [] # releases don't impact tasks directly 30 | 31 | # account for initial release delay as jitter 32 | release_delay = event_latency + oheads.release(taskset) 33 | if dedicated_irq: 34 | release_delay += oheads.ipi_latency(taskset) 35 | 36 | for t in taskset: 37 | if not 'jitter' in t.__dict__: 38 | t.jitter = 0 39 | t.jitter += release_delay 40 | 41 | # account for scheduling cost and CPMD 42 | sched = oheads.schedule(taskset) 43 | cxs = oheads.ctx_switch(taskset) 44 | cpmd = oheads.cache_affinity_loss(taskset) 45 | preemption = 2 * (sched + cxs) + cpmd 46 | for t in taskset: 47 | t.cost += preemption 48 | 49 | return TaskSystem(tick_tasks + release_tasks + taskset) 50 | 51 | def quantize_params(taskset): 52 | """After applying overheads, use this function to make 53 | task parameters integral again.""" 54 | 55 | for t in taskset: 56 | t.cost = int(ceil(t.cost)) 57 | t.period = int(floor(t.period)) 58 | t.deadline = int(floor(t.deadline)) 59 | t.jitter = int(ceil(t.jitter)) 60 | if t.density() > 1: 61 | return False 62 | 63 | return taskset 64 | -------------------------------------------------------------------------------- /native/src/edf/bcl.cpp: -------------------------------------------------------------------------------- 1 | #include // for min 2 | 3 | #include "tasks.h" 4 | #include "schedulability.h" 5 | 6 | #include "edf/bcl.h" 7 | 8 | using namespace std; 9 | 10 | unsigned long BCLGedf::max_jobs_contained(const Task &t_i, const Task &t_k) 11 | { 12 | if (t_i.get_deadline() > t_k.get_deadline()) 13 | return 0; 14 | else 15 | return 1 + (t_k.get_deadline() - t_i.get_deadline()) / t_i.get_period(); 16 | } 17 | 18 | void BCLGedf::beta(const Task &t_i, const Task &t_k, fractional_t &beta_i) 19 | { 20 | unsigned long n = max_jobs_contained(t_i, t_k); 21 | 22 | integral_t c_i, tmp; 23 | 24 | c_i = t_i.get_wcet(); 25 | tmp = t_i.get_period(); 26 | tmp *= 
n; 27 | if (tmp < t_k.get_deadline()) 28 | // no risk of overflow 29 | tmp = t_k.get_deadline() - n * t_i.get_period(); 30 | else 31 | // test says zero is lower limit 32 | tmp = 0; 33 | 34 | beta_i = n * c_i; 35 | beta_i += min(c_i, tmp); 36 | beta_i /= t_k.get_deadline(); 37 | } 38 | 39 | bool BCLGedf::is_task_schedulable(unsigned int k, const TaskSet &ts) 40 | { 41 | fractional_t beta_i, beta_sum = 0; 42 | fractional_t lambda_term; 43 | bool small_beta_exists = false; 44 | 45 | ts[k].get_density(lambda_term); 46 | lambda_term *= -1; 47 | lambda_term += 1; 48 | 49 | for (unsigned int i = 0; i < ts.get_task_count(); i++) 50 | { 51 | if (i != k) { 52 | beta(ts[i], ts[k], beta_i); 53 | beta_sum += min(beta_i, lambda_term); 54 | small_beta_exists = small_beta_exists || 55 | (0 < beta_i && beta_i <= lambda_term); 56 | } 57 | } 58 | 59 | lambda_term *= m; 60 | 61 | return beta_sum < lambda_term || 62 | (small_beta_exists && beta_sum == lambda_term); 63 | } 64 | 65 | bool BCLGedf::is_schedulable(const TaskSet &ts, 66 | bool check_preconditions) 67 | { 68 | if (check_preconditions) 69 | { 70 | if (!(ts.has_only_feasible_tasks() && 71 | ts.is_not_overutilized(m) && 72 | ts.has_only_constrained_deadlines() && 73 | ts.has_no_self_suspending_tasks())) 74 | return false; 75 | } 76 | 77 | for (unsigned int k = 0; k < ts.get_task_count(); k++) 78 | if (!is_task_schedulable(k, ts)) 79 | return false; 80 | 81 | return true; 82 | } 83 | 84 | -------------------------------------------------------------------------------- /schedcat/sched/canbus/prio_assign.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | from schedcat.model.canbus import CANMessage 4 | from schedcat.model.canbus import CANMessageSet 5 | import schedcat.sched.canbus.broster as br 6 | 7 | def set_priorities_david_and_burns(msgs): 8 | """ Implementation of "Robust priority assignment for messages on controller 9 | area network (CAN)", as proposed by R. Davis and A. Burns. We assume that 10 | lower number denotes a higher priority. In the following, alpha denotes 11 | the number of retransmissions any message can tolerate without violating 12 | its deadline and assuming that all messages with unassigned priorities have 13 | higher priority. We move from lowest priority level to the highest priority 14 | level. In each iteration, the priority level is assigned to the message that 15 | can tolerate maximum retransmissions (max alpha) at that priority. The 16 | following algorithm maximizes the number of retransmission errors 17 | messages can tolerate in the worst case. There are two other versions of 18 | this algorithm that maximise the delay tolerated and minimize the 19 | probability of deadline failure, respectively. 20 | 21 | Davis, Robert I., and Alan Burns. "Robust priority assignment for messages 22 | on Controller Area Network (CAN)." Real-Time Systems 41.2 (2009): 152-180. 
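    The binary search below finds, for each still-unassigned message m, the
    largest alpha for which br.is_schedulable(msgs, m, alpha) holds; the
    message with the largest such alpha is assigned the current (lowest
    remaining) priority level.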
23 | """ 24 | 25 | for m in msgs: 26 | m.id = -1 27 | 28 | for id in reversed(range(1, len(msgs) + 1)): 29 | alpha_global = None 30 | candidate_msg = None 31 | 32 | for m in reversed(msgs): 33 | if m.id > 0: 34 | continue 35 | assert m.id == -1 36 | m.id = 0 37 | 38 | alpha_min = 0 39 | if br.is_schedulable(msgs, m, alpha_min) == False: 40 | m.id = -1 41 | continue 42 | 43 | alpha_max = int(math.ceil((m.deadline) / msgs.max_error_frame_size)) 44 | assert br.is_schedulable(msgs, m, alpha_max) == False 45 | 46 | #print alpha_min, alpha_max 47 | while alpha_max - alpha_min > 1: 48 | alpha_mid = (alpha_min + alpha_max) / 2 49 | if br.is_schedulable(msgs, m, alpha_mid) == True: 50 | alpha_min = alpha_mid 51 | else: 52 | alpha_max = alpha_mid 53 | #print alpha_min, alpha_max 54 | 55 | if alpha_global == None or alpha_min > alpha_global: 56 | alpha_global = alpha_min 57 | candidate_msg = m 58 | 59 | m.id = -1 60 | 61 | if candidate_msg == None: 62 | raise Exception("Priority Assignment Failed") 63 | 64 | candidate_msg.id = id 65 | msgs.reset() 66 | -------------------------------------------------------------------------------- /native/include/lp_pedf_analysis.h: -------------------------------------------------------------------------------- 1 | #ifndef LP_PEDF_ANALYSIS_H 2 | #define LP_PEDF_ANALYSIS_H 3 | 4 | // ------------------------------------------------------------------ 5 | // --------------------[ A N A L Y S I S ]--------------------------- 6 | // ------------------------------------------------------------------ 7 | 8 | // ---------------------[ D E F I N E S ]---------------------------- 9 | 10 | // Enable debug prints: 11 | // #define __DEBUG_PEDF_BLK_ANALYSIS__ 12 | 13 | // Enable the stop of the analysis loop at the hyper-period of all 14 | // the tasks in the system: 15 | // #define __PEDF_BLK_ANALYSIS_ENABLE_HP_STOP__ 16 | 17 | // Enable a timeout for the analysis loop: 18 | // Warning: based on SIGALARM, hence not 100% portable 19 | // #define __PEDF_BLK_ANALYSIS_ENABLE_TIMEOUT__ 20 | 21 | // ------------------------------------------------------------------ 22 | 23 | 24 | // Default value used for blocking lower-bound 25 | static unsigned long AVAL = 0; 26 | 27 | class PEDFBlockingAnalysis 28 | { 29 | public: 30 | PEDFBlockingAnalysis(const ResourceSharingInfo& _info, unsigned int _cluster); 31 | 32 | bool is_schedulable(); 33 | 34 | protected: 35 | virtual unsigned long compute_blocking_PDC(unsigned long interval_length) = 0; 36 | virtual unsigned long compute_blocking_AC (unsigned long interval_length) = 0; 37 | 38 | virtual unsigned long compute_tighter_blocking_PDC(unsigned long interval_length, 39 | unsigned long blk_UB, 40 | unsigned long blk_LB = 0) 41 | { 42 | return blk_UB; 43 | } 44 | 45 | const ResourceSharingInfo& info; 46 | unsigned int cluster; 47 | unsigned int max_deadline, min_deadline; 48 | 49 | private: 50 | 51 | //bool processorDemandCriterion(std::map& nJobs, unsigned long maxTime); 52 | bool QPA(unsigned long t_LB, unsigned long t_UB, unsigned long blk_LB_in = 0, unsigned long& blk_LB_out = AVAL); 53 | bool raw_PDC(unsigned long t_LB, unsigned long t_UB); 54 | unsigned long DBF(unsigned long interval_length); 55 | unsigned long arrival_curve(unsigned long interval_length); 56 | unsigned long last_check_point_before(unsigned long interval_length); 57 | 58 | }; 59 | 60 | enum analysis_type_t 61 | { 62 | AC_MODE, // compute LP for an arrival curve 63 | PDC_MODE // compute processor-demand criterion LP 64 | }; 65 | 66 | #endif 
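// Editor's sketch (not part of the original header): a concrete analysis is
// expected to subclass PEDFBlockingAnalysis and supply the two pure-virtual
// blocking bounds. The class name and the zero bounds below are placeholders,
// not schedcat code.
//
//   class NoBlockingAnalysis : public PEDFBlockingAnalysis
//   {
//   public:
//       NoBlockingAnalysis(const ResourceSharingInfo& info, unsigned int cluster)
//           : PEDFBlockingAnalysis(info, cluster) {}
//   protected:
//       unsigned long compute_blocking_PDC(unsigned long interval) { return 0; }
//       unsigned long compute_blocking_AC(unsigned long interval)  { return 0; }
//   };
//
//   // usage: bool ok = NoBlockingAnalysis(info, cluster_id).is_schedulable();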
-------------------------------------------------------------------------------- /example/driver.py: -------------------------------------------------------------------------------- 1 | #Necessary includes and stuff 2 | 3 | from example.generator import generate_taskset_files, \ 4 | generate_lock_taskset_files 5 | from example.mapping import partition_tasks 6 | from example.overheads import get_oh_object, bound_cfl_with_oh 7 | from example.locking import bound_cfl_with_locks 8 | 9 | from schedcat.model.serialize import load 10 | 11 | import os 12 | 13 | def get_script_dir(): 14 | return os.path.dirname(os.path.realpath(__file__)) 15 | 16 | def example_overheads(): 17 | script_dir = get_script_dir() 18 | return get_oh_object( 19 | script_dir + "/oh_host=ludwig_scheduler=C-FL-L2-RM_stat=avg.csv", 20 | script_dir + 21 | "/oh_host=ludwig_scheduler=C-FL-L2-RM_locks=MX-Q_stat=avg.csv", 22 | script_dir + "/pmo_host=ludwig_background=load_stat=avg.csv", 23 | "L2") 24 | 25 | def nolock_example(task_files): 26 | oheads = example_overheads() 27 | for task_file in task_files: 28 | ts = load(task_file) 29 | for task in ts: 30 | task.wss = 256 31 | clusts = partition_tasks(2, 12, True, ts) 32 | if clusts and bound_cfl_with_oh(oheads, True, clusts): 33 | yield (task_file, clusts) 34 | else: 35 | yield (task_file, None) 36 | 37 | def lock_example(task_files): 38 | oheads = example_overheads() 39 | for task_file in task_files: 40 | ts = load(task_file) 41 | for task in ts: 42 | task.wss = 256 43 | clusts = partition_tasks(2, 12, True, ts) 44 | if clusts: 45 | clusts2 = bound_cfl_with_locks(ts, clusts, oheads, 2) 46 | if clusts2: 47 | yield (task_file, clusts2) 48 | else: 49 | yield (task_file, None) 50 | else: 51 | yield (task_file, None) 52 | 53 | def generate_random_nolock_sets(): 54 | return generate_taskset_files("uni-medium", "uni-moderate", 12, 2) 55 | 56 | def generate_random_lock_sets(): 57 | return generate_lock_taskset_files("uni-medium", "uni-moderate", 6, 58 | "medium", 6, 0.1, 2) 59 | 60 | def print_bounds(results_list): 61 | for task_file, clusts in results_list: 62 | print "Processed {}".format(task_file) 63 | if clusts is not None: 64 | for clust in clusts: 65 | for task in clust: 66 | print task.response_time - task.deadline 67 | -------------------------------------------------------------------------------- /native/include/edf/gel_pl.h: -------------------------------------------------------------------------------- 1 | #ifndef GEL_PL_H 2 | #define GEL_PL_H 3 | 4 | #ifndef SWIG 5 | #include 6 | #endif 7 | 8 | class GELPl 9 | { 10 | 11 | private: 12 | std::vector bounds; 13 | int no_cpus; 14 | const TaskSet& tasks; 15 | int util_ceil; 16 | int rounds; 17 | std::vector S_i; 18 | std::vector G_i; 19 | 20 | // For faster lookups, to avoid too many conversions. 21 | std::vector utilizations; 22 | 23 | void compute_exact_s(const fractional_t& S, 24 | const std::vector& Y_ints, 25 | fractional_t& s); 26 | void compute_binsearch_s(const fractional_t& S, 27 | const std::vector& Y_ints, 28 | fractional_t& s); 29 | 30 | inline bool M_lt_0(const fractional_t& s, const fractional_t& S, 31 | const std::vector& Y_ints); 32 | 33 | // These are basically just structs that override operator< to allow 34 | // sort algorithms to work. 
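	// ReplacementType orders replacements by 'location', breaking ties by the
	// utilization of the task being replaced; TaggedValue deliberately inverts
	// operator< so that sorting yields descending order (largest values first).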
35 | class ReplacementType { 36 | public: 37 | unsigned int old_task; 38 | unsigned int new_task; 39 | fractional_t location; 40 | fractional_t old_task_utilization; 41 | 42 | bool operator<(const ReplacementType& other) const { 43 | return (location < other.location) 44 | || ((location == other.location) 45 | && (old_task_utilization < other.old_task_utilization)); 46 | } 47 | }; 48 | 49 | class TaggedValue { 50 | public: 51 | unsigned int task; 52 | fractional_t value; 53 | 54 | //Order is reversed - we are going to want the largest, rather than the 55 | //smallest, values. 56 | bool operator<(const TaggedValue& other) const { 57 | return other.value < value; 58 | } 59 | }; 60 | 61 | public: 62 | 63 | GELPl(unsigned int num_processors, 64 | const TaskSet& tasks, 65 | unsigned int rounds); 66 | 67 | unsigned long get_bound(unsigned int index) { 68 | return bounds[index]; 69 | } 70 | 71 | // Converted to double for the sake of Python 72 | double get_Si(unsigned int index) { 73 | return S_i[index].get_d(); 74 | } 75 | 76 | // Converted to double for the sake of Python 77 | double get_Gi(unsigned int index) { 78 | return G_i[index].get_d(); 79 | } 80 | }; 81 | 82 | #endif 83 | -------------------------------------------------------------------------------- /example/oh_host=ludwig_scheduler=C-FL-L2-RM_locks=MX-Q_stat=avg.csv: -------------------------------------------------------------------------------- 1 | TASK-COUNT, LOCK, READ-LOCK, READ-UNLOCK, SYSCALL-IN, SYSCALL-OUT, UNLOCK 2 | 2, 0.1384, 0.53848, 0.06211, 3.70392, 8.80697, 0.06214 3 | 4, 0.14506, 0.47369, 0.0621, 3.18246, 6.82131, 0.06213 4 | 6, 0.08174, 0.45971, 0.0621, 2.8336, 6.31049, 0.06212 5 | 8, 0.08496, 0.47502, 0.06209, 2.73304, 20.91474, 0.06211 6 | 10, 0.09113, 0.48268, 0.06269, 2.58052, 77.61109, 0.06209 7 | 12, 0.17603, 0.48421, 0.08559, 2.4247, 140.99054, 0.0623 8 | 14, 0.17747, 0.47695, 0.08064, 2.28196, 178.77589, 0.0623 9 | 16, 0.20195, 0.48381, 0.11853, 2.15314, 244.6085, 0.06631 10 | 18, 0.23352, 0.48954, 0.12895, 2.05774, 298.66, 0.10147 11 | 20, 0.24854, 0.49222, 0.13417, 1.91271, 351.92975, 0.12259 12 | 22, 0.25789, 0.49197, 0.13897, 1.83359, 405.82531, 0.12745 13 | 24, 0.26659, 0.49396, 0.14277, 1.79118, 448.10951, 0.13065 14 | 26, 0.26749, 0.49041, 0.14375, 1.71109, 508.09325, 0.13336 15 | 28, 0.27218, 0.49092, 0.14439, 1.69352, 536.4658, 0.13495 16 | 30, 0.28046, 0.4939, 0.14844, 1.66184, 576.92379, 0.13821 17 | 32, 0.30073, 0.50478, 0.16025, 1.61638, 571.77327, 0.14682 18 | 34, 0.30251, 0.50457, 0.16137, 1.62052, 598.64906, 0.1477 19 | 36, 0.30383, 0.50494, 0.16103, 1.60655, 636.22497, 0.1476 20 | 38, 0.30507, 0.50418, 0.16117, 1.58808, 661.08658, 0.14781 21 | 40, 0.31318, 0.50753, 0.16675, 1.56768, 658.96721, 0.14992 22 | -------------------------------------------------------------------------------- /schedcat/sched/edf/bcl_iterative.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implementation of Marko Bertogna, Michele Cirinei, and Giuseppe Lipari 3 | iterative schedulability test. This implementation follows the description in: 4 | 5 | Schedulability analysis of global scheduling algorithms on 6 | multiprocessor platforms by Marko Bertogna, Michele Cirinei, Giuseppe 7 | Lipari to appear in Journal IEEE Transactions on Parallel and 8 | Distributed Systems (2008). 9 | """ 10 | 11 | from __future__ import division 12 | 13 | from math import floor, ceil 14 | 15 | def interfering_jobs(length, ti): 16 | "Equ. (15) in the paper." 
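    # i.e., N_i(L) = floor((L + D_i - C_i - s_i) / T_i), where s_i is the
    # current slack lower bound stored in ti.bcl_slack.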
17 | return int(floor((length + ti.deadline - ti.cost - ti.bcl_slack) / ti.period)) 18 | 19 | def wk_interfering_workload(length, ti): 20 | "General work-conserving case, Equ. (14) in the paper." 21 | jobs = interfering_jobs(length, ti) 22 | return jobs * ti.cost + min(ti.cost, length + ti.deadline - ti.cost 23 | - ti.bcl_slack - jobs * ti.period) 24 | 25 | def edf_interfering_workload(length, ti): 26 | "Equ. (17) in the paper." 27 | jobs = int(floor(length / ti.period)) 28 | return jobs * ti.cost + min(ti.cost, 29 | max(0, length - ti.bcl_slack - jobs * ti.period)) 30 | 31 | def edf_slack_update(tk, tasks, no_cpus): 32 | """Compute slack in the case of G-EDF. 33 | Equ. (18) in the paper. 34 | """ 35 | other_work = 0 36 | for ti in tasks: 37 | if tk != ti: 38 | other_work += min(edf_interfering_workload(tk.deadline, ti), 39 | # the '+ 1' below assumes integral time 40 | tk.deadline - tk.cost + 1) 41 | return tk.deadline - tk.cost - int(floor(other_work / no_cpus)) 42 | 43 | def is_schedulable(no_cpus, tasks, round_limit=None): 44 | """"Iteratively improve slack bound for each task until either the system 45 | is deemed to be feasible, no more improvements could be found, or 46 | the round limit (if given) is reached. 47 | """ 48 | for t in tasks: 49 | t.bcl_slack = 0.0 50 | updated = True 51 | feasible = False 52 | round = 0 53 | while updated and not feasible and (not round_limit or round < round_limit): 54 | round += 1 55 | feasible = True 56 | updated = False 57 | for tk in tasks: 58 | new_bound = edf_slack_update(tk, tasks, no_cpus) 59 | feasible = feasible and new_bound >= 0 60 | updated = updated or new_bound > tk.bcl_slack 61 | tk.bcl_slack = max(tk.bcl_slack, new_bound) 62 | return feasible 63 | -------------------------------------------------------------------------------- /native/src/edf/qpa_msrp.cpp: -------------------------------------------------------------------------------- 1 | #include "edf/qpa_msrp.h" 2 | #include "sharedres_types.h" 3 | #include "blocking.h" 4 | 5 | #include "iter-helper.h" 6 | 7 | #include 8 | 9 | #include 10 | using namespace std; 11 | 12 | static unsigned long max_relative_deadline(const TaskSet &ts) 13 | { 14 | unsigned long dl = 0; 15 | 16 | for (unsigned int i = 0; i < ts.get_task_count(); i++) 17 | dl = std::max(dl, ts[i].get_deadline()); 18 | 19 | return dl; 20 | } 21 | 22 | QPA_MSRPTest::QPA_MSRPTest(unsigned int num_processors, const ResourceSharingInfo& rsinfo, 23 | unsigned int _num_cpus, unsigned int _cpu_id) // Needed by msrp_bounds 24 | : QPATest(num_processors), num_cpus(_num_cpus), cpu_id(_cpu_id), info(rsinfo) 25 | {} 26 | 27 | 28 | integral_t QPA_MSRPTest::get_demand(integral_t interval, const TaskSet &ts) 29 | { 30 | integral_t demand = QPATest::get_demand(interval,ts); 31 | 32 | if (interval <= max_relative_deadline) 33 | demand += get_EDF_arrival_blocking(info, num_cpus, interval.get_ui(), cpu_id); 34 | 35 | return demand; 36 | } 37 | 38 | integral_t QPA_MSRPTest::get_max_interval(const TaskSet &ts, const fractional_t& util) 39 | { 40 | integral_t max_interval = QPATest::get_max_interval(ts, util); 41 | 42 | // Follows Baruah RTSS'06 - "Resource sharing in EDF-scheduled systems: a closer look" 43 | max_interval = std::max(max_interval.get_ui(), max_relative_deadline); 44 | 45 | return max_interval; 46 | } 47 | 48 | 49 | // ------------------------------------------------------------------ 50 | // --------------------[ E N T R Y P O I N T ]-------------------- 51 | // 
------------------------------------------------------------------ 52 | 53 | bool pedf_msrp_classic_is_schedulable(const ResourceSharingInfo& info, unsigned int num_cpus) 54 | { 55 | bool esit = true; 56 | 57 | BlockingBounds* blocking = msrp_bounds(info, num_cpus); 58 | 59 | foreach_cluster(info, k) 60 | { 61 | 62 | TaskSet ts; 63 | 64 | // Prepare a TaskSet object for each processor: 65 | // This is to adapt ResourceSharingInfo to the data structures used 66 | // by the QPA implementation. 67 | foreach_task_in_cluster(info.get_tasks(), k, T_i) 68 | { 69 | ts.add_task(T_i->get_cost() + blocking->get_remote_blocking(T_i->get_id()), // WCET inflation 70 | T_i->get_period(), T_i->get_deadline()); 71 | } 72 | 73 | QPA_MSRPTest test(1, info, num_cpus, k); 74 | test.set_max_relative_deadline(max_relative_deadline(ts)); 75 | 76 | if (!test.is_schedulable(ts, false)) 77 | { 78 | esit = false; 79 | break; 80 | } 81 | } 82 | 83 | // The object is allocated into msrp_bound() 84 | delete blocking; 85 | 86 | return esit; 87 | } 88 | -------------------------------------------------------------------------------- /example/locking.py: -------------------------------------------------------------------------------- 1 | #Necessary includes and stuff 2 | 3 | from schedcat.locking.bounds import apply_task_fair_mutex_bounds, \ 4 | assign_prio_pt_preemption_levels 5 | 6 | from schedcat.overheads.jlfp import charge_scheduling_overheads, \ 7 | quantize_params 8 | 9 | from schedcat.sched.edf.gel_pl import \ 10 | bound_gfl_response_times, has_bounded_tardiness 11 | 12 | from schedcat.overheads.locking import charge_spinlock_overheads 13 | 14 | def copy_ts(ts, clusts): 15 | new_ts = [] 16 | new_clusts = [] 17 | for clust in clusts: 18 | new_clust = clust.copy() 19 | new_clust.cpus = clust.cpus 20 | new_clusts.append(new_clust) 21 | new_ts += new_clust 22 | return (new_ts, new_clusts) 23 | 24 | def preprocess_ts(taskset, clusts, oheads): 25 | for clust in clusts: 26 | charge_spinlock_overheads(oheads, clust) 27 | for task in clust: 28 | #Initially assume completion by deadline and use G-FL 29 | task.response_time = task.deadline 30 | task.prio_pt = task.deadline - \ 31 | (clust.cpus - 1) / (clust.cpus) * task.cost 32 | assign_prio_pt_preemption_levels(taskset) 33 | 34 | def post_blocking_term_oh_inflation(oheads, clusts): 35 | for clust in clusts: 36 | inflation = oheads.syscall_in(len(clust)) 37 | for t in clust: 38 | if t.arrival_blocked: 39 | t.cost += inflation 40 | t.arrival_blocked += inflation 41 | if not charge_scheduling_overheads(oheads, clust.cpus, 42 | True, clust): 43 | return False 44 | quantize_params(clust) 45 | return True 46 | 47 | def bound_cfl_with_locks(tasks, clusts, oheads, cluster_size): 48 | preprocess_ts(tasks, clusts, oheads) 49 | completion_ok = False 50 | count = 0 51 | while not completion_ok: 52 | completion_ok = True 53 | new_ts, new_clusts = copy_ts(tasks, clusts) 54 | count += 1 55 | if count > 100: 56 | return False 57 | apply_task_fair_mutex_bounds(new_ts, cluster_size, 0) 58 | if not post_blocking_term_oh_inflation(oheads, 59 | new_clusts): 60 | return False 61 | for i, clust in enumerate(new_clusts): 62 | if not has_bounded_tardiness(clust.cpus, clust): 63 | return False 64 | bound_gfl_response_times(clust.cpus, clust, 15) 65 | for j, t in enumerate(clust): 66 | if t.response_time > clusts[i][j].response_time: 67 | completion_ok = False 68 | return new_clusts 69 | -------------------------------------------------------------------------------- /native/include/sharedres.h: 
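// Overview (informal): sharedres.h declares the entry points of the native
// blocking-bound analyses. Each *_bounds() function below returns a
// heap-allocated BlockingBounds object that the caller is expected to free,
// as done with `delete blocking` in pedf_msrp_classic_is_schedulable() above.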
-------------------------------------------------------------------------------- 1 | #ifndef SHAREDRES_H 2 | #define SHAREDRES_H 3 | 4 | #include "sharedres_types.h" 5 | 6 | // spinlocks 7 | 8 | BlockingBounds* task_fair_mutex_bounds(const ResourceSharingInfo& info, 9 | unsigned int procs_per_cluster, 10 | int dedicated_irq = NO_CPU); 11 | 12 | BlockingBounds* task_fair_rw_bounds(const ResourceSharingInfo& info, 13 | const ResourceSharingInfo& info_mtx, 14 | unsigned int procs_per_cluster, 15 | int dedicated_irq = NO_CPU); 16 | 17 | BlockingBounds* phase_fair_rw_bounds(const ResourceSharingInfo& info, 18 | unsigned int procs_per_cluster, 19 | int dedicated_irq = NO_CPU); 20 | 21 | BlockingBounds* msrp_bounds_holistic( 22 | const ResourceSharingInfo& info, 23 | int dedicated_irq = NO_CPU); 24 | 25 | // s-oblivious protocols 26 | 27 | BlockingBounds* global_omlp_bounds(const ResourceSharingInfo& info, 28 | unsigned int num_procs); 29 | BlockingBounds* global_fmlp_bounds(const ResourceSharingInfo& info); 30 | 31 | BlockingBounds* clustered_omlp_bounds(const ResourceSharingInfo& info, 32 | unsigned int procs_per_cluster, 33 | int dedicated_irq = NO_CPU); 34 | 35 | BlockingBounds* clustered_rw_omlp_bounds(const ResourceSharingInfo& info, 36 | unsigned int procs_per_cluster, 37 | int dedicated_irq = NO_CPU); 38 | 39 | BlockingBounds* clustered_kx_omlp_bounds(const ResourceSharingInfo& info, 40 | const ReplicaInfo& replicaInfo, 41 | unsigned int procs_per_cluster, 42 | int dedicated_irq); 43 | 44 | BlockingBounds* part_omlp_bounds(const ResourceSharingInfo& info); 45 | 46 | 47 | // s-aware protocols 48 | 49 | BlockingBounds* part_fmlp_bounds(const ResourceSharingInfo& info, 50 | bool preemptive = true); 51 | 52 | BlockingBounds* mpcp_bounds(const ResourceSharingInfo& info, 53 | bool use_virtual_spinning); 54 | 55 | BlockingBounds* dpcp_bounds(const ResourceSharingInfo& info, 56 | const ResourceLocality& locality); 57 | 58 | BlockingBounds* msrp_bounds(const ResourceSharingInfo& info, 59 | unsigned int num_cpus); 60 | 61 | BlockingBounds* global_pip_bounds( 62 | const ResourceSharingInfo& info, 63 | unsigned int number_of_cpus); 64 | 65 | BlockingBounds* ppcp_bounds( 66 | const ResourceSharingInfo& info, 67 | unsigned int number_of_cpus, 68 | bool reasonable_priority_assignment = false); 69 | 70 | unsigned long get_EDF_arrival_blocking(const ResourceSharingInfo& info, unsigned int num_cpus, 71 | unsigned long interval_length, unsigned int cpu_id); 72 | 73 | bool pedf_msrp_classic_is_schedulable(const ResourceSharingInfo& info, unsigned int num_cpus); 74 | 75 | // Still missing: 76 | // ============== 77 | 78 | // spin_rw_wpref_bounds 79 | // spin_rw_rpref_bounds 80 | 81 | 82 | 83 | #endif 84 | -------------------------------------------------------------------------------- /native/src/blocking/nested_cs.cpp: -------------------------------------------------------------------------------- 1 | #include "stl-hashmap.h" 2 | #include "stl-helper.h" 3 | 4 | #include "sharedres_types.h" 5 | #include "nested_cs.h" 6 | 7 | 8 | static void build_trans_nest_rel( 9 | hashmap > &directly_nested, 10 | hashmap > &trans_nested, 11 | unsigned int res) 12 | { 13 | if (trans_nested.find(res) == trans_nested.end()) 14 | { 15 | // assumes cycle-freedom 16 | 17 | // create set for res 18 | trans_nested[res] = hashset(); 19 | 20 | // populate by merging sets of children 21 | // 1) compute rel. 
for nested resources 22 | hashset &s = trans_nested[res]; 23 | foreach(directly_nested[res], nres) 24 | { 25 | build_trans_nest_rel(directly_nested, trans_nested, *nres); 26 | s.insert(*nres); 27 | s.insert(trans_nested[*nres].begin(), trans_nested[*nres].end()); 28 | } 29 | } 30 | // Otherwise already computed, nothing to do. 31 | } 32 | 33 | /* Compute for each resource 'q' the set of resources that could be 34 | * transitively requested while holding 'q'. */ 35 | hashmap > 36 | CriticalSectionsOfTaskset::get_transitive_nesting_relationship() const 37 | { 38 | hashmap > directly_nested; 39 | 40 | foreach(tsks, t) 41 | { 42 | foreach(t->get_cs(), cs) 43 | { 44 | if (directly_nested.find(cs->resource_id) == directly_nested.end()) 45 | directly_nested[cs->resource_id] = hashset(); 46 | 47 | int outer = cs->outer; 48 | unsigned int nested_res = cs->resource_id; 49 | 50 | if (outer != CriticalSection::NO_PARENT) 51 | { 52 | unsigned int parent = t->get_cs()[outer].resource_id; 53 | directly_nested[parent].insert(nested_res); 54 | } 55 | } 56 | } 57 | 58 | hashmap > nested; 59 | foreach(directly_nested, res) 60 | build_trans_nest_rel(directly_nested, nested, res->first); 61 | 62 | return nested; 63 | } 64 | 65 | 66 | LockSet CriticalSection::get_outer_locks(const CriticalSectionsOfTask &task) const 67 | { 68 | LockSet already_held; 69 | 70 | int held = outer; 71 | while (held != NO_PARENT) 72 | { 73 | unsigned int parent = task.get_cs()[held].resource_id; 74 | already_held.insert(parent); 75 | held = task.get_cs()[held].outer; 76 | } 77 | 78 | return already_held; 79 | } 80 | 81 | bool CriticalSection::has_common_outer( 82 | const CriticalSectionsOfTask &this_task, 83 | const LockSet &already_held_by_other) const 84 | { 85 | int held = outer; 86 | while (held != NO_PARENT) 87 | { 88 | unsigned int parent = this_task.get_cs()[held].resource_id; 89 | if (already_held_by_other.find(parent) != already_held_by_other.end()) 90 | return true; 91 | held = this_task.get_cs()[held].outer; 92 | } 93 | 94 | return false; 95 | } 96 | -------------------------------------------------------------------------------- /schedcat/overheads/quanta.py: -------------------------------------------------------------------------------- 1 | """Support for quantum-based scheduling. 2 | """ 3 | from __future__ import division 4 | 5 | from math import ceil, floor 6 | 7 | def is_quantum_multiple(qlen, value): 8 | return value % qlen is 0 9 | 10 | def has_integral_period(qlen): 11 | return lambda t: t.period % qlen is 0 12 | 13 | def quantize_wcet(qlen, tasks, effective_qlen=None): 14 | """Round up execution cost to account for partially used quanta. 15 | Specify an effective_qlen less than the quantum length to account for 16 | overheads. 17 | """ 18 | if effective_qlen is None: 19 | effective_qlen = qlen 20 | assert effective_qlen > 0 21 | assert qlen > 0 22 | for t in tasks: 23 | nr_quanta = int(ceil(t.cost / effective_qlen)) 24 | t.cost = nr_quanta * qlen 25 | if t.density() >= 1: 26 | return False 27 | return tasks 28 | 29 | def quantize_period(qlen, tasks, deadline=False): 30 | """Round down periods to account for the fact that in a quantum-based 31 | scheduler all periods must be multiples of the quantum length. 32 | 33 | Rounding down the period of a periodic task yields a sporadic task that has 34 | an inter-arrival delay of one quantum. 
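
    Illustrative example (values are made up, not taken from any overhead
    data): with qlen = 1000 and a task whose period is 10500, the period is
    rounded down to 10 * 1000 = 10000; if deadline=True, the deadline is
    rounded down in the same way. Returns False as soon as some task's
    density reaches or exceeds 1 after rounding.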
35 | """ 36 | for t in tasks: 37 | if not is_quantum_multiple(t.period, qlen): 38 | nr_quanta = int(floor(t.period / qlen)) 39 | per = nr_quanta * qlen 40 | t.period = per 41 | if deadline and not is_quantum_multiple(t.deadline, qlen): 42 | nr_quanta = int(floor(t.deadline / qlen)) 43 | dl = nr_quanta * qlen 44 | t.deadline = dl 45 | if t.density() >= 1: 46 | return False 47 | return tasks 48 | 49 | def account_for_delayed_release(delay, tasks): 50 | """A release will not be noticed until the start of the next quantum 51 | boundary. Hence, the period and deadline must both be reduced by one 52 | quantum size for hard real-time use. 53 | """ 54 | for t in tasks: 55 | t.period -= delay 56 | t.deadline -= delay 57 | if t.density() >= 1: 58 | return False 59 | return tasks 60 | 61 | def stagger_latency(qlen, num_cpus): 62 | return (num_cpus - 1) / num_cpus * qlen 63 | 64 | def account_for_staggering(qlen, num_cpus, tasks): 65 | """A job may miss its deadline by up to ((m - 1) / m) of a quantum length 66 | due to staggering. Hence, we need to reduce the period and deadline. 67 | 68 | This leaves non-integral task parameters, which must be quantized 69 | afterward with quantize_period(). 70 | """ 71 | reduction = stagger_latency(qlen, num_cpus) 72 | for t in tasks: 73 | t.period -= reduction 74 | t.deadline -= reduction 75 | if t.density() >= 1: 76 | return False 77 | return tasks 78 | -------------------------------------------------------------------------------- /native/src/blocking/linprog/lp_sa_gfmlp.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "linprog/model.h" 7 | #include "linprog/varmapperbase.h" 8 | #include "linprog/solver.h" 9 | 10 | #include "sharedres_types.h" 11 | 12 | #include "iter-helper.h" 13 | #include "stl-helper.h" 14 | #include "stl-io-helper.h" 15 | 16 | #include 17 | #include 18 | #include "res_io.h" 19 | #include "linprog/io.h" 20 | 21 | #include "lp_global.h" 22 | 23 | 24 | class GlobalFMLPAnalysis : public GlobalPrioInheritanceLP, public GlobalFIFOQueuesLP 25 | { 26 | private: 27 | // Constraint 13 28 | void add_fmlp_indirect_preemption_constraints(); 29 | 30 | public: 31 | 32 | GlobalFMLPAnalysis(const ResourceSharingInfo& info, 33 | unsigned int i, 34 | unsigned int number_of_cpus) 35 | : GlobalSuspensionAwareLP(info, i, number_of_cpus), 36 | GlobalPrioInheritanceLP(info, i, number_of_cpus), 37 | GlobalFIFOQueuesLP(info, i, number_of_cpus) 38 | { 39 | // Protocol-specific constraints 40 | 41 | // Constraint 11 42 | add_pip_fmlp_no_stalling_interference(); 43 | // Constraint 13 44 | add_fmlp_indirect_preemption_constraints(); 45 | } 46 | }; 47 | 48 | // Constraint 13: for each resource lq, and each task Tx, 49 | // the sum of indirect and preemption pi-blocking that lower-priority tasks 50 | // causes to a job Ji is bounded by the number of requests that higher-priority 51 | // tasks can issue to this resource under the FMLP. 
52 | void GlobalFMLPAnalysis::add_fmlp_indirect_preemption_constraints() 53 | { 54 | foreach(all_resources, resource) 55 | { 56 | unsigned int request_count = 0; 57 | 58 | // the cumulative number of requests for this resource 59 | // issued by all higher-priority tasks 60 | foreach_higher_priority_task(taskset, ti, th) 61 | foreach_request_for(th->get_requests(), *resource, request) 62 | request_count += request->get_max_num_requests(ti.get_response()); 63 | 64 | foreach_lower_priority_task(taskset, ti, tx) 65 | { 66 | const unsigned int x = tx->get_id(); 67 | const unsigned int q = *resource; 68 | 69 | foreach_request_for(tx->get_requests(), q, request) 70 | { 71 | LinearExpression *exp = new LinearExpression(); 72 | 73 | foreach_request_instance(*request, ti, v) 74 | { 75 | exp->add_var(vars.indirect(x, q, v)); 76 | exp->add_var(vars.preemption(x, q, v)); 77 | } 78 | add_inequality(exp, request_count); 79 | } 80 | } 81 | } 82 | } 83 | 84 | BlockingBounds* lp_sa_gfmlp_bounds( 85 | const ResourceSharingInfo& info, 86 | unsigned int number_of_cpus) 87 | { 88 | BlockingBounds* results = new BlockingBounds(info); 89 | 90 | for (unsigned int i = 0; i < info.get_tasks().size(); i++) 91 | { 92 | GlobalFMLPAnalysis lp(info, i, number_of_cpus); 93 | (*results)[i] = lp.solve(); 94 | } 95 | 96 | return results; 97 | } 98 | -------------------------------------------------------------------------------- /example/lock_example_2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | -------------------------------------------------------------------------------- /tests/generator.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from schedcat.util.time import ms2us 4 | 5 | import schedcat.generator.tasks as tg 6 | import schedcat.generator.tasksets as tsgen 7 | 8 | class TaskGen(unittest.TestCase): 9 | 10 | def test_drawing_functions(self): 11 | f = tg.uniform_int(10, 100) 12 | self.assertTrue(type(f()) == int) 13 | self.assertTrue(10 <= f() <= 100) 14 | 15 | f = tg.uniform(10, 100) 16 | self.assertTrue(type(f()) == float) 17 | self.assertTrue(10 <= f() <= 100) 18 | 19 | f = tg.uniform_choice("abcdefg") 20 | self.assertTrue(type(f()) == str) 21 | self.assertTrue('a' <= f() <= 'g') 22 | 23 | f = tg.exponential(0.1, 0.7, 0.4) 24 | self.assertTrue(type(f()) == float) 25 | self.assertTrue(0.1 <= f() <= 0.7) 26 | 27 | def test_limiters(self): 28 | global counter 29 | counter = 0 30 | def inc(): 31 | global counter 32 | counter += 10 33 | return counter 34 | 35 | trun = tg.truncate(15, 35)(inc) 36 | 37 | self.assertEqual(trun(), 15) 38 | self.assertEqual(counter, 10) 39 | 40 | counter = 0 41 | lim = tg.redraw(15, 35)(inc) 42 | self.assertEqual(lim(), 20) 43 | self.assertEqual(counter, 20) 44 | 45 | 46 | def test_generator(self): 47 | periods = tg.uniform_int(10, 100) 48 | utils = tg.exponential(0.1, 0.9, 0.3) 49 | g = tg.TaskGenerator(periods, utils) 50 | 51 | self.assertEqual(len(list(g.tasks(max_tasks = 10))), 10) 52 | self.assertLessEqual(len(list(g.tasks(max_util = 10))), 100) 53 | 54 | ts1 = 
g.tasks(max_util = 10, squeeze = True, time_conversion=ms2us) 55 | ts2 = g.tasks(max_util = 10, squeeze = False, time_conversion=ms2us) 56 | 57 | self.assertAlmostEqual(sum([t.utilization() for t in ts1]), 10, places=2) 58 | self.assertNotEqual(sum([t.utilization() for t in ts2]), 10) 59 | 60 | def test_task_system_creation(self): 61 | periods = tg.uniform_int(10, 100) 62 | utils = tg.exponential(0.1, 0.9, 0.3) 63 | g = tg.TaskGenerator(periods, utils) 64 | 65 | self.assertEqual(len(g.make_task_set(max_tasks = 10)), 10) 66 | self.assertLessEqual(len((g.make_task_set(max_util = 10))), 100) 67 | 68 | ts1 = g.make_task_set(max_util = 10, squeeze = True, time_conversion=ms2us) 69 | ts2 = g.make_task_set(max_util = 10, squeeze = False, time_conversion=ms2us) 70 | 71 | self.assertAlmostEqual(ts1.utilization(), 10, places=2) 72 | # Not strictly impossible, but very unlikely 73 | self.assertNotEqual(ts2.utilization(), 10) 74 | 75 | class TaskSetGen(unittest.TestCase): 76 | 77 | def test_feasible_tasks(self): 78 | for name in tsgen.ALL_DISTS: 79 | g = tsgen.ALL_DISTS[name] 80 | ts = g(time_conversion=ms2us, max_tasks=4) 81 | self.assertLessEqual(ts.utilization(), 4) 82 | -------------------------------------------------------------------------------- /tests/example_end_to_end.py: -------------------------------------------------------------------------------- 1 | # Uses the example code in the example/ directory, since it's a good end-to-end 2 | # test. 3 | from __future__ import division 4 | 5 | import unittest 6 | 7 | from example.driver import get_script_dir, nolock_example, lock_example 8 | 9 | nolock_example_1_list = [0, 55575, 55575, 67570, 67569, 8805, 8805, 68149, 10 | 68149, 12514, 12513, 55400, 55400, 76501, 76501, 11 | 66761, 66762, -324, -324, 91995, 91995, 35996, 35997] 12 | 13 | nolock_example_2_list = None 14 | lock_example_1_list = [-45215, -104607, -9854, -44381, -53484, -32039, -68667, 15 | -67771, -55354, -47430, -12144] 16 | 17 | lock_example_2_list = None 18 | 19 | class NoLockExample(unittest.TestCase): 20 | def setUp(self): 21 | example_dir = get_script_dir() 22 | self.nolock_example_1 = example_dir + "/nolock_example_1" 23 | self.nolock_example_2 = example_dir + "/nolock_example_2" 24 | results = nolock_example([self.nolock_example_1, self.nolock_example_2]) 25 | self.distilled = [] 26 | for (name, clusts) in results: 27 | if clusts is None: 28 | self.distilled.append((name, None)) 29 | else: 30 | new_distilled = [] 31 | for clust in clusts: 32 | new_distilled += [task.response_time - task.deadline 33 | for task in clust] 34 | self.distilled.append((name, new_distilled)) 35 | 36 | def test_nolock_example_1_works(self): 37 | self.assertEqual(self.distilled[0], (self.nolock_example_1, 38 | nolock_example_1_list)) 39 | 40 | def test_nolock_example_2_works(self): 41 | self.assertEqual(self.distilled[1], (self.nolock_example_2, 42 | nolock_example_2_list)) 43 | 44 | class LockExample(unittest.TestCase): 45 | def setUp(self): 46 | example_dir = get_script_dir() 47 | self.lock_example_1 = example_dir + "/lock_example_1" 48 | self.lock_example_2 = example_dir + "/lock_example_2" 49 | results = lock_example([self.lock_example_1, self.lock_example_2]) 50 | self.distilled = [] 51 | for (name, clusts) in results: 52 | if clusts is None: 53 | self.distilled.append((name, None)) 54 | else: 55 | new_distilled = [] 56 | for clust in clusts: 57 | new_distilled += [task.response_time - task.deadline 58 | for task in clust] 59 | self.distilled.append((name, new_distilled)) 60 | 61 | def 
test_nolock_example_1_works(self): 62 | self.assertEqual(self.distilled[0], (self.lock_example_1, 63 | lock_example_1_list)) 64 | 65 | def test_nolock_example_2_works(self): 66 | self.assertEqual(self.distilled[1], (self.lock_example_2, 67 | lock_example_2_list)) 68 | -------------------------------------------------------------------------------- /tests/util.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import unittest 4 | 5 | from fractions import Fraction 6 | 7 | import schedcat.util.iter as iter 8 | import schedcat.util.math as m 9 | 10 | class Iters(unittest.TestCase): 11 | def setUp(self): 12 | self.s1 = xrange(1, 1000, 3) 13 | self.s2 = xrange(4, 1000, 5) 14 | self.s3 = [-3, 6000] 15 | self.s1b = xrange(1, 1000, 3) 16 | self.s1c = xrange(1, 1000, 3) 17 | 18 | def test_imerge(self): 19 | s = iter.imerge(lambda x, y: x < y, self.s1, self.s2, self.s3) 20 | self.assertEqual(list(s)[:10], 21 | [-3, 1, 4, 4, 7, 9, 10, 13, 14, 16]) 22 | 23 | def test_imerge2(self): 24 | a = range(10) 25 | b = range(1, 6) 26 | c = range(3, 14) 27 | a.reverse() 28 | b.reverse() 29 | c.reverse() 30 | self.assertEqual(list(iter.imerge(lambda a,b: a >= b, a, b, c)), 31 | [13, 12,11, 10, 32 | 9, 9, 8, 8, 7, 7, 6, 6, 33 | 5, 5, 5, 4, 4, 4, 3, 3, 3, 34 | 2, 2, 1, 1, 35 | 0]) 36 | 37 | def test_uniq(self): 38 | s = iter.uniq(iter.imerge(lambda x, y: x < y, self.s1, self.s2, self.s3)) 39 | self.assertEqual(list(s)[:10], 40 | [-3, 1, 4, 7, 9, 10, 13, 14, 16, 19]) 41 | 42 | 43 | class Math(unittest.TestCase): 44 | def test_integral(self): 45 | self.assertTrue(m.is_integral(int(1))) 46 | self.assertTrue(m.is_integral(long(1))) 47 | self.assertFalse(m.is_integral("foo")) 48 | self.assertFalse(m.is_integral(1.0)) 49 | self.assertFalse(m.is_integral(20 / 1)) 50 | self.assertFalse(m.is_integral(Fraction(100, 10))) 51 | 52 | def test_lcm(self): 53 | self.assertEqual(m.lcm(), 0) 54 | self.assertEqual(m.lcm(99), 99) 55 | self.assertEqual(m.lcm(10, 20, 3), 60) 56 | self.assertEqual(m.lcm(10, 20), 20) 57 | self.assertEqual(m.lcm(3, 4), 12) 58 | 59 | def test_topsum(self): 60 | vals = [30, 60, 10, 40, 50, 20] 61 | self.assertEqual(m.topsum(vals, lambda x: x * 2, 3), 2 * (40 + 50 + 60)) 62 | self.assertEqual(m.lcm(99), 99) 63 | self.assertEqual(m.lcm(10, 20, 3), 60) 64 | 65 | 66 | class LinEqs(unittest.TestCase): 67 | def setUp(self): 68 | self.f = m.lin(1, 3) 69 | self.c = m.const(123) 70 | self.pwlin = m.monotonic_pwlin([(0, 1), (1, 0), (1, 4), (2, 5)]) 71 | 72 | def test_const(self): 73 | for x in xrange(1000): 74 | self.assertAlmostEqual(self.c(x), 123) 75 | 76 | def test_lin(self): 77 | for x in xrange(1000): 78 | self.assertAlmostEqual(self.f(x), 1 + x * 3.0) 79 | 80 | def test_pwlin(self): 81 | for x in xrange(1000): 82 | self.assertAlmostEqual(self.pwlin(-x), 1) 83 | self.assertAlmostEqual(self.pwlin(1), 1) 84 | for x in xrange(1000): 85 | x = x + 2 86 | self.assertAlmostEqual(self.pwlin(x), x + 3) 87 | 88 | 89 | -------------------------------------------------------------------------------- /schedcat/sched/edf/da.py: -------------------------------------------------------------------------------- 1 | """Global EDF soft real-time test and tardiness bounds, based on Devi & Anderson's work. 
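
The bounds have the form "tardiness <= x + C_i": task_tardiness_bound() below
computes the x term (with preemptive and non-preemptive variants), and
bound_response_times() then sets each response time to deadline + cost + x.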
2 | """ 3 | 4 | from __future__ import division 5 | 6 | from math import ceil 7 | 8 | def tardiness_x(no_cpus, tasks): 9 | """This function computes the X part of Uma Devi's G-EDF tardiness bound, as 10 | given in Corollary 4.11 on page 109 of Uma's thesis.. 11 | 12 | This function assumes full preemptivity. 13 | """ 14 | if not tasks: 15 | return 0 16 | U = tasks.utilization() 17 | if no_cpus == 1: 18 | if U <= 1: 19 | return 0 20 | else: 21 | return None 22 | by_util = [t.utilization() for t in tasks] 23 | by_util.sort(reverse=True) 24 | by_cost = [t.cost for t in tasks] 25 | by_cost.sort(reverse=True) 26 | 27 | Lambda = int(ceil(U)) - 1 28 | emin = by_cost[-1] 29 | 30 | reduced_capacity = no_cpus - sum(by_util[0:Lambda - 1]) 31 | if reduced_capacity <= 0: 32 | # bad: tardiness is not bounded 33 | return None 34 | 35 | reduced_cost = max(0, sum(by_cost[0:Lambda]) - emin) 36 | return int(ceil(reduced_cost / reduced_capacity)) 37 | 38 | def np_tardiness_x(no_cpus, tasks): 39 | """This function computes the X part of Uma Devi's G-EDF tardiness bound, as 40 | given in Corollary 4.3 in Uma's thesis, page 110. 41 | """ 42 | if not tasks: 43 | return 0 44 | U = tasks.utilization() 45 | # by_util is mu in Uma's theorem 46 | by_util = [t.utilization() for t in tasks] 47 | by_util.sort(reverse=True) 48 | # by_cost is epsilon in Uma's theorem 49 | by_cost = [t.cost for t in tasks] 50 | by_cost.sort(reverse=True) 51 | 52 | Lambda = int(ceil(U)) - 1 53 | emin = by_cost[-1] 54 | 55 | reduced_capacity = no_cpus - sum(by_util[0:Lambda - 1]) 56 | if reduced_capacity <= 0: 57 | # bad: tardiness is not bounded 58 | return None 59 | 60 | block_idx = no_cpus - Lambda - 1 61 | reduced_cost = sum(by_cost[0:Lambda]) + sum(by_cost[0:block_idx]) - emin 62 | return int(ceil(reduced_cost / reduced_capacity)) 63 | 64 | def task_tardiness_bound(no_cpus, tasks, preemptive=True): 65 | x = 0 66 | # first check if the bound formulas are valid 67 | if not has_bounded_tardiness(no_cpus, tasks): 68 | return None 69 | if no_cpus > 1: 70 | if preemptive: 71 | x = tardiness_x(no_cpus, tasks) 72 | else: 73 | x = np_tardiness_x(no_cpus, tasks) 74 | else: 75 | x = 0 76 | return x 77 | 78 | def has_bounded_tardiness(no_cpus, tasks): 79 | return tasks.utilization() <= no_cpus and \ 80 | all(t.period >= t.cost for t in tasks) 81 | 82 | def bound_response_times(no_cpus, tasks, preemptive=True): 83 | # DA's work applies to implicit-deadline tasks 84 | assert all(t.implicit_deadline() for t in tasks) 85 | 86 | x = task_tardiness_bound(no_cpus, tasks, preemptive) 87 | if x is None: 88 | return False 89 | else: 90 | for t in tasks: 91 | t.response_time = t.deadline + t.cost + x 92 | return True 93 | -------------------------------------------------------------------------------- /tests/sched.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | from fractions import Fraction 4 | 5 | import unittest 6 | 7 | import schedcat.sched.run as run 8 | 9 | class RUNOfflinePhase(unittest.TestCase): 10 | def setUp(self): 11 | self.servers = [ 12 | run.Server(Fraction(1, 2), id=1), 13 | run.Server(Fraction(4, 10), id=2), 14 | run.Server(Fraction(4, 10), id=3), 15 | run.Server(Fraction(3, 10), id=4), 16 | run.Server(Fraction(2, 10), id=5), 17 | run.Server(Fraction(1, 10), id=6), 18 | run.Server(Fraction(1, 10), id=7), 19 | ] 20 | 21 | self.example_packing = [ 22 | [self.servers[0], self.servers[3]], 23 | [self.servers[1], self.servers[4], self.servers[5], self.servers[6]], 24 | 
[self.servers[2]] 25 | ] 26 | 27 | def test_find_packing(self): 28 | packing = run.find_packing(self.servers) 29 | all = set(self.servers) 30 | packed = set() 31 | for bin in packing: 32 | for s in bin: 33 | packed.add(s) 34 | self.assertEqual(all, packed) 35 | 36 | def test_pack(self): 37 | packed = run.pack(self.example_packing, 8) 38 | self.assertEqual(len(packed), len(self.example_packing)) 39 | for s, bin in zip(packed, self.example_packing): 40 | self.assertEqual(s.clients, bin) 41 | 42 | def test_dual(self): 43 | packed = run.pack(self.example_packing, 8) 44 | duals = run.dual(packed) 45 | self.assertEqual(len(duals), len(self.example_packing)) 46 | for d, s in zip(duals, packed): 47 | self.assertEqual(1, d.rate + s.rate) 48 | 49 | self.assertFalse(duals[0].is_null_server()) 50 | self.assertFalse(duals[1].is_null_server()) 51 | self.assertFalse(duals[2].is_null_server()) 52 | self.assertFalse(duals[0].is_unit_server()) 53 | self.assertFalse(duals[1].is_unit_server()) 54 | self.assertFalse(duals[2].is_unit_server()) 55 | 56 | def test_reduction_step(self): 57 | red = run.reduction_step(self.servers, 8) 58 | self.assertTrue(red[0].is_null_server()) 59 | self.assertTrue(red[1].is_null_server()) 60 | self.assertFalse(red[0].is_unit_server()) 61 | self.assertFalse(red[1].is_unit_server()) 62 | 63 | def test_reduce(self): 64 | final, levels = run.reduce(self.servers, 8) 65 | for s in final: 66 | self.assertEqual(s.rate, 1) 67 | self.assertEqual(len(levels), 1) 68 | self.assertEqual(len(final), 2) 69 | self.assertEqual(run.max_number_of_preemptions_per_job_release(levels), 1) 70 | 71 | def test_reduce_non_integer(self): 72 | self.servers.append(run.Server(Fraction(1, 3), id='frac')) 73 | final, levels = run.reduce(self.servers, 8) 74 | for s in final: 75 | self.assertEqual(s.rate, 1) 76 | self.assertEqual(len(final), 3) 77 | self.assertEqual(len(levels), 1) 78 | self.assertEqual(run.max_number_of_preemptions_per_job_release(levels), 1) 79 | -------------------------------------------------------------------------------- /schedcat/sched/edf/bar.py: -------------------------------------------------------------------------------- 1 | """G-EDF hard schedulability test 2 | 3 | This module implements Sanjoy Baruah's G-EDF schedulability test as presented 4 | in his paper "Techniques for Multiprocessor Global Schedulability Analysis." 5 | 6 | The variable names are picked to resemble the paper and are not meant to be 7 | understandable without the paper as a reference. 8 | """ 9 | 10 | from __future__ import division 11 | 12 | from math import floor, ceil 13 | from itertools import izip 14 | from schedcat.util.math import topsum 15 | 16 | # The definition of I1() and I2() diverge from the one given in the 17 | # RTSS'07 paper. According to S. Baruah: "The second term in the min -- 18 | # A_k+D_k-C_k -- implicitly assumes that the job missing its deadline 19 | # executes for C_k time units, whereas it actually executes for strictly 20 | # less than C_k. Hence this second term should be --A_k+D_k(-C_k - 21 | # \epsilon); for task systems with integer parameters, epsilon can be 22 | # taken to e equal to one. [...] A similar modification may need to be 23 | # made for the definition of I2." 
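# Concretely, in the implementation below: I1() caps the interference of tsk_i
# using the ordinary demand bound function dbf(A_k + D_k), I2() uses dbf_(),
# the carry-in variant, and for tasks other than tsk_k both are limited to
# A_k + D_k - (C_k - 1), which is where the "epsilon = 1" adjustment quoted
# above enters.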
24 | 25 | def I1(tsk_i, tsk_k, a_k): 26 | d_k = tsk_k.deadline 27 | c_k = tsk_k.cost 28 | if tsk_k == tsk_i: 29 | return min(tsk_i.dbf(a_k + d_k) - c_k, a_k) 30 | else: 31 | return min(tsk_i.dbf(a_k + d_k), a_k + d_k - (c_k - 1)) 32 | 33 | def dbf_(tsk, t): 34 | """dbf() for carry-in scenario""" 35 | return int(floor(t / tsk.period)) * tsk.cost + min(tsk.cost, t % tsk.period) 36 | 37 | def I2(tsk_i, tsk_k, a_k): 38 | d_k = tsk_k.deadline 39 | c_k = tsk_k.cost 40 | if tsk_k == tsk_i: 41 | return min(dbf_(tsk_i, a_k + d_k) - c_k, a_k) 42 | else: 43 | return min(dbf_(tsk_i, a_k + d_k), a_k + d_k - (c_k - 1)) 44 | 45 | def Idiff(tsk_i, tsk_k, a_k): 46 | return I2(tsk_i, tsk_k, a_k) - I1(tsk_i, tsk_k, a_k) 47 | 48 | def task_schedulable_for_offset(all_tsks, tsk_k, a_k, m): 49 | """Tests condition 8 from the paper""" 50 | I1s = [I1(tsk_i, tsk_k, a_k) for tsk_i in all_tsks] 51 | Idiffs = [I2(tsk_i, tsk_k, a_k) - i1 for (tsk_i, i1) in izip(all_tsks, I1s)] 52 | Idiff = topsum(Idiffs, None, m -1) 53 | return sum(I1s) + Idiff <= m * (a_k + tsk_k.deadline - tsk_k.cost) 54 | 55 | def ak_bounds(all_tsks, m): 56 | U = all_tsks.utilization() 57 | c_sigma = topsum(all_tsks, lambda t: t.cost, m - 1) 58 | y = sum([(t.period - t.deadline) * t.utilization() for t in all_tsks]) 59 | mu = m - U 60 | def ak_bound(tsk_k): 61 | # Equation 9 in the paper 62 | return (c_sigma - tsk_k.deadline * mu + y + m * tsk_k.cost) / mu 63 | return [ak_bound(t) for t in all_tsks] 64 | 65 | def is_schedulable(m, tasks): 66 | """Are the given tasks schedulable on m processors?""" 67 | if tasks.utilization() >= m or not all(t.constrained_deadline() for t in tasks): 68 | return False 69 | for (tsk_k, a_k_bound) in izip(tasks, ak_bounds(tasks, m)): 70 | for a_k in tasks.dbf_points_of_change(a_k_bound, offset=tsk_k.deadline): 71 | if not task_schedulable_for_offset(tasks, tsk_k, a_k, m): 72 | return False 73 | return True 74 | 75 | -------------------------------------------------------------------------------- /native/src/edf/sim.cpp: -------------------------------------------------------------------------------- 1 | #include "tasks.h" 2 | #include "edf/sim.h" 3 | 4 | #include "schedule_sim.h" 5 | 6 | #include 7 | 8 | typedef GlobalScheduler GedfSim; 9 | 10 | class DeadlineMissSearch : public GedfSim 11 | { 12 | private: 13 | bool dmissed; 14 | 15 | public: 16 | simtime_t when_missed; 17 | simtime_t when_completed; 18 | 19 | DeadlineMissSearch(int m, bool preemptive) : GedfSim(m, preemptive), dmissed(false) {}; 20 | 21 | virtual void job_completed(int proc, Job *job) 22 | { 23 | if (this->get_current_time() > job->get_deadline()) 24 | { 25 | dmissed = true; 26 | when_missed = job->get_deadline(); 27 | when_completed = this->get_current_time(); 28 | abort(); 29 | } 30 | }; 31 | 32 | bool deadline_was_missed() 33 | { 34 | return dmissed; 35 | } 36 | }; 37 | 38 | class Tardiness : public GedfSim 39 | { 40 | public: 41 | Stats stats; 42 | 43 | Tardiness(int m, bool preemptive) : GedfSim(m, preemptive) 44 | { 45 | stats.num_tardy_jobs = 0; 46 | stats.num_ok_jobs = 0; 47 | stats.total_tardiness = 0; 48 | stats.max_tardiness = 0; 49 | stats.first_miss = 0; 50 | }; 51 | 52 | virtual void job_completed(int proc, Job *job) 53 | { 54 | if (this->get_current_time() > job->get_deadline()) 55 | { 56 | simtime_t tardiness; 57 | tardiness = this->get_current_time() - job->get_deadline(); 58 | stats.num_tardy_jobs++; 59 | stats.total_tardiness += tardiness; 60 | stats.max_tardiness = std::max(tardiness, stats.max_tardiness); 61 | if 
(!stats.first_miss) 62 | stats.first_miss = job->get_deadline(); 63 | } 64 | else 65 | stats.num_ok_jobs++; 66 | }; 67 | }; 68 | 69 | unsigned long edf_first_violation(unsigned int num_procs, 70 | TaskSet &ts, 71 | unsigned long end_of_simulation, 72 | bool preemptive) 73 | { 74 | DeadlineMissSearch sim(num_procs, preemptive); 75 | 76 | run_periodic_simulation(sim, ts, end_of_simulation); 77 | if (sim.deadline_was_missed()) 78 | return sim.when_missed; 79 | else 80 | return 0; 81 | } 82 | 83 | bool edf_misses_deadline(unsigned int num_procs, 84 | TaskSet &ts, 85 | unsigned long end_of_simulation, 86 | bool preemptive) 87 | { 88 | DeadlineMissSearch sim(num_procs, preemptive); 89 | 90 | run_periodic_simulation(sim, ts, end_of_simulation); 91 | return sim.deadline_was_missed(); 92 | } 93 | 94 | 95 | Stats edf_observe_tardiness(unsigned int num_procs, 96 | TaskSet &ts, 97 | unsigned long end_of_simulation, 98 | bool preemptive) 99 | { 100 | Tardiness sim(num_procs, preemptive); 101 | 102 | run_periodic_simulation(sim, ts, end_of_simulation); 103 | 104 | return sim.stats; 105 | } 106 | 107 | -------------------------------------------------------------------------------- /schedcat/overheads/jlfp.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | from math import ceil, floor 4 | import heapq 5 | 6 | def charge_initial_load(oheads, taskset): 7 | """Increase WCET to reflect the cost of establishing a warm cache. 8 | Note: assumes that .wss (working set size) has been populated in each task. 9 | """ 10 | if oheads: 11 | for ti in taskset: 12 | load = oheads.initial_cache_load(ti.wss) 13 | assert load >= 0 # negative overheads make no sense 14 | ti.cost += load 15 | if ti.density() > 1: 16 | # infeasible 17 | return False 18 | return taskset 19 | 20 | def preemption_centric_irq_costs(oheads, dedicated_irq, taskset): 21 | n = len(taskset) 22 | n_rel_irq = 0 23 | qlen = oheads.quantum_length 24 | tck = oheads.tick(n) 25 | ev_lat = oheads.release_latency(n) 26 | 27 | # tick interrupt 28 | utick = tck / qlen 29 | 30 | urel = 0.0 31 | if not dedicated_irq: 32 | rel = oheads.release(n) 33 | for ti in taskset: 34 | if not hasattr(ti, 'early_releasing') or ti.early_releasing == False: 35 | urel += (rel / ti.period) 36 | n_rel_irq += 1 37 | 38 | # cost of preemption 39 | cpre_numerator = tck + ev_lat * utick 40 | if not dedicated_irq: 41 | cpre_numerator += n_rel_irq * rel + ev_lat * urel 42 | 43 | uscale = 1.0 - utick - urel 44 | 45 | return (uscale, cpre_numerator / uscale) 46 | 47 | def charge_scheduling_overheads(oheads, num_cpus, dedicated_irq, taskset): 48 | if not oheads: 49 | return taskset 50 | 51 | uscale, cpre = preemption_centric_irq_costs(oheads, dedicated_irq, taskset) 52 | 53 | if uscale <= 0: 54 | # interrupt overload 55 | return False 56 | 57 | n = len(taskset) 58 | 59 | cpmd = [(ti, oheads.cache_affinity_loss(ti.wss)) \ 60 | for ti in heapq.nlargest(2, taskset, lambda x: x.wss)] 61 | sched = 2 * (oheads.schedule(n) + oheads.ctx_switch(n)) 62 | 63 | irq_latency = oheads.release_latency(n) 64 | 65 | if dedicated_irq: 66 | unscaled = 2 * cpre + oheads.ipi_latency(n) + oheads.release(n) 67 | elif num_cpus > 1: 68 | unscaled = 2 * cpre + oheads.ipi_latency(n) 69 | else: 70 | unscaled = 2 * cpre 71 | 72 | for ti in taskset: 73 | tasksched = sched 74 | if cpmd: 75 | if ti != cpmd[0][0]: 76 | tasksched += cpmd[0][1] 77 | elif len(cpmd) > 1: 78 | tasksched += cpmd[1][1] 79 | 80 | ti.period -= irq_latency 81 | ti.deadline 
-= irq_latency 82 | ti.cost = ((ti.cost + tasksched) / uscale) + unscaled 83 | if ti.density() > 1: 84 | return False 85 | 86 | return taskset 87 | 88 | def quantize_params(taskset): 89 | """After applying overheads, use this function to make 90 | task parameters integral again.""" 91 | 92 | for t in taskset: 93 | t.cost = int(ceil(t.cost)) 94 | t.period = int(floor(t.period)) 95 | t.deadline = int(floor(t.deadline)) 96 | if not min(t.period, t.deadline) or t.density() > 1: 97 | return False 98 | 99 | return taskset 100 | -------------------------------------------------------------------------------- /tests/pedf_spinlocks.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import unittest 4 | import random 5 | 6 | import schedcat.locking.bounds as lb 7 | import schedcat.locking.native as cpp 8 | import schedcat.model.tasks as tasks 9 | import schedcat.model.resources as r 10 | 11 | import schedcat.util.linprog 12 | 13 | class PEDF_Spinlocks(unittest.TestCase): 14 | def setUp(self): 15 | self.trivial_ts = tasks.TaskSystem([ 16 | tasks.SporadicTask(10, 33, 24), 17 | tasks.SporadicTask(10, 95, 81), 18 | tasks.SporadicTask(12, 63, 63), 19 | # 20 | tasks.SporadicTask(10, 30, 30), 21 | tasks.SporadicTask(20, 150, 130), 22 | ]) 23 | self.trivial_num_cpus = 2 24 | 25 | 26 | r.initialize_resource_model(self.trivial_ts) 27 | lb.assign_edf_preemption_levels(self.trivial_ts) 28 | 29 | #for i, t in enumerate(self.trivial_ts): 30 | # t.partition = 0 31 | # t.response_time = 4*t.cost 32 | 33 | # CPU0 34 | self.trivial_ts[0].partition = 0; 35 | self.trivial_ts[1].partition = 0; 36 | self.trivial_ts[2].partition = 0; 37 | 38 | # CPU1 39 | self.trivial_ts[3].partition = 1; 40 | self.trivial_ts[4].partition = 1; 41 | 42 | # ---------[ Reuqests ]----------------------- 43 | 44 | # L_{0,0}=3, N_{0,0}=1 45 | self.trivial_ts[0].resmodel[0].add_request(3) 46 | 47 | # L_{1,0}=3, N_{1,0}=1 48 | self.trivial_ts[1].resmodel[0].add_request(1) 49 | #self.trivial_ts[1].resmodel[0].add_request(1) 50 | 51 | # L_{1,1}=5, N_{1,1}=1 52 | self.trivial_ts[1].resmodel[1].add_request(5) 53 | 54 | # L_{2,0}=5, N_{2,0}=1 55 | self.trivial_ts[2].resmodel[0].add_request(1) 56 | self.trivial_ts[2].resmodel[0].add_request(1) 57 | self.trivial_ts[2].resmodel[0].add_request(1) 58 | 59 | 60 | # L_{3,0}=1, N_{3,0}=2 61 | self.trivial_ts[3].resmodel[0].add_request(1) 62 | self.trivial_ts[3].resmodel[0].add_request(1) 63 | 64 | # L_{4,0}=1, N_{4,0}=1 65 | self.trivial_ts[4].resmodel[0].add_request(4) 66 | 67 | # L_{4,1}=1, N_{4,1}=2 68 | self.trivial_ts[4].resmodel[1].add_request(1) 69 | self.trivial_ts[4].resmodel[1].add_request(1) 70 | 71 | # L_{4,2}=2, N_{4,2}=1 72 | self.trivial_ts[4].resmodel[2].add_request(2) 73 | 74 | @unittest.skipIf(not schedcat.locking.bounds.lp_cpp_available, "no native LP solver available") 75 | def test_MSRP(self): 76 | if lb.pedf_msrp_is_schedulable(self.trivial_ts): 77 | print "[MSRP] SCHEDULABLE"; 78 | else: 79 | print "[MSRP] NOT SCHEDULABLE"; 80 | 81 | @unittest.skipIf(not schedcat.locking.bounds.lp_cpp_available, "no native LP solver available") 82 | def test_FIFO_preempt(self): 83 | if lb.pedf_fifo_preempt_is_schedulable(self.trivial_ts): 84 | print "[FIFO Preemptive] SCHEDULABLE"; 85 | else: 86 | print "[FIFO Preemptive] NOT SCHEDULABLE"; 87 | 88 | def test_MSRP_classic(self): 89 | if lb.pedf_msrp_classic_is_schedulable(self.trivial_ts, self.trivial_num_cpus): 90 | print "[MSRP Classic] SCHEDULABLE"; 91 | else: 92 | print 
"[MSRP Classic] NOT SCHEDULABLE"; 93 | 94 | #self.assertEqual(self.trivial_ts[0].response_time, self.trivial_ts[0].cost + 75) 95 | -------------------------------------------------------------------------------- /native/include/canbus/tardiness_stats.h: -------------------------------------------------------------------------------- 1 | #ifndef CANBUS_TARDINESS_STATS 2 | #define CANBUS_TARDINESS_STATS 3 | 4 | #include "can_sim.h" 5 | 6 | #define DEBUG_OUTPUT(TIME, JOB, JOB_STATUS) \ 7 | if (DEBUG_MODE) \ 8 | { \ 9 | cout << "at time " << TIME << ": "; \ 10 | cout << JOB_STATUS << " job "; \ 11 | cout << JOB->get_task().get_taskid() << "_"; \ 12 | cout << JOB->get_task().get_priority() << "_"; \ 13 | cout << JOB->get_seqno() << endl; \ 14 | } \ 15 | 16 | // keeps track of the number of messages received 17 | // for each round of the task 18 | struct RoundInfo 19 | { 20 | unsigned long seqno; // round number j for vector M_j 21 | unsigned long ok_msgs; 22 | unsigned long faulty_msgs; 23 | }; 24 | 25 | // keeps track of the completed rounds, the latest 26 | // round number, and the currently active round 27 | struct TaskIdInfo 28 | { 29 | unsigned long latest_round_completed; 30 | unsigned long num_ok_rounds; 31 | unsigned long num_faulty_rounds; 32 | std::vector active_rounds; 33 | }; 34 | 35 | class CANBusTardinessStats: public CANBusScheduler 36 | { 37 | private: 38 | std::map sync_stats; 39 | std::map async_stats; 40 | unsigned int rprime; 41 | 42 | public: 43 | CANBusTardinessStats() : CANBusScheduler() {} 44 | 45 | unsigned int get_rprime() { return rprime; } 46 | void set_rprime(unsigned int rprime) { this->rprime = rprime; } 47 | 48 | unsigned long get_num_ok_rounds_sync(unsigned long tid) 49 | { 50 | return sync_stats.find(tid)->second.num_ok_rounds; 51 | } 52 | 53 | unsigned long get_num_ok_rounds_async(unsigned long tid) 54 | { 55 | return async_stats.find(tid)->second.num_ok_rounds; 56 | } 57 | 58 | unsigned long get_num_faulty_rounds_sync(unsigned long tid) 59 | { 60 | return sync_stats.find(tid)->second.num_faulty_rounds; 61 | } 62 | 63 | unsigned long get_num_faulty_rounds_async(unsigned long tid) 64 | { 65 | return async_stats.find(tid)->second.num_faulty_rounds; 66 | } 67 | 68 | virtual void job_released(CANJob *job) 69 | { 70 | DEBUG_OUTPUT(current_time, job, "released"); 71 | } 72 | 73 | virtual void job_scheduled(int proc, CANJob *preempted, CANJob *scheduled) 74 | { 75 | DEBUG_OUTPUT(current_time, scheduled, "scheduled") 76 | } 77 | 78 | virtual void job_retransmitted(CANJob *job) 79 | { 80 | DEBUG_OUTPUT(current_time, job, "RETRANSMITTING") 81 | } 82 | 83 | virtual void job_omitted(CANJob *job) 84 | { 85 | DEBUG_OUTPUT(current_time, job, "OMITTED") 86 | } 87 | 88 | virtual void job_committed(CANJob *job) 89 | { 90 | DEBUG_OUTPUT(current_time, job, "COMMITTED") 91 | } 92 | 93 | virtual void job_completed(int proc, CANJob *job); 94 | virtual void job_deadline_expired(CANJob *job); 95 | 96 | void init_sync_stats_for_taskid(unsigned long tid); 97 | void init_async_stats_for_taskid(unsigned long tid); 98 | void reset_sync_stats(); 99 | void reset_async_stats(); 100 | 101 | void job_completed_sync(CANJob *job); 102 | void job_completed_async(CANJob *job); 103 | void job_deadline_expired_sync(CANJob *job); 104 | void job_deadline_expired_async(CANJob *job); 105 | }; 106 | 107 | #endif 108 | -------------------------------------------------------------------------------- /native/src/edf/bcl_iterative.cpp: 
-------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "tasks.h" 5 | #include "schedulability.h" 6 | 7 | #include "edf/bcl_iterative.h" 8 | 9 | using namespace std; 10 | 11 | static void interfering_workload(const Task &t_i, 12 | const Task &t_k, 13 | unsigned long slack, 14 | integral_t &inf) 15 | { 16 | unsigned long njobs = t_k.get_deadline() / t_i.get_period(); 17 | 18 | inf = njobs; 19 | inf *= t_i.get_wcet(); 20 | 21 | unsigned long tmp = slack + njobs * t_i.get_period(); 22 | 23 | if (t_k.get_deadline() >= tmp) 24 | inf += min(t_i.get_wcet(), t_k.get_deadline() - tmp); 25 | //else inf += min(t.get_wcet(), 0) // always null by definition. 26 | } 27 | 28 | bool BCLIterativeGedf::slack_update(unsigned int k, 29 | const TaskSet &ts, 30 | unsigned long *slack, 31 | bool &has_slack) 32 | { 33 | integral_t other_work = 0; 34 | integral_t inf; 35 | integral_t inf_bound = ts[k].get_deadline() - ts[k].get_wcet() + 1; 36 | 37 | for (unsigned int i = 0; i < ts.get_task_count(); i++) 38 | if (k != i) 39 | { 40 | interfering_workload(ts[i], ts[k], slack[i], inf); 41 | other_work += min(inf, inf_bound); 42 | } 43 | other_work /= m; 44 | unsigned long tmp = ts[k].get_wcet() + other_work.get_ui(); 45 | 46 | assert( other_work.fits_ulong_p() ); 47 | assert (tmp > other_work.get_ui() ); 48 | 49 | has_slack = tmp <= ts[k].get_deadline(); 50 | if (!has_slack) 51 | // negative slack => no update, always assume zero 52 | return false; 53 | else 54 | { 55 | tmp = ts[k].get_deadline() - tmp; 56 | if (tmp > slack[k]) 57 | { 58 | // better slack => update 59 | slack[k] = tmp; 60 | return true; 61 | } 62 | else 63 | // no improvement 64 | return false; 65 | } 66 | } 67 | 68 | bool BCLIterativeGedf::is_schedulable(const TaskSet &ts, 69 | bool check_preconditions) 70 | { 71 | if (check_preconditions) 72 | { 73 | if (!(ts.has_only_feasible_tasks() 74 | && ts.is_not_overutilized(m) 75 | && ts.has_only_constrained_deadlines() 76 | && ts.has_no_self_suspending_tasks())) 77 | return false; 78 | if (ts.get_task_count() == 0) 79 | return true; 80 | } 81 | 82 | unsigned long* slack = new unsigned long[ts.get_task_count()]; 83 | 84 | for (unsigned int i = 0; i < ts.get_task_count(); i++) 85 | slack[i] = 0; 86 | 87 | unsigned long round = 0; 88 | bool schedulable = false; 89 | bool updated = true; 90 | 91 | while (updated && !schedulable && (max_rounds == 0 || round < max_rounds)) 92 | { 93 | round++; 94 | schedulable = true; 95 | updated = false; 96 | for (unsigned int k = 0; k < ts.get_task_count(); k++) 97 | { 98 | bool ok; 99 | if (slack_update(k, ts, slack, ok)) 100 | updated = true; 101 | schedulable = schedulable && ok; 102 | } 103 | } 104 | 105 | return schedulable; 106 | } 107 | -------------------------------------------------------------------------------- /schedcat/generator/tasksets.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate random task sets for schedulability experiments. 3 | """ 4 | 5 | 6 | import re 7 | import random 8 | from functools import partial 9 | 10 | import schedcat.generator.tasks as gen 11 | 12 | def decode_params(name): 13 | # uni-UMIN-UMAX-PMIN-PMAX 14 | # bimo- 15 | # exp-UMIN-UMAX-MEAN-PMIN-PMAX 16 | 17 | pass 18 | 19 | NAMED_PERIODS = { 20 | # Named period distributions used in several UNC papers, in milliseconds. 
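    # (The distributions below return periods in milliseconds; callers are
    # expected to convert, e.g. with time_conversion=ms2us, when materializing
    # task sets; see tests/generator.py.)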
21 | 'uni-short' : gen.uniform_int( 3, 33), 22 | 'uni-moderate' : gen.uniform_int(10, 100), 23 | 'uni-long' : gen.uniform_int(50, 250), 24 | 25 | 'log-uni-short' : gen.log_uniform_int( 3, 33), 26 | 'log-uni-moderate' : gen.log_uniform_int(10, 100), 27 | 'log-uni-long' : gen.log_uniform_int(50, 250), 28 | } 29 | 30 | NAMED_UTILIZATIONS = { 31 | # Named utilization distributions used in several UNC papers, in milliseconds. 32 | 'uni-light' : gen.uniform(0.001, 0.1), 33 | 'uni-medium' : gen.uniform(0.1 , 0.4), 34 | 'uni-heavy' : gen.uniform(0.5 , 0.9), 35 | 36 | 'exp-light' : gen.exponential(0, 1, 0.10), 37 | 'exp-medium' : gen.exponential(0, 1, 0.25), 38 | 'exp-heavy' : gen.exponential(0, 1, 0.50), 39 | 40 | 'bimo-light' : gen.multimodal([(gen.uniform(0.001, 0.5), 8), 41 | (gen.uniform(0.5 , 0.9), 1)]), 42 | 'bimo-medium' : gen.multimodal([(gen.uniform(0.001, 0.5), 6), 43 | (gen.uniform(0.5 , 0.9), 3)]), 44 | 'bimo-heavy' : gen.multimodal([(gen.uniform(0.001, 0.5), 4), 45 | (gen.uniform(0.5 , 0.9), 5)]), 46 | } 47 | 48 | def uniform_slack(min_slack_ratio, max_slack_ratio): 49 | """Choose deadlines uniformly such that the slack 50 | is within [cost + min_slack_ratio * (period - cost), 51 | cost + max_slack_ratio * (period - cost)]. 52 | 53 | Setting max_slack_ratio = 1 implies constrained deadlines. 54 | """ 55 | def choose_deadline(cost, period): 56 | slack = period - cost 57 | earliest = slack * min_slack_ratio 58 | latest = slack * max_slack_ratio 59 | return cost + random.uniform(earliest, latest) 60 | return choose_deadline 61 | 62 | NAMED_DEADLINES = { 63 | 'implicit' : None, 64 | 'uni-constrained' : uniform_slack(0, 1), 65 | 'uni-arbitrary' : uniform_slack(0, 2), 66 | } 67 | 68 | def mkgen(utils, periods, deadlines=None): 69 | if deadlines is None: 70 | g = gen.TaskGenerator(periods, utils) 71 | else: 72 | g = gen.TaskGenerator(periods, utils, deadlines) 73 | return partial(g.make_task_set) 74 | 75 | def make_standard_dists(dl='implicit'): 76 | by_period = {} 77 | for p in NAMED_PERIODS: 78 | by_util = {} 79 | by_period[p] = by_util 80 | for u in NAMED_UTILIZATIONS: 81 | by_util[u] = mkgen(NAMED_UTILIZATIONS[u], 82 | NAMED_PERIODS[p], 83 | NAMED_DEADLINES[dl]) 84 | return by_period 85 | 86 | # keyed by deadline type, then by period, then by utilization 87 | DIST_BY_KEY = {} 88 | for dl in NAMED_DEADLINES: 89 | DIST_BY_KEY[dl] = make_standard_dists(dl) 90 | 91 | ALL_DISTS = {} 92 | for dl in NAMED_DEADLINES: 93 | for p in NAMED_PERIODS: 94 | for u in NAMED_UTILIZATIONS: 95 | ALL_DISTS[':'.join([u, p, dl])] = DIST_BY_KEY[dl][p][u] 96 | -------------------------------------------------------------------------------- /schedcat/sched/run.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | from schedcat.mapping.binpack import best_fit_decreasing 4 | from math import ceil 5 | from fractions import Fraction 6 | 7 | class Server(object): 8 | 9 | def __init__(self, rate, deadlines=None, clients=None, id=None, 10 | created_from=None): 11 | self.rate = rate 12 | if deadlines is None: 13 | deadlines = frozenset() 14 | self.deadlines = deadlines 15 | if clients is None: 16 | clients = [] 17 | self.clients = clients 18 | self.id = id 19 | self.created_from = created_from 20 | 21 | @staticmethod 22 | def aggregate(servers, id=None): 23 | total_rate = sum((s.rate for s in servers)) 24 | deadlines = set() 25 | for s in servers: 26 | deadlines |= s.deadlines 27 | return Server(total_rate, deadlines=frozenset(deadlines), 28 | 
clients=list(servers), id=id) 29 | 30 | def dual(self): 31 | return Server(1 - self.rate, 32 | deadlines=self.deadlines, 33 | clients=self.clients, 34 | id='%s*' % self.id if self.id else None, 35 | created_from=self) 36 | 37 | def is_unit_server(self): 38 | return self.rate == 1 39 | 40 | def is_null_server(self): 41 | return self.rate == 0 42 | 43 | def __repr__(self): 44 | return 'Server(%s%s%s%s)' % \ 45 | (self.rate, 46 | ', deadlines=%s' % self.deadlines if self.deadlines else '', 47 | ', clients=%s' % self.clients if self.clients else '', 48 | ', id=%s' % self.id if self.id else '') 49 | 50 | def __str__(self): 51 | return repr(self) 52 | 53 | def find_packing(servers): 54 | return best_fit_decreasing(servers, 0, weight=lambda s: s.rate) 55 | 56 | def dual(servers): 57 | return [s.dual() for s in servers] 58 | 59 | def pack(packing, next_id=None): 60 | if not next_id: 61 | return [Server.aggregate(bin) for bin in packing] 62 | else: 63 | return [Server.aggregate(bin, next_id + i) 64 | for (i, bin) in enumerate(packing)] 65 | 66 | def reduction_step(servers, next_id=None): 67 | packing = find_packing(servers) 68 | packed = pack(packing, next_id) 69 | return dual(packed) 70 | 71 | def total_rate(servers): 72 | return sum((s.rate for s in servers)) 73 | 74 | def all_unit_servers(servers): 75 | return all((s.is_unit_server() for s in servers)) 76 | 77 | def ensure_integer_rate(servers): 78 | r = total_rate(servers) 79 | next_int = Fraction(ceil(r)) 80 | if next_int != r: 81 | return list(servers) + [Server(next_int - r, id='slack')] 82 | else: 83 | return list(servers) 84 | 85 | def reduce(server, next_id=None): 86 | levels = [ensure_integer_rate(server)] 87 | while True: 88 | packing = find_packing(levels[-1]) 89 | packed = pack(packing, next_id) 90 | if all_unit_servers(packed): 91 | # great, we are done 92 | return packed, levels 93 | else: 94 | # nope, need to look at duals and continue 95 | levels.append(dual(packed)) 96 | if next_id: 97 | next_id += len(packed) 98 | 99 | def max_number_of_preemptions_per_job_release(levels): 100 | # Lemma 8 in the RUN journal paper 101 | return int(ceil(len(levels) / 2)) 102 | 103 | -------------------------------------------------------------------------------- /schedcat/sched/edf/rta.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implementation of Bertogna and Cirinei's response time analysis test. 3 | 4 | "Response-Time Analysis for Globally Scheduled Symmetric 5 | Multiprocessor Platforms" 6 | M. Bertogna and M. Cirinei, 7 | Proceedings of the 28th IEEE International Real-Time Systems Symposium, 8 | pages 149--160, 2007. 9 | 10 | """ 11 | 12 | from __future__ import division 13 | from math import floor 14 | 15 | 16 | def rta_interfering_workload(length, ti): 17 | "Equ. (4) and (8)" 18 | interval = length + ti.deadline - ti.cost - ti.rta_slack 19 | jobs = int(floor(interval / ti.period)) 20 | return jobs * ti.cost + min(ti.cost, interval % ti.period) 21 | 22 | def edf_interfering_workload(length, ti): 23 | "Equs. 
(5) and (9)" 24 | # implicit floor by integer division 25 | jobs = int(floor(length / ti.period)) 26 | return jobs * ti.cost + \ 27 | min(ti.cost, max(0, length % ti.period - ti.rta_slack)) 28 | 29 | def response_estimate(tk, tasks, no_cpus, response_time): 30 | cumulative_work = 0 31 | delay_limit = response_time - tk.cost + 1 32 | for ti in tasks: 33 | if ti != tk: 34 | cumulative_work += min(rta_interfering_workload(response_time, ti), 35 | edf_interfering_workload(tk.deadline, ti), 36 | delay_limit) 37 | return tk.cost + int(floor(cumulative_work / no_cpus)) 38 | 39 | def rta_fixpoint(tk, tasks, no_cpus, min_delta=None): 40 | """If the fixpoint search converges too slowly, then 41 | use min_delta to enforce a minimum step size.""" 42 | # response time iteration, start with cost 43 | last, resp = tk.cost, response_estimate(tk, tasks, no_cpus, tk.cost) 44 | 45 | while last != resp and resp <= tk.deadline: 46 | if resp > last and resp - last < min_delta: 47 | resp = min(last + min_delta, tk.deadline) 48 | last, resp = resp, response_estimate(tk, tasks, no_cpus, resp) 49 | 50 | return resp 51 | 52 | def is_schedulable(no_cpus, tasks, round_limit=25, min_fixpoint_step=0): 53 | """"Iteratively improve slack bound for each task until either the system 54 | is deemed to be feasible, no more improvements could be found, or 55 | the round limit (if given) is reached. 56 | """ 57 | for t in tasks: 58 | t.rta_slack = 0 59 | updated = True 60 | schedulable = False 61 | round = 0 62 | 63 | while updated and not schedulable \ 64 | and (not round_limit or round < round_limit): 65 | round += 1 66 | schedulable = True 67 | updated = False 68 | for tk in tasks: 69 | # compute new response time bound 70 | response = rta_fixpoint(tk, tasks, no_cpus, 71 | min_delta=min_fixpoint_step) 72 | if response <= tk.deadline: 73 | # this is a valid response time 74 | new_slack = tk.deadline - response 75 | if new_slack != tk.rta_slack: 76 | tk.rta_slack = new_slack 77 | updated = True 78 | else: 79 | # this one is currently not schedulable 80 | schedulable = False 81 | return schedulable 82 | 83 | def bound_response_times(no_cpus, tasks, *args, **kargs): 84 | if is_schedulable(no_cpus, tasks, *args, **kargs): 85 | for t in tasks: 86 | t.response_time = t.deadline - t.rta_slack 87 | return True 88 | else: 89 | return False 90 | -------------------------------------------------------------------------------- /native/src/blocking/msrp-holistic.cpp: -------------------------------------------------------------------------------- 1 | #include "stl-hashmap.h" 2 | 3 | #include "sharedres.h" 4 | #include "blocking.h" 5 | 6 | 7 | // Analysis of the MSRP: PCP/SRP for local resources, task-fair mutex 8 | // spin locks for global resources 9 | // Applies only to partitioned scheduling. 10 | BlockingBounds* msrp_bounds_holistic( 11 | const ResourceSharingInfo& info, 12 | int dedicated_irq) 13 | { 14 | ResourceSet locals = get_local_resources(info); 15 | ResourceSharingInfo linfo = extract_local_resources(info, locals); 16 | ResourceSharingInfo ginfo = extract_global_resources(info, locals); 17 | 18 | // Analyze blocking due to local resources. 19 | BlockingBounds pcp = pcp_blocking(linfo); 20 | 21 | // Analyze blocking due to global resources. 22 | BlockingBounds* results = task_fair_mutex_bounds(ginfo, 1, dedicated_irq); 23 | 24 | // Merge the two analysis results. 
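// (Added illustration with hypothetical numbers: if the PCP analysis yields 30 time units of arrival blocking for task i while the spin-lock analysis charged only 20, the loop below raises the arrival term to 30 and adds the 10-unit difference to the total blocking bound.)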
25 | // We only care about local resources if the maximum 26 | // arrival blocking due to local resources exceeds the 27 | // maximum arrival blocking due to non-preemptive sections 28 | // as determined by the global analysis. 29 | for (unsigned int i = 0; i < results->size(); i++) 30 | { 31 | // max arrival blocking due to local resource 32 | unsigned int b_pcp = pcp.get_blocking_term(i); 33 | // max arrival blocking due to global resource 34 | unsigned int b_spin = results->get_arrival_blocking(i); 35 | 36 | if (b_pcp > b_spin) { 37 | // need to account for larger local blocking 38 | Interference new_arrival(b_pcp); 39 | Interference total = (*results)[i]; 40 | 41 | // Increase total by difference to spin-only blocking. 42 | // This is needed because charge_arrival_blocking(), 43 | // called indirectly via task_fair_mutex_bounds(), 44 | // also increases the total bound. We patch this up 45 | // here to correctly reflect the total blocking. 46 | // NOTE: we are not changing remote blocking, 47 | // which still accurately reflects the 48 | // maximum time spent spinning. 49 | total.total_length += (b_pcp - b_spin); 50 | 51 | // update 52 | results->set_arrival_blocking(i, new_arrival); 53 | (*results)[i] = total; 54 | } 55 | } 56 | 57 | return results; 58 | } 59 | 60 | 61 | BlockingBounds pcp_blocking(const ResourceSharingInfo& info) 62 | { 63 | PriorityCeilings prio_ceilings = get_priority_ceilings(info); 64 | 65 | // split everything by partition 66 | Clusters clusters; 67 | split_by_cluster(info, clusters); 68 | 69 | // blocking results 70 | BlockingBounds results(info); 71 | 72 | foreach(clusters, ct) 73 | { 74 | Cluster& cluster = *ct; 75 | foreach(cluster, it) 76 | { 77 | const TaskInfo* tsk = *it; 78 | unsigned int id = tsk->get_id(); 79 | unsigned int prio = tsk->get_priority(); 80 | 81 | // check each other task 82 | foreach(cluster, jt) 83 | { 84 | const TaskInfo* other = *jt; 85 | if (id != other->get_id() && 86 | prio <= other->get_priority()) 87 | { 88 | // blocking possible 89 | 90 | foreach(other->get_requests(), req) 91 | { 92 | unsigned int res = req->get_resource_id(); 93 | if (prio_ceilings[res] <= prio) { 94 | // this CS could cause ceiling blocking / PI blocking 95 | // make sure blocking term is at least this large 96 | Interference inf(req->get_request_length()); 97 | results.raise_blocking_length(id, inf); 98 | } 99 | } 100 | } 101 | } 102 | } 103 | } 104 | 105 | return results; 106 | } 107 | -------------------------------------------------------------------------------- /native/src/blocking/linprog/lp_prsb.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "linprog/model.h" 7 | #include "linprog/varmapperbase.h" 8 | #include "linprog/solver.h" 9 | 10 | #include "sharedres_types.h" 11 | 12 | #include "iter-helper.h" 13 | #include "stl-helper.h" 14 | #include "stl-io-helper.h" 15 | #include "math-helper.h" 16 | 17 | #include 18 | #include 19 | #include "res_io.h" 20 | #include "linprog/io.h" 21 | 22 | #include "lp_global.h" 23 | 24 | 25 | class GlobalPRSBAnalysis : public GlobalRestrictedSegmentBoostingLP, public GlobalPriorityQueuesLP 26 | { 27 | // Constraint 26 28 | void add_prsb_indirect_constraints(); 29 | 30 | public: 31 | GlobalPRSBAnalysis(const ResourceSharingInfo& info, 32 | unsigned int i, 33 | unsigned int number_of_cpus) 34 | : GlobalSuspensionAwareLP(info, i, number_of_cpus), 35 | GlobalRestrictedSegmentBoostingLP(info, i, number_of_cpus), 36 | 
GlobalPriorityQueuesLP(info, i, number_of_cpus) 37 | { 38 | // Protocol-specific constraints 39 | // Constraint 26 40 | add_prsb_indirect_constraints(); 41 | } 42 | }; 43 | 44 | // Constraint 26: the number of times that Tx can cause Ji to incur indirect 45 | // blocking is limited by the number of times that J_i can be directly blocked 46 | // by other tasks 47 | void GlobalPRSBAnalysis::add_prsb_indirect_constraints() 48 | 49 | { 50 | unsigned int total_num_requests = 0; // RHS of C.26 51 | 52 | // RHS 53 | foreach(all_resources, resource) 54 | { 55 | unsigned int res_u = *resource; 56 | 57 | // count the requests of lower-base-priority jobs to res_u 58 | unsigned int lp_reqs_res_u = 0; 59 | foreach_lower_priority_task(taskset, ti, tl) 60 | { 61 | lp_reqs_res_u += tl->get_num_requests(res_u); 62 | if (lp_reqs_res_u) 63 | break; // one is enough 64 | } 65 | 66 | // (Lemma 13) NDiq = min(1, sum_{lp tasks} Nlq) 67 | unsigned int num_direc_bloc = std::min(1u, lp_reqs_res_u); 68 | 69 | // get the maximum time that Ji spends on waiting 70 | // for the requested resource 71 | unsigned int res_wait_time = resource_wait_time(res_u); 72 | 73 | // check for convergence failure 74 | if (res_wait_time == UNLIMITED) 75 | { 76 | // did not converge, but cannot continue without this bound 77 | // skip this constraint 78 | return; 79 | } 80 | 81 | // calculate the maximum number of requests for the resource by 82 | // higher-priority tasks 83 | foreach_higher_priority_task(taskset, ti, th) 84 | foreach_request_for(th->get_requests(), res_u, hreq) 85 | num_direc_bloc += hreq->get_max_num_requests(res_wait_time); 86 | 87 | total_num_requests += num_direc_bloc * ti.get_num_requests(res_u); 88 | } 89 | 90 | foreach_lower_priority_task(taskset, ti, tx) 91 | { 92 | const unsigned int x = tx->get_id(); 93 | LinearExpression *exp = new LinearExpression(); 94 | 95 | foreach(tx->get_requests(), request) 96 | { 97 | const unsigned int q = request->get_resource_id(); 98 | 99 | foreach_request_instance(*request, ti, v) 100 | exp->add_var(vars.indirect(x, q, v)); 101 | } 102 | 103 | add_inequality(exp, total_num_requests); 104 | } 105 | } 106 | 107 | 108 | BlockingBounds* lp_prsb_bounds( 109 | const ResourceSharingInfo& info, 110 | unsigned int number_of_cpus) 111 | { 112 | BlockingBounds* results = new BlockingBounds(info); 113 | 114 | for (unsigned int i = 0; i < info.get_tasks().size(); i++) 115 | { 116 | GlobalPRSBAnalysis lp(info, i, number_of_cpus); 117 | (*results)[i] = lp.solve(); 118 | } 119 | 120 | return results; 121 | } 122 | -------------------------------------------------------------------------------- /schedcat/sched/canbus/broster.py: -------------------------------------------------------------------------------- 1 | import mpmath 2 | 3 | from schedcat.model.canbus import CANMessage 4 | from schedcat.model.canbus import CANMessageSet 5 | 6 | """ 7 | Implementation of Broster et al.'s probabilistic response time analysis 8 | 9 | "Probabilistic analysis of CAN with faults" 10 | I. Broster, A. Burns, and G. Rodriguez-Navas, 11 | Proceedings of the 23rd IEEE International Real-Time Systems Symposium, 12 | pages 269-278, 2002 13 | 14 | "Timing analysis of real-time communication under electromagnetic 15 | interference" 16 | I. Broster, A. Burns, and G.
Rodriguez-Navas, 17 | Real-Time Systems 30.1-2 (2005): 55-81 18 | 19 | """ 20 | 21 | def get_prob_poisson(events, length, rate): 22 | """ P(k, lambda = t * rate) = """ 23 | avg_events = mpmath.fmul(rate, length) # lambda 24 | prob = mpmath.fmul((-1), avg_events) 25 | for i in range(1, events + 1): 26 | prob = mpmath.fadd(prob, mpmath.log(mpmath.fdiv(avg_events, i))) 27 | prob = mpmath.exp(prob) 28 | return prob 29 | 30 | def get_prob_schedulable(msgs, m, maxfaults = None): 31 | """Returns the probability that a message is schedulable even in the 32 | presence of transmission faults. If faults != None, then the probability 33 | that the message is schedulable assuming #retransmissions=maxfaults is 34 | returned. Else, then the cumulative probability is returned. The following 35 | iterative equation is used to derive the probability: 36 | P(R_i_n) = P(n, R_i_n) - 37 | \sum_{j=0}^{n-1} [P(R_i_j) * P(n - j, R_i_n - R_i_j)], 38 | where R_i_n is the worst case response time of message i assuming it is 39 | delayed by n retransmissions, P(R_i_n) is the upper bound on probability 40 | that message i is affected by exactly n faults, and P(n, R_i_n) is the 41 | probability that there are n faults in an interval of length R_i_n 42 | (we may use Poisson distribution for this). Thus, P(R_i_0) = P(0, R_i_0). 43 | """ 44 | 45 | faults = 0 46 | if maxfaults != None: 47 | if len(m.prob_vec) > maxfaults + 1: 48 | return m.prob_vec[maxfaults] 49 | faults = len(m.prob_vec) 50 | 51 | while maxfaults == None or faults <= maxfaults: 52 | 53 | wctt = msgs.get_wctt_fast(m, faults) 54 | wcrt = wctt + m.jitter 55 | 56 | if wcrt > m.deadline: 57 | break 58 | 59 | prob = get_prob_poisson(faults, wctt, msgs.mfr) 60 | error = mpmath.mpf(0) 61 | 62 | # DO NOT change the index to 'faults + 1' 63 | for i in range(0, faults): 64 | wctt_tmp = msgs.get_wctt_fast(m, i) 65 | error = mpmath.fadd(error, mpmath.fmul(m.prob_vec[i], \ 66 | get_prob_poisson(faults - i, wctt - wctt_tmp, msgs.mfr))) 67 | 68 | prob = mpmath.fsub(prob, error) 69 | assert prob <= 1 70 | m.prob_vec.append(prob) 71 | faults += 1 72 | 73 | retval = mpmath.mpf(0) 74 | 75 | # if maxfaults != None and len(prob_vec) < maxfaults + 1: 76 | # retval = 0 77 | 78 | if maxfaults != None and len(m.prob_vec) >= maxfaults + 1: 79 | retval = m.prob_vec[maxfaults] 80 | 81 | elif maxfaults == None: 82 | for prob in m.prob_vec: 83 | if (mpmath.fadd(retval, prob) <= 1): 84 | retval = mpmath.fadd(retval, prob) 85 | else: 86 | break 87 | 88 | assert (retval <= 1) 89 | 90 | if retval > 1: 91 | retval = mpmath.mpf('1') 92 | return min(retval, 1) 93 | 94 | def is_schedulable(msgs, m, faults): 95 | if get_prob_schedulable(msgs, m, faults) == 0: 96 | return False 97 | else: 98 | return True 99 | -------------------------------------------------------------------------------- /native/include/nested_cs.h: -------------------------------------------------------------------------------- 1 | #ifndef NESTED_CS_H 2 | #define NESTED_CS_H 3 | 4 | #ifndef SWIG 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "stl-hashmap.h" 10 | #endif 11 | 12 | class CriticalSectionsOfTask; 13 | 14 | typedef std::set LockSet; 15 | 16 | struct CriticalSection 17 | { 18 | unsigned int resource_id; 19 | unsigned int length; /* excluding nested requests, if any */ 20 | int outer; /* index of containing critical section, -1 if outermost */ 21 | 22 | enum { 23 | NO_PARENT = -1, 24 | }; 25 | 26 | CriticalSection(unsigned int res_id, unsigned int len, 27 | int outer_cs = NO_PARENT) 28 | : 
resource_id(res_id), length(len), outer(outer_cs) {} 29 | 30 | // return the set of resources already held when this resource is requested 31 | LockSet get_outer_locks(const CriticalSectionsOfTask &task) const; 32 | 33 | bool is_nested() const 34 | { 35 | return outer != NO_PARENT; 36 | } 37 | 38 | bool is_outermost() const 39 | { 40 | return outer == NO_PARENT; 41 | } 42 | 43 | bool has_common_outer( 44 | const CriticalSectionsOfTask &this_task, 45 | const LockSet &already_held_by_other) const; 46 | 47 | bool has_common_outer( 48 | const CriticalSectionsOfTask &this_task, 49 | const CriticalSection &other_cs, 50 | const CriticalSectionsOfTask &other_task) const 51 | { 52 | /* first check that neither is outermost */ 53 | if (is_outermost() || other_cs.is_outermost()) 54 | return false; 55 | else 56 | return other_cs.has_common_outer( 57 | this_task, other_cs.get_outer_locks(other_task)); 58 | } 59 | }; 60 | 61 | 62 | typedef std::vector CriticalSections; 63 | 64 | class CriticalSectionsOfTask 65 | { 66 | CriticalSections cs; 67 | 68 | public: 69 | 70 | const CriticalSections& get_cs() const 71 | { 72 | return cs; 73 | } 74 | 75 | operator const CriticalSections&() const 76 | { 77 | return cs; 78 | } 79 | 80 | void add(unsigned int res_id, unsigned int len, 81 | int outer_cs = CriticalSection::NO_PARENT) 82 | { 83 | assert( outer_cs == CriticalSection::NO_PARENT 84 | || (unsigned long) outer_cs < cs.size() ); 85 | cs.push_back(CriticalSection(res_id, len, outer_cs)); 86 | } 87 | 88 | bool has_nested_requests(unsigned int cs_index) const 89 | { 90 | for (int i = cs_index + 1; i < (int) cs.size(); i++) 91 | if (cs[i].outer == (int) cs_index) 92 | return true; 93 | return false; 94 | } 95 | 96 | unsigned int get_outermost(unsigned int cs_index) const 97 | { 98 | unsigned int cur = cs_index; 99 | 100 | while (cs[cur].is_nested()) 101 | cur = cs[cur].outer; 102 | 103 | return cur; 104 | } 105 | 106 | }; 107 | 108 | 109 | 110 | typedef std::vector CriticalSectionsOfTasks; 111 | 112 | class CriticalSectionsOfTaskset 113 | { 114 | CriticalSectionsOfTasks tsks; 115 | 116 | public: 117 | 118 | const CriticalSectionsOfTasks& get_tasks() const 119 | { 120 | return tsks; 121 | } 122 | 123 | operator const CriticalSectionsOfTasks&() const 124 | { 125 | return tsks; 126 | } 127 | 128 | CriticalSectionsOfTask& new_task() 129 | { 130 | tsks.push_back(CriticalSectionsOfTask()); 131 | return tsks.back(); 132 | } 133 | 134 | /* Compute for each resource 'q' the set of resources that could be 135 | * transitively requested while holding 'q'. 
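* (Added example, hypothetical nesting: if critical sections nest as a -> b -> c, the entry for 'a' contains {b, c} and the entry for 'b' contains {c}.)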
*/ 136 | hashmap > 137 | get_transitive_nesting_relationship() const; 138 | }; 139 | 140 | 141 | void dump(const CriticalSectionsOfTaskset &x); 142 | 143 | BlockingBounds* lp_nested_fifo_spinlock_bounds( 144 | const ResourceSharingInfo& info, 145 | const CriticalSectionsOfTaskset& tsk_cs); 146 | 147 | #endif 148 | -------------------------------------------------------------------------------- /native/include/blocking.h: -------------------------------------------------------------------------------- 1 | #ifndef BLOCKING_H 2 | #define BLOCKING_H 3 | 4 | #include "stl-hashmap.h" 5 | 6 | typedef std::vector ContentionSet; 7 | typedef std::vector Resources; 8 | typedef std::vector ClusterResources; 9 | typedef std::vector AllPerCluster; 10 | typedef std::vector TaskContention; 11 | typedef std::vector ClusterContention; 12 | 13 | struct LimitedRequestBound { 14 | LimitedRequestBound(const RequestBound *rqb, unsigned int l) : 15 | request_bound(rqb), limit(l) {}; 16 | LimitedRequestBound() : request_bound(NULL), limit(0) {}; 17 | 18 | const RequestBound *request_bound; 19 | unsigned int limit; 20 | }; 21 | 22 | typedef std::vector LimitedContentionSet; 23 | 24 | void sort_by_request_length(LimitedContentionSet &lcs); 25 | void sort_by_request_length(Resources& resources); 26 | void sort_by_request_length(ClusterResources& resources); 27 | void sort_by_request_length(ContentionSet& cs); 28 | 29 | 30 | typedef std::vector Cluster; 31 | typedef std::vector Clusters; 32 | 33 | void split_by_cluster(const ResourceSharingInfo& info, Clusters& clusters, unsigned int num_cpus = 0); 34 | void split_by_resource(const ResourceSharingInfo& info, Resources& resources); 35 | void split_by_resource(const Cluster& cluster, Resources& resources); 36 | void split_by_resource(const Clusters& clusters, ClusterResources& resources); 37 | 38 | 39 | Interference bound_blocking(const ContentionSet& cont, 40 | unsigned long interval, 41 | unsigned int max_total_requests, 42 | unsigned int max_requests_per_source, 43 | const TaskInfo* exclude_tsk, 44 | unsigned int min_priority = 0); 45 | 46 | Interference bound_blocking(const ContentionSet& cont, 47 | unsigned long interval, 48 | unsigned int max_total_requests, 49 | unsigned int max_requests_per_source, 50 | bool exclude_whole_cluster, 51 | const TaskInfo* exclude_tsk); 52 | 53 | Interference np_fifo_per_resource( 54 | const TaskInfo& tsk, const ClusterResources& clusters, 55 | unsigned int procs_per_cluster, 56 | unsigned int res_id, unsigned int issued, 57 | int dedicated_irq = NO_CPU); 58 | 59 | void charge_arrival_blocking(const ResourceSharingInfo& info, 60 | BlockingBounds& bounds); 61 | 62 | 63 | struct ClusterLimit 64 | { 65 | unsigned int max_total_requests; 66 | unsigned int max_requests_per_source; 67 | 68 | ClusterLimit(unsigned int total, unsigned int src) : 69 | max_total_requests(total), max_requests_per_source(src) {} 70 | }; 71 | 72 | typedef std::vector ClusterLimits; 73 | 74 | ClusterLimits np_fifo_limits( 75 | const TaskInfo& tsk, const ClusterResources& clusters, 76 | unsigned int procs_per_cluster, 77 | const unsigned int issued, 78 | int dedicated_irq); 79 | 80 | Interference bound_blocking_all_clusters( 81 | const ClusterResources& clusters, 82 | const ClusterLimits& limits, 83 | unsigned int res_id, 84 | unsigned long interval, 85 | const TaskInfo* exclude_tsk); 86 | 87 | typedef std::vector PriorityCeilings; 88 | 89 | void determine_priority_ceilings(const Resources& resources, 90 | PriorityCeilings& ceilings); 91 | 92 | PriorityCeilings 
get_priority_ceilings(const ResourceSharingInfo& info); 93 | 94 | typedef hashset ResourceSet; 95 | 96 | ResourceSet get_local_resources(const ResourceSharingInfo& info); 97 | 98 | ResourceSharingInfo extract_local_resources( 99 | const ResourceSharingInfo& info, 100 | const ResourceSet& locals); 101 | 102 | ResourceSharingInfo extract_global_resources( 103 | const ResourceSharingInfo& info, 104 | const ResourceSet& locals); 105 | 106 | BlockingBounds pcp_blocking(const ResourceSharingInfo& info); 107 | 108 | extern const unsigned int UNLIMITED; 109 | 110 | #endif 111 | -------------------------------------------------------------------------------- /native/src/blocking/linprog/lp_global_no.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "linprog/varmapperbase.h" 7 | #include "linprog/solver.h" 8 | 9 | #include "sharedres_types.h" 10 | 11 | #include "iter-helper.h" 12 | #include "stl-helper.h" 13 | #include "stl-io-helper.h" 14 | #include "math-helper.h" 15 | 16 | #include 17 | #include 18 | #include "res_io.h" 19 | #include "linprog/io.h" 20 | 21 | #include "lp_global.h" 22 | 23 | GlobalNoProgressMechanismLP::GlobalNoProgressMechanismLP( 24 | const ResourceSharingInfo& info, 25 | unsigned int task_index, 26 | unsigned int number_of_cpus) 27 | : GlobalSuspensionAwareLP(info, task_index, number_of_cpus) 28 | { 29 | // Constraint 14 30 | add_no_progress_constraints(); 31 | 32 | // Constraint 15 in the paper is the same as constraint 20 33 | add_no_progress_no_stalling_interference(); 34 | } 35 | 36 | // The maximum resource-holding H_x,q in the absence of 37 | // a progress mechanism according to Lemma 10 in the paper 38 | unsigned long GlobalNoProgressMechanismLP::resource_hold_time( 39 | unsigned int tx_id, 40 | unsigned int res_id) 41 | { 42 | unsigned int res_exe_time = 0; 43 | 44 | const TaskInfo &tx = taskset[tx_id]; 45 | 46 | res_exe_time = tx.get_request_length(res_id); 47 | 48 | if (!res_exe_time) 49 | return 0; 50 | 51 | unsigned long max_hold = res_exe_time; 52 | 53 | if (tx_id < m) 54 | return max_hold; 55 | 56 | unsigned long interval; 57 | do 58 | { 59 | // last bound 60 | interval = max_hold; 61 | 62 | // Bail out if it doesn't converge. 63 | if (max_hold > tx.get_deadline()) 64 | return UNLIMITED; 65 | 66 | double interf = 0; 67 | 68 | foreach_higher_priority_task_except(taskset, tx, ti, ta) 69 | interf += ta->workload_bound(interval); 70 | 71 | max_hold = res_exe_time + divide_with_ceil(interf, m); 72 | 73 | // Loop until it converges. 
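// (Added note: each iteration recomputes max_hold = res_exe_time + ceil(interf / m), with interf summing workload_bound(interval) over the tasks selected by the foreach macro above; the search stops once two successive bounds agree, and bails out with UNLIMITED whenever the bound exceeds Tx's deadline.)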
74 | } while ( interval != max_hold ); 75 | 76 | return max_hold; 77 | } 78 | 79 | // Constraint 14: no indirect blocking, preemption blocking, 80 | // or co-boosting interference when no progress mechanism is used 81 | void GlobalNoProgressMechanismLP::add_no_progress_constraints() 82 | { 83 | LinearExpression *exp = new LinearExpression(); 84 | 85 | foreach_lower_priority_task(taskset, ti, tx) 86 | { 87 | const unsigned int tx_id = tx->get_id(); 88 | 89 | exp->add_var(vars.co_boosting_interference(tx_id)); 90 | 91 | foreach(tx->get_requests(), request) 92 | { 93 | const unsigned int q = request->get_resource_id(); 94 | 95 | foreach_request_instance(*request, ti, v) 96 | { 97 | exp->add_var(vars.indirect(tx_id, q, v)); 98 | exp->add_var(vars.preemption(tx_id, q, v)); 99 | } 100 | } 101 | } 102 | add_inequality(exp, 0); 103 | } 104 | 105 | // Constraint 15: rule out stalling interference of each lower-base-priority 106 | // task Tx if all of Tx's lower-base-priority tasks do not access any resource 107 | // requested by Ti 108 | void GlobalNoProgressMechanismLP::add_no_progress_no_stalling_interference() 109 | { 110 | // find highest-priority task Th with priority less than Ti that 111 | // accesses a resources used by Ti, but no lower-priority task does. 112 | unsigned int h; 113 | 114 | for (h = taskset.size() - 1; h > i; h--) 115 | { 116 | // check task of priority h - 1 117 | const TaskInfo& th = taskset[h]; 118 | 119 | bool overlap = false; 120 | foreach(th.get_requests(), req) 121 | { 122 | unsigned int q = req->get_resource_id(); 123 | if (ti.get_num_requests(q) > 0) 124 | { 125 | overlap = true; 126 | break; 127 | } 128 | } 129 | if (overlap) 130 | break; 131 | } 132 | 133 | LinearExpression *exp = new LinearExpression(); 134 | 135 | foreach_lower_priority_task(taskset, ti, tx) 136 | { 137 | const unsigned int tx_id = tx->get_id(); 138 | 139 | // each task with id > index does not access any resource 140 | // that will be requested by T_i 141 | if (tx_id >= h) 142 | exp->add_var(vars.stalling_interference(tx->get_id())); 143 | } 144 | 145 | add_inequality(exp, 0); 146 | } 147 | -------------------------------------------------------------------------------- /example/nolock_example_2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | -------------------------------------------------------------------------------- /tests/quanta.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import unittest 4 | 5 | from fractions import Fraction 6 | 7 | import schedcat.overheads.quanta as q 8 | import schedcat.model.tasks as tasks 9 | 10 | from schedcat.util.math import is_integral 11 | 12 | class Overheads(unittest.TestCase): 13 | def setUp(self): 14 | self.ts = tasks.TaskSystem([ 15 | tasks.SporadicTask(100, 1000), 16 | tasks.SporadicTask(39, 1050), 17 | tasks.SporadicTask(51, 599), 18 | ]) 19 | self.qlen = 50 20 | 21 | def test_wcet(self): 22 | q.quantize_wcet(self.qlen, self.ts) 23 | 
self.assertEqual(self.ts[0].cost, 100) 24 | self.assertEqual(self.ts[1].cost, 50) 25 | self.assertEqual(self.ts[2].cost, 100) 26 | 27 | self.assertTrue(is_integral(self.ts[0].cost)) 28 | self.assertTrue(is_integral(self.ts[1].cost)) 29 | self.assertTrue(is_integral(self.ts[2].cost)) 30 | 31 | self.assertEqual(self.ts[0].period, 1000) 32 | self.assertEqual(self.ts[1].period, 1050) 33 | self.assertEqual(self.ts[2].period, 599) 34 | 35 | self.assertEqual(self.ts[0].deadline, 1000) 36 | self.assertEqual(self.ts[1].deadline, 1050) 37 | self.assertEqual(self.ts[2].deadline, 599) 38 | 39 | def test_ewcet(self): 40 | q.quantize_wcet(self.qlen, self.ts, effective_qlen=25) 41 | self.assertEqual(self.ts[0].cost, 200) 42 | self.assertEqual(self.ts[1].cost, 100) 43 | self.assertEqual(self.ts[2].cost, 150) 44 | 45 | self.assertTrue(is_integral(self.ts[0].cost)) 46 | self.assertTrue(is_integral(self.ts[1].cost)) 47 | self.assertTrue(is_integral(self.ts[2].cost)) 48 | 49 | self.assertEqual(self.ts[0].period, 1000) 50 | self.assertEqual(self.ts[1].period, 1050) 51 | self.assertEqual(self.ts[2].period, 599) 52 | 53 | self.assertEqual(self.ts[0].deadline, 1000) 54 | self.assertEqual(self.ts[1].deadline, 1050) 55 | self.assertEqual(self.ts[2].deadline, 599) 56 | 57 | def test_period(self): 58 | q.quantize_period(self.qlen, self.ts) 59 | self.assertEqual(self.ts[0].cost, 100) 60 | self.assertEqual(self.ts[1].cost, 39) 61 | self.assertEqual(self.ts[2].cost, 51) 62 | 63 | self.assertTrue(is_integral(self.ts[0].period)) 64 | self.assertTrue(is_integral(self.ts[1].period)) 65 | self.assertTrue(is_integral(self.ts[2].period)) 66 | 67 | self.assertEqual(self.ts[0].period, 1000) 68 | self.assertEqual(self.ts[1].period, 1050) 69 | self.assertEqual(self.ts[2].period, 550) 70 | 71 | self.assertEqual(self.ts[0].deadline, 1000) 72 | self.assertEqual(self.ts[1].deadline, 1050) 73 | self.assertEqual(self.ts[2].deadline, 599) 74 | 75 | def test_release_delay(self): 76 | q.account_for_delayed_release(101, self.ts) 77 | q.quantize_period(self.qlen, self.ts) 78 | self.assertEqual(self.ts[0].cost, 100) 79 | self.assertEqual(self.ts[1].cost, 39) 80 | self.assertEqual(self.ts[2].cost, 51) 81 | 82 | self.assertTrue(is_integral(self.ts[0].period)) 83 | self.assertTrue(is_integral(self.ts[1].period)) 84 | self.assertTrue(is_integral(self.ts[2].period)) 85 | 86 | self.assertEqual(self.ts[0].period, 850) 87 | self.assertEqual(self.ts[1].period, 900) 88 | self.assertEqual(self.ts[2].period, 450) 89 | 90 | def test_staggering(self): 91 | q.account_for_staggering(self.qlen, 4, self.ts) 92 | 93 | self.assertAlmostEqual(self.ts[0].period, 1000 - 37.5) 94 | self.assertAlmostEqual(self.ts[1].period, 1050 - 37.5) 95 | self.assertAlmostEqual(self.ts[2].period, 599 - 37.5) 96 | 97 | self.assertEqual(self.ts[0].cost, 100) 98 | self.assertEqual(self.ts[1].cost, 39) 99 | self.assertEqual(self.ts[2].cost, 51) 100 | 101 | q.quantize_period(self.qlen, self.ts) 102 | 103 | self.assertEqual(self.ts[0].period, 950) 104 | self.assertEqual(self.ts[1].period, 1000) 105 | self.assertEqual(self.ts[2].period, 550) 106 | -------------------------------------------------------------------------------- /native/include/lp_pedf_lockfree_common.h: -------------------------------------------------------------------------------- 1 | #ifndef LP_PEDF_LOCKFREE_COMMON_H 2 | #define LP_PEDF_LOCKFREE_COMMON_H 3 | 4 | // ------------------------------------------------------------------ 5 | // --------------------[ V A R M A P P E R ]---------------------- 6 | // 
------------------------------------------------------------------ 7 | 8 | #include "linprog/varmapperbase.h" 9 | #include "lp_pedf_analysis.h" 10 | 11 | class LockFreeVarMapper : public VarMapperBase 12 | { 13 | enum variable_type_t 14 | { 15 | LOCAL_CONFLICT = 0, 16 | REMOTE_CONFLICT = 1, 17 | INDICATOR_ARRIVAL_BLOCKING = 2, 18 | }; 19 | 20 | union lookup_key_t 21 | { 22 | uint64_t raw; 23 | struct 24 | { 25 | uint64_t tid_i:20; // task i ID 26 | uint64_t tid_j:20; // task j ID 27 | uint64_t rid:20; // resource ID 28 | 29 | uint64_t variable_type:2; 30 | } var; 31 | 32 | enum 33 | { 34 | KEY_MAX = (unsigned) (1 << 20), 35 | VTYPE_MAX = (unsigned) (1 << 2), 36 | }; 37 | 38 | /* construct an Y^{L,R} variable */ 39 | void make_var_for( 40 | unsigned int task_i_id, unsigned int task_j_id, 41 | unsigned int res_id, variable_type_t btype) 42 | { 43 | assert(task_i_id < KEY_MAX); 44 | assert(task_j_id < KEY_MAX); 45 | assert(res_id < KEY_MAX); 46 | 47 | raw = 0; 48 | var.tid_i = task_i_id; 49 | var.tid_j = task_j_id; 50 | var.rid = res_id; 51 | var.variable_type = btype; 52 | } 53 | }; 54 | 55 | public: 56 | unsigned int local_conflicts(unsigned int task_i_id, unsigned int task_j_id, 57 | unsigned int res_id) 58 | { 59 | lookup_key_t k; 60 | 61 | k.make_var_for(task_i_id, task_j_id, res_id, LOCAL_CONFLICT); 62 | return var_for_key(k.raw); 63 | } 64 | 65 | unsigned int remote_conflicts(unsigned int task_id, unsigned int res_id) 66 | { 67 | lookup_key_t k; 68 | 69 | k.make_var_for(task_id, 0, res_id, REMOTE_CONFLICT); 70 | return var_for_key(k.raw); 71 | } 72 | 73 | unsigned int indicator_arrival(unsigned int task_id, unsigned int res_id) 74 | { 75 | lookup_key_t k; 76 | 77 | k.make_var_for(task_id, 0, res_id, INDICATOR_ARRIVAL_BLOCKING); 78 | return var_for_key(k.raw); 79 | } 80 | 81 | std::string key2str(uint64_t key, unsigned int var) const; 82 | }; 83 | 84 | // ------------------------------------------------------------------ 85 | // -----------------------------[ L P ]------------------------------ 86 | // ------------------------------------------------------------------ 87 | 88 | 89 | class PEDFBlockingAnalysisLP_LockFree : protected LinearProgram 90 | { 91 | 92 | protected: 93 | 94 | LockFreeVarMapper vars; 95 | const TaskInfos& taskset; 96 | 97 | const ResourceSharingInfo& info; 98 | 99 | // which type of LP are we constructing? 100 | const analysis_type_t lp_type; 101 | 102 | // length of the analysis interval 103 | const unsigned long interval_length; 104 | 105 | // cluster under analysis 106 | unsigned int cluster; 107 | 108 | const std::set all_resources; 109 | 110 | // Hack that may be needed in derived classes if some 111 | // constraints need to reference member fields that are not yet initialized 112 | // during object construction. 
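// (Added note: derived analyses are expected to override this hook and have their constraints emitted once construction has finished, so that member fields initialized by the derived constructor are safe to reference; the empty default below makes the hook optional.)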
113 | virtual void add_constraints_post_ctor() {}; 114 | 115 | bool integer_relaxation; 116 | 117 | void add_no_arrival_blocking(); 118 | 119 | private: 120 | 121 | void set_objective(); 122 | 123 | // Generic constraints: 124 | void add_blocking_upper_and_lower_bound(unsigned long blocking_LB, 125 | unsigned long blocking_UB); 126 | void add_no_retries_for_resources_not_accessed(); 127 | void add_one_retry_for_at_most_one_remote_commit(); 128 | 129 | 130 | public: 131 | typedef const unsigned int var_t; 132 | 133 | PEDFBlockingAnalysisLP_LockFree( 134 | const ResourceSharingInfo& info, 135 | analysis_type_t analysis_type, 136 | unsigned long interval_length, 137 | unsigned int cluster, 138 | unsigned long blocking_LB, 139 | unsigned long blocking_UB = 0, 140 | bool relax = true); 141 | 142 | unsigned long solve(bool verbose = false); 143 | }; 144 | 145 | #endif 146 | --------------------------------------------------------------------------------
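Usage sketch (added; not part of the repository tree above): the fragment below illustrates how the dumped modules fit together, combining the task model used in tests/quanta.py with the global-EDF response-time analysis from schedcat/sched/edf/rta.py. Only the names visible in this dump (TaskSystem, SporadicTask, bound_response_times, response_time) are taken from the sources; everything else, including the parameter values, is assumed for illustration.

import schedcat.model.tasks as tasks
import schedcat.sched.edf.rta as rta

# Three implicit-deadline tasks given as (cost, period); as in tests/quanta.py,
# the deadline is expected to default to the period.
ts = tasks.TaskSystem([
    tasks.SporadicTask(10, 100),
    tasks.SporadicTask(25, 200),
    tasks.SporadicTask(40, 400),
])

# Bertogna & Cirinei response-time analysis on two processors; on success,
# bound_response_times() stores a response_time attribute on each task.
if rta.bound_response_times(2, ts):
    for t in ts:
        print(t.cost, t.period, '->', t.response_time)
else:
    print('not schedulable according to this test')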