├── lab_scheduling ├── end │ ├── waldspurger.pdf │ ├── usys.S │ ├── foo02.c │ ├── syscall.h │ ├── foo01.c │ ├── user.h │ ├── nice.c │ ├── init.c │ ├── ps.c │ ├── SMLsanity.c │ ├── proc.h │ ├── sanity.c │ ├── trap.c │ ├── sysproc.c │ ├── syscall.c │ ├── defs.h │ ├── Makefile │ └── proc.c └── README.md ├── generate.sh └── README.md /lab_scheduling/end/waldspurger.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/marf/xv6-scheduling/HEAD/lab_scheduling/end/waldspurger.pdf -------------------------------------------------------------------------------- /lab_scheduling/end/usys.S: -------------------------------------------------------------------------------- 1 | #include "syscall.h" 2 | #include "traps.h" 3 | 4 | #define SYSCALL(name) \ 5 | .globl name; \ 6 | name: \ 7 | movl $SYS_ ## name, %eax; \ 8 | int $T_SYSCALL; \ 9 | ret 10 | 11 | SYSCALL(fork) 12 | SYSCALL(exit) 13 | SYSCALL(wait) 14 | SYSCALL(pipe) 15 | SYSCALL(read) 16 | SYSCALL(write) 17 | SYSCALL(close) 18 | SYSCALL(kill) 19 | SYSCALL(exec) 20 | SYSCALL(open) 21 | SYSCALL(mknod) 22 | SYSCALL(unlink) 23 | SYSCALL(fstat) 24 | SYSCALL(link) 25 | SYSCALL(mkdir) 26 | SYSCALL(chdir) 27 | SYSCALL(dup) 28 | SYSCALL(getpid) 29 | SYSCALL(sbrk) 30 | SYSCALL(sleep) 31 | SYSCALL(uptime) 32 | SYSCALL(getptable) 33 | SYSCALL(getppid) 34 | SYSCALL(chpr) 35 | SYSCALL(wait2) 36 | SYSCALL(yield) 37 | SYSCALL(chtickets) 38 | -------------------------------------------------------------------------------- /lab_scheduling/end/foo02.c: -------------------------------------------------------------------------------- 1 | #include "types.h" 2 | #include "stat.h" 3 | #include "user.h" 4 | #include "fcntl.h" 5 | 6 | #define TRUE 1 7 | #define FALSE 0 8 | #define TH 100 9 | 10 | int to_be_or_not(int to_be) { 11 | int count = 0; 12 | 13 | while(count++ < TH) { 14 | if (to_be) printf(1, "to be!\n"); 15 | if (!to_be) printf(1, "not to be!\n"); 16 | //sleep(1); 17 | } 
18 | return 0; 19 | } 20 | 21 | int main(int argc, char **argv) { 22 | // first child 23 | if (fork() == 0) { 24 | to_be_or_not(TRUE); 25 | exit(); 26 | } 27 | 28 | // second child 29 | if(fork() == 0) { 30 | to_be_or_not(FALSE); 31 | exit(); 32 | } 33 | 34 | // parent 35 | wait(); 36 | wait(); 37 | exit(); 38 | } 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /lab_scheduling/end/syscall.h: -------------------------------------------------------------------------------- 1 | // System call numbers 2 | #define SYS_fork 1 3 | #define SYS_exit 2 4 | #define SYS_wait 3 5 | #define SYS_pipe 4 6 | #define SYS_read 5 7 | #define SYS_kill 6 8 | #define SYS_exec 7 9 | #define SYS_fstat 8 10 | #define SYS_chdir 9 11 | #define SYS_dup 10 12 | #define SYS_getpid 11 13 | #define SYS_sbrk 12 14 | #define SYS_sleep 13 15 | #define SYS_uptime 14 16 | #define SYS_open 15 17 | #define SYS_write 16 18 | #define SYS_mknod 17 19 | #define SYS_unlink 18 20 | #define SYS_link 19 21 | #define SYS_mkdir 20 22 | #define SYS_close 21 23 | #define SYS_getptable 22 24 | #define SYS_getppid 23 25 | #define SYS_chpr 24 26 | #define SYS_wait2 25 27 | #define SYS_yield 26 28 | #define SYS_chtickets 27 29 | -------------------------------------------------------------------------------- /lab_scheduling/end/foo01.c: -------------------------------------------------------------------------------- 1 | #include "types.h" 2 | #include "stat.h" 3 | #include "user.h" 4 | #include "fcntl.h" 5 | 6 | int main(int argc, char *argv[]) { 7 | int pid; 8 | int k, n; 9 | int x, z; 10 | 11 | if (argc != 2) { 12 | printf(2, "usage: %s n\n", argv[0]); 13 | } 14 | 15 | n = atoi(argv[1]); 16 | 17 | for ( k = 0; k < n; k++ ) { 18 | pid = fork (); 19 | if ( pid < 0 ) { 20 | printf(1, "%d failed in fork!\n", getpid()); 21 | exit(); 22 | } else if (pid == 0) { 23 | // child 24 | printf(1, "Child %d created\n",getpid()); 25 | for ( z = 0; z < 10000.0; z += 0.01 ) 26 | x = 
x + 3.14 * 89.64; // useless calculations to consume CPU time 27 | exit(); 28 | } 29 | } 30 | 31 | for (k = 0; k < n; k++) { 32 | wait(); 33 | } 34 | 35 | exit(); 36 | } 37 | -------------------------------------------------------------------------------- /lab_scheduling/end/user.h: -------------------------------------------------------------------------------- 1 | struct stat; 2 | struct rtcdate; 3 | 4 | // system calls 5 | int fork(void); 6 | int exit(void) __attribute__((noreturn)); 7 | int wait(void); 8 | int pipe(int*); 9 | int write(int, void*, int); 10 | int read(int, void*, int); 11 | int close(int); 12 | int kill(int); 13 | int exec(char*, char**); 14 | int open(char*, int); 15 | int mknod(char*, short, short); 16 | int unlink(char*); 17 | int fstat(int fd, struct stat*); 18 | int link(char*, char*); 19 | int mkdir(char*); 20 | int chdir(char*); 21 | int dup(int); 22 | int getpid(void); 23 | char* sbrk(int); 24 | int sleep(int); 25 | int uptime(void); 26 | int getptable(int, void*); 27 | int getppid(void); 28 | int chpr(int, int); 29 | int wait2(int*, int*, int*); 30 | int yield(void); 31 | int chtickets(int, int); 32 | 33 | // ulib.c 34 | int stat(char*, struct stat*); 35 | char* strcpy(char*, char*); 36 | void *memmove(void*, void*, int); 37 | char* strchr(const char*, char c); 38 | int strcmp(const char*, const char*); 39 | void printf(int, char*, ...); 40 | char* gets(char*, int max); 41 | uint strlen(char*); 42 | void* memset(void*, int, uint); 43 | void* malloc(uint); 44 | void free(void*); 45 | int atoi(const char*); 46 | -------------------------------------------------------------------------------- /lab_scheduling/end/nice.c: -------------------------------------------------------------------------------- 1 | #include "types.h" 2 | #include "stat.h" 3 | #include "user.h" 4 | #include "fcntl.h" 5 | 6 | int main(int argc, char *argv[]) 7 | { 8 | int value, pid; 9 | char *type; 10 | 11 | if (argc < 3) { 12 | printf(2, "Usage: nice [type: -p: 
priority, -t: tickets] [pid] [priority or tickets]\n" ); 13 | exit(); 14 | } 15 | type = argv[1]; 16 | pid = atoi(argv[2]); 17 | value = atoi(argv[3]); 18 | 19 | if(strcmp(type, "-p") == 0) 20 | { 21 | #ifdef SML 22 | if (value < 1 || value > 3) { 23 | printf(2, "Invalid priority (1-3)!\n" ); 24 | exit(); 25 | } 26 | #else 27 | if (value < 1 || value > 20) { 28 | printf(2, "Invalid priority (1-20)!\n" ); 29 | exit(); 30 | } 31 | #endif 32 | 33 | chpr(pid, value); 34 | 35 | } 36 | else if(strcmp(type, "-t") == 0) 37 | { 38 | if (value < 0 || value > 100) { 39 | printf(2, "Invalid tickets (0-100)!\n" ); 40 | exit(); 41 | } 42 | chtickets(pid, value); 43 | } 44 | else 45 | { 46 | printf(2, "Usage: nice [type: -p: priority, -t: tickets] [pid] [priority or tickets]\n" ); 47 | exit(); 48 | } 49 | 50 | 51 | 52 | exit(); 53 | } 54 | -------------------------------------------------------------------------------- /lab_scheduling/end/init.c: -------------------------------------------------------------------------------- 1 | // init: The initial user-level program 2 | 3 | #include "types.h" 4 | #include "stat.h" 5 | #include "user.h" 6 | #include "fcntl.h" 7 | 8 | char *argv[] = { "sh", 0 }; 9 | 10 | int 11 | main(void) 12 | { 13 | int pid, wpid; 14 | 15 | chpr(getpid(), 1); 16 | 17 | if(open("console", O_RDWR) < 0){ 18 | mknod("console", 1, 1); 19 | open("console", O_RDWR); 20 | } 21 | dup(0); // stdout 22 | dup(0); // stderr 23 | 24 | #ifdef DEFAULT 25 | printf(1, "Scheduler policy: DEFAULT\n"); 26 | #else 27 | #ifdef PRIORITY 28 | printf(1, "Scheduler policy: PRIORITY\n"); 29 | #else 30 | #ifdef FCFS 31 | printf(1, "Scheduler policy: FCFS\n"); 32 | #else 33 | #ifdef SML 34 | printf(1, "Scheduler policy: SML\n"); 35 | #else 36 | #ifdef LOTTERY 37 | printf(1, "Scheduler policy: LOTTERY\n"); 38 | #endif 39 | #endif 40 | #endif 41 | #endif 42 | #endif 43 | 44 | for(;;){ 45 | printf(1, "init: starting sh\n"); 46 | pid = fork(); 47 | if(pid < 0){ 48 | printf(1, "init: fork 
failed\n"); 49 | exit(); 50 | } 51 | if(pid == 0){ 52 | exec("sh", argv); 53 | printf(1, "init: exec sh failed\n"); 54 | exit(); 55 | } 56 | while((wpid=wait()) >= 0 && wpid != pid) 57 | printf(1, "zombie!\n"); 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /generate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # set initial values 4 | SUBDIR="end" 5 | NO_INIT=0 6 | CLEAN=0 7 | FLAGS="" 8 | 9 | # read options 10 | TEMP=`getopt -o s:ncl: --long subdir:,flags:,no-init,clean,lab: -n $0 -- "$@"` 11 | eval set -- "$TEMP" 12 | 13 | # extract options and their arguments 14 | while true ; do 15 | case "$1" in 16 | -s|--subdir) 17 | case "$2" in 18 | "") shift 2 ;; 19 | *) SUBDIR=$2 ; shift 2 ;; 20 | esac ;; 21 | -f|--flags) 22 | case "$2" in 23 | "") shift 2 ;; 24 | *) FLAGS=$2 ; shift 2 ;; 25 | esac ;; 26 | -n|--no-init) NO_INIT=1 ; shift ;; 27 | -c|--clean) CLEAN=1 ; shift ;; 28 | -l|--lab) 29 | case "$2" in 30 | "") shift 2 ;; 31 | *) LAB=$2 ; shift 2;; 32 | esac ;; 33 | 34 | --) shift ; break ;; 35 | *) echo "Internal error!" 
; exit 1 ;; 36 | esac 37 | done 38 | 39 | if [ "$CLEAN" -eq 1 ]; then 40 | rm -rf xv6-public-* 41 | exit 0 42 | fi 43 | 44 | if [ -z "$LAB" ]; then 45 | echo "usage: $0 --lab lab [--subdir subdir] [--no-init] [--clean]" 46 | exit 1 47 | fi 48 | 49 | DST=xv6-public-$(basename "$LAB") 50 | mkdir -p "$DST" 51 | 52 | if [ $NO_INIT -eq 0 ]; then 53 | rm -rf "$DST" 54 | cp -r xv6-public "$DST" 55 | fi 56 | 57 | cp -r "$LAB"/"$SUBDIR"/* "$DST" 58 | cd "$DST" 59 | make SCHEDPOLICY=$FLAGS 60 | make qemu-nox 61 | 62 | exit 0 63 | -------------------------------------------------------------------------------- /lab_scheduling/end/ps.c: -------------------------------------------------------------------------------- 1 | #include "types.h" 2 | #include "stat.h" 3 | #include "user.h" 4 | #include "param.h" 5 | 6 | /* 7 | * Subset of struct proc (from proc.h) 8 | * Passing the whole struct proc from kernel 9 | * to user space would be VERY UNSAFE! 10 | */ 11 | enum procstate { UNUSED, EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE }; 12 | 13 | struct proc { 14 | enum procstate state; // Process state 15 | int pid; // Process ID 16 | int ppid ; // Parent process ID 17 | int priority; // Process priority 18 | int tickets; // Process totalTickets 19 | int ctime; 20 | char name[16]; // Process name 21 | }; 22 | 23 | int 24 | main(int argc, char *argv[]){ 25 | struct proc ptable[NPROC]; 26 | struct proc *p; 27 | int err; 28 | 29 | err = getptable(NPROC * sizeof(struct proc), &ptable); 30 | if (err) { 31 | printf(1,"Error getting ptable"); 32 | exit(); 33 | } 34 | 35 | printf(1, "PID\t\tPPID\t\tSTATE\t\tPRIORITY\tTICKETS\t\tCPUTIME\t\tCMD\n"); 36 | for(p = ptable; p != &ptable[NPROC-1]; p++) { 37 | if (p->state == UNUSED) continue; 38 | 39 | // init does not have ppid! 
40 | if (p->pid == 1) 41 | printf(1, "%d\t\tN/A",p->pid); 42 | else 43 | printf(1, "%d\t\t%d", p->pid, p->ppid); 44 | 45 | switch(p->state){ 46 | case UNUSED: 47 | printf(1,"\t\t%s", "UNUSED "); 48 | break; 49 | case EMBRYO: 50 | printf(1,"\t\t%s", "EMBRYO "); 51 | break; 52 | case SLEEPING: 53 | printf(1,"\t\t%s", "SLEEPING"); 54 | break; 55 | case RUNNABLE: 56 | printf(1,"\t\t%s", "RUNNABLE"); 57 | break; 58 | case RUNNING: 59 | printf(1,"\t\t%s", "RUNNING "); 60 | break; 61 | case ZOMBIE: 62 | printf(1,"\t\t%s", "ZOMBIE "); 63 | break; 64 | } 65 | 66 | printf(1, "\t%d", p->priority); 67 | printf(1, "\t\t%d", p->tickets); 68 | printf(1, "\t\t%d", p->ctime); 69 | printf(1,"\t\t%s\n", p->name); 70 | } 71 | exit(); 72 | } 73 | -------------------------------------------------------------------------------- /lab_scheduling/end/SMLsanity.c: -------------------------------------------------------------------------------- 1 | #include "types.h" 2 | #include "user.h" 3 | 4 | 5 | int 6 | main(int argc, char *argv[]) 7 | { 8 | if (argc != 2){ 9 | printf(1, "Usage: SMLsanity \n"); 10 | exit(); 11 | } 12 | int i; 13 | int n; 14 | int j = 0; 15 | int k; 16 | int retime; 17 | int rutime; 18 | int stime; 19 | int sums[3][3]; 20 | for (i = 0; i < 3; i++) 21 | for (j = 0; j < 3; j++) 22 | sums[i][j] = 0; 23 | n = atoi(argv[1]); 24 | int pid; 25 | for (i = 0; i < n; i++) { 26 | j = i % 3; 27 | pid = fork(); 28 | if (pid == 0) {//child 29 | j = (getpid() - 4) % 3; // ensures independence from the first son's pid when gathering the results in the second part of the program 30 | #ifdef SML 31 | switch(j) { 32 | case 0: 33 | chpr(getpid(), 1); 34 | break; 35 | case 1: 36 | chpr(getpid(), 2); 37 | break; 38 | case 2: 39 | chpr(getpid(), 3); 40 | break; 41 | } 42 | #endif 43 | for (k = 0; k < 100; k++){ 44 | for (double z = 0; z < 10000.0; z+= 0.1){ 45 | double x = x + 3.14 * 89.64; // useless calculations to consume CPU time 46 | } 47 | } 48 | exit(); // children exit here 49 | } 50 | 
continue; // father continues to spawn the next child 51 | } 52 | for (i = 0; i < n; i++) { 53 | pid = wait2(&retime, &rutime, &stime); 54 | int res = (pid - 4) % 3; // correlates to j in the dispatching loop 55 | switch(res) { 56 | case 0: // CPU bound processes 57 | printf(1, "Priority 1, pid: %d, ready: %d, running: %d, sleeping: %d, turnaround: %d\n", pid, retime, rutime, stime, retime + rutime + stime); 58 | sums[0][0] += retime; 59 | sums[0][1] += rutime; 60 | sums[0][2] += stime; 61 | break; 62 | case 1: // CPU bound processes, short tasks 63 | printf(1, "Priority 2, pid: %d, ready: %d, running: %d, sleeping: %d, turnaround: %d\n", pid, retime, rutime, stime, retime + rutime + stime); 64 | sums[1][0] += retime; 65 | sums[1][1] += rutime; 66 | sums[1][2] += stime; 67 | break; 68 | case 2: // simulating I/O bound processes 69 | printf(1, "Priority 3, pid: %d, ready: %d, running: %d, sleeping: %d, turnaround: %d\n", pid, retime, rutime, stime, retime + rutime + stime); 70 | sums[2][0] += retime; 71 | sums[2][1] += rutime; 72 | sums[2][2] += stime; 73 | break; 74 | } 75 | } 76 | for (i = 0; i < 3; i++) 77 | for (j = 0; j < 3; j++) 78 | sums[i][j] /= n; 79 | printf(1, "\n\nPriority 1:\nAverage ready time: %d\nAverage running time: %d\nAverage sleeping time: %d\nAverage turnaround time: %d\n\n\n", sums[0][0], sums[0][1], sums[0][2], sums[0][0] + sums[0][1] + sums[0][2]); 80 | printf(1, "Priority 2:\nAverage ready time: %d\nAverage running time: %d\nAverage sleeping time: %d\nAverage turnaround time: %d\n\n\n", sums[1][0], sums[1][1], sums[1][2], sums[1][0] + sums[1][1] + sums[1][2]); 81 | printf(1, "Priority 3:\nAverage ready time: %d\nAverage running time: %d\nAverage sleeping time: %d\nAverage turnaround time: %d\n\n\n", sums[2][0], sums[2][1], sums[2][2], sums[2][0] + sums[2][1] + sums[2][2]); 82 | exit(); 83 | } 84 | -------------------------------------------------------------------------------- /lab_scheduling/end/proc.h: 
-------------------------------------------------------------------------------- 1 | #define DEFAULT_TICKETS 1 2 | 3 | // Per-CPU state 4 | struct cpu { 5 | uchar apicid; // Local APIC ID 6 | struct context *scheduler; // swtch() here to enter scheduler 7 | struct taskstate ts; // Used by x86 to find stack for interrupt 8 | struct segdesc gdt[NSEGS]; // x86 global descriptor table 9 | volatile uint started; // Has the CPU started? 10 | int ncli; // Depth of pushcli nesting. 11 | int intena; // Were interrupts enabled before pushcli? 12 | struct proc *proc; // The process running on this cpu or null 13 | }; 14 | 15 | extern struct cpu cpus[NCPU]; 16 | extern int ncpu; 17 | 18 | //PAGEBREAK: 17 19 | // Saved registers for kernel context switches. 20 | // Don't need to save all the segment registers (%cs, etc), 21 | // because they are constant across kernel contexts. 22 | // Don't need to save %eax, %ecx, %edx, because the 23 | // x86 convention is that the caller has saved them. 24 | // Contexts are stored at the bottom of the stack they 25 | // describe; the stack pointer is the address of the context. 26 | // The layout of the context matches the layout of the stack in swtch.S 27 | // at the "Switch stacks" comment. Switch doesn't save eip explicitly, 28 | // but it is on the stack and allocproc() manipulates it. 
29 | struct context { 30 | uint edi; 31 | uint esi; 32 | uint ebx; 33 | uint ebp; 34 | uint eip; 35 | }; 36 | 37 | enum procstate { UNUSED, EMBRYO, SLEEPING, RUNNABLE, RUNNING, ZOMBIE }; 38 | 39 | // Per-process state 40 | struct proc { 41 | uint sz; // Size of process memory (bytes) 42 | pde_t* pgdir; // Page table 43 | char *kstack; // Bottom of kernel stack for this process 44 | enum procstate state; // Process state 45 | int pid; // Process ID 46 | struct proc *parent; // Parent process 47 | struct trapframe *tf; // Trap frame for current syscall 48 | struct context *context; // swtch() here to run process 49 | void *chan; // If non-zero, sleeping on chan 50 | int killed; // If non-zero, have been killed 51 | struct file *ofile[NOFILE]; // Open files 52 | struct inode *cwd; // Current directory 53 | char name[16]; // Process name (debugging) 54 | int priority; // Process priority 55 | uint ctime; // Process creation time 56 | int stime; //process SLEEPING time 57 | int retime; //process READY(RUNNABLE) time 58 | int rutime; //process RUNNING time 59 | int tickets; // Process tickets used in LOTTERY scheduling algorithm 60 | }; 61 | 62 | // Process memory is laid out contiguously, low addresses first: 63 | // text 64 | // original data and bss 65 | // fixed-size stack 66 | // expandable heap 67 | 68 | static void wakeup1(void *chan); 69 | 70 | void updatestatistics(); 71 | int random(int max); 72 | int totalTickets(); 73 | struct proc* findReadyProcess(int *index1, int *index2, int *index3, uint *priority); 74 | -------------------------------------------------------------------------------- /lab_scheduling/end/sanity.c: -------------------------------------------------------------------------------- 1 | #include "types.h" 2 | #include "user.h" 3 | 4 | 5 | int 6 | main(int argc, char *argv[]) 7 | { 8 | if (argc != 2){ 9 | printf(1, "Usage: sanity [n]\n"); 10 | exit(); 11 | } 12 | int i; 13 | int n; 14 | int j = 0; 15 | int k; 16 | int retime; 17 | int rutime; 18 
| int stime; 19 | int sums[3][3]; 20 | for (i = 0; i < 3; i++) 21 | for (j = 0; j < 3; j++) 22 | sums[i][j] = 0; 23 | n = atoi(argv[1]); 24 | int pid; 25 | for (i = 0; i < n; i++) { 26 | j = i % 3; 27 | pid = fork(); 28 | if (pid == 0) {//child 29 | j = (getpid() - 4) % 3; // ensures independence from the first son's pid when gathering the results in the second part of the program 30 | switch(j) { 31 | case 0: //CPU‐bound process (CPU): 32 | for (double z = 0; z < 10000.0; z+= 0.1){ 33 | double x = x + 3.14 * 89.64; // useless calculations to consume CPU time 34 | } 35 | break; 36 | case 1: //short tasks based CPU‐bound process (S‐CPU): 37 | for (k = 0; k < 100; k++){ 38 | for (j = 0; j < 1000000; j++){} 39 | yield(); 40 | } 41 | break; 42 | case 2:// simulate I/O bound process (IO) 43 | for(k = 0; k < 100; k++){ 44 | sleep(1); 45 | } 46 | break; 47 | } 48 | exit(); // children exit here 49 | } 50 | continue; // father continues to spawn the next child 51 | } 52 | for (i = 0; i < n; i++) { 53 | pid = wait2(&retime, &rutime, &stime); 54 | int res = (pid - 4) % 3; // correlates to j in the dispatching loop 55 | switch(res) { 56 | case 0: // CPU bound processes 57 | printf(1, "CPU-bound, pid: %d, ready: %d, running: %d, sleeping: %d, turnaround: %d\n", pid, retime, rutime, stime, retime + rutime + stime); 58 | sums[0][0] += retime; 59 | sums[0][1] += rutime; 60 | sums[0][2] += stime; 61 | break; 62 | case 1: // CPU bound processes, short tasks 63 | printf(1, "CPU-S bound, pid: %d, ready: %d, running: %d, sleeping: %d, turnaround: %d\n", pid, retime, rutime, stime, retime + rutime + stime); 64 | sums[1][0] += retime; 65 | sums[1][1] += rutime; 66 | sums[1][2] += stime; 67 | break; 68 | case 2: // simulating I/O bound processes 69 | printf(1, "I/O bound, pid: %d, ready: %d, running: %d, sleeping: %d, turnaround: %d\n", pid, retime, rutime, stime, retime + rutime + stime); 70 | sums[2][0] += retime; 71 | sums[2][1] += rutime; 72 | sums[2][2] += stime; 73 | break; 74 | } 
75 | } 76 | for (i = 0; i < 3; i++) 77 | for (j = 0; j < 3; j++) 78 | sums[i][j] /= n; 79 | printf(1, "\n\nCPU bound:\nAverage ready time: %d\nAverage running time: %d\nAverage sleeping time: %d\nAverage turnaround time: %d\n\n\n", sums[0][0], sums[0][1], sums[0][2], sums[0][0] + sums[0][1] + sums[0][2]); 80 | printf(1, "CPU-S bound:\nAverage ready time: %d\nAverage running time: %d\nAverage sleeping time: %d\nAverage turnaround time: %d\n\n\n", sums[1][0], sums[1][1], sums[1][2], sums[1][0] + sums[1][1] + sums[1][2]); 81 | printf(1, "I/O bound:\nAverage ready time: %d\nAverage running time: %d\nAverage sleeping time: %d\nAverage turnaround time: %d\n\n\n", sums[2][0], sums[2][1], sums[2][2], sums[2][0] + sums[2][1] + sums[2][2]); 82 | exit(); 83 | } 84 | -------------------------------------------------------------------------------- /lab_scheduling/end/trap.c: -------------------------------------------------------------------------------- 1 | #include "types.h" 2 | #include "defs.h" 3 | #include "param.h" 4 | #include "memlayout.h" 5 | #include "mmu.h" 6 | #include "proc.h" 7 | #include "x86.h" 8 | #include "traps.h" 9 | #include "spinlock.h" 10 | 11 | // Interrupt descriptor table (shared by all CPUs). 
12 | struct gatedesc idt[256]; 13 | extern uint vectors[]; // in vectors.S: array of 256 entry pointers 14 | struct spinlock tickslock; 15 | uint ticks; 16 | 17 | void 18 | tvinit(void) 19 | { 20 | int i; 21 | 22 | for(i = 0; i < 256; i++) 23 | SETGATE(idt[i], 0, SEG_KCODE<<3, vectors[i], 0); 24 | SETGATE(idt[T_SYSCALL], 1, SEG_KCODE<<3, vectors[T_SYSCALL], DPL_USER); 25 | 26 | initlock(&tickslock, "time"); 27 | } 28 | 29 | void 30 | idtinit(void) 31 | { 32 | lidt(idt, sizeof(idt)); 33 | } 34 | 35 | //PAGEBREAK: 41 36 | void 37 | trap(struct trapframe *tf) 38 | { 39 | if(tf->trapno == T_SYSCALL){ 40 | if(myproc()->killed) 41 | exit(); 42 | myproc()->tf = tf; 43 | syscall(); 44 | if(myproc()->killed) 45 | exit(); 46 | return; 47 | } 48 | 49 | switch(tf->trapno){ 50 | case T_IRQ0 + IRQ_TIMER: 51 | if(cpuid() == 0){ 52 | acquire(&tickslock); 53 | ticks++; 54 | updatestatistics(); //will update proc statistic every clock tick 55 | wakeup(&ticks); 56 | release(&tickslock); 57 | } 58 | lapiceoi(); 59 | break; 60 | case T_IRQ0 + IRQ_IDE: 61 | ideintr(); 62 | lapiceoi(); 63 | break; 64 | case T_IRQ0 + IRQ_IDE+1: 65 | // Bochs generates spurious IDE1 interrupts. 66 | break; 67 | case T_IRQ0 + IRQ_KBD: 68 | kbdintr(); 69 | lapiceoi(); 70 | break; 71 | case T_IRQ0 + IRQ_COM1: 72 | uartintr(); 73 | lapiceoi(); 74 | break; 75 | case T_IRQ0 + 7: 76 | case T_IRQ0 + IRQ_SPURIOUS: 77 | cprintf("cpu%d: spurious interrupt at %x:%x\n", 78 | cpuid(), tf->cs, tf->eip); 79 | lapiceoi(); 80 | break; 81 | 82 | //PAGEBREAK: 13 83 | default: 84 | if(myproc() == 0 || (tf->cs&3) == 0){ 85 | // In kernel, it must be our mistake. 86 | cprintf("unexpected trap %d from cpu %d eip %x (cr2=0x%x)\n", 87 | tf->trapno, cpuid(), tf->eip, rcr2()); 88 | panic("trap"); 89 | } 90 | // In user space, assume process misbehaved. 
91 | cprintf("pid %d %s: trap %d err %d on cpu %d " 92 | "eip 0x%x addr 0x%x--kill proc\n", 93 | myproc()->pid, myproc()->name, tf->trapno, 94 | tf->err, cpuid(), tf->eip, rcr2()); 95 | myproc()->killed = 1; 96 | } 97 | 98 | // Force process exit if it has been killed and is in user space. 99 | // (If it is still executing in the kernel, let it keep running 100 | // until it gets to the regular system call return.) 101 | if(myproc() && myproc()->killed && (tf->cs&3) == DPL_USER) 102 | exit(); 103 | 104 | // Force process to give up CPU on clock tick. 105 | // If interrupts were on while locks held, would need to check nlock. 106 | if(myproc() && myproc()->state == RUNNING && 107 | tf->trapno == T_IRQ0+IRQ_TIMER) 108 | yield(); 109 | 110 | // Check if the process has been killed since we yielded 111 | if(myproc() && myproc()->killed && (tf->cs&3) == DPL_USER) 112 | exit(); 113 | } 114 | -------------------------------------------------------------------------------- /lab_scheduling/end/sysproc.c: -------------------------------------------------------------------------------- 1 | #include "types.h" 2 | #include "x86.h" 3 | #include "defs.h" 4 | #include "date.h" 5 | #include "param.h" 6 | #include "memlayout.h" 7 | #include "mmu.h" 8 | #include "proc.h" 9 | 10 | int 11 | sys_fork(void) 12 | { 13 | return fork(); 14 | } 15 | 16 | int 17 | sys_exit(void) 18 | { 19 | exit(); 20 | return 0; // not reached 21 | } 22 | 23 | int 24 | sys_wait(void) 25 | { 26 | return wait(); 27 | } 28 | 29 | /* 30 | this is the actual function being called from syscall.c 31 | @returns - pidof the terminated child process ‐ if successful 32 | ­ -1, upon failure 33 | */ 34 | int sys_wait2(void) { 35 | int *retime, *rutime, *stime; 36 | if (argptr(0, (void*)&retime, sizeof(retime)) < 0) 37 | return -1; 38 | if (argptr(1, (void*)&rutime, sizeof(retime)) < 0) 39 | return -1; 40 | if (argptr(2, (void*)&stime, sizeof(stime)) < 0) 41 | return -1; 42 | return wait2(retime, rutime, stime); 43 | } 44 
| 45 | int 46 | sys_kill(void) 47 | { 48 | int pid; 49 | 50 | if(argint(0, &pid) < 0) 51 | return -1; 52 | return kill(pid); 53 | } 54 | 55 | int 56 | sys_getpid(void) 57 | { 58 | return myproc()->pid; 59 | } 60 | 61 | int 62 | sys_sbrk(void) 63 | { 64 | int addr; 65 | int n; 66 | 67 | if(argint(0, &n) < 0) 68 | return -1; 69 | addr = myproc()->sz; 70 | if(growproc(n) < 0) 71 | return -1; 72 | return addr; 73 | } 74 | 75 | int 76 | sys_sleep(void) 77 | { 78 | int n; 79 | uint ticks0; 80 | 81 | if(argint(0, &n) < 0) 82 | return -1; 83 | acquire(&tickslock); 84 | ticks0 = ticks; 85 | while(ticks - ticks0 < n){ 86 | if(myproc()->killed){ 87 | release(&tickslock); 88 | return -1; 89 | } 90 | sleep(&ticks, &tickslock); 91 | } 92 | release(&tickslock); 93 | return 0; 94 | } 95 | 96 | // return how many clock tick interrupts have occurred 97 | // since start. 98 | int 99 | sys_uptime(void) 100 | { 101 | uint xticks; 102 | 103 | acquire(&tickslock); 104 | xticks = ticks; 105 | release(&tickslock); 106 | return xticks; 107 | } 108 | 109 | // copy elements from the kernel ptable to the user space 110 | extern struct proc * getptable_proc(void); 111 | 112 | int sys_getptable(void){ 113 | int size; 114 | char *buf; 115 | char *s; 116 | struct proc *p = '\0'; 117 | 118 | if (argint(0, &size) <0){ 119 | return -1; 120 | } 121 | if (argptr(1, &buf,size) <0){ 122 | return -1; 123 | } 124 | 125 | s = buf; 126 | p = getptable_proc(); 127 | 128 | while(buf + size > s && p->state != UNUSED){ 129 | *(int *)s = p->state; 130 | s+=4; 131 | *(int *)s = p->pid; 132 | s+=4; 133 | *(int *)s = p->parent->pid; 134 | s+=4; 135 | *(int *)s = p->priority; 136 | s+=4; 137 | *(int *)s = p->tickets; 138 | s+=4; 139 | *(int *)s = p->ctime; 140 | s+=4; 141 | memmove(s,p->name,16); 142 | s+=16; 143 | p++; 144 | } 145 | return 0; 146 | } 147 | 148 | int 149 | sys_getppid(void) 150 | { 151 | return myproc()->parent->pid; 152 | } 153 | 154 | // change priority of a specific process 155 | extern int 
chpr(int, int); 156 | extern int chtickets(int, int); 157 | 158 | int 159 | sys_chpr(void) 160 | { 161 | int pid, pr; 162 | if(argint(0, &pid) < 0) 163 | return -1; 164 | if(argint(1, &pr) < 0) 165 | return -1; 166 | 167 | return chpr(pid, pr); 168 | } 169 | 170 | int sys_yield(void) { 171 | yield(); 172 | return 0; 173 | } 174 | 175 | int 176 | sys_chtickets(void) 177 | { 178 | int pid, tickets; 179 | if(argint(0, &pid) < 0) 180 | return -1; 181 | if(argint(1, &tickets) < 0) 182 | return -1; 183 | 184 | return chtickets(pid, tickets); 185 | } 186 | -------------------------------------------------------------------------------- /lab_scheduling/end/syscall.c: -------------------------------------------------------------------------------- 1 | #include "types.h" 2 | #include "defs.h" 3 | #include "param.h" 4 | #include "memlayout.h" 5 | #include "mmu.h" 6 | #include "proc.h" 7 | #include "x86.h" 8 | #include "syscall.h" 9 | 10 | // User code makes a system call with INT T_SYSCALL. 11 | // System call number in %eax. 12 | // Arguments on the stack, from the user call to the C 13 | // library system call function. The saved user %esp points 14 | // to a saved program counter, and then the first argument. 15 | 16 | // Fetch the int at addr from the current process. 17 | int 18 | fetchint(uint addr, int *ip) 19 | { 20 | struct proc *curproc = myproc(); 21 | 22 | if(addr >= curproc->sz || addr+4 > curproc->sz) 23 | return -1; 24 | *ip = *(int*)(addr); 25 | return 0; 26 | } 27 | 28 | // Fetch the nul-terminated string at addr from the current process. 29 | // Doesn't actually copy the string - just sets *pp to point at it. 30 | // Returns length of string, not including nul. 
31 | int 32 | fetchstr(uint addr, char **pp) 33 | { 34 | char *s, *ep; 35 | struct proc *curproc = myproc(); 36 | 37 | if(addr >= curproc->sz) 38 | return -1; 39 | *pp = (char*)addr; 40 | ep = (char*)curproc->sz; 41 | for(s = *pp; s < ep; s++){ 42 | if(*s == 0) 43 | return s - *pp; 44 | } 45 | return -1; 46 | } 47 | 48 | // Fetch the nth 32-bit system call argument. 49 | int 50 | argint(int n, int *ip) 51 | { 52 | return fetchint((myproc()->tf->esp) + 4 + 4*n, ip); 53 | } 54 | 55 | // Fetch the nth word-sized system call argument as a pointer 56 | // to a block of memory of size bytes. Check that the pointer 57 | // lies within the process address space. 58 | int 59 | argptr(int n, char **pp, int size) 60 | { 61 | int i; 62 | struct proc *curproc = myproc(); 63 | 64 | if(argint(n, &i) < 0) 65 | return -1; 66 | if(size < 0 || (uint)i >= curproc->sz || (uint)i+size > curproc->sz) 67 | return -1; 68 | *pp = (char*)i; 69 | return 0; 70 | } 71 | 72 | // Fetch the nth word-sized system call argument as a string pointer. 73 | // Check that the pointer is valid and the string is nul-terminated. 74 | // (There is no shared writable memory, so the string can't change 75 | // between this check and being used by the kernel.) 
76 | int 77 | argstr(int n, char **pp) 78 | { 79 | int addr; 80 | if(argint(n, &addr) < 0) 81 | return -1; 82 | return fetchstr(addr, pp); 83 | } 84 | 85 | extern int sys_chdir(void); 86 | extern int sys_close(void); 87 | extern int sys_dup(void); 88 | extern int sys_exec(void); 89 | extern int sys_exit(void); 90 | extern int sys_fork(void); 91 | extern int sys_fstat(void); 92 | extern int sys_getpid(void); 93 | extern int sys_kill(void); 94 | extern int sys_link(void); 95 | extern int sys_mkdir(void); 96 | extern int sys_mknod(void); 97 | extern int sys_open(void); 98 | extern int sys_pipe(void); 99 | extern int sys_read(void); 100 | extern int sys_sbrk(void); 101 | extern int sys_sleep(void); 102 | extern int sys_unlink(void); 103 | extern int sys_wait(void); 104 | extern int sys_wait2(void); 105 | extern int sys_write(void); 106 | extern int sys_uptime(void); 107 | extern int sys_getptable(void); 108 | extern int sys_getppid(void); 109 | extern int sys_chpr(void); 110 | extern int sys_yield(void); 111 | extern int sys_chtickets(void); 112 | 113 | static int (*syscalls[])(void) = { 114 | [SYS_fork] sys_fork, 115 | [SYS_exit] sys_exit, 116 | [SYS_wait] sys_wait, 117 | [SYS_pipe] sys_pipe, 118 | [SYS_read] sys_read, 119 | [SYS_kill] sys_kill, 120 | [SYS_exec] sys_exec, 121 | [SYS_fstat] sys_fstat, 122 | [SYS_chdir] sys_chdir, 123 | [SYS_dup] sys_dup, 124 | [SYS_getpid] sys_getpid, 125 | [SYS_sbrk] sys_sbrk, 126 | [SYS_sleep] sys_sleep, 127 | [SYS_uptime] sys_uptime, 128 | [SYS_open] sys_open, 129 | [SYS_write] sys_write, 130 | [SYS_mknod] sys_mknod, 131 | [SYS_unlink] sys_unlink, 132 | [SYS_link] sys_link, 133 | [SYS_mkdir] sys_mkdir, 134 | [SYS_close] sys_close, 135 | [SYS_getptable] sys_getptable, 136 | [SYS_getppid] sys_getppid, 137 | [SYS_chpr] sys_chpr, 138 | [SYS_wait2] sys_wait2, 139 | [SYS_yield] sys_yield, 140 | [SYS_chtickets] sys_chtickets, 141 | }; 142 | 143 | void 144 | syscall(void) 145 | { 146 | int num; 147 | struct proc *curproc = myproc(); 148 | 
149 | num = curproc->tf->eax; 150 | if(num > 0 && num < NELEM(syscalls) && syscalls[num]) { 151 | curproc->tf->eax = syscalls[num](); 152 | } else { 153 | cprintf("%d %s: unknown sys call %d\n", 154 | curproc->pid, curproc->name, num); 155 | curproc->tf->eax = -1; 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /lab_scheduling/end/defs.h: -------------------------------------------------------------------------------- 1 | struct buf; 2 | struct context; 3 | struct file; 4 | struct inode; 5 | struct pipe; 6 | struct proc; 7 | struct rtcdate; 8 | struct spinlock; 9 | struct sleeplock; 10 | struct stat; 11 | struct superblock; 12 | 13 | // bio.c 14 | void binit(void); 15 | struct buf* bread(uint, uint); 16 | void brelse(struct buf*); 17 | void bwrite(struct buf*); 18 | 19 | // console.c 20 | void consoleinit(void); 21 | void cprintf(char*, ...); 22 | void consoleintr(int(*)(void)); 23 | void panic(char*) __attribute__((noreturn)); 24 | 25 | // exec.c 26 | int exec(char*, char**); 27 | 28 | // file.c 29 | struct file* filealloc(void); 30 | void fileclose(struct file*); 31 | struct file* filedup(struct file*); 32 | void fileinit(void); 33 | int fileread(struct file*, char*, int n); 34 | int filestat(struct file*, struct stat*); 35 | int filewrite(struct file*, char*, int n); 36 | 37 | // fs.c 38 | void readsb(int dev, struct superblock *sb); 39 | int dirlink(struct inode*, char*, uint); 40 | struct inode* dirlookup(struct inode*, char*, uint*); 41 | struct inode* ialloc(uint, short); 42 | struct inode* idup(struct inode*); 43 | void iinit(int dev); 44 | void ilock(struct inode*); 45 | void iput(struct inode*); 46 | void iunlock(struct inode*); 47 | void iunlockput(struct inode*); 48 | void iupdate(struct inode*); 49 | int namecmp(const char*, const char*); 50 | struct inode* namei(char*); 51 | struct inode* nameiparent(char*, char*); 52 | int readi(struct inode*, char*, uint, uint); 53 | void stati(struct inode*, 
struct stat*); 54 | int writei(struct inode*, char*, uint, uint); 55 | 56 | // ide.c 57 | void ideinit(void); 58 | void ideintr(void); 59 | void iderw(struct buf*); 60 | 61 | // ioapic.c 62 | void ioapicenable(int irq, int cpu); 63 | extern uchar ioapicid; 64 | void ioapicinit(void); 65 | 66 | // kalloc.c 67 | char* kalloc(void); 68 | void kfree(char*); 69 | void kinit1(void*, void*); 70 | void kinit2(void*, void*); 71 | 72 | // kbd.c 73 | void kbdintr(void); 74 | 75 | // lapic.c 76 | void cmostime(struct rtcdate *r); 77 | int lapicid(void); 78 | extern volatile uint* lapic; 79 | void lapiceoi(void); 80 | void lapicinit(void); 81 | void lapicstartap(uchar, uint); 82 | void microdelay(int); 83 | 84 | // log.c 85 | void initlog(int dev); 86 | void log_write(struct buf*); 87 | void begin_op(); 88 | void end_op(); 89 | 90 | // mp.c 91 | extern int ismp; 92 | void mpinit(void); 93 | 94 | // picirq.c 95 | void picenable(int); 96 | void picinit(void); 97 | 98 | // pipe.c 99 | int pipealloc(struct file**, struct file**); 100 | void pipeclose(struct pipe*, int); 101 | int piperead(struct pipe*, char*, int); 102 | int pipewrite(struct pipe*, char*, int); 103 | 104 | //PAGEBREAK: 16 105 | // proc.c 106 | int cpuid(void); 107 | void exit(void); 108 | int fork(void); 109 | int growproc(int); 110 | int kill(int); 111 | struct cpu* mycpu(void); 112 | struct proc* myproc(); 113 | void pinit(void); 114 | void procdump(void); 115 | void scheduler(void) __attribute__((noreturn)); 116 | void sched(void); 117 | void setproc(struct proc*); 118 | void sleep(void*, struct spinlock*); 119 | void userinit(void); 120 | int wait(void); 121 | int wait2(int*, int*, int*); 122 | void wakeup(void*); 123 | void yield(void); 124 | 125 | // swtch.S 126 | void swtch(struct context**, struct context*); 127 | 128 | // spinlock.c 129 | void acquire(struct spinlock*); 130 | void getcallerpcs(void*, uint*); 131 | int holding(struct spinlock*); 132 | void initlock(struct spinlock*, char*); 133 | void 
release(struct spinlock*); 134 | void pushcli(void); 135 | void popcli(void); 136 | 137 | // sleeplock.c 138 | void acquiresleep(struct sleeplock*); 139 | void releasesleep(struct sleeplock*); 140 | int holdingsleep(struct sleeplock*); 141 | void initsleeplock(struct sleeplock*, char*); 142 | 143 | // string.c 144 | int memcmp(const void*, const void*, uint); 145 | void* memmove(void*, const void*, uint); 146 | void* memset(void*, int, uint); 147 | char* safestrcpy(char*, const char*, int); 148 | int strlen(const char*); 149 | int strncmp(const char*, const char*, uint); 150 | char* strncpy(char*, const char*, int); 151 | 152 | // syscall.c 153 | int argint(int, int*); 154 | int argptr(int, char**, int); 155 | int argstr(int, char**); 156 | int fetchint(uint, int*); 157 | int fetchstr(uint, char**); 158 | void syscall(void); 159 | 160 | // timer.c 161 | void timerinit(void); 162 | 163 | // trap.c 164 | void idtinit(void); 165 | extern uint ticks; 166 | void tvinit(void); 167 | extern struct spinlock tickslock; 168 | 169 | // uart.c 170 | void uartinit(void); 171 | void uartintr(void); 172 | void uartputc(int); 173 | 174 | // vm.c 175 | void seginit(void); 176 | void kvmalloc(void); 177 | pde_t* setupkvm(void); 178 | char* uva2ka(pde_t*, char*); 179 | int allocuvm(pde_t*, uint, uint); 180 | int deallocuvm(pde_t*, uint, uint); 181 | void freevm(pde_t*); 182 | void inituvm(pde_t*, char*, uint); 183 | int loaduvm(pde_t*, char*, struct inode*, uint, uint); 184 | pde_t* copyuvm(pde_t*, uint); 185 | void switchuvm(struct proc*); 186 | void switchkvm(void); 187 | int copyout(pde_t*, uint, void*, uint); 188 | void clearpteu(pde_t *pgdir, char *uva); 189 | 190 | // number of elements in fixed-size array 191 | #define NELEM(x) (sizeof(x)/sizeof((x)[0])) 192 | -------------------------------------------------------------------------------- /lab_scheduling/end/Makefile: -------------------------------------------------------------------------------- 1 | OBJS = \ 2 | bio.o\ 3 | 
console.o\ 4 | exec.o\ 5 | file.o\ 6 | fs.o\ 7 | ide.o\ 8 | ioapic.o\ 9 | kalloc.o\ 10 | kbd.o\ 11 | lapic.o\ 12 | log.o\ 13 | main.o\ 14 | mp.o\ 15 | picirq.o\ 16 | pipe.o\ 17 | proc.o\ 18 | sleeplock.o\ 19 | spinlock.o\ 20 | string.o\ 21 | swtch.o\ 22 | syscall.o\ 23 | sysfile.o\ 24 | sysproc.o\ 25 | trapasm.o\ 26 | trap.o\ 27 | uart.o\ 28 | vectors.o\ 29 | vm.o\ 30 | 31 | # Cross-compiling (e.g., on Mac OS X) 32 | # TOOLPREFIX = i386-jos-elf 33 | 34 | # Using native tools (e.g., on X86 Linux) 35 | #TOOLPREFIX = 36 | 37 | # Try to infer the correct TOOLPREFIX if not set 38 | ifndef TOOLPREFIX 39 | TOOLPREFIX := $(shell if i386-jos-elf-objdump -i 2>&1 | grep '^elf32-i386$$' >/dev/null 2>&1; \ 40 | then echo 'i386-jos-elf-'; \ 41 | elif objdump -i 2>&1 | grep 'elf32-i386' >/dev/null 2>&1; \ 42 | then echo ''; \ 43 | else echo "***" 1>&2; \ 44 | echo "*** Error: Couldn't find an i386-*-elf version of GCC/binutils." 1>&2; \ 45 | echo "*** Is the directory with i386-jos-elf-gcc in your PATH?" 1>&2; \ 46 | echo "*** If your i386-*-elf toolchain is installed with a command" 1>&2; \ 47 | echo "*** prefix other than 'i386-jos-elf-', set your TOOLPREFIX" 1>&2; \ 48 | echo "*** environment variable to that prefix and run 'make' again." 1>&2; \ 49 | echo "*** To turn off this error, run 'gmake TOOLPREFIX= ...'." 
1>&2; \ 50 | echo "***" 1>&2; exit 1; fi) 51 | endif 52 | 53 | # If the makefile can't find QEMU, specify its path here 54 | # QEMU = qemu-system-i386 55 | 56 | # Try to infer the correct QEMU 57 | ifndef QEMU 58 | QEMU = $(shell if which qemu > /dev/null; \ 59 | then echo qemu; exit; \ 60 | elif which qemu-system-i386 > /dev/null; \ 61 | then echo qemu-system-i386; exit; \ 62 | elif which qemu-system-x86_64 > /dev/null; \ 63 | then echo qemu-system-x86_64; exit; \ 64 | else \ 65 | qemu=/Applications/Q.app/Contents/MacOS/i386-softmmu.app/Contents/MacOS/i386-softmmu; \ 66 | if test -x $$qemu; then echo $$qemu; exit; fi; fi; \ 67 | echo "***" 1>&2; \ 68 | echo "*** Error: Couldn't find a working QEMU executable." 1>&2; \ 69 | echo "*** Is the directory containing the qemu binary in your PATH" 1>&2; \ 70 | echo "*** or have you tried setting the QEMU variable in Makefile?" 1>&2; \ 71 | echo "***" 1>&2; exit 1) 72 | endif 73 | 74 | ifndef SCHEDPOLICY 75 | SCHEDPOLICY := DEFAULT 76 | endif 77 | 78 | 79 | CC = $(TOOLPREFIX)gcc 80 | AS = $(TOOLPREFIX)gas 81 | LD = $(TOOLPREFIX)ld 82 | OBJCOPY = $(TOOLPREFIX)objcopy 83 | OBJDUMP = $(TOOLPREFIX)objdump 84 | CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -O2 -Wall -MD -ggdb -m32 -Werror -Wno-unused-variable -Wno-unused-function -fno-omit-frame-pointer -D $(SCHEDPOLICY) 85 | #CFLAGS = -fno-pic -static -fno-builtin -fno-strict-aliasing -fvar-tracking -fvar-tracking-assignments -O0 -g -Wall -MD -gdwarf-2 -m32 -Werror -fno-omit-frame-pointer 86 | CFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector) 87 | ASFLAGS = -m32 -gdwarf-2 -Wa,-divide 88 | # FreeBSD ld wants ``elf_i386_fbsd'' 89 | LDFLAGS += -m $(shell $(LD) -V | grep elf_i386 2>/dev/null | head -n 1) 90 | 91 | xv6.img: bootblock kernel fs.img 92 | dd if=/dev/zero of=xv6.img count=10000 93 | dd if=bootblock of=xv6.img conv=notrunc 94 | dd if=kernel of=xv6.img seek=1 conv=notrunc 95 | 96 | xv6memfs.img: 
bootblock kernelmemfs 97 | dd if=/dev/zero of=xv6memfs.img count=10000 98 | dd if=bootblock of=xv6memfs.img conv=notrunc 99 | dd if=kernelmemfs of=xv6memfs.img seek=1 conv=notrunc 100 | 101 | bootblock: bootasm.S bootmain.c 102 | $(CC) $(CFLAGS) -fno-pic -O -nostdinc -I. -c bootmain.c 103 | $(CC) $(CFLAGS) -fno-pic -nostdinc -I. -c bootasm.S 104 | $(LD) $(LDFLAGS) -N -e start -Ttext 0x7C00 -o bootblock.o bootasm.o bootmain.o 105 | $(OBJDUMP) -S bootblock.o > bootblock.asm 106 | $(OBJCOPY) -S -O binary -j .text bootblock.o bootblock 107 | ./sign.pl bootblock 108 | 109 | entryother: entryother.S 110 | $(CC) $(CFLAGS) -fno-pic -nostdinc -I. -c entryother.S 111 | $(LD) $(LDFLAGS) -N -e start -Ttext 0x7000 -o bootblockother.o entryother.o 112 | $(OBJCOPY) -S -O binary -j .text bootblockother.o entryother 113 | $(OBJDUMP) -S bootblockother.o > entryother.asm 114 | 115 | initcode: initcode.S 116 | $(CC) $(CFLAGS) -nostdinc -I. -c initcode.S 117 | $(LD) $(LDFLAGS) -N -e start -Ttext 0 -o initcode.out initcode.o 118 | $(OBJCOPY) -S -O binary initcode.out initcode 119 | $(OBJDUMP) -S initcode.o > initcode.asm 120 | 121 | kernel: $(OBJS) entry.o entryother initcode kernel.ld 122 | $(LD) $(LDFLAGS) -T kernel.ld -o kernel entry.o $(OBJS) -b binary initcode entryother 123 | $(OBJDUMP) -S kernel > kernel.asm 124 | $(OBJDUMP) -t kernel | sed '1,/SYMBOL TABLE/d; s/ .* / /; /^$$/d' > kernel.sym 125 | 126 | # kernelmemfs is a copy of kernel that maintains the 127 | # disk image in memory instead of writing to a disk. 128 | # This is not so useful for testing persistent storage or 129 | # exploring disk buffering implementations, but it is 130 | # great for testing the kernel on real hardware without 131 | # needing a scratch disk. 
132 | MEMFSOBJS = $(filter-out ide.o,$(OBJS)) memide.o 133 | kernelmemfs: $(MEMFSOBJS) entry.o entryother initcode kernel.ld fs.img 134 | $(LD) $(LDFLAGS) -T kernel.ld -o kernelmemfs entry.o $(MEMFSOBJS) -b binary initcode entryother fs.img 135 | $(OBJDUMP) -S kernelmemfs > kernelmemfs.asm 136 | $(OBJDUMP) -t kernelmemfs | sed '1,/SYMBOL TABLE/d; s/ .* / /; /^$$/d' > kernelmemfs.sym 137 | 138 | tags: $(OBJS) entryother.S _init 139 | etags *.S *.c 140 | 141 | vectors.S: vectors.pl 142 | perl vectors.pl > vectors.S 143 | 144 | ULIB = ulib.o usys.o printf.o umalloc.o 145 | 146 | _%: %.o $(ULIB) 147 | $(LD) $(LDFLAGS) -N -e main -Ttext 0 -o $@ $^ 148 | $(OBJDUMP) -S $@ > $*.asm 149 | $(OBJDUMP) -t $@ | sed '1,/SYMBOL TABLE/d; s/ .* / /; /^$$/d' > $*.sym 150 | 151 | _forktest: forktest.o $(ULIB) 152 | # forktest has less library code linked in - needs to be small 153 | # in order to be able to max out the proc table. 154 | $(LD) $(LDFLAGS) -N -e main -Ttext 0 -o _forktest forktest.o ulib.o usys.o 155 | $(OBJDUMP) -S _forktest > forktest.asm 156 | 157 | mkfs: mkfs.c fs.h 158 | gcc -Werror -Wall -o mkfs mkfs.c 159 | 160 | # Prevent deletion of intermediate files, e.g. cat.o, after first build, so 161 | # that disk image changes after first build are persistent until clean. 
More 162 | # details: 163 | # http://www.gnu.org/software/make/manual/html_node/Chained-Rules.html 164 | .PRECIOUS: %.o 165 | 166 | UPROGS=\ 167 | _cat\ 168 | _echo\ 169 | _forktest\ 170 | _grep\ 171 | _init\ 172 | _kill\ 173 | _ln\ 174 | _ls\ 175 | _mkdir\ 176 | _rm\ 177 | _sh\ 178 | _stressfs\ 179 | _usertests\ 180 | _wc\ 181 | _zombie\ 182 | _ps\ 183 | _nice\ 184 | _foo01\ 185 | _foo02\ 186 | _sanity\ 187 | _SMLsanity\ 188 | 189 | fs.img: mkfs README $(UPROGS) 190 | ./mkfs fs.img README $(UPROGS) 191 | 192 | -include *.d 193 | 194 | clean: 195 | rm -f *.tex *.dvi *.idx *.aux *.log *.ind *.ilg \ 196 | *.o *.d *.asm *.sym vectors.S bootblock entryother \ 197 | initcode initcode.out kernel xv6.img fs.img kernelmemfs mkfs \ 198 | .gdbinit \ 199 | $(UPROGS) 200 | 201 | # make a printout 202 | FILES = $(shell grep -v '^\#' runoff.list) 203 | PRINT = runoff.list runoff.spec README toc.hdr toc.ftr $(FILES) 204 | 205 | xv6.pdf: $(PRINT) 206 | ./runoff 207 | ls -l xv6.pdf 208 | 209 | print: xv6.pdf 210 | 211 | # run in emulators 212 | 213 | bochs : fs.img xv6.img 214 | if [ ! 
-e .bochsrc ]; then ln -s dot-bochsrc .bochsrc; fi 215 | bochs -q 216 | 217 | # try to generate a unique GDB port 218 | GDBPORT = $(shell expr `id -u` % 5000 + 25000) 219 | # QEMU's gdb stub command line changed in 0.11 220 | QEMUGDB = $(shell if $(QEMU) -help | grep -q '^-gdb'; \ 221 | then echo "-gdb tcp::$(GDBPORT)"; \ 222 | else echo "-s -p $(GDBPORT)"; fi) 223 | ifndef CPUS 224 | CPUS := 2 225 | endif 226 | QEMUOPTS = -drive file=fs.img,index=1,media=disk,format=raw -drive file=xv6.img,index=0,media=disk,format=raw -smp $(CPUS) -m 512 $(QEMUEXTRA) 227 | 228 | flags: 229 | @echo $(SCHEDPOLICY) 230 | 231 | qemu: fs.img xv6.img 232 | $(QEMU) -serial mon:stdio $(QEMUOPTS) 233 | 234 | qemu-memfs: xv6memfs.img 235 | $(QEMU) -drive file=xv6memfs.img,index=0,media=disk,format=raw -smp $(CPUS) -m 256 236 | 237 | qemu-nox: fs.img xv6.img 238 | $(QEMU) -nographic $(QEMUOPTS) 239 | 240 | .gdbinit: .gdbinit.tmpl 241 | sed "s/localhost:1234/localhost:$(GDBPORT)/" < $^ > $@ 242 | 243 | qemu-gdb: fs.img xv6.img .gdbinit 244 | @echo "*** Now run 'gdb'." 1>&2 245 | $(QEMU) -serial mon:stdio $(QEMUOPTS) -S $(QEMUGDB) 246 | 247 | qemu-nox-gdb: fs.img xv6.img .gdbinit 248 | @echo "*** Now run 'gdb'." 1>&2 249 | $(QEMU) -nographic $(QEMUOPTS) -S $(QEMUGDB) 250 | 251 | # CUT HERE 252 | # prepare dist for students 253 | # after running make dist, probably want to 254 | # rename it to rev0 or rev1 or so on and then 255 | # check in that version. 
256 | 257 | EXTRA=\ 258 | mkfs.c ulib.c user.h cat.c echo.c forktest.c grep.c kill.c\ 259 | ln.c ls.c mkdir.c rm.c stressfs.c usertests.c wc.c zombie.c\ 260 | printf.c umalloc.c\ 261 | README dot-bochsrc *.pl toc.* runoff runoff1 runoff.list\ 262 | .gdbinit.tmpl gdbutil\ 263 | 264 | dist: 265 | rm -rf dist 266 | mkdir dist 267 | for i in $(FILES); \ 268 | do \ 269 | grep -v PAGEBREAK $$i >dist/$$i; \ 270 | done 271 | sed '/CUT HERE/,$$d' Makefile >dist/Makefile 272 | echo >dist/runoff.spec 273 | cp $(EXTRA) dist 274 | 275 | dist-test: 276 | rm -rf dist 277 | make dist 278 | rm -rf dist-test 279 | mkdir dist-test 280 | cp dist/* dist-test 281 | cd dist-test; $(MAKE) print 282 | cd dist-test; $(MAKE) bochs || true 283 | cd dist-test; $(MAKE) qemu 284 | 285 | # update this rule (change rev#) when it is time to 286 | # make a new revision. 287 | tar: 288 | rm -rf /tmp/xv6 289 | mkdir -p /tmp/xv6 290 | cp dist/* dist/.gdbinit.tmpl /tmp/xv6 291 | (cd /tmp; tar cf - xv6) | gzip >xv6-rev10.tar.gz # the next one will be 10 (9/17) 292 | 293 | .PHONY: dist-test dist 294 | -------------------------------------------------------------------------------- /lab_scheduling/README.md: -------------------------------------------------------------------------------- 1 | # Scheduling policies in xv6 2 | 3 | This patch of xv6 aims to present 5 different scheduling policies which can be used in xv6. 4 | The 5 policies implemented: DEFAULT, FCFS, SML, DML and LOTTERY. 5 | In order to enable a specific policy, when you launch qemu you have to specify the command above, which will set a flag that wil enable the scheduling policity specified in it. 6 | 7 | ``` 8 | $ make qemu SCHEDFLAG=FCFS 9 | ``` 10 | 11 | If the flag isn't defined at launch, then DEFAULT (Round-Robin policy) is used. 
12 | 13 | ## Polices 14 | 15 | * [DEFAULT](#default) - This is the default Round-Robin policy which comes with the vanilla version of xv6 16 | * [FCFS](#fcfs) - **F**irst **C**ome **F**irst **S**erved 17 | * [PRIORITY](#priority) - Priority algoritm based on priorities values 18 | * [SML](#sml) - **S**tatic **M**ulti­**L**evel queue scheduling 19 | * [LOTTERY](#lottery) - Lottery algorithm based on tickets and probability 20 | 21 | ### DEFAULT - Round Robin 22 | 23 | The default algorithm implemented in xv6 it's one of the simplest (with FCFS) and relies on the Round-Robin policy, basically it loops through all the process which are available to run (market with the ```RUNNABLE```) state and give access to 24 | CPU at each one of them one at a time. 25 | To schedule processes fairly, a round-robin scheduler generally employs time-sharing, giving each job a time slot or quantum (its allowance of CPU time), and interrupting the job if it is not completed by then. 26 | The job is resumed next time a time slot is assigned to that process. 27 | If the process terminates or changes its state to waiting during its attributed time quantum, the scheduler selects the first process in the ready queue to execute. 28 | In the absence of time-sharing, or if the quanta were large relative to the sizes of the jobs, a process that produced large jobs would be favoured over other processes. 29 | Round-robin scheduling is simple, easy to implement, and starvation-free. 30 | 31 | To enable it and see how DEFAULT works use this command when compiling xv6: 32 | 33 | $ make qemu SCHEDFLAG=DEFAULT 34 | 35 | ### FCFS - First Come First Served 36 | 37 | First come first served (FCFS), is the simplest scheduling algorithm. FCFS simply queues processes in the order that they arrive in the ready queue. 38 | The scheduling overhead due to using this policy is minimal since context switches only occur upon process termination, and no reorganization of the process queue is required. 
39 | Throughput can be low, because long processes can be holding CPU, waiting the short processes for a long time, so short processes which are in a queue are penalized over the longer ones (known as convoy effect). 40 | By using this policy we have no starvation, because each process gets a chance to be executed after a definite time. 41 | Turnaround time, waiting time and response time depend on the order of their arrival and can be high for the same reasons above. 42 | There isn't prioritization, so using this policy we cannot force certain processes to be completed first which means that this system has trouble meeting process deadlines. 43 | The lack of prioritization means that as long as every process eventually completes, there is no starvation. 44 | In an environment where some processes might not complete, there can be starvation since the processes that come after the one which might not complete are never executed. 45 | 46 | To enable it and see how FCFS works use this command when compiling xv6: 47 | 48 | $ make qemu SCHEDFLAG=FCFS 49 | 50 | ### PRIORITY - Priority scheduling algorithm 51 | 52 | The priority scheduling algorithm (PRIORITY) represents a preemptive policy that executes processes based on their priority. 53 | The scheduling policy first selects the runnable process with the lowest value of priority and executes it, after that it finds the one with the second lowest value of priority and executes it 54 | and so on, until we have finished all the processes. 55 | This scheduling policy allows the user to mark some processes which we want to be completed first in a simple but fast way. 56 | Priority range in this algorithm is 1-20 (default is 10) where we give priority equals to 1 for the processes which we want to be completed first. 
57 | 58 | The following system call will change the priority queue of the process with a specific pid process: 59 | 60 | ``` 61 | int chpr(int pid, int priority) 62 | ``` 63 | 64 | In this case ```priority``` is a number between 1 and 20 which represents the new process priority. 65 | 66 | To enable it and see how PRIORITY works use this command when compiling xv6: 67 | 68 | $ make qemu SCHEDFLAG=PRIORITY 69 | 70 | 71 | ### SML - Static multilevel queue scheduling 72 | 73 | The static multilevel queue scheduling (SML) represents a preemptive policy that includes three priority queues (priority can assume three values: 1, 2 and 3). 74 | The initial process should be initiated at priority 2 and the priority should be copied upon fork. 75 | In this scheduling policy the scheduler will select a process from a lower queue only if no process is ready to run at a higher queue. 76 | The algorithm first runs all the process with highest priority and then, when they finish, it will consider all the process with a lower priority. 77 | Moving between priority queues is only available via a system call. 78 | This algorithm is very similar to PRIORITY, but in this case we have only three queues (low, medium, high) and the user must select for each process which queue the process belongs to (default -> 2: medium) 79 | Priority range in this algorithm is 1-3 (default is 2) where we give priority equals to 1 for the processes which we want to be completed first. 80 | 81 | The following system call will change the priority queue of the process with a specific pid process: 82 | 83 | ``` 84 | int chpr(int pid, int priority) 85 | ``` 86 | 87 | In this case ```priority``` is a number between 1 and 3 which represents the new process priority. 
88 | 89 | To enable it and see how SML works use this command when compiling xv6: 90 | 91 | $ make qemu SCHEDFLAG=SML 92 | 93 | ### LOTTERY - Lottery probabilistic scheduling algorithm 94 | 95 | The lottery is a probabilistic scheduling algorithm where at each process are each assigned some number of lottery tickets and the scheduler draws a random ticket to select the next process to run. 96 | The distribution of tickets need not be uniform; granting a process more tickets provides it a relative higher chance of selection. This technique can be used to approximate other scheduling algorithms, such as Shortest job next and Fair-share scheduling. 97 | Lottery scheduling solves the problem of starvation. Giving each process at least one lottery ticket guarantees that it has non-zero probability of being selected at each scheduling operation. 98 | A straightforward way to implement a centralized lottery scheduler is to randomly select a winning ticket, and then search a list of clients to locate the client holding that ticket. 99 | This requires a random number generation and O(n) operations to traverse a client list of length n, accumulating a running ticket sum until it reaches the winning value. 100 | 101 | The following system call will change the tickets of the process with a specific pid process: 102 | 103 | ``` 104 | int chtickets(int pid, int tickets) 105 | ``` 106 | 107 | In this case ```tickets``` is a number between 1 and 100 which represents the new process' tickets. 108 | 109 | Example: 110 | 111 | ![alt Lottery scheduling algorithm example](https://image.prntscr.com/image/aAn5ZSaWTbuTZIRISLnaFw.png) 112 | *Five clients compete in a list-based 113 | lottery with a total of 20 tickets. The fifteenth ticket is randomly 114 | selected, and the client list is searched for the winner. A running 115 | ticket sum is accumulated until the winning ticket value is reached. 
116 | In this example, the third client is the winner.* 117 | 118 | Additional information on Lottery algorithm can be found here: [lottery paper](https://www.usenix.net/legacy/publications/library/proceedings/osdi/full_papers/waldspurger.pdf) 119 | 120 | To enable it and see how LOTTERY works use this command when compiling xv6: 121 | 122 | $ make qemu SCHEDFLAG=LOTTERY 123 | 124 | 125 | 126 | This patch also includes a new system call, similar to ```wait```, but with more funtionalities, in order to check the performances of our scheduling algorithms. 127 | The new system call is called ```wait2``` and returns the creation time, the running time (```rutime```), the sleeping time (```stime```) snd the ready time (runnable) (```retime```) 128 | for each process, this helps a lot in understanding how a scheluding policy affects the times of every process. 129 | For example using the PRIORITY algorithm the sleeping time of processes with a low priority (the one which we need to run first) will be smaller than the one with high priority 130 | (the one which we want to run later) 131 | 132 | Two other important functions which can be useful to play with are 133 | 134 | ## Authors 135 | 136 | * **Marco Fontana** - *Added fcfs, sml and lottery scheduling algorithms, edited nice, added wait2 and reorganization of all scheduling polices (whith descriptions and optimizations)* - [marf](https://github.com/marf) 137 | * **Nicola Bicocchi** - *Command ps, first version of nice and processes priorities* - [agr_unimore](https://bitbucket.org/agr_unimore/operatingsystemsmsc) 138 | 139 | 140 | ## License 141 | 142 | This project is licensed under the MIT License 143 | 144 | ## Acknowledgments 145 | 146 | xv6 is inspired by John Lions's Commentary on UNIX 6th Edition (Peer 147 | to Peer Communications; ISBN: 1-57398-013-7; 1st edition (June 14, 148 | 2000)). See also http://pdos.csail.mit.edu/6.828/2014/xv6.html, which 149 | provides pointers to on-line resources for v6. 
150 | 151 | xv6 borrows code from the following sources: 152 | JOS (asm.h, elf.h, mmu.h, bootasm.S, ide.c, console.c, and others) 153 | Plan 9 (entryother.S, mp.h, mp.c, lapic.c) 154 | FreeBSD (ioapic.c) 155 | NetBSD (console.c) 156 | 157 | The following people have made contributions: 158 | Russ Cox (context switching, locking) 159 | Cliff Frey (MP) 160 | Xiao Yu (MP) 161 | Nickolai Zeldovich 162 | Austin Clements 163 | 164 | In addition, we are grateful for the bug reports and patches contributed by 165 | Silas Boyd-Wickizer, Peter Froehlich, Shivam Handa, Anders Kaseorg, Eddie 166 | Kohler, Yandong Mao, Hitoshi Mitake, Carmi Merimovich, Joel Nider, Greg Price, 167 | Eldar Sehayek, Yongming Shen, Stephen Tu, and Zouchangwei. 168 | 169 | The code in the files that constitute xv6 is 170 | Copyright 2006-2017 Frans Kaashoek, Robert Morris, and Russ Cox. 171 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Scheduling policies in xv6 2 | 3 | This patch of xv6 aims to present 5 different scheduling policies which can be used in xv6. 4 | The 5 policies implemented: DEFAULT, FCFS, PRIORITY, SML and LOTTERY. 5 | In order to enable a specific policy, when you launch qemu you have to specify the command above, which will set a flag that will enable the scheduling policity specified in it. 6 | 7 | ``` 8 | $ make qemu SCHEDFLAG=FCFS 9 | ``` 10 | 11 | Alternatively if you want to patch your existing system you can launch the following command: 12 | 13 | ``` 14 | $ ./generate.sh --lab lab_scheduling --flags FCFS 15 | ``` 16 | 17 | If the flag isn't defined at launch, then DEFAULT (Round-Robin policy) is used. 
18 | 19 | ## Polices 20 | 21 | * [DEFAULT](#default---round-robin) - This is the default Round-Robin policy which comes with the vanilla version of xv6 22 | * [FCFS](#fcfs---first-come-first-served) - **F**irst **C**ome **F**irst **S**erved 23 | * [PRIORITY](#priority---priority-scheduling-algoritm) - Priority algoritm based on priorities values 24 | * [SML](#sml---static-multilevel-queue-scheduling) - **S**tatic **M**ulti­**L**evel queue scheduling 25 | * [LOTTERY](#lottery---lottery-probabilistic-scheduling-algorithm) - Lottery algorithm based on tickets and probability 26 | 27 | ## DEFAULT - Round Robin 28 | 29 | The default algorithm implemented in xv6 it's one of the simplest (with FCFS) and relies on the Round-Robin policy, basically it loops through all the process which are available to run (marked with the ```RUNNABLE```) state and give access to 30 | CPU at each one of them one at a time. 31 | To schedule processes fairly, a round-robin scheduler generally employs time-sharing, giving each job a time slot or quantum (its allowance of CPU time), and interrupting the job if it is not completed by then. 32 | The job is resumed next time a time slot is assigned to that process. 33 | If the process terminates or changes its state to waiting during its attributed time quantum, the scheduler selects the first process in the ready queue to execute. 34 | In the absence of time-sharing, or if the quanta were large relative to the sizes of the jobs, a process that produced large jobs would be favoured over other processes. 35 | Round-robin scheduling is simple, easy to implement, and starvation-free. 36 | 37 | To enable it and see how DEFAULT works use this command when compiling xv6: 38 | 39 | ``` 40 | $ make qemu SCHEDFLAG=DEFAULT 41 | ``` 42 | 43 | ``` 44 | $ ./generate.sh --lab lab_scheduling --flags DEFAULT 45 | ``` 46 | 47 | ## FCFS - First Come First Served 48 | 49 | First come first served (FCFS), is the simplest scheduling algorithm. 
FCFS simply queues processes in the order that they arrive in the ready queue. 50 | The scheduling overhead due to using this policy is minimal since context switches only occur upon process termination, and no reorganization of the process queue is required. 51 | Throughput can be low, because long processes can be holding CPU, waiting the short processes for a long time, so short processes which are in a queue are penalized over the longer ones (known as convoy effect). 52 | By using this policy we have no starvation, because each process gets chance to be executed after a definite time. 53 | Turnaround time, waiting time and response time depends on the order of their arrival and can be high for the same reasons above. 54 | There isn't prioritization, so using this policy we cannot force certain processes to be completed first which means that this system has trouble meeting process deadlines. 55 | The lack of prioritization means that as long as every process eventually completes, there is no starvation. 56 | In an environment where some processes might not complete, there can be starvation since the processes that come next the one which might not complete are never executed. 57 | 58 | To enable it and see how FCFS works use this command when compiling xv6: 59 | 60 | ``` 61 | $ make qemu SCHEDFLAG=FCFS 62 | ``` 63 | 64 | ``` 65 | $ ./generate.sh --lab lab_scheduling --flags FCFS 66 | ``` 67 | 68 | ## PRIORITY - Priority scheduling algoritm 69 | 70 | The priority scheduling algorithm (SML) represents a preemptive policy that executes processes based on their priority. 71 | The scheduling policy first select the runnable process with the lowest value of priority and executes it, after that it finds the one with the seconds lowest value of priority and excutes it 72 | and so on, until we have finished all the processes. 73 | This scheduling policy allows the user to mark some processes which we wants to be completed first in a simple but fast way. 
74 | Priority range in this algorithm is 1-20 (default is 10) where we give priority equals to 1 for the processes which we want to be completed first. 75 | 76 | The following system call will change the priority queue of the process with a specific pid process: 77 | 78 | ``` 79 | int chpr(int pid, int priority) 80 | ``` 81 | 82 | In this case ```priority``` is a number between 1 and 20 which represents the new process priority. 83 | 84 | To enable it and see how PRIORITY works use this command when compiling xv6: 85 | 86 | ``` 87 | $ make qemu SCHEDFLAG=PRIORITY 88 | ``` 89 | 90 | ``` 91 | $ ./generate.sh --lab lab_scheduling --flags PRIORITY 92 | ``` 93 | 94 | 95 | ## SML - Static multilevel queue scheduling 96 | 97 | The static multilevel queue scheduling (SML) represents a preemptive policy that includes three priority queues (priority can assume three values: 1, 2 and 3). 98 | The initial process should be initiated at priority 2 and the priority should be copied upon fork. 99 | In this scheduling policy the scheduler will select a process from a lower queue only if no process is ready to run at a higher queue. 100 | The algorithm first runs all the process with highest priority and then, when they finish, it will consider all the process with a lower priority. 101 | Moving between priority queues is only available via a system call. 102 | This algorithm is very similar to PRIORITY, but in this case we have only three queues (low, medium, high) and the user must select for each process which queue the process belongs to (default -> 2: medium) 103 | Priority range in this algorithm is 1-3 (default is 2) where we give priority equals to 1 for the processes which we want to be completed first. 
104 | 105 | The following system call will change the priority queue of the process with a specific pid process: 106 | 107 | ``` 108 | int chpr(int pid, int priority) 109 | ``` 110 | 111 | In this case ```priority``` is a number between 1 and 3 which represents the new process priority. 112 | 113 | To enable it and see how SML works use this command when compiling xv6: 114 | 115 | ``` 116 | $ make qemu SCHEDFLAG=SML 117 | ``` 118 | 119 | ## LOTTERY - Lottery probabilistic scheduling algorithm 120 | 121 | The lottery is a probabilistic scheduling algorithm where at each process are each assigned some number of lottery tickets and the scheduler draws a random ticket to select the next process to run. 122 | The distribution of tickets need not be uniform; granting a process more tickets provides it a relative higher chance of selection. This technique can be used to approximate other scheduling algorithms, such as Shortest job next and Fair-share scheduling. 123 | Lottery scheduling solves the problem of starvation. Giving each process at least one lottery ticket guarantees that it has non-zero probability of being selected at each scheduling operation. 124 | A straightforward way to implement a centralized lottery scheduler is to randomly select a winning ticket, and then search a list of clients to locate the client holding that ticket. 125 | This requires a random number generation and O(n) operations to traverse a client list of length n, accumulating a running ticket sum until it reaches the winning value. 126 | 127 | The following system call will change the tickets of the process with a specific pid process: 128 | 129 | ``` 130 | int chtickets(int pid, int tickets) 131 | ``` 132 | 133 | In this case ```tickets``` is a number between 1 and 100 which represents the new process' tickets. 
134 | 135 | Example: 136 | 137 | ![alt Lottery scheduling algorithm example](https://image.prntscr.com/image/aAn5ZSaWTbuTZIRISLnaFw.png) 138 | 139 | *Five clients compete in a list-based 140 | lottery with a total of 20 tickets. The fifteenth ticket is randomly 141 | selected, and the client list is searched for the winner. A running 142 | ticket sum is accumulated until the winning ticket value is reached. 143 | In this example, the third client is the winner.* 144 | 145 | Additional information on the Lottery algorithm can be found here: [lottery paper](https://www.usenix.net/legacy/publications/library/proceedings/osdi/full_papers/waldspurger.pdf) 146 | 147 | To enable it and see how LOTTERY works use this command when compiling xv6: 148 | 149 | ``` 150 | $ make qemu SCHEDFLAG=LOTTERY 151 | ``` 152 | 153 | ``` 154 | $ ./generate.sh --lab lab_scheduling --flags LOTTERY 155 | ``` 156 | 157 | 158 | 159 | This patch also includes a new system call, similar to ```wait```, but with more functionalities, in order to check the performance of our scheduling algorithms. 160 | The new system call is called ```wait2``` and returns the creation time, the running time (```rutime```), the sleeping time (```stime```) and the ready time (runnable) (```retime```) 161 | for each process, this helps a lot in understanding how a scheduling policy affects the times of every process. 
162 | For example using the PRIORITY algorithm the sleeping time of processes with a low priority (the ones which we need to run first) will be smaller than that of processes with a high priority 163 | (the ones which we want to run later) 164 | 165 | Two other important functions which can be useful to play with are 166 | 167 | ## Authors 168 | 169 | * **Marco Fontana** - *Added fcfs, sml and lottery scheduling algorithms, edited nice, added wait2 and reorganization of all scheduling policies (with descriptions and optimizations)* - [marf](https://github.com/marf) 170 | * **Nicola Bicocchi** - *Command ps, first version of nice and processes priorities* - [os_unimore](https://github.com/nbicocchi/operatingsystemsmsc) 171 | 172 | 173 | ## License 174 | 175 | This project is licensed under the MIT License 176 | 177 | ## Acknowledgments 178 | 179 | xv6 is inspired by John Lions's Commentary on UNIX 6th Edition (Peer 180 | to Peer Communications; ISBN: 1-57398-013-7; 1st edition (June 14, 181 | 2000)). See also http://pdos.csail.mit.edu/6.828/2014/xv6.html, which 182 | provides pointers to on-line resources for v6. 183 | 184 | xv6 borrows code from the following sources: 185 | JOS (asm.h, elf.h, mmu.h, bootasm.S, ide.c, console.c, and others) 186 | Plan 9 (entryother.S, mp.h, mp.c, lapic.c) 187 | FreeBSD (ioapic.c) 188 | NetBSD (console.c) 189 | 190 | The following people have made contributions: 191 | Russ Cox (context switching, locking) 192 | Cliff Frey (MP) 193 | Xiao Yu (MP) 194 | Nickolai Zeldovich 195 | Austin Clements 196 | 197 | In addition, we are grateful for the bug reports and patches contributed by 198 | Silas Boyd-Wickizer, Peter Froehlich, Shivam Handa, Anders Kaseorg, Eddie 199 | Kohler, Yandong Mao, Hitoshi Mitake, Carmi Merimovich, Joel Nider, Greg Price, 200 | Eldar Sehayek, Yongming Shen, Stephen Tu, and Zouchangwei. 201 | 202 | The code in the files that constitute xv6 is 203 | Copyright 2006-2017 Frans Kaashoek, Robert Morris, and Russ Cox. 
204 | -------------------------------------------------------------------------------- /lab_scheduling/end/proc.c: -------------------------------------------------------------------------------- 1 | #include "types.h" 2 | #include "defs.h" 3 | #include "param.h" 4 | #include "memlayout.h" 5 | #include "mmu.h" 6 | #include "x86.h" 7 | #include "proc.h" 8 | #include "spinlock.h" 9 | 10 | struct { 11 | struct spinlock lock; 12 | struct proc proc[NPROC]; 13 | } ptable; 14 | 15 | static struct proc *initproc; 16 | 17 | int nextpid = 1; 18 | extern void forkret(void); 19 | extern void trapret(void); 20 | 21 | void 22 | pinit(void) 23 | { 24 | initlock(&ptable.lock, "ptable"); 25 | } 26 | 27 | // Must be called with interrupts disabled 28 | int 29 | cpuid() { 30 | return mycpu()-cpus; 31 | } 32 | 33 | // Must be called with interrupts disabled to avoid the caller being 34 | // rescheduled between reading lapicid and running through the loop. 35 | struct cpu* 36 | mycpu(void) 37 | { 38 | int apicid, i; 39 | 40 | if(readeflags()&FL_IF) 41 | panic("mycpu called with interrupts enabled\n"); 42 | 43 | apicid = lapicid(); 44 | // APIC IDs are not guaranteed to be contiguous. Maybe we should have 45 | // a reverse map, or reserve a register to store &cpus[i]. 46 | for (i = 0; i < ncpu; ++i) { 47 | if (cpus[i].apicid == apicid) 48 | return &cpus[i]; 49 | } 50 | panic("unknown apicid\n"); 51 | } 52 | 53 | // Disable interrupts so that we are not rescheduled 54 | // while reading proc from the cpu structure 55 | struct proc* 56 | myproc(void) { 57 | struct cpu *c; 58 | struct proc *p; 59 | pushcli(); 60 | c = mycpu(); 61 | p = c->proc; 62 | popcli(); 63 | return p; 64 | } 65 | 66 | //PAGEBREAK: 32 67 | // Look in the process table for an UNUSED proc. 68 | // If found, change state to EMBRYO and initialize 69 | // state required to run in the kernel. 70 | // Otherwise return 0. 
71 | static struct proc* 72 | allocproc(void) 73 | { 74 | struct proc *p; 75 | char *sp; 76 | 77 | acquire(&ptable.lock); 78 | 79 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) 80 | if(p->state == UNUSED) 81 | goto found; 82 | 83 | release(&ptable.lock); 84 | return 0; 85 | 86 | found: 87 | p->state = EMBRYO; 88 | p->pid = nextpid++; 89 | 90 | #ifdef PRIORITY 91 | p->priority = 10; 92 | #else 93 | #ifdef SML 94 | p->priority = 2; 95 | #endif 96 | #endif 97 | 98 | p->ctime = ticks; 99 | p->retime = 0; 100 | p->rutime = 0; 101 | p->stime = 0; 102 | 103 | release(&ptable.lock); 104 | 105 | // Allocate kernel stack. 106 | if((p->kstack = kalloc()) == 0){ 107 | p->state = UNUSED; 108 | return 0; 109 | } 110 | sp = p->kstack + KSTACKSIZE; 111 | 112 | // Leave room for trap frame. 113 | sp -= sizeof *p->tf; 114 | p->tf = (struct trapframe*)sp; 115 | 116 | // Set up new context to start executing at forkret, 117 | // which returns to trapret. 118 | sp -= 4; 119 | *(uint*)sp = (uint)trapret; 120 | 121 | sp -= sizeof *p->context; 122 | p->context = (struct context*)sp; 123 | memset(p->context, 0, sizeof *p->context); 124 | p->context->eip = (uint)forkret; 125 | 126 | return p; 127 | } 128 | 129 | //PAGEBREAK: 32 130 | // Set up first user process. 
131 | void 132 | userinit(void) 133 | { 134 | struct proc *p; 135 | extern char _binary_initcode_start[], _binary_initcode_size[]; 136 | 137 | p = allocproc(); 138 | 139 | initproc = p; 140 | if((p->pgdir = setupkvm()) == 0) 141 | panic("userinit: out of memory?"); 142 | inituvm(p->pgdir, _binary_initcode_start, (int)_binary_initcode_size); 143 | p->sz = PGSIZE; 144 | p->ctime = ticks; 145 | memset(p->tf, 0, sizeof(*p->tf)); 146 | p->tf->cs = (SEG_UCODE << 3) | DPL_USER; 147 | p->tf->ds = (SEG_UDATA << 3) | DPL_USER; 148 | p->tf->es = p->tf->ds; 149 | p->tf->ss = p->tf->ds; 150 | p->tf->eflags = FL_IF; 151 | p->tf->esp = PGSIZE; 152 | p->tf->eip = 0; // beginning of initcode.S 153 | p->tickets = DEFAULT_TICKETS; // used in LOTTERY 154 | 155 | safestrcpy(p->name, "initcode", sizeof(p->name)); 156 | p->cwd = namei("/"); 157 | 158 | // this assignment to p->state lets other cores 159 | // run this process. the acquire forces the above 160 | // writes to be visible, and the lock is also needed 161 | // because the assignment might not be atomic. 162 | acquire(&ptable.lock); 163 | 164 | p->state = RUNNABLE; 165 | 166 | release(&ptable.lock); 167 | } 168 | 169 | // Grow current process's memory by n bytes. 170 | // Return 0 on success, -1 on failure. 171 | int 172 | growproc(int n) 173 | { 174 | uint sz; 175 | struct proc *curproc = myproc(); 176 | 177 | sz = curproc->sz; 178 | if(n > 0){ 179 | if((sz = allocuvm(curproc->pgdir, sz, sz + n)) == 0) 180 | return -1; 181 | } else if(n < 0){ 182 | if((sz = deallocuvm(curproc->pgdir, sz, sz + n)) == 0) 183 | return -1; 184 | } 185 | curproc->sz = sz; 186 | switchuvm(curproc); 187 | return 0; 188 | } 189 | 190 | // Create a new process copying p as the parent. 191 | // Sets up stack to return as if from system call. 192 | // Caller must set state of returned proc to RUNNABLE. 193 | int 194 | fork(void) 195 | { 196 | int i, pid; 197 | struct proc *np; 198 | struct proc *curproc = myproc(); 199 | 200 | // Allocate process. 
201 | if((np = allocproc()) == 0){ 202 | return -1; 203 | } 204 | 205 | // Copy process state from proc. 206 | if((np->pgdir = copyuvm(curproc->pgdir, curproc->sz)) == 0){ 207 | kfree(np->kstack); 208 | np->kstack = 0; 209 | np->state = UNUSED; 210 | return -1; 211 | } 212 | np->sz = curproc->sz; 213 | np->parent = curproc; 214 | *np->tf = *curproc->tf; 215 | 216 | np->tickets = DEFAULT_TICKETS; // used in LOTTERY 217 | 218 | // Clear %eax so that fork returns 0 in the child. 219 | np->tf->eax = 0; 220 | 221 | for(i = 0; i < NOFILE; i++) 222 | if(curproc->ofile[i]) 223 | np->ofile[i] = filedup(curproc->ofile[i]); 224 | np->cwd = idup(curproc->cwd); 225 | 226 | safestrcpy(np->name, curproc->name, sizeof(curproc->name)); 227 | 228 | pid = np->pid; 229 | 230 | acquire(&ptable.lock); 231 | 232 | np->state = RUNNABLE; 233 | 234 | release(&ptable.lock); 235 | 236 | return pid; 237 | } 238 | 239 | // Exit the current process. Does not return. 240 | // An exited process remains in the zombie state 241 | // until its parent calls wait() to find out it exited. 242 | void 243 | exit(void) 244 | { 245 | struct proc *curproc = myproc(); 246 | struct proc *p; 247 | int fd; 248 | 249 | if(curproc == initproc) 250 | panic("init exiting"); 251 | 252 | // Close all open files. 253 | for(fd = 0; fd < NOFILE; fd++){ 254 | if(curproc->ofile[fd]){ 255 | fileclose(curproc->ofile[fd]); 256 | curproc->ofile[fd] = 0; 257 | } 258 | } 259 | 260 | begin_op(); 261 | iput(curproc->cwd); 262 | end_op(); 263 | curproc->cwd = 0; 264 | 265 | acquire(&ptable.lock); 266 | 267 | // Parent might be sleeping in wait(). 268 | wakeup1(curproc->parent); 269 | 270 | // Pass abandoned children to init. 271 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ 272 | if(p->parent == curproc){ 273 | p->parent = initproc; 274 | if(p->state == ZOMBIE) 275 | wakeup1(initproc); 276 | } 277 | } 278 | 279 | // Jump into the scheduler, never to return. 
280 | curproc->state = ZOMBIE; 281 | sched(); 282 | panic("zombie exit"); 283 | } 284 | 285 | // Wait for a child process to exit and return its pid. 286 | // Return -1 if this process has no children. 287 | int 288 | wait(void) 289 | { 290 | struct proc *p; 291 | int havekids, pid; 292 | struct proc *curproc = myproc(); 293 | 294 | acquire(&ptable.lock); 295 | for(;;){ 296 | // Scan through table looking for exited children. 297 | havekids = 0; 298 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ 299 | if(p->parent != curproc) 300 | continue; 301 | havekids = 1; 302 | if(p->state == ZOMBIE){ 303 | // Found one. 304 | pid = p->pid; 305 | kfree(p->kstack); 306 | p->kstack = 0; 307 | freevm(p->pgdir); 308 | p->pid = 0; 309 | p->parent = 0; 310 | p->name[0] = 0; 311 | p->killed = 0; 312 | p->ctime = 0; 313 | p->state = UNUSED; 314 | release(&ptable.lock); 315 | return pid; 316 | } 317 | } 318 | 319 | // No point waiting if we don't have any children. 320 | if(!havekids || curproc->killed){ 321 | release(&ptable.lock); 322 | return -1; 323 | } 324 | 325 | // Wait for children to exit. (See wakeup1 call in proc_exit.) 326 | sleep(curproc, &ptable.lock); //DOC: wait-sleep 327 | } 328 | } 329 | 330 | int wait2(int *retime, int *rutime, int *stime) { 331 | struct proc *p; 332 | int havekids, pid; 333 | acquire(&ptable.lock); 334 | for(;;){ 335 | // Scan through table looking for zombie children. 336 | havekids = 0; 337 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ 338 | if(p->parent != myproc()) 339 | continue; 340 | havekids = 1; 341 | if(p->state == ZOMBIE){ 342 | // Found one. 
343 | *retime = p->retime; 344 | *rutime = p->rutime; 345 | *stime = p->stime; 346 | pid = p->pid; 347 | kfree(p->kstack); 348 | p->kstack = 0; 349 | freevm(p->pgdir); 350 | p->state = UNUSED; 351 | p->pid = 0; 352 | p->parent = 0; 353 | p->name[0] = 0; 354 | p->killed = 0; 355 | p->ctime = 0; 356 | p->retime = 0; 357 | p->rutime = 0; 358 | p->stime = 0; 359 | p->priority = 0; 360 | release(&ptable.lock); 361 | return pid; 362 | } 363 | } 364 | 365 | // No point waiting if we don't have any children. 366 | if(!havekids || myproc()->killed){ 367 | release(&ptable.lock); 368 | return -1; 369 | } 370 | 371 | // Wait for children to exit. (See wakeup1 call in proc_exit.) 372 | sleep(myproc(), &ptable.lock); //DOC: wait-sleep 373 | } 374 | } 375 | 376 | //PAGEBREAK: 42 377 | // Per-CPU process scheduler. 378 | // Each CPU calls scheduler() after setting itself up. 379 | // Scheduler never returns. It loops, doing: 380 | // - choose a process to run 381 | // - swtch to start running that process 382 | // - eventually that process transfers control 383 | // via swtch back to the scheduler. 384 | void 385 | scheduler(void) 386 | { 387 | struct proc *p = 0; 388 | 389 | struct cpu *c = mycpu(); 390 | c->proc = 0; 391 | 392 | for(;;) 393 | { 394 | // Enable interrupts on this processor. 395 | sti(); 396 | 397 | // Loop over process table looking for process to run. 
398 | acquire(&ptable.lock); 399 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) 400 | { 401 | 402 | #ifdef DEFAULT 403 | if(p->state != RUNNABLE) 404 | continue; 405 | #else 406 | #ifdef PRIORITY 407 | 408 | struct proc *highP = 0; 409 | struct proc *p1 = 0; 410 | 411 | if(p->state != RUNNABLE) 412 | continue; 413 | // Choose the process with highest priority (among RUNNABLEs) 414 | highP = p; 415 | for(p1 = ptable.proc; p1 < &ptable.proc[NPROC]; p1++){ 416 | if((p1->state == RUNNABLE) && (highP->priority > p1->priority)) 417 | highP = p1; 418 | } 419 | 420 | if(highP != 0) 421 | p = highP; 422 | 423 | #else 424 | #ifdef FCFS 425 | 426 | struct proc *minP = 0; 427 | 428 | if(p->state != RUNNABLE) 429 | continue; 430 | 431 | // ignore init and sh processes from FCFS 432 | if(p->pid > 1) 433 | { 434 | if (minP != 0){ 435 | // here I find the process with the lowest creation time (the first one that was created) 436 | if(p->ctime < minP->ctime) 437 | minP = p; 438 | } 439 | else 440 | minP = p; 441 | } 442 | 443 | // If I found the process which I created first and it is runnable I run it 444 | //(in the real FCFS I should not check if it is runnable, but for testing purposes I have to make this control, otherwise every time I launch 445 | // a process which does I/0 operation (every simple command) everything will be blocked 446 | if(minP != 0 && minP->state == RUNNABLE) 447 | p = minP; 448 | #else 449 | #ifdef LOTTERY 450 | 451 | if(p->state != RUNNABLE) 452 | continue; 453 | 454 | int totalT = totalTickets(); 455 | int draw = -1; 456 | 457 | if (totalT > 0 || draw <= 0) 458 | draw = random(totalT); 459 | 460 | draw = draw - p->tickets; 461 | 462 | // process with a great number of tickets has more probability to put draw to 0 or negative and execute 463 | if(draw >= 0) 464 | continue; 465 | #else 466 | #ifdef SML 467 | 468 | struct proc *foundP = 0; 469 | 470 | uint priority = 1; 471 | 472 | int index1 = 0; 473 | int index2 = 0; 474 | int index3 = 0; 475 | 
476 | foundP = findReadyProcess(&index1, &index2, &index3, &priority); 477 | if (foundP != 0) 478 | p = foundP; 479 | else{ 480 | if(p->state != RUNNABLE) 481 | continue; 482 | } 483 | 484 | #endif 485 | #endif 486 | #endif 487 | #endif 488 | #endif 489 | 490 | if(p != 0) 491 | { 492 | 493 | // Switch to chosen process. It is the process's job 494 | // to release ptable.lock and then reacquire it 495 | // before jumping back to us. 496 | c->proc = p; 497 | switchuvm(p); 498 | p->state = RUNNING; 499 | 500 | swtch(&(c->scheduler), p->context); 501 | switchkvm(); 502 | 503 | // Process is done running for now. 504 | // It should have changed its p->state before coming back. 505 | c->proc = 0; 506 | } 507 | } 508 | 509 | release(&ptable.lock); 510 | } 511 | } 512 | 513 | /*void 514 | scheduler(void) 515 | { 516 | struct proc *p; 517 | struct cpu *c = mycpu(); 518 | c->proc = 0; 519 | 520 | for(;;){ 521 | // Enable interrupts on this processor. 522 | sti(); 523 | 524 | // Loop over process table looking for process to run. 525 | acquire(&ptable.lock); 526 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ 527 | if(p->state != RUNNABLE) 528 | continue; 529 | 530 | // Switch to chosen process. It is the process's job 531 | // to release ptable.lock and then reacquire it 532 | // before jumping back to us. 533 | c->proc = p; 534 | switchuvm(p); 535 | p->state = RUNNING; 536 | 537 | swtch(&(c->scheduler), p->context); 538 | switchkvm(); 539 | 540 | // Process is done running for now. 541 | // It should have changed its p->state before coming back. 542 | c->proc = 0; 543 | } 544 | release(&ptable.lock); 545 | 546 | } 547 | }*/ 548 | 549 | 550 | // Enter scheduler. Must hold only ptable.lock 551 | // and have changed proc->state. Saves and restores 552 | // intena because intena is a property of this 553 | // kernel thread, not this CPU. 
It should 554 | // be proc->intena and proc->ncli, but that would 555 | // break in the few places where a lock is held but 556 | // there's no process. 557 | void 558 | sched(void) 559 | { 560 | int intena; 561 | struct proc *p = myproc(); 562 | 563 | if(!holding(&ptable.lock)) 564 | panic("sched ptable.lock"); 565 | if(mycpu()->ncli != 1) 566 | panic("sched locks"); 567 | if(p->state == RUNNING) 568 | panic("sched running"); 569 | if(readeflags()&FL_IF) 570 | panic("sched interruptible"); 571 | intena = mycpu()->intena; 572 | swtch(&p->context, mycpu()->scheduler); 573 | mycpu()->intena = intena; 574 | } 575 | 576 | // A fork child's very first scheduling by scheduler() 577 | // will swtch here. "Return" to user space. 578 | void 579 | forkret(void) 580 | { 581 | static int first = 1; 582 | // Still holding ptable.lock from scheduler. 583 | release(&ptable.lock); 584 | 585 | if (first) { 586 | // Some initialization functions must be run in the context 587 | // of a regular process (e.g., they call sleep), and thus cannot 588 | // be run from main(). 589 | first = 0; 590 | iinit(ROOTDEV); 591 | initlog(ROOTDEV); 592 | } 593 | 594 | // Return to "caller", actually trapret (see allocproc). 595 | } 596 | 597 | // Give up the CPU for one scheduling round. 598 | void 599 | yield(void) 600 | { 601 | acquire(&ptable.lock); //DOC: yieldlock 602 | myproc()->state = RUNNABLE; 603 | sched(); 604 | release(&ptable.lock); 605 | } 606 | 607 | 608 | // Atomically release lock and sleep on chan. 609 | // Reacquires lock when awakened. 610 | void 611 | sleep(void *chan, struct spinlock *lk) 612 | { 613 | struct proc *p = myproc(); 614 | 615 | if(p == 0) 616 | panic("sleep"); 617 | 618 | if(lk == 0) 619 | panic("sleep without lk"); 620 | 621 | // Must acquire ptable.lock in order to 622 | // change p->state and then call sched. 
623 | // Once we hold ptable.lock, we can be 624 | // guaranteed that we won't miss any wakeup 625 | // (wakeup runs with ptable.lock locked), 626 | // so it's okay to release lk. 627 | if(lk != &ptable.lock){ //DOC: sleeplock0 628 | acquire(&ptable.lock); //DOC: sleeplock1 629 | release(lk); 630 | } 631 | // Go to sleep. 632 | p->chan = chan; 633 | p->state = SLEEPING; 634 | 635 | sched(); 636 | 637 | // Tidy up. 638 | p->chan = 0; 639 | 640 | // Reacquire original lock. 641 | if(lk != &ptable.lock){ //DOC: sleeplock2 642 | release(&ptable.lock); 643 | acquire(lk); 644 | } 645 | } 646 | 647 | //PAGEBREAK! 648 | // Wake up all processes sleeping on chan. 649 | // The ptable lock must be held. 650 | static void 651 | wakeup1(void *chan) 652 | { 653 | struct proc *p; 654 | 655 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) 656 | if(p->state == SLEEPING && p->chan == chan) 657 | p->state = RUNNABLE; 658 | } 659 | 660 | // Wake up all processes sleeping on chan. 661 | void 662 | wakeup(void *chan) 663 | { 664 | acquire(&ptable.lock); 665 | wakeup1(chan); 666 | release(&ptable.lock); 667 | } 668 | 669 | // Kill the process with the given pid. 670 | // Process won't exit until it returns 671 | // to user space (see trap in trap.c). 672 | int 673 | kill(int pid) 674 | { 675 | struct proc *p; 676 | 677 | acquire(&ptable.lock); 678 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ 679 | if(p->pid == pid){ 680 | p->killed = 1; 681 | // Wake process from sleep if necessary. 682 | if(p->state == SLEEPING) 683 | p->state = RUNNABLE; 684 | release(&ptable.lock); 685 | return 0; 686 | } 687 | } 688 | release(&ptable.lock); 689 | return -1; 690 | } 691 | 692 | //PAGEBREAK: 36 693 | // Print a process listing to console. For debugging. 694 | // Runs when user types ^P on console. 695 | // No lock to avoid wedging a stuck machine further. 
696 | void 697 | procdump(void) 698 | { 699 | static char *states[] = { 700 | [UNUSED] "unused", 701 | [EMBRYO] "embryo", 702 | [SLEEPING] "sleep ", 703 | [RUNNABLE] "runble", 704 | [RUNNING] "run ", 705 | [ZOMBIE] "zombie" 706 | }; 707 | int i; 708 | struct proc *p; 709 | char *state; 710 | uint pc[10]; 711 | 712 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ 713 | if(p->state == UNUSED) 714 | continue; 715 | if(p->state >= 0 && p->state < NELEM(states) && states[p->state]) 716 | state = states[p->state]; 717 | else 718 | state = "???"; 719 | cprintf("%d %s %s", p->pid, state, p->name); 720 | if(p->state == SLEEPING){ 721 | getcallerpcs((uint*)p->context->ebp+2, pc); 722 | for(i=0; i<10 && pc[i] != 0; i++) 723 | cprintf(" %p", pc[i]); 724 | } 725 | cprintf("\n"); 726 | } 727 | } 728 | 729 | struct proc *getptable_proc(void) { 730 | return ptable.proc; 731 | } 732 | 733 | // Change Process priority 734 | int 735 | chpr(int pid, int priority) 736 | { 737 | struct proc *p; 738 | 739 | acquire(&ptable.lock); 740 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ 741 | if(p->pid == pid) { 742 | p->priority = priority; 743 | break; 744 | } 745 | } 746 | release(&ptable.lock); 747 | 748 | return pid; 749 | } 750 | 751 | // Change Process tickets 752 | int 753 | chtickets(int pid, int tickets) 754 | { 755 | struct proc *p; 756 | 757 | acquire(&ptable.lock); 758 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ 759 | if(p->pid == pid) { 760 | p->tickets = tickets; 761 | break; 762 | } 763 | } 764 | release(&ptable.lock); 765 | 766 | return pid; 767 | } 768 | 769 | /* 770 | This method will run every clock tick and update the statistic fields for each proc 771 | */ 772 | void updatestatistics() { 773 | struct proc *p; 774 | acquire(&ptable.lock); 775 | for(p = ptable.proc; p < &ptable.proc[NPROC]; p++){ 776 | switch(p->state) { 777 | case SLEEPING: 778 | p->stime++; 779 | break; 780 | case RUNNABLE: 781 | p->retime++; 782 | break; 783 | case RUNNING: 784 | 
p->rutime++; 785 | break; 786 | default: 787 | ; 788 | } 789 | } 790 | release(&ptable.lock); 791 | } 792 | 793 | /* This method is used to generate a random number, between 0 and M 794 | This is a modified version of the LFSR alogrithm 795 | found here: http://goo.gl/At4AIC */ 796 | int 797 | random(int max) { 798 | 799 | if(max <= 0) { 800 | return 1; 801 | } 802 | 803 | static int z1 = 12345; // 12345 for rest of zx 804 | static int z2 = 12345; // 12345 for rest of zx 805 | static int z3 = 12345; // 12345 for rest of zx 806 | static int z4 = 12345; // 12345 for rest of zx 807 | 808 | int b; 809 | b = (((z1 << 6) ^ z1) >> 13); 810 | z1 = (((z1 & 4294967294) << 18) ^ b); 811 | b = (((z2 << 2) ^ z2) >> 27); 812 | z2 = (((z2 & 4294967288) << 2) ^ b); 813 | b = (((z3 << 13) ^ z3) >> 21); 814 | z3 = (((z3 & 4294967280) << 7) ^ b); 815 | b = (((z4 << 3) ^ z4) >> 12); 816 | z4 = (((z4 & 4294967168) << 13) ^ b); 817 | 818 | // if we have an argument, then we can use it 819 | int rand = ((z1 ^ z2 ^ z3 ^ z4)) % max; 820 | 821 | if(rand < 0) { 822 | rand = rand * -1; 823 | } 824 | 825 | return rand; 826 | } 827 | 828 | /* This method counts the total number of tickets that the runnable processes have 829 | (the lottery is done only of the process which can execute) */ 830 | int 831 | totalTickets(void) { 832 | 833 | struct proc *p; 834 | int total = 0; 835 | for (p = ptable.proc; p < &ptable.proc[NPROC]; p++) { 836 | if (p->state == RUNNABLE) { 837 | total += p->tickets; 838 | } 839 | } 840 | 841 | return total; 842 | } 843 | 844 | #ifdef SML 845 | /* 846 | this method will find the next process to run 847 | */ 848 | struct proc* findReadyProcess(int *index1, int *index2, int *index3, uint *priority) { 849 | int i; 850 | struct proc* proc2; 851 | notfound: 852 | for (i = 0; i < NPROC; i++) { 853 | switch(*priority) { 854 | case 1: 855 | proc2 = &ptable.proc[(*index1 + i) % NPROC]; 856 | if (proc2->state == RUNNABLE && proc2->priority == *priority) { 857 | *index1 = (*index1 
+ 1 + i) % NPROC; 858 | return proc2; // found a runnable process with appropriate priority 859 | } 860 | case 2: 861 | proc2 = &ptable.proc[(*index2 + i) % NPROC]; 862 | if (proc2->state == RUNNABLE && proc2->priority == *priority) { 863 | *index2 = (*index2 + 1 + i) % NPROC; 864 | return proc2; // found a runnable process with appropriate priority 865 | } 866 | case 3: 867 | proc2 = &ptable.proc[(*index3 + i) % NPROC]; 868 | if (proc2->state == RUNNABLE && proc2->priority == *priority){ 869 | *index3 = (*index3 + 1 + i) % NPROC; 870 | return proc2; // found a runnable process with appropriate priority 871 | } 872 | } 873 | } 874 | if (*priority == 3) {//did not find any process on any of the prorities 875 | *priority = 3; 876 | return 0; 877 | } 878 | else { 879 | *priority += 1; //will try to find a process at a lower priority (ighter value of priority) 880 | goto notfound; 881 | } 882 | return 0; 883 | } 884 | #endif 885 | --------------------------------------------------------------------------------