--- linux-2.6.1/include/linux/sched.h	2004-01-27 17:05:54.108844000 +1100
+++ linux-2.6.1-ck1/include/linux/sched.h	2004-01-27 17:07:51.949929432 +1100
@@ -127,9 +127,10 @@ extern unsigned long nr_iowait(void);
 #define SCHED_FIFO		1
 #define SCHED_RR		2
 #define SCHED_BATCH		3
+#define SCHED_ISO		4
 
 #define SCHED_MIN		0
-#define SCHED_MAX		3
+#define SCHED_MAX		4
 
 #define SCHED_RANGE(policy)	((policy) >= SCHED_MIN && \
 				(policy) <= SCHED_MAX)
@@ -294,6 +295,7 @@ struct signal_struct {
 
 #define rt_task(p)		((p)->prio < MAX_RT_PRIO)
 #define batch_task(p)		((p)->policy == SCHED_BATCH)
+#define iso_task(p)		((p)->policy == SCHED_ISO)
 
 /*
  * Some day this will be a full-fledged user tracking system..
--- linux-2.6.1/kernel/sched.c	2004-01-27 17:18:54.620188240 +1100
+++ linux-2.6.1-ck1/kernel/sched.c	2004-01-27 17:24:18.313979304 +1100
@@ -1498,9 +1498,31 @@ void scheduler_tick(int user_ticks, int
 			enqueue_task(p, rq->batch);
 		else {
 			if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
-				enqueue_task(p, rq->expired);
-				if (p->static_prio < rq->best_expired_prio)
-					rq->best_expired_prio = p->static_prio;
+				if (iso_task(p)) {
+					/*
+					 * If a SCHED_ISO task should expire,
+					 * a task from the expired list is
+					 * pulled to the active list instead.
+					 */
+					prio_array_t *array;
+					array = rq->expired;
+					if (array->nr_active) {
+						task_t *reactivate;
+						struct list_head *queue;
+						int idx;
+
+						idx = sched_find_first_bit(array->bitmap);
+						queue = array->queue + idx;
+						reactivate = list_entry(queue->next, task_t, run_list);
+						dequeue_task(reactivate, array);
+						enqueue_task(reactivate, rq->active);
+					}
+					enqueue_task(p, rq->active);
+				} else {
+					enqueue_task(p, rq->expired);
+					if (p->static_prio < rq->best_expired_prio)
+						rq->best_expired_prio = p->static_prio;
+				}
 			} else
 				enqueue_task(p, rq->active);
 		}
@@ -2186,8 +2208,13 @@ static int setscheduler(pid_t pid, int p
 		goto out_unlock;
 
 	retval = -EPERM;
-	if (SCHED_RT(policy) && !capable(CAP_SYS_NICE))
-		goto out_unlock;
+	if (SCHED_RT(policy) && !capable(CAP_SYS_NICE))
+		/*
+		 * If the caller requested an RT policy without having the
+		 * necessary rights, we downgrade the policy to SCHED_ISO.
+		 */
+		policy = SCHED_ISO;
+
 	if ((current->euid != p->euid) && (current->euid != p->uid) &&
 			!capable(CAP_SYS_NICE))
 		goto out_unlock;
@@ -2220,6 +2247,13 @@ static int setscheduler(pid_t pid, int p
 		} else if (TASK_PREEMPTS_CURR(p, rq))
 			resched_task(rq->curr);
 	}
+	if (policy == SCHED_ISO) {
+		/*
+		 * SCHED_ISO tasks get maximum interactivity.
+		 */
+		p->sleep_avg = NS_MAX_SLEEP_AVG;
+		p->interactive_credit = CREDIT_LIMIT + 1;
+	}
 
 out_unlock:
 	task_rq_unlock(rq, &flags);
@@ -2512,6 +2546,7 @@ asmlinkage long sys_sched_get_priority_m
 		break;
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
+	case SCHED_ISO:
 		ret = 0;
 		break;
 	}
@@ -2536,6 +2571,7 @@ asmlinkage long sys_sched_get_priority_m
 		break;
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
+	case SCHED_ISO:
 		ret = 0;
 	}
 	return ret;
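
A minimal user-space sketch of the downgrade path the setscheduler() hunk above introduces: an unprivileged task asks for SCHED_RR and, rather than getting -EPERM, ends up scheduled as SCHED_ISO. SCHED_ISO is not exported by glibc's <sched.h>, so the example defines it locally using the value 4 from the sched.h hunk; whether the kernel accepts this particular sched_priority for the downgraded policy depends on validation code not shown in this diff, so treat it as illustrative only.

/*
 * Illustrative only: unprivileged request for SCHED_RR, which this
 * patch downgrades to SCHED_ISO instead of failing with -EPERM.
 * SCHED_ISO (4) is taken from the patched include/linux/sched.h;
 * glibc headers do not define it.
 */
#include <sched.h>
#include <stdio.h>

#ifndef SCHED_ISO
#define SCHED_ISO 4
#endif

int main(void)
{
	struct sched_param param = { .sched_priority = 1 };

	if (sched_setscheduler(0, SCHED_RR, &param) == -1) {
		perror("sched_setscheduler");
		return 1;
	}

	if (sched_getscheduler(0) == SCHED_ISO)
		printf("SCHED_RR request was downgraded to SCHED_ISO\n");

	return 0;
}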