Index: linux-2.6.8.1/include/linux/sched.h
===================================================================
--- linux-2.6.8.1.orig/include/linux/sched.h	2004-08-15 15:01:13.887305672 +1000
+++ linux-2.6.8.1/include/linux/sched.h	2004-08-15 15:01:15.117113043 +1000
@@ -127,9 +127,10 @@ extern unsigned long nr_iowait(void);
 #define SCHED_FIFO	1
 #define SCHED_RR	2
 #define SCHED_BATCH	3
+#define SCHED_ISO	4
 
 #define SCHED_MIN	0
-#define SCHED_MAX	3
+#define SCHED_MAX	4
 
 #define SCHED_RANGE(policy)	((policy) >= SCHED_MIN && \
 				(policy) <= SCHED_MAX)
@@ -312,6 +313,7 @@ struct signal_struct {
 
 #define rt_task(p)		((p)->prio < MAX_RT_PRIO)
 #define batch_task(p)		((p)->policy == SCHED_BATCH)
+#define iso_task(p)		((p)->policy == SCHED_ISO)
 
 /*
  * Some day this will be a full-fledged user tracking system..
Index: linux-2.6.8.1/kernel/sched.c
===================================================================
--- linux-2.6.8.1.orig/kernel/sched.c	2004-08-15 15:01:13.890305202 +1000
+++ linux-2.6.8.1/kernel/sched.c	2004-08-15 15:01:53.693073042 +1000
@@ -247,6 +247,8 @@ static unsigned int burst(task_t *p)
 {
 	if (likely(!rt_task(p))) {
 		unsigned int task_user_prio = TASK_USER_PRIO(p);
+		if (iso_task(p))
+			task_user_prio /= 2;
 		return 39 - task_user_prio;
 	} else
 		return p->burst;
@@ -285,6 +287,18 @@ static unsigned int slice(task_t *p)
  */
 int sched_interactive = 1;
 
+static int rr_interval(task_t * p)
+{
+	int rr_interval = RR_INTERVAL();
+	if (batch_task(p))
+		rr_interval *= 10;
+	else if (iso_task(p))
+		rr_interval /= 2;
+	if (!rr_interval)
+		rr_interval = 1;
+	return rr_interval;
+}
+
 /*
  * effective_prio - dynamic priority dependent on burst.
  * The priority normally decreases by one each RR_INTERVAL.
@@ -293,7 +307,7 @@ int sched_interactive = 1;
 static int effective_prio(task_t *p)
 {
-	int prio;
+	int prio, rr;
 	unsigned int full_slice, used_slice, first_slice;
 	unsigned int best_burst;
 
 	if (rt_task(p))
@@ -311,20 +325,22 @@ static int effective_prio(task_t *p)
 	}
 
 	best_burst = burst(p);
-	if (p->flags & PF_UISLEEP && sched_interactive && best_burst && p->mm)
-		best_burst--;
+	if (p->flags & PF_UISLEEP && sched_interactive && best_burst &&
+	    p->mm && !iso_task(p))
+		best_burst--;
 	full_slice = slice(p);
+	rr = rr_interval(p);
 	used_slice = full_slice - p->slice;
 	if (p->burst > best_burst)
 		p->burst = best_burst;
-	first_slice = RR_INTERVAL();
-	if (sched_interactive && !sched_compute && p->mm)
+	first_slice = rr;
+	if (sched_interactive && !sched_compute && p->mm && !iso_task(p))
 		first_slice *= (p->burst + 1);
 	prio = MAX_PRIO - 2 - best_burst;
 
 	if (used_slice < first_slice)
 		return prio;
-	prio += 1 + (used_slice - first_slice) / RR_INTERVAL();
+	prio += 1 + (used_slice - first_slice) / rr;
 	if (prio > MAX_PRIO - 2)
 		prio = MAX_PRIO - 2;
 	return prio;
@@ -338,7 +354,7 @@ static int effective_prio(task_t *p)
 static void recalc_task_prio(task_t *p, unsigned long long now)
 {
 	unsigned long sleep_time = now - p->timestamp;
-	unsigned int rr = RR_INTERVAL();
+	unsigned int rr = rr_interval(p);
 	if (p->flags & PF_FORKED ||
 		(NS_TO_JIFFIES(p->runtime + sleep_time) < rr / 2 ||
 		((!sched_interactive || sched_compute) &&
@@ -382,7 +398,7 @@ static void activate_task(task_t *p, run
 	recalc_task_prio(p, now);
 	p->prio = effective_prio(p);
 	p->flags &= ~PF_UISLEEP;
-	p->time_slice = RR_INTERVAL();
+	p->time_slice = rr_interval(p);
 	if (batch_task(p))
 		p->time_slice = p->slice;
 	p->timestamp = now;
@@ -1803,19 +1819,19 @@ void scheduler_tick(int user_ticks, int
 		dec_burst(p);
 		p->slice = slice(p);
 		p->prio = effective_prio(p);
-		p->time_slice = RR_INTERVAL();
+		p->time_slice = rr_interval(p);
 		enqueue_task(p, rq);
 		goto out_unlock;
 	}
 	/*
 	 * Tasks that run out of time_slice but still have slice left get
-	 * requeued with a lower priority && RR_INTERVAL time_slice.
+	 * requeued with a lower priority && rr_interval time_slice.
 	 */
 	if (!--p->time_slice) {
 		set_tsk_need_resched(p);
 		dequeue_task(p, rq);
 		p->prio = effective_prio(p);
-		p->time_slice = RR_INTERVAL();
+		p->time_slice = rr_interval(p);
 		enqueue_task(p, rq);
 		goto out_unlock;
 	}
@@ -2457,7 +2473,11 @@ static int setscheduler(pid_t pid, int p
 
 	retval = -EPERM;
 	if (SCHED_RT(policy) && !capable(CAP_SYS_NICE))
-		goto out_unlock;
+		/*
+		 * If the caller requested an RT policy without having the
+		 * necessary rights, we downgrade the policy to SCHED_ISO.
+		 */
+		policy = SCHED_ISO;
 	if ((current->euid != p->euid) && (current->euid != p->uid) &&
 	    !capable(CAP_SYS_NICE))
 		goto out_unlock;
@@ -2792,6 +2812,7 @@ asmlinkage long sys_sched_get_priority_m
 		break;
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
+	case SCHED_ISO:
 		ret = 0;
 		break;
 	}
@@ -2816,6 +2837,7 @@ asmlinkage long sys_sched_get_priority_m
 		break;
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
+	case SCHED_ISO:
 		ret = 0;
 	}
 	return ret;
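
For reference, a minimal userspace sketch (not part of the patch) of how a task would request the new policy once this is applied. It assumes the SCHED_ISO value of 4 from the sched.h hunk above, since glibc headers do not define it, and passes priority 0 because the sys_sched_get_priority_max/min hunks report a 0..0 range for SCHED_ISO. With the setscheduler() change, an unprivileged request for SCHED_FIFO or SCHED_RR should also end up as SCHED_ISO instead of failing with -EPERM, which sched_getscheduler() will show.

#include <sched.h>
#include <stdio.h>

#ifndef SCHED_ISO
#define SCHED_ISO 4	/* value added to include/linux/sched.h by this patch */
#endif

int main(void)
{
	/* the get_priority_{max,min} hunks report 0 for SCHED_ISO */
	struct sched_param param = { .sched_priority = 0 };

	if (sched_setscheduler(0, SCHED_ISO, &param) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("policy is now %d\n", sched_getscheduler(0));
	return 0;
}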