diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-rc3-s6.EB/include/linux/sched.h linux-2.6.7-rc3-s6.EI/include/linux/sched.h
--- linux-2.6.7-rc3-s6.EB/include/linux/sched.h	2004-06-12 22:42:51.190393739 +1000
+++ linux-2.6.7-rc3-s6.EI/include/linux/sched.h	2004-06-12 22:57:20.053705440 +1000
@@ -127,9 +127,10 @@ extern unsigned long nr_iowait(void);
 #define SCHED_FIFO	1
 #define SCHED_RR	2
 #define SCHED_BATCH	3
+#define SCHED_ISO	4
 
 #define SCHED_MIN	0
-#define SCHED_MAX	3
+#define SCHED_MAX	4
 
 #define SCHED_RANGE(policy)	((policy) >= SCHED_MIN && \
 				(policy) <= SCHED_MAX)
@@ -317,6 +318,7 @@ struct signal_struct {
 
 #define rt_task(p)	((p)->prio < MAX_RT_PRIO)
 #define batch_task(p)	((p)->policy == SCHED_BATCH)
+#define iso_task(p)	((p)->policy == SCHED_ISO)
 
 /*
  * Some day this will be a full-fledged user tracking system..
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-rc3-s6.EB/kernel/sched.c linux-2.6.7-rc3-s6.EI/kernel/sched.c
--- linux-2.6.7-rc3-s6.EB/kernel/sched.c	2004-06-12 22:55:20.493246276 +1000
+++ linux-2.6.7-rc3-s6.EI/kernel/sched.c	2004-06-12 22:57:20.054705285 +1000
@@ -188,6 +188,18 @@ static inline void rq_unlock(runqueue_t
 	spin_unlock_irq(&rq->lock);
 }
 
+static int rr_interval(task_t * p)
+{
+	int rr_interval = RR_INTERVAL;
+	if (batch_task(p))
+		rr_interval *= 10;
+	else if (iso_task(p))
+		rr_interval /= 2;
+	if (!rr_interval)
+		rr_interval = 1;
+	return rr_interval;
+}
+
 static int task_preempts_curr(struct task_struct *p, runqueue_t *rq)
 {
 	if (p->prio >= rq->curr->prio)
@@ -257,6 +269,8 @@ static unsigned int burst(task_t *p)
 	if (rt_task(p))
 		return p->burst;
 	task_user_prio = TASK_USER_PRIO(p);
+	if (iso_task(p))
+		task_user_prio /= 2;
 	if (likely(task_user_prio < 40))
 		return 39 - task_user_prio;
 	else
@@ -299,7 +313,7 @@ int interactive = 1;
  */
static int effective_prio(task_t *p)
 {
-	int prio;
+	int prio, rr;
 	unsigned int full_slice, used_slice, first_slice;
 	unsigned int best_burst;
 	if (rt_task(p))
@@ -309,10 +323,11 @@ static int effective_prio(task_t *p)
 
 	best_burst = burst(p);
 	full_slice = slice(p);
+	rr = rr_interval(p);
 	used_slice = full_slice - p->slice;
 	if (p->burst > best_burst)
 		p->burst = best_burst;
-	first_slice = RR_INTERVAL;
+	first_slice = rr;
 	if (interactive && !compute)
 		first_slice *= (p->burst + 1);
 	prio = MAX_PRIO - 2 - best_burst;
@@ -320,7 +335,7 @@ static int effective_prio(task_t *p)
 	if (used_slice < first_slice)
 		return prio;
 	if (p->mm)
-		prio += 1 + (used_slice - first_slice) / RR_INTERVAL;
+		prio += 1 + (used_slice - first_slice) / rr;
 	if (prio > MAX_PRIO - 2)
 		prio = MAX_PRIO - 2;
 	return prio;
@@ -331,7 +346,7 @@ static void recalc_task_prio(task_t *p,
 	unsigned long sleep_time = now - p->timestamp;
 	unsigned long run_time = NS_TO_JIFFIES(p->runtime);
 	unsigned long total_run = NS_TO_JIFFIES(p->totalrun) + run_time;
-	if (!run_time && NS_TO_JIFFIES(p->runtime + sleep_time) < RR_INTERVAL &&
+	if (!run_time && NS_TO_JIFFIES(p->runtime + sleep_time) < rr_interval(p) &&
 		!batch_task(p)) {
 			if (p->slice - total_run < 1) {
 				p->totalrun = 0;
@@ -364,7 +379,7 @@ static void activate_task(task_t *p, run
 	p->slice = slice(p);
 	recalc_task_prio(p, now);
 	p->prio = effective_prio(p);
-	p->time_slice = RR_INTERVAL;
+	p->time_slice = rr_interval(p);
 	if (batch_task(p))
 		p->time_slice *= 10;
 	p->timestamp = now;
@@ -1757,19 +1772,19 @@ void scheduler_tick(int user_ticks, int
 		dec_burst(p);
 		p->slice = slice(p);
 		p->prio = effective_prio(p);
-		p->time_slice = RR_INTERVAL;
+		p->time_slice = rr_interval(p);
 		enqueue_task(p, rq);
 		goto out_unlock;
 	}
 	/*
 	 * Tasks that run out of time_slice but still have slice left get
-	 * requeued with a lower priority && RR_INTERVAL time_slice.
+	 * requeued with a lower priority && rr_interval time_slice.
 	 */
 	if (!--p->time_slice) {
 		set_tsk_need_resched(p);
 		dequeue_task(p, rq);
 		p->prio = effective_prio(p);
-		p->time_slice = RR_INTERVAL;
+		p->time_slice = rr_interval(p);
 		enqueue_task(p, rq);
 		goto out_unlock;
 	}
@@ -2405,7 +2420,11 @@ static int setscheduler(pid_t pid, int p
 	retval = -EPERM;
 	if (SCHED_RT(policy) && !capable(CAP_SYS_NICE))
-		goto out_unlock;
+		/*
+		 * If the caller requested an RT policy without having the
+		 * necessary rights, we downgrade the policy to SCHED_ISO.
+		 */
+		policy = SCHED_ISO;
 	if ((current->euid != p->euid) && (current->euid != p->uid) &&
 	    !capable(CAP_SYS_NICE))
 		goto out_unlock;
 
@@ -2723,6 +2742,7 @@ asmlinkage long sys_sched_get_priority_m
 		break;
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
+	case SCHED_ISO:
 		ret = 0;
 		break;
 	}
@@ -2747,6 +2767,7 @@ asmlinkage long sys_sched_get_priority_m
 		break;
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
+	case SCHED_ISO:
 		ret = 0;
 	}
 	return ret;
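For reference, this is how a userspace program could request the new policy once the
patch is applied. A minimal sketch, not part of the patch: it assumes the SCHED_ISO
value of 4 defined above, and since glibc's <sched.h> knows nothing about SCHED_ISO,
the example defines the constant itself.

/*
 * Minimal sketch: request SCHED_ISO for the current task. The value 4
 * comes from the patch above; it is not defined by glibc's <sched.h>,
 * so we define it here ourselves.
 */
#include <sched.h>
#include <stdio.h>

#ifndef SCHED_ISO
#define SCHED_ISO 4
#endif

int main(void)
{
	struct sched_param sp = { .sched_priority = 0 };

	/*
	 * SCHED_ISO is not a realtime policy, so sched_priority must be 0
	 * (sys_sched_get_priority_max/min both report 0 for it above).
	 */
	if (sched_setscheduler(0, SCHED_ISO, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("policy is now %d\n", sched_getscheduler(0));
	return 0;
}

Note that with the setscheduler() change above, an unprivileged SCHED_FIFO or
SCHED_RR request no longer fails with -EPERM but is silently downgraded to
SCHED_ISO, so callers cannot detect missing CAP_SYS_NICE from the return value
alone.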