Index: linux-2.6.7-ck2pre/include/linux/sched.h
===================================================================
--- linux-2.6.7-ck2pre.orig/include/linux/sched.h	2004-06-25 22:41:58.697151313 +1000
+++ linux-2.6.7-ck2pre/include/linux/sched.h	2004-06-25 22:42:38.471921459 +1000
@@ -558,6 +558,8 @@
 #define PF_SWAPOFF	0x00080000	/* I am in swapoff */
 #define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
 #define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
+#define PF_FORKED	0x00400000	/* I have just forked */
+#define PF_PREEMPTED	0x00800000	/* I have just been preempted */
 
 #ifdef CONFIG_SMP
 #define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
Index: linux-2.6.7-ck2pre/kernel/sched.c
===================================================================
--- linux-2.6.7-ck2pre.orig/kernel/sched.c	2004-06-25 22:42:16.324390389 +1000
+++ linux-2.6.7-ck2pre/kernel/sched.c	2004-06-25 22:42:38.473921146 +1000
@@ -206,8 +206,10 @@
 		return 0;
 	if (!compute || rq->cache_ticks >= cache_decay_ticks ||
 		rt_task(p) || !p->mm || rq->curr == rq->idle ||
-		(batch_task(rq->curr) && !batch_task(p)))
+		(batch_task(rq->curr))) {
+			rq->curr->flags |= PF_PREEMPTED;
 			return 1;
+	}
 	rq->preempted = 1;
 	return 0;
 }
@@ -229,7 +231,11 @@
 
 static void enqueue_task(struct task_struct *p, runqueue_t *rq)
 {
-	list_add_tail(&p->run_list, rq->queue + p->prio);
+	if (rq->curr->flags & PF_PREEMPTED) {
+		rq->curr->flags &= ~PF_PREEMPTED;
+		list_add(&p->run_list, rq->queue + p->prio);
+	} else
+		list_add_tail(&p->run_list, rq->queue + p->prio);
 	__set_bit(p->prio, rq->bitmap);
 }
 
@@ -334,8 +340,7 @@
 	if (used_slice < first_slice)
 		return prio;
 
-	if (p->mm)
-		prio += 1 + (used_slice - first_slice) / rr;
+	prio += 1 + (used_slice - first_slice) / rr;
 	if (prio > MAX_PRIO - 2)
 		prio = MAX_PRIO - 2;
 	return prio;
@@ -346,15 +351,16 @@
 	unsigned long sleep_time = now - p->timestamp;
 	unsigned long run_time = NS_TO_JIFFIES(p->runtime);
 	unsigned long total_run = NS_TO_JIFFIES(p->totalrun) + run_time;
-	if (!run_time && NS_TO_JIFFIES(p->runtime + sleep_time) < rr_interval(p) &&
-		!batch_task(p)) {
-		if (p->slice - total_run < 1) {
-			p->totalrun = 0;
-			dec_burst(p);
-		} else {
-			p->totalrun += p->runtime;
-			p->slice -= NS_TO_JIFFIES(p->totalrun);
-		}
+	if ((!run_time && NS_TO_JIFFIES(p->runtime + sleep_time) <
+		rr_interval(p)) || p->flags & PF_FORKED) {
+			p->flags &= ~PF_FORKED;
+			if (p->slice - total_run < 1) {
+				p->totalrun = 0;
+				dec_burst(p);
+			} else {
+				p->totalrun += p->runtime;
+				p->slice -= NS_TO_JIFFIES(p->totalrun);
+			}
 	} else {
 		inc_burst(p);
 		p->runtime = 0;
@@ -780,13 +786,14 @@
 	unsigned long flags;
 	runqueue_t *rq = task_rq_lock(current, &flags);
 
-	// Forked process gets no burst to prevent fork bombs.
+	//Forked process gets no burst to prevent fork bombs.
 	p->burst = 0;
 
 	BUG_ON(p->state != TASK_RUNNING);
 	set_task_cpu(p, smp_processor_id());
 
 	__activate_task(p, rq);
+	current->flags |= PF_FORKED;
 
 	task_rq_unlock(rq, &flags);
 }
@@ -2654,11 +2661,11 @@
 	runqueue_t *rq = this_rq_lock();
 
 	dequeue_task(current, rq);
-	current->slice = RR_INTERVAL;
-	current->time_slice = current->slice;
+	current->slice = slice(current);
+	current->time_slice = RR_INTERVAL;
 	if (!rt_task(current) && !batch_task(current))
 		current->prio = MAX_PRIO - 2;
-	inc_burst(current);
+	current->burst = 0;
 	enqueue_task(current, rq);
 
 	/*