diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-rc3-s6.ER/include/linux/init_task.h linux-2.6.7-rc3-s6.EB/include/linux/init_task.h
--- linux-2.6.7-rc3-s6.ER/include/linux/init_task.h	2004-06-12 22:42:08.929942930 +1000
+++ linux-2.6.7-rc3-s6.EB/include/linux/init_task.h	2004-06-12 22:42:51.189393894 +1000
@@ -71,8 +71,8 @@ extern struct group_info init_groups;
 	.usage		= ATOMIC_INIT(2),		\
 	.flags		= 0,				\
 	.lock_depth	= -1,				\
-	.prio		= MAX_PRIO-20,			\
-	.static_prio	= MAX_PRIO-20,			\
+	.prio		= MAX_PRIO-21,			\
+	.static_prio	= MAX_PRIO-21,			\
 	.policy		= SCHED_NORMAL,			\
 	.cpus_allowed	= CPU_MASK_ALL,			\
 	.mm		= NULL,				\
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-rc3-s6.ER/include/linux/sched.h linux-2.6.7-rc3-s6.EB/include/linux/sched.h
--- linux-2.6.7-rc3-s6.ER/include/linux/sched.h	2004-06-12 22:42:26.644197731 +1000
+++ linux-2.6.7-rc3-s6.EB/include/linux/sched.h	2004-06-12 22:42:51.190393739 +1000
@@ -126,9 +126,10 @@ extern unsigned long nr_iowait(void);
 #define SCHED_NORMAL	0
 #define SCHED_FIFO	1
 #define SCHED_RR	2
+#define SCHED_BATCH	3
 
 #define SCHED_MIN	0
-#define SCHED_MAX	2
+#define SCHED_MAX	3
 
 #define SCHED_RANGE(policy)	((policy) >= SCHED_MIN && \
 				(policy) <= SCHED_MAX)
@@ -312,9 +313,10 @@ struct signal_struct {
 
 #define MAX_USER_RT_PRIO	100
 #define MAX_RT_PRIO		MAX_USER_RT_PRIO
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
+#define MAX_PRIO		(MAX_RT_PRIO + 41)
 
 #define rt_task(p)		((p)->prio < MAX_RT_PRIO)
+#define batch_task(p)		((p)->policy == SCHED_BATCH)
 
 /*
  * Some day this will be a full-fledged user tracking system..
diff -Naurp --exclude-from=/home/con/kernel/dontdiff linux-2.6.7-rc3-s6.ER/kernel/sched.c linux-2.6.7-rc3-s6.EB/kernel/sched.c
--- linux-2.6.7-rc3-s6.ER/kernel/sched.c	2004-06-12 22:42:26.646197421 +1000
+++ linux-2.6.7-rc3-s6.EB/kernel/sched.c	2004-06-12 22:55:20.493246276 +1000
@@ -47,7 +47,7 @@
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-2 ],
  * and back.
  */
 #define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
@@ -57,7 +57,7 @@
 /*
  * 'User priority' is the nice value converted to something we
  * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
+ * it's a [ 0 ... 40 ] range.
  */
 #define USER_PRIO(p)		((p)-MAX_RT_PRIO)
 #define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
@@ -193,7 +193,8 @@ static int task_preempts_curr(struct tas
 	if (p->prio >= rq->curr->prio)
 		return 0;
 	if (!compute || rq->cache_ticks >= cache_decay_ticks ||
-		rt_task(p) || !p->mm || rq->curr == rq->idle)
+		rt_task(p) || !p->mm || rq->curr == rq->idle ||
+		(batch_task(rq->curr) && !batch_task(p)))
 			return 1;
 	rq->preempted = 1;
 	return 0;
@@ -282,6 +283,8 @@ static unsigned int slice(task_t *p)
 	unsigned int slice = RR_INTERVAL;
 	if (!rt_task(p))
 		slice += burst(p) * RR_INTERVAL;
+	if (batch_task(p))
+		slice *= 10;
 	return slice;
 }
 
@@ -301,6 +304,8 @@ static int effective_prio(task_t *p)
 	unsigned int best_burst;
 	if (rt_task(p))
 		return p->prio;
+	if (batch_task(p))
+		return MAX_PRIO - 1;
 	best_burst = burst(p);
 	full_slice = slice(p);
 
@@ -310,14 +315,14 @@ static int effective_prio(task_t *p)
 		first_slice = RR_INTERVAL;
 	if (interactive && !compute)
 		first_slice *= (p->burst + 1);
-	prio = MAX_PRIO - 1 - best_burst;
+	prio = MAX_PRIO - 2 - best_burst;
 	if (used_slice < first_slice)
 		return prio;
 
 	if (p->mm)
 		prio += 1 + (used_slice - first_slice) / RR_INTERVAL;
-	if (prio > MAX_PRIO - 1)
-		prio = MAX_PRIO - 1;
+	if (prio > MAX_PRIO - 2)
+		prio = MAX_PRIO - 2;
 	return prio;
 }
 
@@ -326,7 +331,8 @@ static void recalc_task_prio(task_t *p,
 	unsigned long sleep_time = now - p->timestamp;
 	unsigned long run_time = NS_TO_JIFFIES(p->runtime);
 	unsigned long total_run = NS_TO_JIFFIES(p->totalrun) + run_time;
-	if (!run_time && NS_TO_JIFFIES(p->runtime + sleep_time) < RR_INTERVAL) {
+	if (!run_time && NS_TO_JIFFIES(p->runtime + sleep_time) < RR_INTERVAL &&
+		!batch_task(p)) {
 		if (p->slice - total_run < 1) {
 			p->totalrun = 0;
 			dec_burst(p);
@@ -359,6 +365,8 @@ static void activate_task(task_t *p, run
 	recalc_task_prio(p, now);
 	p->prio = effective_prio(p);
 	p->time_slice = RR_INTERVAL;
+	if (batch_task(p))
+		p->time_slice *= 10;
 	p->timestamp = now;
 	__activate_task(p, rq);
 }
@@ -1731,7 +1739,7 @@ void scheduler_tick(int user_ticks, int
 		rebalance_tick(cpu, rq, IDLE);
 		return;
 	}
-	if (TASK_NICE(p) > 0)
+	if (TASK_NICE(p) > 0 || batch_task(p))
 		cpustat->nice += user_ticks;
 	else
 		cpustat->user += user_ticks;
@@ -1830,8 +1838,9 @@ static inline int dependent_sleeper(int
 		 * physical cpu's resources. -ck
 		 */
 		if (((smt_curr->slice * (100 - sd->per_cpu_gain) / 100) >
-			slice(p) || rt_task(smt_curr)) &&
-			p->mm && smt_curr->mm && !rt_task(p))
+			slice(p) || rt_task(smt_curr) || batch_task(p)) &&
+			p->mm && smt_curr->mm && !rt_task(p) &&
+			!batch_task(smt_curr))
 				ret = 1;
 
 		/*
@@ -1840,8 +1849,9 @@ static inline int dependent_sleeper(int
 		 * reasons.
 		 */
 		if ((((p->slice * (100 - sd->per_cpu_gain) / 100) >
-			slice(smt_curr) || rt_task(p)) &&
-			smt_curr->mm && p->mm && !rt_task(smt_curr)) ||
+			slice(smt_curr) || rt_task(p) || batch_task(smt_curr)) &&
+			smt_curr->mm && p->mm && !rt_task(smt_curr) &&
+			!batch_task(p)) ||
 			(smt_curr == smt_rq->idle && smt_rq->nr_running))
 				resched_task(smt_curr);
 	}
@@ -2229,8 +2239,9 @@ void set_user_nice(task_t *p, long nice)
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
 		 */
-		if (delta < 0 || (delta > 0 && task_running(rq, p)))
-			resched_task(rq->curr);
+		if (delta < 0 || ((delta > 0 || batch_task(p)) &&
+			task_running(rq, p)))
+				resched_task(rq->curr);
 	}
 out_unlock:
 	task_rq_unlock(rq, &flags);
@@ -2399,6 +2410,12 @@ static int setscheduler(pid_t pid, int p
 			!capable(CAP_SYS_NICE))
 		goto out_unlock;
 
+	if (!(p->mm) && policy == SCHED_BATCH)
+		/*
+		 * Don't allow kernel threads to be SCHED_BATCH.
+		 */
+		goto out_unlock;
+
 	retval = security_task_setscheduler(p, policy, &lp);
 	if (retval)
 		goto out_unlock;
@@ -2620,8 +2637,8 @@ asmlinkage long sys_sched_yield(void)
 	dequeue_task(current, rq);
 	current->slice = RR_INTERVAL;
 	current->time_slice = current->slice;
-	if (!rt_task(current))
-		current->prio = MAX_PRIO - 1;
+	if (!rt_task(current) && !batch_task(current))
+		current->prio = MAX_PRIO - 2;
 	inc_burst(current);
 	enqueue_task(current, rq);
 
@@ -2705,6 +2722,7 @@ asmlinkage long sys_sched_get_priority_m
 		ret = MAX_USER_RT_PRIO-1;
 		break;
 	case SCHED_NORMAL:
+	case SCHED_BATCH:
 		ret = 0;
 		break;
 	}
@@ -2728,6 +2746,7 @@ asmlinkage long sys_sched_get_priority_m
 		ret = 1;
 		break;
 	case SCHED_NORMAL:
+	case SCHED_BATCH:
 		ret = 0;
 	}
 	return ret;
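
For reference, here is a minimal userspace sketch (not part of the patch) of how a
process would opt in to the new policy. The policy number and the zero priority are
taken from the sched.h and sys_sched_get_priority_{min,max} hunks above; glibc
headers of this era do not define SCHED_BATCH, so the test program defines the
constant itself. The file name and program are hypothetical.

/* batch_test.c - switch the calling process to SCHED_BATCH */
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

#ifndef SCHED_BATCH
#define SCHED_BATCH 3		/* value from the sched.h hunk above */
#endif

int main(void)
{
	struct sched_param sp;

	memset(&sp, 0, sizeof(sp));
	sp.sched_priority = 0;	/* the only valid priority: min == max == 0 */

	/* pid 0 means the calling process */
	if (sched_setscheduler(0, SCHED_BATCH, &sp) == -1) {
		fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
		return 1;
	}
	printf("policy is now %d (SCHED_BATCH)\n", sched_getscheduler(0));
	/* a CPU-bound batch workload would run here */
	return 0;
}

Under the patch above, such a task runs at the new lowest priority (MAX_PRIO - 1)
with a timeslice ten times the normal one, is always preempted by non-batch tasks,
and has its user ticks accounted as nice time in scheduler_tick(). Kernel threads
(!p->mm) are refused the policy by setscheduler().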