From: Con Kolivas

Modify the smp nice code to store load_weight on uniprocessor as well to
allow relative niceness on one cpu to be assessed.  Minor cleanups and
uninline set_load_weight().

Signed-off-by: Con Kolivas
Cc: Peter Williams
Acked-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 include/linux/sched.h |    4 ++--
 kernel/sched.c        |   24 ++++++------------------
 2 files changed, 8 insertions(+), 20 deletions(-)

diff -puN include/linux/sched.h~sched-store-weighted-load-on-up include/linux/sched.h
--- devel/include/linux/sched.h~sched-store-weighted-load-on-up	2006-05-17 13:10:07.000000000 -0700
+++ devel-akpm/include/linux/sched.h	2006-05-17 13:10:07.000000000 -0700
@@ -591,9 +591,9 @@ enum idle_type
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
-#ifdef CONFIG_SMP
 #define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
 
+#ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		4	/* Balance on exec */
@@ -750,8 +750,8 @@ struct task_struct {
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	int oncpu;
 #endif
-	int load_weight;	/* for load balancing purposes */
 #endif
+	int load_weight;	/* for niceness load balancing purposes */
 	int prio, static_prio;
 	struct list_head run_list;
 	prio_array_t *array;
diff -puN kernel/sched.c~sched-store-weighted-load-on-up kernel/sched.c
--- devel/kernel/sched.c~sched-store-weighted-load-on-up	2006-05-17 13:10:07.000000000 -0700
+++ devel-akpm/kernel/sched.c	2006-05-17 13:10:07.000000000 -0700
@@ -169,12 +169,12 @@
  */
 
 #define SCALE_PRIO(x, prio) \
-	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
 
 static unsigned int static_prio_timeslice(int static_prio)
 {
 	if (static_prio < NICE_TO_PRIO(0))
-		return SCALE_PRIO(DEF_TIMESLICE*4, static_prio);
+		return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
 	else
 		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
 }
@@ -214,8 +214,8 @@ struct runqueue {
 	 * remote CPUs use both these fields when doing load calculation.
 	 */
 	unsigned long nr_running;
-#ifdef CONFIG_SMP
 	unsigned long raw_weighted_load;
+#ifdef CONFIG_SMP
 	unsigned long cpu_load[3];
 #endif
 	unsigned long long nr_switches;
@@ -737,7 +737,6 @@ static inline int expired_starving(runqu
 	return 0;
 }
 
-#ifdef CONFIG_SMP
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
@@ -760,9 +759,10 @@ static inline int expired_starving(runqu
 #define RTPRIO_TO_LOAD_WEIGHT(rp) \
 	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
 
-static inline void set_load_weight(task_t *p)
+static void set_load_weight(task_t *p)
 {
 	if (rt_task(p)) {
+#ifdef CONFIG_SMP
 		if (p == task_rq(p)->migration_thread)
 			/*
 			 * The migration thread does the actual balancing.
@@ -771,6 +771,7 @@ static inline void set_load_weight(task_
 			 */
 			p->load_weight = 0;
 		else
+#endif
 			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
 	} else
 		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
@@ -785,19 +786,6 @@ static inline void dec_raw_weighted_load
 {
 	rq->raw_weighted_load -= p->load_weight;
 }
-#else
-static inline void set_load_weight(task_t *p)
-{
-}
-
-static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-}
-
-static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-}
-#endif
 
 static inline void inc_nr_running(task_t *p, runqueue_t *rq)
 {
_
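
For reference, below is a minimal userspace sketch of the nice -> load_weight
mapping that set_load_weight() now applies on UP as well as SMP.  The HZ and
timeslice constants and the LOAD_WEIGHT()/PRIO_TO_LOAD_WEIGHT() definitions
are assumptions reconstructed from the surrounding smpnice code, not part of
this patch; only static_prio_timeslice() and RTPRIO_TO_LOAD_WEIGHT() appear
in the hunks above.

/*
 * Sketch (userspace, not kernel code): how static_prio maps to load_weight.
 * HZ, the timeslice constants and the LOAD_WEIGHT()/PRIO_TO_LOAD_WEIGHT()
 * macros are assumptions taken from the 2.6.17-era smpnice code, not from
 * this patch.
 */
#include <stdio.h>

#define HZ			1000
#define max(a, b)		((a) > (b) ? (a) : (b))
#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
#define DEF_TIMESLICE		(100 * HZ / 1000)
#define MAX_RT_PRIO		100
#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define MAX_USER_PRIO		40
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define SCHED_LOAD_SCALE	128UL

#define SCALE_PRIO(x, prio) \
	max((x) * (MAX_PRIO - (prio)) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)

/* Same shape as static_prio_timeslice() in the hunk above. */
static unsigned int static_prio_timeslice(int static_prio)
{
	if (static_prio < NICE_TO_PRIO(0))
		return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
	else
		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}

/* Assumed: the timeslice scaled so that nice 0 maps to SCHED_LOAD_SCALE. */
#define LOAD_WEIGHT(lp) \
	(((lp) * SCHED_LOAD_SCALE) / DEF_TIMESLICE)
#define PRIO_TO_LOAD_WEIGHT(prio) \
	LOAD_WEIGHT(static_prio_timeslice(prio))

int main(void)
{
	static const int nices[] = { -20, -10, 0, 10, 19 };
	unsigned int i;

	/* With load_weight stored on UP too, the same table applies there. */
	for (i = 0; i < sizeof(nices) / sizeof(nices[0]); i++)
		printf("nice %3d -> static_prio %3d -> load_weight %4lu\n",
		       nices[i], NICE_TO_PRIO(nices[i]),
		       PRIO_TO_LOAD_WEIGHT(NICE_TO_PRIO(nices[i])));
	return 0;
}

With these assumed constants the sketch prints a load_weight of 128
(SCHED_LOAD_SCALE) for nice 0, rising to 1024 for nice -20 and falling to 6
for nice 19, which is the relative-niceness information a single runqueue's
raw_weighted_load can now expose on UP.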