From: Con Kolivas

The activated flag in task_struct is used to track different sleep types and
its usage is somewhat obfuscated.  Convert the variable to an enum with more
descriptive names without altering the function.

Signed-off-by: Con Kolivas
Acked-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 include/linux/sched.h |    9 ++++++++-
 kernel/sched.c        |   24 +++++++++++++++---------
 2 files changed, 23 insertions(+), 10 deletions(-)

diff -puN include/linux/sched.h~sched-cleanup_task_activated include/linux/sched.h
--- devel/include/linux/sched.h~sched-cleanup_task_activated	2006-03-11 02:50:34.000000000 -0800
+++ devel-akpm/include/linux/sched.h	2006-03-11 02:50:34.000000000 -0800
@@ -681,6 +681,13 @@ static inline void prefetch_stack(struct
 struct audit_context;		/* See audit.c */
 struct mempolicy;
 
+enum sleep_type {
+	SLEEP_NORMAL,
+	SLEEP_NONINTERACTIVE,
+	SLEEP_INTERACTIVE,
+	SLEEP_INTERRUPTED,
+};
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	struct thread_info *thread_info;
@@ -706,7 +713,7 @@ struct task_struct {
 	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time; /* sched_clock time spent running */
-	int activated;
+	enum sleep_type sleep_type;
 
 	unsigned long policy;
 	cpumask_t cpus_allowed;
diff -puN kernel/sched.c~sched-cleanup_task_activated kernel/sched.c
--- devel/kernel/sched.c~sched-cleanup_task_activated	2006-03-11 02:50:34.000000000 -0800
+++ devel-akpm/kernel/sched.c	2006-03-11 02:50:34.000000000 -0800
@@ -784,7 +784,7 @@ static int recalc_task_prio(task_t *p, u
 		 * prevent them suddenly becoming cpu hogs and starving
 		 * other processes.
 		 */
-		if (p->mm && p->activated != -1 &&
+		if (p->mm && p->sleep_type != SLEEP_NONINTERACTIVE &&
 			sleep_time > INTERACTIVE_SLEEP(p)) {
 				p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
 						DEF_TIMESLICE);
@@ -800,7 +800,7 @@ static int recalc_task_prio(task_t *p, u
 			 * limited in their sleep_avg rise as they
 			 * are likely to be waiting on I/O
 			 */
-			if (p->activated == -1 && p->mm) {
+			if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
 				if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
 					sleep_time = 0;
 				else if (p->sleep_avg + sleep_time >=
@@ -855,7 +855,7 @@ static void activate_task(task_t *p, run
 	 * This checks to make sure it's not an uninterruptible task
 	 * that is now waking up.
 	 */
-	if (!p->activated) {
+	if (p->sleep_type == SLEEP_NORMAL) {
 		/*
 		 * Tasks which were woken up by interrupts (ie. hw events)
 		 * are most likely of interactive nature. So we give them
@@ -864,13 +864,13 @@ static void activate_task(task_t *p, run
 		 * on a CPU, first time around:
 		 */
 		if (in_interrupt())
-			p->activated = 2;
+			p->sleep_type = SLEEP_INTERRUPTED;
 		else {
 			/*
 			 * Normal first-time wakeups get a credit too for
 			 * on-runqueue time, but it will be weighted down:
 			 */
-			p->activated = 1;
+			p->sleep_type = SLEEP_INTERACTIVE;
 		}
 	}
 	p->timestamp = now;
@@ -1373,7 +1373,7 @@ out_activate:
 		 * Tasks on involuntary sleep don't earn
 		 * sleep_avg beyond just interactive state.
 		 */
-		p->activated = -1;
+		p->sleep_type = SLEEP_NONINTERACTIVE;
 	}
 
 	/*
@@ -2996,6 +2996,12 @@ EXPORT_SYMBOL(sub_preempt_count);
 
 #endif
 
+static inline int interactive_sleep(enum sleep_type sleep_type)
+{
+	return (sleep_type == SLEEP_INTERACTIVE ||
+		sleep_type == SLEEP_INTERRUPTED);
+}
+
 /*
  * schedule() is the main scheduler function.
  */
@@ -3121,12 +3127,12 @@ go_idle:
 	queue = array->queue + idx;
 	next = list_entry(queue->next, task_t, run_list);
 
-	if (!rt_task(next) && next->activated > 0) {
+	if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
 		unsigned long long delta = now - next->timestamp;
 		if (unlikely((long long)(now - next->timestamp) < 0))
 			delta = 0;
 
-		if (next->activated == 1)
+		if (next->sleep_type == SLEEP_INTERACTIVE)
			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
 
 		array = next->array;
@@ -3139,7 +3145,7 @@ go_idle:
 		} else
 			requeue_task(next, array);
 	}
-	next->activated = 0;
+	next->sleep_type = SLEEP_NORMAL;
 switch_tasks:
 	if (next == rq->idle)
 		schedstat_inc(rq, sched_goidle);
_
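For reference, a minimal standalone sketch (not part of the patch) of the
mapping this conversion makes explicit.  The old ->activated values in the
comments are inferred from the hunks above; the comment wording itself is
illustrative, not kernel source:

	/* Compiles on its own; mirrors the enum added to sched.h. */
	enum sleep_type {
		SLEEP_NORMAL,		/* was  0: default/reset state */
		SLEEP_NONINTERACTIVE,	/* was -1: involuntary sleep, likely waiting on I/O */
		SLEEP_INTERACTIVE,	/* was  1: normal first-time wakeup */
		SLEEP_INTERRUPTED,	/* was  2: woken by an interrupt (hw event) */
	};

	/* Replaces the open-coded "next->activated > 0" test in schedule(). */
	static inline int interactive_sleep(enum sleep_type sleep_type)
	{
		return (sleep_type == SLEEP_INTERACTIVE ||
			sleep_type == SLEEP_INTERRUPTED);
	}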