sched: fixlet for group load balance
Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
AuthorDate: Tue, 23 Sep 2008 13:33:42 +0000 (15:33 +0200)
Commit:     Ingo Molnar <mingo@elte.hu>
CommitDate: Tue, 23 Sep 2008 14:23:15 +0000 (16:23 +0200)

We should not correct the increment only for the initial group; to be
consistent, apply the same correction to every group we encounter while
walking up the hierarchy. While at it, replace the D() divide-by-zero
macro with an explicit branch (sketched below).

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
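
As an aside for readers, below is a minimal, self-contained sketch of
effective_load() as it looks after this patch. The "struct level" type,
effective_load_sketch() and the numbers in main() are hypothetical
stand-ins for the scheduler's sched_entity/cfs_rq hierarchy; only the
per-level more_w correction and the divide guard mirror the kernel code:

#include <stdio.h>

/* One level of the group hierarchy, flattened for the sketch. */
struct level {
	long tg_shares;		/* S:  shares allotted to the task group */
	long shares;		/* s:  this cpu's portion of those shares */
	long rq_weight;		/* rw: rq weight at the last shares update */
	long load_weight;	/* current load.weight of the group's rq */
};

static long effective_load_sketch(struct level *lv, int nr, long wl, long wg)
{
	int i;

	for (i = 0; i < nr; i++) {
		long S = lv[i].tg_shares;
		long s = lv[i].shares;
		long rw = lv[i].rq_weight;
		long a, b, more_w;

		/*
		 * The fix: add the difference between when the shares
		 * were last updated and now at every level, not just
		 * the first one.
		 */
		more_w = lv[i].load_weight - lv[i].rq_weight;
		wl += more_w;
		wg += more_w;

		a = S * (rw + wl);
		b = S * rw + s * wg;

		wl = s * (a - b);

		/* Explicit zero-divisor guard, replacing the D() macro. */
		if (b)
			wl /= b;

		/*
		 * Moving shares between CPUs does not alter the group
		 * weight, so levels above the first see no group delta.
		 */
		wg = 0;
	}

	return wl;
}

int main(void)
{
	/* Two levels; the second went stale since its last update. */
	struct level lv[] = {
		{ 1024, 512, 2048, 2048 },
		{ 1024, 256, 1024, 1100 },
	};
	long w = 1024;	/* hypothetical weight of the waking task */

	printf("effective load: %ld\n",
	       effective_load_sketch(lv, 2, w, w));
	return 0;
}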

diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c208997..0c59da7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1027,7 +1027,6 @@ static long effective_load(struct task_group *tg, int cpu,
                long wl, long wg)
 {
        struct sched_entity *se = tg->se[cpu];
-       long more_w;
 
        if (!tg->parent)
                return wl;
@@ -1039,18 +1038,17 @@ static long effective_load(struct task_group *tg, int cpu,
        if (!wl && sched_feat(ASYM_EFF_LOAD))
                return wl;
 
-       /*
-        * Instead of using this increment, also add the difference
-        * between when the shares were last updated and now.
-        */
-       more_w = se->my_q->load.weight - se->my_q->rq_weight;
-       wl += more_w;
-       wg += more_w;
-
        for_each_sched_entity(se) {
-#define D(n) (likely(n) ? (n) : 1)
-
                long S, rw, s, a, b;
+               long more_w;
+
+               /*
+                * Instead of using this increment, also add the difference
+                * between when the shares were last updated and now.
+                */
+               more_w = se->my_q->load.weight - se->my_q->rq_weight;
+               wl += more_w;
+               wg += more_w;
 
                S = se->my_q->tg->shares;
                s = se->my_q->shares;
@@ -1059,7 +1057,11 @@ static long effective_load(struct task_group *tg, int cpu,
                a = S*(rw + wl);
                b = S*rw + s*wg;
 
-               wl = s*(a-b)/D(b);
+               wl = s*(a-b);
+
+               if (likely(b))
+                       wl /= b;
+
                /*
                 * Assume the group is already running and will
                 * thus already be accounted for in the weight.
@@ -1068,7 +1070,6 @@ static long effective_load(struct task_group *tg, int cpu,
                 * alter the group weight.
                 */
                wg = 0;
-#undef D
        }
 
        return wl;
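
One more note, since the last two hunks are easy to misread: dropping
the D() macro does not change behaviour. D(b) evaluated to 1 when b was
zero, so s*(a-b)/D(b) degenerated to s*(a-b), which is exactly what the
new code leaves in wl when the divide is skipped. A tiny check with
hypothetical values:

#include <assert.h>

int main(void)
{
	long s = 512, a = 3000, b;

	for (b = 0; b < 3; b++) {
		long old_wl = s * (a - b) / (b ? b : 1);	/* old: D(b) */
		long new_wl = s * (a - b);

		if (b)		/* new: explicit guard */
			new_wl /= b;

		assert(old_wl == new_wl);
	}
	return 0;
}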