adding BFS 316 to 318 patch
author Corey O'Connor <coreyoconnor@gmail.com>
Thu, 2 Sep 2010 19:42:01 +0000 (12:42 -0700)
committer Corey O'Connor <coreyoconnor@gmail.com>
Thu, 2 Sep 2010 19:42:01 +0000 (12:42 -0700)
kernel-power-2.6.28/debian/patches/bfs-316-to-318.patch [new file with mode: 0644]

diff --git a/kernel-power-2.6.28/debian/patches/bfs-316-to-318.patch b/kernel-power-2.6.28/debian/patches/bfs-316-to-318.patch
new file mode 100644 (file)
index 0000000..e621a7c
--- /dev/null
@@ -0,0 +1,74 @@
+Make sure we disable preemption in try_to_wake_up and when changing the cpu
+in set_cpus_allowed_ptr.
+
+Rework the change in rr_interval with number of cpus to not go up quite so
+quickly, scaling only by 50% every doubling of CPUs for better interactivity
+on multicore machines. Throughput did not appear to decrease measurably with
+this change.
+
+-ck
+
+---
+ include/linux/sched.h |    2 +-
+ kernel/sched_bfs.c    |   14 ++++++++++++--
+ 2 files changed, 13 insertions(+), 3 deletions(-)
+
+Index: kernel-2.6.28/include/linux/sched.h
+===================================================================
+--- kernel-2.6.28.orig/include/linux/sched.h
++++ kernel-2.6.28/include/linux/sched.h
+@@ -1422,7 +1422,7 @@ static inline void tsk_cpus_current(stru
+ static inline void print_scheduler_version(void)
+ {
+-      printk(KERN_INFO"BFS CPU scheduler v0.316 by Con Kolivas ported by ToAsTcfh.\n");
++      printk(KERN_INFO"BFS CPU scheduler v0.318 by Con Kolivas ported by ToAsTcfh.\n");
+ }
+ static inline int iso_task(struct task_struct *p)
+Index: kernel-2.6.28/kernel/sched_bfs.c
+===================================================================
+--- kernel-2.6.28.orig/kernel/sched_bfs.c
++++ kernel-2.6.28/kernel/sched_bfs.c
+@@ -1193,6 +1193,8 @@ static int try_to_wake_up(struct task_st
+       int success = 0;
+       struct rq *rq;
++      get_cpu();
++
+       /* This barrier is undocumented, probably for p->state? */
+       smp_wmb();
+@@ -1227,6 +1229,8 @@ out_running:
+       p->state = TASK_RUNNING;
+ out_unlock:
+       task_grq_unlock(&flags);
++      put_cpu();
++
+       return success;
+ }
+@@ -5748,7 +5752,7 @@ static int cache_cpu_idle(unsigned long
+ void __init sched_init_smp(void)
+ {
+       struct sched_domain *sd;
+-      int cpu;
++      int cpu, i, cpu_scale;
+       cpumask_t non_isolated_cpus;
+@@ -5783,7 +5787,13 @@ void __init sched_init_smp(void)
+        * allowing us to increase the base rr_interval, but in a non linear
+        * fashion.
+        */
+-      rr_interval *= 1 + ilog2(num_online_cpus());
++      cpu_scale = ilog2(num_online_cpus());
++      rr_interval *= 100;
++      for (i = 0; i < cpu_scale; i++) {
++              rr_interval *= 3;
++              rr_interval /= 2;
++      }
++      rr_interval /= 100;
+       grq_lock_irq();
+       /*