bfs 357 -> 363, kernel-power v46 -> kernel-bfs
author    Dennis Groenen <dennis_groenen@hotmail.com>
Tue, 11 Jan 2011 15:48:39 +0000 (16:48 +0100)
committer Dennis Groenen <dennis_groenen@hotmail.com>
Tue, 11 Jan 2011 15:48:39 +0000 (16:48 +0100)
kernel-bfs-2.6.28/debian/changelog
kernel-bfs-2.6.28/debian/patches/bfs-350-to-357.patch
kernel-bfs-2.6.28/debian/patches/bfs-357-to-360.patch [new file with mode: 0644]
kernel-bfs-2.6.28/debian/patches/bfs-360-to-363.patch [new file with mode: 0644]
kernel-bfs-2.6.28/debian/patches/bt-mice.diff [new file with mode: 0644]
kernel-bfs-2.6.28/debian/patches/series
kernel-bfs-2.6.28/debian/rules

diff --git a/kernel-bfs-2.6.28/debian/changelog b/kernel-bfs-2.6.28/debian/changelog
index 4d82de1..55c15f2 100644
@@ -1,3 +1,11 @@
+kernel-bfs (2.6.28-bfs5) fremantle; urgency=low
+
+  * Updated kernel-bfs to reflect changes kernel-power 2.6.28-maemo46
+  * Updated BFS scheduler to bfs363
+  * Tidied up bfs-350-to-357.patch file
+
+ -- Dennis Groenen <dennis_groenen@hotmail.com>  Mon,  03 Jan 2011 16:30:00 +0100
+
 kernel-bfs (2.6.28-bfs4) fremantle; urgency=low
 
   * Updated kernel-bfs to reflect changes kernel-power 2.6.28-maemo43, 2.6.28-44 and 2.6.28-45
diff --git a/kernel-bfs-2.6.28/debian/patches/bfs-350-to-357.patch b/kernel-bfs-2.6.28/debian/patches/bfs-350-to-357.patch
index fc4c2a7..9199cc9 100644
@@ -16,11 +16,11 @@ Go magnum.
  kernel/sched_bfs.c    |   76 +++++++++++++++++++++++++++-----------------------
  2 files changed, 43 insertions(+), 35 deletions(-)
 
-Index: linux-2.6.35.7/kernel/sched_bfs.c
+Index: linux-2.6.28/kernel/sched_bfs.c
 ===================================================================
---- linux-2.6.35.7.orig/kernel/sched_bfs.c     2010-10-03 21:29:08.421363441 +1100
-+++ linux-2.6.35.7/kernel/sched_bfs.c  2010-10-04 11:39:08.027283891 +1100
-@@ -111,10 +111,12 @@
+--- linux-2.6.28.orig/kernel/sched_bfs.c       2010-10-03 21:29:08.421363441 +1100
++++ linux-2.6.28/kernel/sched_bfs.c    2010-10-04 11:39:08.027283891 +1100
+@@ -107,10 +107,12 @@
   * approximate multiples of ten for less overhead.
   */
  #define JIFFIES_TO_NS(TIME)   ((TIME) * (1000000000 / HZ))
@@ -33,7 +33,7 @@ Index: linux-2.6.35.7/kernel/sched_bfs.c
  #define NS_TO_MS(TIME)                ((TIME) >> 20)
  #define NS_TO_US(TIME)                ((TIME) >> 10)
  
-@@ -165,8 +167,8 @@ struct global_rq {
+@@ -182,8 +184,8 @@ struct global_rq {
        cpumask_t cpu_idle_map;
        int idle_cpus;
  #endif
@@ -42,9 +42,9 @@ Index: linux-2.6.35.7/kernel/sched_bfs.c
 +      u64 niffies; /* Nanosecond jiffies */
 +      unsigned long last_jiffy; /* Last jiffy we updated niffies */
  
-       raw_spinlock_t iso_lock;
+       spinlock_t iso_lock;
        int iso_ticks;
-@@ -193,7 +195,7 @@ struct rq {
+@@ -209,7 +211,7 @@ struct rq {
        struct mm_struct *prev_mm;
  
        /* Stored data about rq->curr to work outside grq lock */
@@ -117,7 +117,7 @@ Index: linux-2.6.35.7/kernel/sched_bfs.c
        grq.niffies += ndiff;
  }
  #endif
-@@ -1202,7 +1221,7 @@ EXPORT_SYMBOL_GPL(kick_process);
+@@ -1203,7 +1222,7 @@ void kick_process(struct task_struct *p)
   * prio PRIO_LIMIT so it is always preempted.
   */
  static inline int
@@ -126,7 +126,7 @@ Index: linux-2.6.35.7/kernel/sched_bfs.c
            unsigned int policy)
  {
        /* Better static priority RT task or better policy preemption */
-@@ -1252,7 +1271,8 @@ static inline int needs_other_cpu(struct
+@@ -1254,7 +1273,8 @@ static inline int needs_other_cpu(struct
  static void try_preempt(struct task_struct *p, struct rq *this_rq)
  {
        struct rq *highest_prio_rq = this_rq;
@@ -136,7 +136,7 @@ Index: linux-2.6.35.7/kernel/sched_bfs.c
        int highest_prio;
        cpumask_t tmp;
  
-@@ -1274,7 +1294,7 @@ static void try_preempt(struct task_stru
+@@ -1276,7 +1296,7 @@ static void try_preempt(struct task_stru
        highest_prio = -1;
  
        for_each_cpu_mask_nr(cpu, tmp) {
@@ -145,7 +145,7 @@ Index: linux-2.6.35.7/kernel/sched_bfs.c
                struct rq *rq;
                int rq_prio;
  
-@@ -1975,16 +1995,12 @@ static void pc_user_time(struct rq *rq, 
+@@ -1895,16 +1915,12 @@ static void pc_user_time(struct rq *rq,
  }
  
  /* Convert nanoseconds to percentage of one tick. */
@@ -163,7 +163,7 @@ Index: linux-2.6.35.7/kernel/sched_bfs.c
   */
  static void
  update_cpu_clock(struct rq *rq, struct task_struct *p, int tick)
-@@ -2019,18 +2035,9 @@ update_cpu_clock(struct rq *rq, struct t
+@@ -1939,18 +1955,9 @@ update_cpu_clock(struct rq *rq, struct t
  
        /* time_slice accounting is done in usecs to avoid overflow on 32bit */
        if (rq->rq_policy != SCHED_FIFO && p != idle) {
@@ -184,7 +184,7 @@ Index: linux-2.6.35.7/kernel/sched_bfs.c
                rq->rq_time_slice -= NS_TO_US(time_diff);
        }
        rq->rq_last_ran = rq->timekeep_clock = rq->clock;
-@@ -2438,17 +2445,17 @@ EXPORT_SYMBOL(sub_preempt_count);
+@@ -2358,17 +2365,17 @@ EXPORT_SYMBOL(sub_preempt_count);
   * proportion works out to the square of the virtual deadline difference, so
   * this equation will give nice 19 3% CPU compared to nice 0.
   */
@@ -205,7 +205,7 @@ Index: linux-2.6.35.7/kernel/sched_bfs.c
  {
        return prio_deadline_diff(USER_PRIO(static_prio));
  }
-@@ -2504,7 +2511,7 @@ static inline void check_deadline(struct
+@@ -2424,7 +2431,7 @@ static inline void check_deadline(struct
  static inline struct
  task_struct *earliest_deadline_task(struct rq *rq, struct task_struct *idle)
  {
@@ -214,7 +214,7 @@ Index: linux-2.6.35.7/kernel/sched_bfs.c
        struct task_struct *p, *edt = idle;
        unsigned int cpu = cpu_of(rq);
        struct list_head *queue;
-@@ -6644,6 +6651,7 @@ void __init sched_init(void)
+@@ -6100,6 +6107,7 @@ void __init sched_init(void)
        spin_lock_init(&grq.lock);
        grq.nr_running = grq.nr_uninterruptible = grq.nr_switches = 0;
        grq.niffies = 0;
@@ -222,11 +222,11 @@ Index: linux-2.6.35.7/kernel/sched_bfs.c
        spin_lock_init(&grq.iso_lock);
        grq.iso_ticks = grq.iso_refractory = 0;
  #ifdef CONFIG_SMP
-Index: linux-2.6.35.7/include/linux/sched.h
+Index: linux-2.6.28/include/linux/sched.h
 ===================================================================
---- linux-2.6.35.7.orig/include/linux/sched.h  2010-10-04 09:34:58.028244089 +1100
-+++ linux-2.6.35.7/include/linux/sched.h       2010-10-04 09:35:08.833093538 +1100
-@@ -1541,7 +1541,7 @@ static inline void tsk_cpus_current(stru
+--- linux-2.6.28.orig/include/linux/sched.h    2010-10-04 09:34:58.028244089 +1100
++++ linux-2.6.28/include/linux/sched.h 2010-10-04 09:35:08.833093538 +1100
+@@ -1426,7 +1426,7 @@ static inline void tsk_cpus_current(stru
  
  static inline void print_scheduler_version(void)
  {
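
Aside: the hunk context above carries BFS's shift-based time conversions (NS_TO_MS as >> 20, NS_TO_US as >> 10), which trade exact division for cheap approximations of powers of ten, as the in-file comment notes. A minimal user-space sketch of what those macros compute, assuming HZ=1000 purely for illustration (the real tick rate comes from the kernel config):

#include <stdio.h>
#include <stdint.h>

#define HZ 1000  /* illustrative tick rate; the real value comes from the kernel config */

/* Same conversions as in sched_bfs.c: shifts approximate dividing by 10^6 and 10^3. */
#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
#define NS_TO_MS(TIME)      ((TIME) >> 20)   /* ~TIME / 1048576 instead of / 1000000 */
#define NS_TO_US(TIME)      ((TIME) >> 10)   /* ~TIME / 1024 instead of / 1000 */

int main(void)
{
	uint64_t ns = JIFFIES_TO_NS((uint64_t)5);  /* 5 ticks at HZ=1000 -> 5000000 ns */

	printf("5 jiffies = %llu ns, ~%llu ms, ~%llu us\n",
	       (unsigned long long)ns,
	       (unsigned long long)NS_TO_MS(ns),
	       (unsigned long long)NS_TO_US(ns));
	return 0;
}

The shift divisors (1024 and 1048576) are roughly 2% and 5% larger than the exact ones, an error the scheduler tolerates in exchange for cheaper arithmetic.
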
diff --git a/kernel-bfs-2.6.28/debian/patches/bfs-357-to-360.patch b/kernel-bfs-2.6.28/debian/patches/bfs-357-to-360.patch
new file mode 100644
index 0000000..09b037f
--- /dev/null
@@ -0,0 +1,229 @@
+Don't unnecessarily preempt for a task on the wrong CPU.
+
+Cope with worker threads trying to wake themselves up due to shifting CPUs on
+suspend by reactivating it, instead of hitting the BUG_ON
+
+Wrap timer jiffies at 10 seconds instead of 5 minutes since 32 bit load
+averages don't work until the first timer wrap.
+
+Remove the last_task logic as it wasn't providing any significant performance
+advantage.
+
+Change the locality logic to try to reschedule on the exact same logical core
+instead of assuming scheduling on a sibling core or sibling thread is
+equivalent. This allows CPUs with a "turbo" mode (such as i7) to use that more
+often by using one CPU more than spreading out, and allows ondemand cpu
+frequency scaling to ramp up more easily when a task stays on the same CPU. It
+increases throughput on threaded CPUs when lightly loaded, and may offer both
+performance and power saving advantages on all SMP topologies with cpu
+frequency scaling.
+
+-ck
+
+---
+ include/linux/jiffies.h |    2 -
+ include/linux/sched.h   |    2 -
+ kernel/sched_bfs.c      |   89 ++++++++++++++++++++++--------------------------
+ 3 files changed, 43 insertions(+), 50 deletions(-)
+
+Index: linux-2.6.28/include/linux/jiffies.h
+===================================================================
+--- linux-2.6.28.orig/include/linux/jiffies.h  2010-12-14 22:13:10.975304692 +1100
++++ linux-2.6.28/include/linux/jiffies.h       2010-12-14 22:14:03.530569735 +1100
+@@ -154,7 +154,7 @@ static inline u64 get_jiffies_64(void)
+  * Have the 32 bit jiffies value wrap 5 minutes after boot
+  * so jiffies wrap bugs show up earlier.
+  */
+-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
++#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ))
+ /*
+  * Change timeval to jiffies, trying to avoid the
+Index: linux-2.6.28/include/linux/sched.h
+===================================================================
+--- linux-2.6.28.orig/include/linux/sched.h    2010-12-14 22:13:10.965304640 +1100
++++ linux-2.6.28/include/linux/sched.h 2010-12-14 22:14:03.524569704 +1100
+@@ -1426,7 +1426,7 @@ static inline void tsk_cpus_current(stru
+ static inline void print_scheduler_version(void)
+ {
+-      printk(KERN_INFO"BFS CPU scheduler v0.357 by Con Kolivas.\n");
++      printk(KERN_INFO"BFS CPU scheduler v0.360 by Con Kolivas.\n");
+ }
+ static inline int iso_task(struct task_struct *p)
+Index: linux-2.6.28/kernel/sched_bfs.c
+===================================================================
+--- linux-2.6.28.orig/kernel/sched_bfs.c       2010-12-14 22:13:10.983304734 +1100
++++ linux-2.6.28/kernel/sched_bfs.c    2010-12-14 22:14:54.061814177 +1100
+@@ -204,7 +204,6 @@ struct rq {
+ #ifdef CONFIG_NO_HZ
+       unsigned char in_nohz_recently;
+ #endif
+-      struct task_struct *last_task;
+ #endif
+       struct task_struct *curr, *idle;
+@@ -733,19 +732,12 @@ static int suitable_idle_cpus(struct tas
+ static void resched_task(struct task_struct *p);
+-/*
+- * last_task stores the last non-idle task scheduled on the local rq for
+- * cache warmth testing.
+- */
+-static inline void set_last_task(struct rq *rq, struct task_struct *p)
+-{
+-      rq->last_task = p;
+-}
+-
+-#define CPUIDLE_CACHE_BUSY    (1)
+-#define CPUIDLE_DIFF_CPU      (2)
+-#define CPUIDLE_THREAD_BUSY   (4)
+-#define CPUIDLE_DIFF_NODE     (8)
++#define CPUIDLE_DIFF_THREAD   (1)
++#define CPUIDLE_DIFF_CORE     (2)
++#define CPUIDLE_CACHE_BUSY    (4)
++#define CPUIDLE_DIFF_CPU      (8)
++#define CPUIDLE_THREAD_BUSY   (16)
++#define CPUIDLE_DIFF_NODE     (32)
+ /*
+  * The best idle CPU is chosen according to the CPUIDLE ranking above where the
+@@ -798,27 +790,28 @@ static void resched_best_idle(struct tas
+               }
+               tmp_rq = cpu_rq(cpu_tmp);
+-              if (rq->cpu_locality[cpu_tmp]) {
+-                      /* Check rq->last_task hasn't been dereferenced */
+-                      if (rq->last_task && p != rq->last_task) {
+ #ifdef CONFIG_NUMA
+-                              if (rq->cpu_locality[cpu_tmp] > 1)
+-                                      ranking |= CPUIDLE_DIFF_NODE;
++              if (rq->cpu_locality[cpu_tmp] > 3)
++                      ranking |= CPUIDLE_DIFF_NODE;
++              else
+ #endif
+-                              ranking |= CPUIDLE_DIFF_CPU;
+-                      }
+-              }
++              if (rq->cpu_locality[cpu_tmp] > 2)
++                      ranking |= CPUIDLE_DIFF_CPU;
+ #ifdef CONFIG_SCHED_MC
++              if (rq->cpu_locality[cpu_tmp] == 2)
++                      ranking |= CPUIDLE_DIFF_CORE;
+               if (!(tmp_rq->cache_idle(cpu_tmp)))
+                       ranking |= CPUIDLE_CACHE_BUSY;
+ #endif
+ #ifdef CONFIG_SCHED_SMT
++              if (rq->cpu_locality[cpu_tmp] == 1)
++                      ranking |= CPUIDLE_DIFF_THREAD;
+               if (!(tmp_rq->siblings_idle(cpu_tmp)))
+                       ranking |= CPUIDLE_THREAD_BUSY;
+ #endif
+               if (ranking < best_ranking) {
+                       best_cpu = cpu_tmp;
+-                      if (ranking <= 1)
++                      if (ranking == 0)
+                               break;
+                       best_ranking = ranking;
+               }
+@@ -835,11 +828,11 @@ static inline void resched_suitable_idle
+ /*
+  * The cpu cache locality difference between CPUs is used to determine how far
+- * to offset the virtual deadline. "One" difference in locality means that one
++ * to offset the virtual deadline. <2 difference in locality means that one
+  * timeslice difference is allowed longer for the cpu local tasks. This is
+  * enough in the common case when tasks are up to 2* number of CPUs to keep
+  * tasks within their shared cache CPUs only. CPUs on different nodes or not
+- * even in this domain (NUMA) have "3" difference, allowing 4 times longer
++ * even in this domain (NUMA) have "4" difference, allowing 4 times longer
+  * deadlines before being taken onto another cpu, allowing for 2* the double
+  * seen by separate CPUs above.
+  * Simple summary: Virtual deadlines are equal on shared cache CPUs, double
+@@ -848,12 +841,11 @@ static inline void resched_suitable_idle
+ static inline int
+ cache_distance(struct rq *task_rq, struct rq *rq, struct task_struct *p)
+ {
+-      /* Check rq->last_task hasn't been dereferenced */
+-      if (likely(rq->last_task)) {
+-              if (rq->last_task == p)
+-                      return 0;
+-      }
+-      return rq->cpu_locality[cpu_of(task_rq)] * task_timeslice(p);
++      int locality = rq->cpu_locality[cpu_of(task_rq)] - 2;
++
++      if (locality > 0)
++              return task_timeslice(p) << locality;
++      return 0;
+ }
+ #else /* CONFIG_SMP */
+ static inline void inc_qnr(void)
+@@ -892,10 +884,6 @@ cache_distance(struct rq *task_rq, struc
+ {
+       return 0;
+ }
+-
+-static inline void set_last_task(struct rq *rq, struct task_struct *p)
+-{
+-}
+ #endif /* CONFIG_SMP */
+ /*
+@@ -1287,10 +1275,10 @@ static void try_preempt(struct task_stru
+               return;
+       }
+-      if (online_cpus(p))
++      if (likely(online_cpus(p)))
+               cpus_and(tmp, cpu_online_map, p->cpus_allowed);
+       else
+-              (cpumask_copy(&tmp, &cpu_online_map));
++              return;
+       latest_deadline = 0;
+       highest_prio = -1;
+@@ -2597,7 +2585,7 @@ need_resched_nonpreemptible:
+               prev->last_ran = rq->clock;
+               /* Task changed affinity off this CPU */
+-              if (needs_other_cpu(prev, cpu))
++              if (unlikely(!cpu_isset(cpu, prev->cpus_allowed)))
+                       resched_suitable_idle(prev);
+               else if (!deactivate) {
+                       if (!queued_notrunning()) {
+@@ -2639,8 +2627,6 @@ need_resched_nonpreemptible:
+       if (likely(prev != next)) {
+               sched_info_switch(prev, next);
+-              if (prev != idle)
+-                      set_last_task(rq, prev);
+               set_rq_task(rq, next);
+               grq.nr_switches++;
+               prev->oncpu = 0;
+@@ -6054,10 +6040,12 @@ void __init sched_init_smp(void)
+                                       cpu_set(other_cpu, rq->cache_siblings);
+                       }
+ #endif
+-                      if (sd->level <= SD_LV_MC)
+-                              locality = 0;
+-                      else if (sd->level <= SD_LV_NODE)
++                      if (sd->level <= SD_LV_SIBLING)
+                               locality = 1;
++                      else if (sd->level <= SD_LV_MC)
++                              locality = 2;
++                      else if (sd->level <= SD_LV_NODE)
++                              locality = 3;
+                       else
+                               continue;
+@@ -6160,7 +6148,7 @@ void __init sched_init(void)
+                       if (i == j)
+                               rq->cpu_locality[j] = 0;
+                       else
+-                              rq->cpu_locality[j] = 3;
++                              rq->cpu_locality[j] = 4;
+               }
+       }
+ #endif
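
Aside: the locality rework in bfs-357-to-360.patch above replaces the last_task heuristic with per-CPU locality values (0 = same logical CPU, 1 = SMT sibling, 2 = other core with shared cache, 3 = same NUMA node, 4 = other node, per the sched_init_smp and sched_init hunks) and derives the virtual-deadline offset from them in cache_distance(). A small stand-alone sketch of that mapping; deadline_offset, the where[] labels and the 6000 us timeslice are illustrative, only the locality-to-shift logic is taken from the hunks above:

#include <stdio.h>

/* Mirrors the reworked cache_distance(): localities 0-2 (same CPU, SMT
 * sibling, other core with shared cache) get no deadline offset; a CPU on
 * the same node is offset by two timeslices, one on another node by four. */
static unsigned long deadline_offset(int cpu_locality, unsigned long timeslice)
{
	int locality = cpu_locality - 2;

	if (locality > 0)
		return timeslice << locality;
	return 0;
}

int main(void)
{
	static const char *where[] = {
		"same logical CPU", "SMT sibling", "other core, shared cache",
		"same NUMA node", "other node"
	};
	unsigned long timeslice = 6000;  /* illustrative timeslice in us */
	int l;

	for (l = 0; l <= 4; l++)
		printf("locality %d (%s): deadline offset %lu us\n",
		       l, where[l], deadline_offset(l, timeslice));
	return 0;
}

Keeping the offset at zero for anything sharing a cache is what biases tasks back onto the same core, which the patch description credits for better turbo-mode and ondemand-scaling behaviour.
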
diff --git a/kernel-bfs-2.6.28/debian/patches/bfs-360-to-363.patch b/kernel-bfs-2.6.28/debian/patches/bfs-360-to-363.patch
new file mode 100644
index 0000000..12ef93d
--- /dev/null
@@ -0,0 +1,189 @@
+Make CPU offlining more robust by simply removing all affinity for processes
+that no longer have any CPUs they can run on. This allows the machine stop
+thread to complete offlining CPUs and makes for a little less overhead in hot
+paths.
+
+Allow SCHED_IDLEPRIO to wake up idle CPUs in try_preempt. This would have
+caused minor slowdowns for IDLEPRIO tasks only on relatively quiescent systems.
+
+Remove inappropriate likely()s.
+
+Update cpustat for irq - may have been under-reporting interrupt load.
+
+Cosmetic changes.
+
+Bump version to 0.363
+
+-ck
+
+---
+ include/linux/sched.h |    2 -
+ kernel/sched_bfs.c    |   59 ++++++++++++++++++++++++++++++++++++++------------
+ 2 files changed, 46 insertions(+), 15 deletions(-)
+
+Index: linux-2.6.28/kernel/sched_bfs.c
+===================================================================
+--- linux-2.6.28.orig/kernel/sched_bfs.c       2011-01-01 14:44:45.863309853 +1100
++++ linux-2.6.28/kernel/sched_bfs.c    2011-01-01 14:45:09.246874529 +1100
+@@ -116,7 +116,7 @@
+ #define NS_TO_MS(TIME)                ((TIME) >> 20)
+ #define NS_TO_US(TIME)                ((TIME) >> 10)
+-#define RESCHED_US    (100) /* Reschedule if less than this many us left */
++#define RESCHED_US    (100) /* Reschedule if less than this many μs left */
+ #ifdef CONFIG_SMP
+ /*
+@@ -1249,7 +1249,7 @@ static inline int online_cpus(struct tas
+  */
+ static inline int needs_other_cpu(struct task_struct *p, int cpu)
+ {
+-      if (unlikely(!cpu_isset(cpu, p->cpus_allowed) && online_cpus(p)))
++      if (unlikely(!cpu_isset(cpu, p->cpus_allowed)))
+               return 1;
+       return 0;
+ }
+@@ -1266,15 +1266,15 @@ static void try_preempt(struct task_stru
+       int highest_prio;
+       cpumask_t tmp;
+-      /* IDLEPRIO tasks never preempt anything */
+-      if (p->policy == SCHED_IDLEPRIO)
+-              return;
+-
+       if (suitable_idle_cpus(p)) {
+               resched_best_idle(p);
+               return;
+       }
++      /* IDLEPRIO tasks never preempt anything */
++      if (p->policy == SCHED_IDLEPRIO)
++              return;
++
+       if (likely(online_cpus(p)))
+               cpus_and(tmp, cpu_online_map, p->cpus_allowed);
+       else
+@@ -1720,14 +1720,14 @@ context_switch(struct rq *rq, struct tas
+        */
+       arch_enter_lazy_cpu_mode();
+-      if (unlikely(!mm)) {
++      if (!mm) {
+               next->active_mm = oldmm;
+               atomic_inc(&oldmm->mm_count);
+               enter_lazy_tlb(oldmm, next);
+       } else
+               switch_mm(oldmm, mm, next);
+-      if (unlikely(!prev->mm)) {
++      if (!prev->mm) {
+               prev->active_mm = NULL;
+               rq->prev_mm = oldmm;
+       }
+@@ -1853,9 +1853,13 @@ pc_system_time(struct rq *rq, struct tas
+       }
+       p->sched_time += ns;
+-      if (hardirq_count() - hardirq_offset)
++      if (hardirq_count() - hardirq_offset) {
+               rq->irq_pc += pc;
+-      else if (softirq_count()) {
++              if (rq->irq_pc >= 100) {
++                      rq->irq_pc %= 100;
++                      cpustat->irq = cputime64_add(cpustat->irq, tmp);
++              }
++      } else if (softirq_count()) {
+               rq->softirq_pc += pc;
+               if (rq->softirq_pc >= 100) {
+                       rq->softirq_pc %= 100;
+@@ -2245,7 +2249,7 @@ static void task_running_tick(struct rq
+        * Tasks that were scheduled in the first half of a tick are not
+        * allowed to run into the 2nd half of the next tick if they will
+        * run out of time slice in the interim. Otherwise, if they have
+-       * less than 100us of time slice left they will be rescheduled.
++       * less than RESCHED_US μs of time slice left they will be rescheduled.
+        */
+       if (rq->dither) {
+               if (rq->rq_time_slice > HALF_JIFFY_US)
+@@ -2585,7 +2589,7 @@ need_resched_nonpreemptible:
+               prev->last_ran = rq->clock;
+               /* Task changed affinity off this CPU */
+-              if (unlikely(!cpu_isset(cpu, prev->cpus_allowed)))
++              if (needs_other_cpu(prev, cpu))
+                       resched_suitable_idle(prev);
+               else if (!deactivate) {
+                       if (!queued_notrunning()) {
+@@ -3288,8 +3292,8 @@ recheck:
+        * SCHED_BATCH is 0.
+        */
+       if (param->sched_priority < 0 ||
+-          (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
+-          (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
++          (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) ||
++          (!p->mm && param->sched_priority > MAX_RT_PRIO - 1))
+               return -EINVAL;
+       if (is_rt_policy(policy) != (param->sched_priority != 0))
+               return -EINVAL;
+@@ -3999,7 +4003,10 @@ void init_idle(struct task_struct *idle,
+       idle->prio = PRIO_LIMIT;
+       set_rq_task(rq, idle);
+       idle->cpus_allowed = cpumask_of_cpu(cpu);
++      /* Silence PROVE_RCU */
++      rcu_read_lock();
+       set_task_cpu(idle, cpu);
++      rcu_read_unlock();
+       rq->curr = rq->idle = idle;
+       idle->oncpu = 1;
+       set_cpuidle_map(cpu);
+@@ -4218,6 +4225,29 @@ void move_task_off_dead_cpu(int dead_cpu
+ }
++/* Run through task list and find tasks affined to just the dead cpu, then
++ * allocate a new affinity */
++static void break_sole_affinity(int src_cpu)
++{
++      struct task_struct *p, *t;
++
++      do_each_thread(t, p) {
++              if (!online_cpus(p)) {
++                      cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
++                      /*
++                       * Don't tell them about moving exiting tasks or
++                       * kernel threads (both mm NULL), since they never
++                       * leave kernel.
++                       */
++                      if (p->mm && printk_ratelimit()) {
++                              printk(KERN_INFO "process %d (%s) no "
++                                     "longer affine to cpu %d\n",
++                                     task_pid_nr(p), p->comm, src_cpu);
++                      }
++              }
++      } while_each_thread(t, p);
++}
++
+ /*
+  * Schedules idle task to be the next runnable task on current CPU.
+  * It does so by boosting its priority to highest possible.
+@@ -4238,6 +4268,7 @@ void sched_idle_next(void)
+        * and interrupts disabled on the current cpu.
+        */
+       grq_lock_irqsave(&flags);
++      break_sole_affinity(this_cpu);
+       __setscheduler(idle, rq, SCHED_FIFO, MAX_RT_PRIO - 1);
+Index: linux-2.6.28/include/linux/sched.h
+===================================================================
+--- linux-2.6.28.orig/include/linux/sched.h    2010-12-16 15:43:24.006131284 +1100
++++ linux-2.6.28/include/linux/sched.h 2011-01-01 14:45:09.250874451 +1100
+@@ -1426,7 +1426,7 @@ static inline void tsk_cpus_current(stru
+ static inline void print_scheduler_version(void)
+ {
+-      printk(KERN_INFO"BFS CPU scheduler v0.360 by Con Kolivas.\n");
++      printk(KERN_INFO"BFS CPU scheduler v0.363 by Con Kolivas.\n");
+ }
+ static inline int iso_task(struct task_struct *p)
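
Aside: the irq-accounting hunk in bfs-360-to-363.patch above gives hard-irq time the same percent-of-a-tick accumulator BFS already uses for softirq time: per-tick percentages are summed and a whole tick is charged to cpustat only once 100% has built up, which addresses the under-reporting mentioned in the patch description. A user-space sketch of that accumulator pattern; struct pc_account and account_pc are illustrative stand-ins for rq->irq_pc and cpustat->irq, not kernel API:

#include <stdio.h>

/* Percent-of-a-tick accumulator: fractional tick charges build up until a
 * whole tick can be accounted, as with rq->irq_pc / rq->softirq_pc. */
struct pc_account {
	unsigned long pc;            /* accumulated percentage of one tick */
	unsigned long long ticks;    /* whole ticks charged (stand-in for cpustat->irq) */
};

static void account_pc(struct pc_account *a, unsigned long pc)
{
	a->pc += pc;
	if (a->pc >= 100) {
		a->pc %= 100;
		a->ticks++;  /* in the kernel: cpustat->irq = cputime64_add(cpustat->irq, tmp) */
	}
}

int main(void)
{
	struct pc_account irq = { 0, 0 };
	int i;

	/* Seven ticks that each spent 30% of their time in hard-irq context. */
	for (i = 0; i < 7; i++)
		account_pc(&irq, 30);

	printf("accumulated %lu%% of a tick, %llu whole tick(s) charged\n",
	       irq.pc, irq.ticks);
	return 0;
}

Before this hunk the percentages were added to rq->irq_pc but never flushed to cpustat, so interrupt load could accumulate without ever being reported.
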
diff --git a/kernel-bfs-2.6.28/debian/patches/bt-mice.diff b/kernel-bfs-2.6.28/debian/patches/bt-mice.diff
new file mode 100644
index 0000000..0b5fd92
--- /dev/null
@@ -0,0 +1,15 @@
+--- kernel-power-2.6.28.orig/net/bluetooth/hci_event.c
++++ kernel-power-2.6.28/net/bluetooth/hci_event.c
+@@ -1486,12 +1486,6 @@
+               conn->mode = ev->mode;
+               conn->interval = __le16_to_cpu(ev->interval);
+-              if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
+-                      if (conn->mode == HCI_CM_ACTIVE)
+-                              conn->power_save = 1;
+-                      else
+-                              conn->power_save = 0;
+-              }
+       }
+       hci_dev_unlock(hdev);
diff --git a/kernel-bfs-2.6.28/debian/patches/series b/kernel-bfs-2.6.28/debian/patches/series
index 54ff74e..c7d0ab6 100644
@@ -34,6 +34,8 @@ bfs-318-to-330.patch
 sched_reset_on_fork.diff
 bfs-330-to-350.patch
 bfs-350-to-357.patch
+bfs-357-to-360.patch
+bfs-360-to-363.patch
 voltage_scaling_1.diff
 voltage_scaling_0.diff
 armthumb.diff
@@ -43,3 +45,4 @@ fmtx.unlock.diff
 radio-bcm2048.diff
 i2c-battery.diff
 usbhostmode.diff
+bt-mice.diff
diff --git a/kernel-bfs-2.6.28/debian/rules b/kernel-bfs-2.6.28/debian/rules
index 9af8175..138069f 100644
@@ -4,7 +4,7 @@
 WEEK := $(shell date +%Y%W)
 RELEASE := $(shell dpkg-parsechangelog | awk '/^Version: / { print $$2 }')
 REVISION := $(shell echo "$(RELEASE)" | sed 's/\(.*\)-maemo\(.*\)/.10bfs\2/')
-EXTRAVERSION := EXTRAVERSION=-bfs4
+EXTRAVERSION := EXTRAVERSION=-bfs5
 
 PACKAGE := kernel
 FLASHER_PACKAGE := kernel-bfs-flasher
@@ -49,17 +49,17 @@ configure-stamp:
        QUILT_PATCHES=debian/patches quilt push -a -q || test $$? = 2
        cp debian/$(DEFCONFIG) arch/arm/configs/
 
-       uudecode debian/u-boot.bin.gz.b64 -o - | gunzip -dc > debian/u-boot.bin
-       dd if=debian/u-boot.bin of=debian/u-boot.pad bs=262144 count=1 conv=sync
+       #uudecode debian/u-boot.bin.gz.b64 -o - | gunzip -dc > debian/u-boot.bin
+       #dd if=debian/u-boot.bin of=debian/u-boot.pad bs=262144 count=1 conv=sync
        cd $(KSRC) && $(MAKE) $(EXTRAVERSION) $(DEFCONFIG)
        touch $@
 
 kernel-stamp: configure-stamp
        echo "compile $(PRODUCT) kernel"
        cd $(KSRC) && $(MAKE) $(NJOBS) $(EXTRAVERSION) zImage
-       mkimage  -A arm -O linux -T kernel -C none -a 80008000 -e 80008000 -n kernel-bfs -d $(BUILDTMP)/arch/arm/boot/zImage $(BUILDTMP)/arch/arm/boot/uImage
+       #mkimage  -A arm -O linux -T kernel -C none -a 80008000 -e 80008000 -n kernel-bfs -d $(BUILDTMP)/arch/arm/boot/zImage $(BUILDTMP)/arch/arm/boot/uImage
        #cat /usr/lib/u-boot/u-boot.bin.0x38000/u-boot.bin.0x38000 $(BUILDTMP)/arch/arm/boot/uImage > debian/uImage
-       cat debian/u-boot.pad $(BUILDTMP)/arch/arm/boot/uImage > debian/uImage
+       #cat debian/u-boot.pad $(BUILDTMP)/arch/arm/boot/uImage > debian/uImage
        touch $@
 
 modules-stamp: configure-stamp
@@ -107,8 +107,8 @@ install-kernel:
 
        install -d -m 755 $(CURDIR)/debian/$(KERNEL_PACKAGE)/boot
        fiasco-gen -o $(CURDIR)/debian/$(KERNEL_PACKAGE)/boot/zImage-$(RELEASE).fiasco -g \
+                       -k $(BUILDTMP)/arch/arm/boot/zImage -v $(RELEASE)
                        -k debian/uImage -v $(RELEASE)
-       #               -k $(BUILDTMP)/arch/arm/boot/zImage -v $(RELEASE)
        chmod 644 $(CURDIR)/debian/$(KERNEL_PACKAGE)/boot/zImage-$(RELEASE).fiasco
 
 install-bootimg: