fix hunk locations in bfs-tidy_up_resched.patch
author Dennis Groenen <tj.groenen@gmail.com>
Wed, 31 Aug 2011 14:08:41 +0000 (16:08 +0200)
committer Dennis Groenen <tj.groenen@gmail.com>
Wed, 31 Aug 2011 14:08:41 +0000 (16:08 +0200)
kernel-bfs-2.6.28/debian/patches/bfs/bfs-tidy_up_resched.patch

index d1ff566..2e1787d 100644
@@ -1,15 +1,15 @@
 --- linux-2.6.28/kernel/sched_bfs.c    2011-06-17 23:09:25.884488799 +0200
 +++ linux-2.6.28.new/kernel/sched_bfs.c        2011-06-17 23:15:51.483825482 +0200
-@@ -2459,7 +2459,7 @@ need_resched_nonpreemptible:
+@@ -2453,7 +2453,7 @@ need_resched_nonpreemptible:
        if (unlikely(reacquire_kernel_lock(current) < 0))
                goto need_resched_nonpreemptible;
        preempt_enable_no_resched();
 -      if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 +      if (need_resched())
-               goto need_resched;
+               goto need_resched;
  }
  EXPORT_SYMBOL(schedule);
-@@ -2491,7 +2491,7 @@ asmlinkage void __sched preempt_schedule
+@@ -2485,7 +2485,7 @@ asmlinkage void __sched preempt_schedule
                 * between schedule and now.
                 */
                barrier();
@@ -18,7 +18,7 @@
  }
  EXPORT_SYMBOL(preempt_schedule);
  
-@@ -2520,7 +2520,7 @@ asmlinkage void __sched preempt_schedule
+@@ -2514,7 +2514,7 @@ asmlinkage void __sched preempt_schedule
                 * between schedule and now.
                 */
                barrier();
@@ -27,7 +27,7 @@
  }
  
  #endif /* CONFIG_PREEMPT */
-@@ -3489,6 +3489,11 @@ asmlinkage long sys_sched_yield(void)
+@@ -3591,6 +3591,11 @@ asmlinkage long sys_sched_yield(void)
        return 0;
  }
  
@@ -39,7 +39,7 @@
  static void __cond_resched(void)
  {
 	/* NOT a real fix but will make voluntary preempt work. A foolish thing */
-@@ -3511,8 +3516,7 @@ static void __cond_resched(void)
+@@ -3613,8 +3618,7 @@ static void __cond_resched(void)
  
  int __sched _cond_resched(void)
  {
@@ -49,7 +49,7 @@
                __cond_resched();
                return 1;
        }
-@@ -3530,12 +3534,12 @@ EXPORT_SYMBOL(_cond_resched);
+@@ -3632,12 +3636,12 @@ EXPORT_SYMBOL(_cond_resched);
   */
  int cond_resched_lock(spinlock_t *lock)
  {
@@ -64,7 +64,7 @@
                        __cond_resched();
                else
                        cpu_relax();
-@@ -3550,7 +3554,7 @@ int __sched cond_resched_softirq(void)
+@@ -3652,7 +3656,7 @@ int __sched cond_resched_softirq(void)
  {
        BUG_ON(!in_softirq());
  
@@ -73,7 +73,7 @@
                local_bh_enable();
                __cond_resched();
                local_bh_disable();
-@@ -3919,7 +3923,7 @@ void wake_up_idle_cpu(int cpu)
+@@ -4021,7 +4025,7 @@ void wake_up_idle_cpu(int cpu)
         * lockless. The worst case is that the other CPU runs the
         * idle task through an additional NOOP schedule()
         */