BFS prerequisites and tidy-up of resched functions
kernel-bfs/kernel-bfs-2.6.28/debian/patches/bfs/bfs-setup_prereqs.patch
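The core of this patch is the new __task_pid_nr_ns() helper in kernel/pid.c: the task_*_nr_ns()/task_*_vnr() wrappers in include/linux/sched.h now funnel through it instead of reading the removed __pgrp/__session fields of signal_struct. A minimal sketch of the resulting call pattern (illustrative only, not part of the patch; names follow the hunks below):

    /* number of p's process group in an explicit namespace ns */
    pid_t pgrp_nr  = __task_pid_nr_ns(p, PIDTYPE_PGID, ns);
    /* ns == NULL means "current task's pid namespace" (see the kernel/pid.c hunk) */
    pid_t pgrp_vnr = __task_pid_nr_ns(p, PIDTYPE_PGID, NULL);
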
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 994b4d3..1e1dc49 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -373,7 +373,7 @@ void release_thread(struct task_struct *dead_task)
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
 int
-copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
+copy_thread(unsigned long clone_flags, unsigned long stack_start,
            unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
 {
        struct thread_info *thread = task_thread_info(p);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index bc84e12..99bf9b0 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -2682,7 +2682,7 @@ void __do_SAK(struct tty_struct *tty)
        /* Kill the entire session */
        do_each_pid_task(session, PIDTYPE_SID, p) {
                printk(KERN_NOTICE "SAK: killed process %d"
-                       " (%s): task_session_nr(p)==tty->session\n",
+                       " (%s): task_session(p)==tty->session\n",
                        task_pid_nr(p), p->comm);
                send_sig(SIGKILL, p, 1);
        } while_each_pid_task(session, PIDTYPE_SID, p);
@@ -2692,7 +2692,7 @@ void __do_SAK(struct tty_struct *tty)
        do_each_thread(g, p) {
                if (p->signal->tty == tty) {
                        printk(KERN_NOTICE "SAK: killed process %d"
-                           " (%s): task_session_nr(p)==tty->session\n",
+                           " (%s): task_session(p)==tty->session\n",
                            task_pid_nr(p), p->comm);
                        send_sig(SIGKILL, p, 1);
                        continue;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 011db2f..802d144 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -538,25 +539,8 @@ struct signal_struct {
 
        struct list_head cpu_timers[3];
 
-       /* job control IDs */
-
-       /*
-        * pgrp and session fields are deprecated.
-        * use the task_session_Xnr and task_pgrp_Xnr routines below
-        */
-
-       union {
-               pid_t pgrp __deprecated;
-               pid_t __pgrp;
-       };
-
        struct pid *tty_old_pgrp;
 
-       union {
-               pid_t session __deprecated;
-               pid_t __session;
-       };
-
        /* boolean value for session group leader */
        int leader;
 
@@ -1453,16 +1437,6 @@ static inline int rt_task(struct task_struct *p)
        return rt_prio(p->prio);
 }
 
-static inline void set_task_session(struct task_struct *tsk, pid_t session)
-{
-       tsk->signal->__session = session;
-}
-
-static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
-{
-       tsk->signal->__pgrp = pgrp;
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
        return task->pids[PIDTYPE_PID].pid;
@@ -1473,6 +1447,11 @@ static inline struct pid *task_tgid(struct task_struct *task)
        return task->group_leader->pids[PIDTYPE_PID].pid;
 }
 
+/*
+ * Without tasklist or rcu lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current,
+ * we can race with another thread doing sys_setsid/sys_setpgid.
+ */
 static inline struct pid *task_pgrp(struct task_struct *task)
 {
        return task->group_leader->pids[PIDTYPE_PGID].pid;
@@ -1498,17 +1477,23 @@ struct pid_namespace;
  *
  * see also pid_nr() etc in include/linux/pid.h
  */
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+                       struct pid_namespace *ns);
 
 static inline pid_t task_pid_nr(struct task_struct *tsk)
 {
        return tsk->pid;
 }
 
-pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
+                                       struct pid_namespace *ns)
+{
+       return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
+}
 
 static inline pid_t task_pid_vnr(struct task_struct *tsk)
 {
-       return pid_vnr(task_pid(tsk));
+       return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
 }
 
 
@@ -1525,31 +1510,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 }
 
 
-static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
+                                       struct pid_namespace *ns)
 {
-       return tsk->signal->__pgrp;
+       return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
 }
 
-pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
 {
-       return pid_vnr(task_pgrp(tsk));
+       return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
 }
 
 
-static inline pid_t task_session_nr(struct task_struct *tsk)
+static inline pid_t task_session_nr_ns(struct task_struct *tsk,
+                                       struct pid_namespace *ns)
 {
-       return tsk->signal->__session;
+       return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
 }
 
-pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
 static inline pid_t task_session_vnr(struct task_struct *tsk)
 {
-       return pid_vnr(task_session(tsk));
+       return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
 }
 
+/* obsolete, do not use */
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+       return task_pgrp_nr_ns(tsk, &init_pid_ns);
+}
 
 /**
  * pid_alive - check that a task structure is not stale
@@ -1949,7 +1937,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *);
 /* Allocate a new mm structure and copy contents from tsk->mm */
 extern struct mm_struct *dup_mm(struct task_struct *tsk);
 
-extern int  copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern int copy_thread(unsigned long, unsigned long, unsigned long,
+                       struct task_struct *, struct pt_regs *);
 extern void flush_thread(void);
 extern void exit_thread(void);
 
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a210ede..0d2eeb0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -135,8 +135,11 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
 void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
                        int nr_exclusive, int sync, void *key);
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
-extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
-extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
+                       void *key);
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_bit(wait_queue_head_t *, void *, int);
 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
diff --git a/kernel/exit.c b/kernel/exit.c
index efd30cc..ca734c6f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -362,16 +362,12 @@ static void reparent_to_kthreadd(void)
 void __set_special_pids(struct pid *pid)
 {
        struct task_struct *curr = current->group_leader;
-       pid_t nr = pid_nr(pid);
 
-       if (task_session(curr) != pid) {
+       if (task_session(curr) != pid)
                change_pid(curr, PIDTYPE_SID, pid);
-               set_task_session(curr, nr);
-       }
-       if (task_pgrp(curr) != pid) {
+
+       if (task_pgrp(curr) != pid)
                change_pid(curr, PIDTYPE_PGID, pid);
-               set_task_pgrp(curr, nr);
-       }
 }
 
 static void set_special_pids(struct pid *pid)
@@ -815,33 +811,44 @@ static void ptrace_exit_finish(struct task_struct *parent,
        }
 }
 
-static void reparent_thread(struct task_struct *p, struct task_struct *father)
+/* Returns nonzero if the child should be released. */
+static int reparent_thread(struct task_struct *p, struct task_struct *father)
 {
+       int dead;
+
        if (p->pdeath_signal)
                /* We already hold the tasklist_lock here.  */
                group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
 
        list_move_tail(&p->sibling, &p->real_parent->children);
 
+       if (task_detached(p))
+               return 0;
        /* If this is a threaded reparent there is no need to
        * notify anyone anything has happened.
        */
        if (same_thread_group(p->real_parent, father))
-               return;
+               return 0;
 
        /* We don't want people slaying init.  */
-       if (!task_detached(p))
-               p->exit_signal = SIGCHLD;
+       p->exit_signal = SIGCHLD;
 
        /* If we'd notified the old parent about this child's death,
        * also notify the new parent.
        */
-       if (!ptrace_reparented(p) &&
-           p->exit_state == EXIT_ZOMBIE &&
-           !task_detached(p) && thread_group_empty(p))
+       dead = 0;
+       if (!p->ptrace &&
+           p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
                do_notify_parent(p, p->exit_signal);
+               if (task_detached(p)) {
+                       p->exit_state = EXIT_DEAD;
+                       dead = 1;
+               }
+       }
 
        kill_orphaned_pgrp(p, father);
+
+       return dead;
 }
 
 /*
@@ -901,7 +908,8 @@ static void forget_original_parent(struct task_struct *father)
                        BUG_ON(p->ptrace);
                        p->parent = p->real_parent;
                }
-               reparent_thread(p, father);
+               if (reparent_thread(p, father))
+                       list_add(&p->ptrace_entry, &ptrace_dead);
        }
 
        write_unlock_irq(&tasklist_lock);
@@ -1420,6 +1428,18 @@ static int wait_task_zombie(struct task_struct *p, int options,
        return retval;
 }
 
+static int *task_stopped_code(struct task_struct *p, bool ptrace)
+{
+       if (ptrace) {
+               if (task_is_stopped_or_traced(p))
+                       return &p->exit_code;
+       } else {
+               if (p->signal->flags & SIGNAL_STOP_STOPPED)
+                       return &p->signal->group_exit_code;
+       }
+       return NULL;
+}
+
 /*
  * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
@@ -1430,7 +1450,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
                             int options, struct siginfo __user *infop,
                             int __user *stat_addr, struct rusage __user *ru)
 {
-       int retval, exit_code, why;
+       int retval, exit_code, *p_code, why;
        uid_t uid = 0; /* unneeded, required by compiler */
        pid_t pid;
 
@@ -1440,22 +1460,16 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
        exit_code = 0;
        spin_lock_irq(&p->sighand->siglock);
 
-       if (unlikely(!task_is_stopped_or_traced(p)))
-               goto unlock_sig;
-
-       if (!ptrace && p->signal->group_stop_count > 0)
-               /*
-                * A group stop is in progress and this is the group leader.
-                * We won't report until all threads have stopped.
-                */
+       p_code = task_stopped_code(p, ptrace);
+       if (unlikely(!p_code))
                goto unlock_sig;
 
-       exit_code = p->exit_code;
+       exit_code = *p_code;
        if (!exit_code)
                goto unlock_sig;
 
        if (!unlikely(options & WNOWAIT))
-               p->exit_code = 0;
+               *p_code = 0;
 
        /* don't need the RCU readlock here as we're holding a spinlock */
        uid = __task_cred(p)->uid;
@@ -1611,7 +1625,7 @@ static int wait_consider_task(struct task_struct *parent, int ptrace,
         */
        *notask_error = 0;
 
-       if (task_is_stopped_or_traced(p))
+       if (task_stopped_code(p, ptrace))
                return wait_task_stopped(ptrace, p, options,
                                         infop, stat_addr, ru);
 
@@ -1811,7 +1825,7 @@  asmlinkage long sys_wait4(pid_t upid, in
                pid = find_get_pid(-upid);
        } else if (upid == 0) {
                type = PIDTYPE_PGID;
-               pid = get_pid(task_pgrp(current));
+               pid = get_task_pid(current, PIDTYPE_PGID);
        } else /* upid > 0 */ {
                type = PIDTYPE_PID;
                pid = find_get_pid(upid);
diff --git a/kernel/fork.c b/kernel/fork.c
index 4854c2c..cf9f156 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1120,7 +1120,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto bad_fork_cleanup_mm;
        if ((retval = copy_io(clone_flags, p)))
                goto bad_fork_cleanup_namespaces;
-       retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
+       retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
        if (retval)
                goto bad_fork_cleanup_io;
 
@@ -1258,8 +1258,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                        p->signal->leader_pid = pid;
                        tty_kref_put(p->signal->tty);
                        p->signal->tty = tty_kref_get(current->signal->tty);
-                       set_task_pgrp(p, task_pgrp_nr(current));
-                       set_task_session(p, task_session_nr(current));
                        attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
                        attach_pid(p, PIDTYPE_SID, task_session(current));
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
diff --git a/kernel/pid.c b/kernel/pid.c
index 1b3586f..8582d4e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -403,6 +403,8 @@  struct pid *get_task_pid(struct task_str
 {
        struct pid *pid;
        rcu_read_lock();
+       if (type != PIDTYPE_PID)
+               task = task->group_leader;
        pid = get_pid(task->pids[type].pid);
        rcu_read_unlock();
        return pid;
@@ -450,11 +452,24 @@ pid_t pid_vnr(struct pid *pid)
 }
 EXPORT_SYMBOL_GPL(pid_vnr);
 
-pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+                       struct pid_namespace *ns)
 {
-       return pid_nr_ns(task_pid(tsk), ns);
+       pid_t nr = 0;
+
+       rcu_read_lock();
+       if (!ns)
+               ns = current->nsproxy->pid_ns;
+       if (likely(pid_alive(task))) {
+               if (type != PIDTYPE_PID)
+                       task = task->group_leader;
+               nr = pid_nr_ns(task->pids[type].pid, ns);
+       }
+       rcu_read_unlock();
+
+       return nr;
 }
-EXPORT_SYMBOL(task_pid_nr_ns);
+EXPORT_SYMBOL(__task_pid_nr_ns);
 
 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
 {
@@ -462,18 +477,6 @@ pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
 }
 EXPORT_SYMBOL(task_tgid_nr_ns);
 
-pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
-       return pid_nr_ns(task_pgrp(tsk), ns);
-}
-EXPORT_SYMBOL(task_pgrp_nr_ns);
-
-pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
-       return pid_nr_ns(task_session(tsk), ns);
-}
-EXPORT_SYMBOL(task_session_nr_ns);
-
 /*
  * Used by proc to find the first pid that is greater then or equal to nr.
  *
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 476607f..0df323a 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1371,7 +1372,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }
-       return 0;
+
+       return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
 }
 
 /*
@@ -1419,19 +1421,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-               int firing;
+               int cpu_firing;
+
                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
-               firing = timer->it.cpu.firing;
+               cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
-               if (likely(firing >= 0)) {
+               if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
-               }
                spin_unlock(&timer->it_lock);
        }
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index f1e8560..b0cdc3a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -618,9 +618,6 @@ struct rq {
        /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 
        /* sys_sched_yield() stats */
-       unsigned int yld_exp_empty;
-       unsigned int yld_act_empty;
-       unsigned int yld_both_empty;
        unsigned int yld_count;
 
        /* schedule() stats */
@@ -2750,7 +2747,40 @@ unsigned long nr_iowait(void)
        return sum;
 }
 
-unsigned long nr_active(void)
+/* Variables and functions for calc_load */
+static atomic_long_t calc_load_tasks;
+static unsigned long calc_load_update;
+unsigned long avenrun[3];
+EXPORT_SYMBOL(avenrun);
+
+/**
+ * get_avenrun - get the load average array
+ * @loads:     pointer to dest load array
+ * @offset:    offset to add
+ * @shift:     shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+       loads[0] = (avenrun[0] + offset) << shift;
+       loads[1] = (avenrun[1] + offset) << shift;
+       loads[2] = (avenrun[2] + offset) << shift;
+}
+
+static unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
+{
+       load *= exp;
+       load += active * (FIXED_1 - exp);
+       return load >> FSHIFT;
+}
+
+/*
+ * calc_load - update the avenrun load estimates 10 ticks after the
+ * CPUs have updated calc_load_tasks.
+ */
+void calc_global_load(void)
 {
        unsigned long i, running = 0, uninterruptible = 0;
 
@@ -4781,11 +4811,17 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
        __wake_up_common(q, mode, 1, 0, NULL);
 }
 
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+{
+       __wake_up_common(q, mode, 1, 0, key);
+}
+
 /**
- * __wake_up_sync - wake up threads blocked on a waitqueue.
+ * __wake_up_sync_key - wake up threads blocked on a waitqueue.
  * @q: the waitqueue
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: opaque value to be passed to wakeup targets
  *
  * The sync wakeup differs that the waker knows that it will schedule
  * away soon, so while the target thread will be woken up, it will not
@@ -4794,8 +4830,8 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
  */
-void
-__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+                       int nr_exclusive, void *key)
 {
        unsigned long flags;
        int sync = 1;
@@ -4807,9 +4843,18 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
                sync = 0;
 
        spin_lock_irqsave(&q->lock, flags);
-       __wake_up_common(q, mode, nr_exclusive, sync, NULL);
+       __wake_up_common(q, mode, nr_exclusive, sync, key);
        spin_unlock_irqrestore(&q->lock, flags);
 }
+EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+
+/*
+ * __wake_up_sync - see __wake_up_sync_key()
+ */
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+{
+       __wake_up_sync_key(q, mode, nr_exclusive, NULL);
+}
 EXPORT_SYMBOL_GPL(__wake_up_sync);     /* For internal use only */
 
 /**
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 16eeba4e..bdf57bc 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -287,9 +287,6 @@ static void print_cpu(struct seq_file *m, int cpu)
 #ifdef CONFIG_SCHEDSTATS
 #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
 
-       P(yld_exp_empty);
-       P(yld_act_empty);
-       P(yld_both_empty);
        P(yld_count);
 
        P(sched_switch);
@@ -314,7 +311,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
        u64 now = ktime_to_ns(ktime_get());
        int cpu;
 
-       SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
+       SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a8f93dd..32d2bd4 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -4,7 +4,7 @@
  * bump this up when changing the output format or the meaning of an existing
  * format, so that tools can adapt (or abort)
  */
-#define SCHEDSTAT_VERSION 14
+#define SCHEDSTAT_VERSION 15
 
 static int show_schedstat(struct seq_file *seq, void *v)
 {
@@ -26,9 +26,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 
                /* runqueue-specific stats */
                seq_printf(seq,
-                   "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
-                   cpu, rq->yld_both_empty,
-                   rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
+                   "cpu%d %u %u %u %u %u %u %llu %llu %lu",
+                   cpu, rq->yld_count,
                    rq->sched_switch, rq->sched_count, rq->sched_goidle,
                    rq->ttwu_count, rq->ttwu_local,
                    rq->rq_cpu_time,
diff --git a/kernel/sys.c b/kernel/sys.c
index 37f458e..742cefa 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1013,10 +1013,8 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
        if (err)
                goto out;
 
-       if (task_pgrp(p) != pgrp) {
+       if (task_pgrp(p) != pgrp)
                change_pid(p, PIDTYPE_PGID, pgrp);
-               set_task_pgrp(p, pid_nr(pgrp));
-       }
 
        err = 0;
 out:
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1f0c509..6e9b6d1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -972,17 +972,19 @@ undo:
 
 #ifdef CONFIG_SMP
 struct work_for_cpu {
-       struct work_struct work;
+       struct completion completion;
        long (*fn)(void *);
        void *arg;
        long ret;
 };
 
-static void do_work_for_cpu(struct work_struct *w)
+static int do_work_for_cpu(void *_wfc)
 {
-       struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);
+       struct work_for_cpu *wfc = _wfc;
 
        wfc->ret = wfc->fn(wfc->arg);
+       complete(&wfc->completion);
+       return 0;
 }
 
 /**
@@ -996,20 +998,19 @@ static void do_work_for_cpu(struct work_
  */
 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
-       struct work_for_cpu wfc;
-
-       INIT_WORK(&wfc.work, do_work_for_cpu);
-       wfc.fn = fn;
-       wfc.arg = arg;
-       get_online_cpus();
-       if (unlikely(!cpu_online(cpu)))
-               wfc.ret = -EINVAL;
-       else {
-               schedule_work_on(cpu, &wfc.work);
-               flush_work(&wfc.work);
-       }
-       put_online_cpus();
-
+       struct task_struct *sub_thread;
+       struct work_for_cpu wfc = {
+               .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
+               .fn = fn,
+               .arg = arg,
+       };
+
+       sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
+       if (IS_ERR(sub_thread))
+               return PTR_ERR(sub_thread);
+       kthread_bind(sub_thread, cpu);
+       wake_up_process(sub_thread);
+       wait_for_completion(&wfc.completion);
        return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);