1 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2 index 994b4d3..1e1dc49 100644
3 --- a/arch/arm/kernel/process.c
4 +++ b/arch/arm/kernel/process.c
5 @@ -373,7 +373,7 @@ void release_thread(struct task_struct *dead_task)
6 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
9 -copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
10 +copy_thread(unsigned long clone_flags, unsigned long stack_start,
11 unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
13 struct thread_info *thread = task_thread_info(p);
14 diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
15 index bc84e12..99bf9b0 100644
16 --- a/drivers/char/tty_io.c
17 +++ b/drivers/char/tty_io.c
18 @@ -2682,7 +2682,7 @@ void __do_SAK(struct tty_struct *tty)
19 /* Kill the entire session */
20 do_each_pid_task(session, PIDTYPE_SID, p) {
21 printk(KERN_NOTICE "SAK: killed process %d"
22 - " (%s): task_session_nr(p)==tty->session\n",
23 + " (%s): task_session(p)==tty->session\n",
24 task_pid_nr(p), p->comm);
25 send_sig(SIGKILL, p, 1);
26 } while_each_pid_task(session, PIDTYPE_SID, p);
27 @@ -2692,7 +2692,7 @@ void __do_SAK(struct tty_struct *tty)
28 do_each_thread(g, p) {
29 if (p->signal->tty == tty) {
30 printk(KERN_NOTICE "SAK: killed process %d"
31 - " (%s): task_session_nr(p)==tty->session\n",
32 + " (%s): task_session(p)==tty->session\n",
33 task_pid_nr(p), p->comm);
34 send_sig(SIGKILL, p, 1);
36 diff --git a/include/linux/sched.h b/include/linux/sched.h
37 index 011db2f..802d144 100644
38 --- a/include/linux/sched.h
39 +++ b/include/linux/sched.h
40 @@ -538,25 +539,8 @@ struct signal_struct {
42 struct list_head cpu_timers[3];
44 - /* job control IDs */
47 - * pgrp and session fields are deprecated.
48 - * use the task_session_Xnr and task_pgrp_Xnr routines below
52 - pid_t pgrp __deprecated;
56 struct pid *tty_old_pgrp;
59 - pid_t session __deprecated;
63 /* boolean value for session group leader */
66 @@ -1453,16 +1437,6 @@ static inline int rt_task(struct task_struct *p)
67 return rt_prio(p->prio);
70 -static inline void set_task_session(struct task_struct *tsk, pid_t session)
72 - tsk->signal->__session = session;
75 -static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
77 - tsk->signal->__pgrp = pgrp;
80 static inline struct pid *task_pid(struct task_struct *task)
82 return task->pids[PIDTYPE_PID].pid;
83 @@ -1473,6 +1447,11 @@ static inline struct pid *task_tgid(struct task_struct *task)
84 return task->group_leader->pids[PIDTYPE_PID].pid;
88 + * Without tasklist or rcu lock it is not safe to dereference
89 + * the result of task_pgrp/task_session even if task == current,
90 + * we can race with another thread doing sys_setsid/sys_setpgid.
92 static inline struct pid *task_pgrp(struct task_struct *task)
94 return task->group_leader->pids[PIDTYPE_PGID].pid;
95 @@ -1498,17 +1477,23 @@ struct pid_namespace;
97 * see also pid_nr() etc in include/linux/pid.h
99 +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
100 + struct pid_namespace *ns);
102 static inline pid_t task_pid_nr(struct task_struct *tsk)
107 -pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
108 +static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
109 + struct pid_namespace *ns)
111 + return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
114 static inline pid_t task_pid_vnr(struct task_struct *tsk)
116 - return pid_vnr(task_pid(tsk));
117 + return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
121 @@ -1525,31 +1510,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
125 -static inline pid_t task_pgrp_nr(struct task_struct *tsk)
126 +static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
127 + struct pid_namespace *ns)
129 - return tsk->signal->__pgrp;
130 + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
133 -pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
135 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
137 - return pid_vnr(task_pgrp(tsk));
138 + return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
142 -static inline pid_t task_session_nr(struct task_struct *tsk)
143 +static inline pid_t task_session_nr_ns(struct task_struct *tsk,
144 + struct pid_namespace *ns)
146 - return tsk->signal->__session;
147 + return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
150 -pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
152 static inline pid_t task_session_vnr(struct task_struct *tsk)
154 - return pid_vnr(task_session(tsk));
155 + return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
158 +/* obsolete, do not use */
159 +static inline pid_t task_pgrp_nr(struct task_struct *tsk)
161 + return task_pgrp_nr_ns(tsk, &init_pid_ns);
165 * pid_alive - check that a task structure is not stale
166 @@ -1949,7 +1937,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *);
167 /* Allocate a new mm structure and copy contents from tsk->mm */
168 extern struct mm_struct *dup_mm(struct task_struct *tsk);
170 -extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
171 +extern int copy_thread(unsigned long, unsigned long, unsigned long,
172 + struct task_struct *, struct pt_regs *);
173 extern void flush_thread(void);
174 extern void exit_thread(void);
176 diff --git a/include/linux/wait.h b/include/linux/wait.h
177 index a210ede..0d2eeb0 100644
178 --- a/include/linux/wait.h
179 +++ b/include/linux/wait.h
180 @@ -135,8 +135,11 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
181 void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
182 int nr_exclusive, int sync, void *key);
183 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
184 -extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
185 -extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
186 +void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
187 +void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
189 +void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
190 +void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
191 void __wake_up_bit(wait_queue_head_t *, void *, int);
192 int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
193 int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
194 diff --git a/kernel/exit.c b/kernel/exit.c
195 index efd30cc..ca734c6f 100644
198 @@ -362,16 +362,12 @@ static void reparent_to_kthreadd(void)
199 void __set_special_pids(struct pid *pid)
201 struct task_struct *curr = current->group_leader;
202 - pid_t nr = pid_nr(pid);
204 - if (task_session(curr) != pid) {
205 + if (task_session(curr) != pid)
206 change_pid(curr, PIDTYPE_SID, pid);
207 - set_task_session(curr, nr);
209 - if (task_pgrp(curr) != pid) {
211 + if (task_pgrp(curr) != pid)
212 change_pid(curr, PIDTYPE_PGID, pid);
213 - set_task_pgrp(curr, nr);
217 static void set_special_pids(struct pid *pid)
218 @@ -815,33 +811,44 @@ static void ptrace_exit_finish(struct task_struct *parent,
222 -static void reparent_thread(struct task_struct *p, struct task_struct *father)
223 +/* Returns nonzero if the child should be released. */
224 +static int reparent_thread(struct task_struct *p, struct task_struct *father)
228 if (p->pdeath_signal)
229 /* We already hold the tasklist_lock here. */
230 group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
232 list_move_tail(&p->sibling, &p->real_parent->children);
234 + if (task_detached(p))
236 /* If this is a threaded reparent there is no need to
237 * notify anyone anything has happened.
239 if (same_thread_group(p->real_parent, father))
243 /* We don't want people slaying init. */
244 - if (!task_detached(p))
245 - p->exit_signal = SIGCHLD;
246 + p->exit_signal = SIGCHLD;
248 /* If we'd notified the old parent about this child's death,
249 * also notify the new parent.
251 - if (!ptrace_reparented(p) &&
252 - p->exit_state == EXIT_ZOMBIE &&
253 - !task_detached(p) && thread_group_empty(p))
256 + p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
257 do_notify_parent(p, p->exit_signal);
258 + if (task_detached(p)) {
259 + p->exit_state = EXIT_DEAD;
264 kill_orphaned_pgrp(p, father);
270 @@ -901,7 +908,8 @@ static void forget_original_parent(struct task_struct *father)
272 p->parent = p->real_parent;
274 - reparent_thread(p, father);
275 + if (reparent_thread(p, father))
276 + list_add(&p->ptrace_entry, &ptrace_dead);
279 write_unlock_irq(&tasklist_lock);
280 @@ -1420,6 +1428,18 @@ static int wait_task_zombie(struct task_struct *p, int options,
284 +static int *task_stopped_code(struct task_struct *p, bool ptrace)
287 + if (task_is_stopped_or_traced(p))
288 + return &p->exit_code;
290 + if (p->signal->flags & SIGNAL_STOP_STOPPED)
291 + return &p->signal->group_exit_code;
297 * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold
298 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
299 @@ -1430,7 +1450,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
300 int options, struct siginfo __user *infop,
301 int __user *stat_addr, struct rusage __user *ru)
303 - int retval, exit_code, why;
304 + int retval, exit_code, *p_code, why;
305 uid_t uid = 0; /* unneeded, required by compiler */
308 @@ -1440,22 +1460,16 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
310 spin_lock_irq(&p->sighand->siglock);
312 - if (unlikely(!task_is_stopped_or_traced(p)))
315 - if (!ptrace && p->signal->group_stop_count > 0)
317 - * A group stop is in progress and this is the group leader.
318 - * We won't report until all threads have stopped.
320 + p_code = task_stopped_code(p, ptrace);
321 + if (unlikely(!p_code))
324 - exit_code = p->exit_code;
325 + exit_code = *p_code;
329 if (!unlikely(options & WNOWAIT))
333 /* don't need the RCU readlock here as we're holding a spinlock */
334 uid = __task_cred(p)->uid;
335 @@ -1611,7 +1625,7 @@ static int wait_consider_task(struct task_struct *parent, int ptrace,
339 - if (task_is_stopped_or_traced(p))
340 + if (task_stopped_code(p, ptrace))
341 return wait_task_stopped(ptrace, p, options,
342 infop, stat_addr, ru);
344 @@ -1811,7 +1825,7 @@ asmlinkage long sys_wait4(pid_t upid, in
345 pid = find_get_pid(-upid);
346 } else if (upid == 0) {
348 - pid = get_pid(task_pgrp(current));
349 + pid = get_task_pid(current, PIDTYPE_PGID);
350 } else /* upid > 0 */ {
352 pid = find_get_pid(upid);
353 diff --git a/kernel/fork.c b/kernel/fork.c
354 index 4854c2c..cf9f156 100644
357 @@ -1120,7 +1120,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
358 goto bad_fork_cleanup_mm;
359 if ((retval = copy_io(clone_flags, p)))
360 goto bad_fork_cleanup_namespaces;
361 - retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
362 + retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
364 goto bad_fork_cleanup_io;
366 @@ -1258,8 +1258,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
367 p->signal->leader_pid = pid;
368 tty_kref_put(p->signal->tty);
369 p->signal->tty = tty_kref_get(current->signal->tty);
370 - set_task_pgrp(p, task_pgrp_nr(current));
371 - set_task_session(p, task_session_nr(current));
372 attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
373 attach_pid(p, PIDTYPE_SID, task_session(current));
374 list_add_tail_rcu(&p->tasks, &init_task.tasks);
375 diff --git a/kernel/pid.c b/kernel/pid.c
376 index 1b3586f..8582d4e 100644
379 @@ -403,6 +403,8 @@ struct pid *get_task_pid(struct task_str
383 + if (type != PIDTYPE_PID)
384 + task = task->group_leader;
385 pid = get_pid(task->pids[type].pid);
388 @@ -450,11 +452,24 @@ pid_t pid_vnr(struct pid *pid)
390 EXPORT_SYMBOL_GPL(pid_vnr);
392 -pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
393 +pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
394 + struct pid_namespace *ns)
396 - return pid_nr_ns(task_pid(tsk), ns);
401 + ns = current->nsproxy->pid_ns;
402 + if (likely(pid_alive(task))) {
403 + if (type != PIDTYPE_PID)
404 + task = task->group_leader;
405 + nr = pid_nr_ns(task->pids[type].pid, ns);
411 -EXPORT_SYMBOL(task_pid_nr_ns);
412 +EXPORT_SYMBOL(__task_pid_nr_ns);
414 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
416 @@ -462,18 +477,6 @@ pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
418 EXPORT_SYMBOL(task_tgid_nr_ns);
420 -pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
422 - return pid_nr_ns(task_pgrp(tsk), ns);
424 -EXPORT_SYMBOL(task_pgrp_nr_ns);
426 -pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
428 - return pid_nr_ns(task_session(tsk), ns);
430 -EXPORT_SYMBOL(task_session_nr_ns);
433 * Used by proc to find the first pid that is greater then or equal to nr.
435 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
436 index 476607f..0df323a 100644
437 --- a/kernel/posix-cpu-timers.c
438 +++ b/kernel/posix-cpu-timers.c
439 @@ -1371,7 +1372,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
440 if (task_cputime_expired(&group_sample, &sig->cputime_expires))
445 + return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
449 @@ -1419,19 +1421,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
450 * timer call will interfere.
452 list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
456 spin_lock(&timer->it_lock);
457 list_del_init(&timer->it.cpu.entry);
458 - firing = timer->it.cpu.firing;
459 + cpu_firing = timer->it.cpu.firing;
460 timer->it.cpu.firing = 0;
462 * The firing flag is -1 if we collided with a reset
463 * of the timer, which already reported this
464 * almost-firing as an overrun. So don't generate an event.
466 - if (likely(firing >= 0)) {
467 + if (likely(cpu_firing >= 0))
468 cpu_timer_fire(timer);
470 spin_unlock(&timer->it_lock);
473 diff --git a/kernel/sched.c b/kernel/sched.c
474 index f1e8560..b0cdc3a 100644
477 @@ -618,9 +618,6 @@ struct rq {
478 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
480 /* sys_sched_yield() stats */
481 - unsigned int yld_exp_empty;
482 - unsigned int yld_act_empty;
483 - unsigned int yld_both_empty;
484 unsigned int yld_count;
486 /* schedule() stats */
487 @@ -2750,7 +2747,40 @@ unsigned long nr_iowait(void)
491 -unsigned long nr_active(void)
492 +/* Variables and functions for calc_load */
493 +static atomic_long_t calc_load_tasks;
494 +static unsigned long calc_load_update;
495 +unsigned long avenrun[3];
496 +EXPORT_SYMBOL(avenrun);
499 + * get_avenrun - get the load average array
500 + * @loads: pointer to dest load array
501 + * @offset: offset to add
502 + * @shift: shift count to shift the result left
504 + * These values are estimates at best, so no need for locking.
506 +void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
508 + loads[0] = (avenrun[0] + offset) << shift;
509 + loads[1] = (avenrun[1] + offset) << shift;
510 + loads[2] = (avenrun[2] + offset) << shift;
513 +static unsigned long
514 +calc_load(unsigned long load, unsigned long exp, unsigned long active)
517 + load += active * (FIXED_1 - exp);
518 + return load >> FSHIFT;
522 + * calc_load - update the avenrun load estimates 10 ticks after the
523 + * CPUs have updated calc_load_tasks.
525 +void calc_global_load(void)
527 unsigned long i, running = 0, uninterruptible = 0;
529 @@ -4781,11 +4811,17 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
530 __wake_up_common(q, mode, 1, 0, NULL);
533 +void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
535 + __wake_up_common(q, mode, 1, 0, key);
539 - * __wake_up_sync - wake up threads blocked on a waitqueue.
540 + * __wake_up_sync_key - wake up threads blocked on a waitqueue.
542 * @mode: which threads
543 * @nr_exclusive: how many wake-one or wake-many threads to wake up
544 + * @key: opaque value to be passed to wakeup targets
546 * The sync wakeup differs that the waker knows that it will schedule
547 * away soon, so while the target thread will be woken up, it will not
548 @@ -4794,8 +4830,8 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
550 * On UP it can prevent extra preemption.
553 -__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
554 +void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
555 + int nr_exclusive, void *key)
559 @@ -4807,9 +4843,18 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
562 spin_lock_irqsave(&q->lock, flags);
563 - __wake_up_common(q, mode, nr_exclusive, sync, NULL);
564 + __wake_up_common(q, mode, nr_exclusive, sync, key);
565 spin_unlock_irqrestore(&q->lock, flags);
567 +EXPORT_SYMBOL_GPL(__wake_up_sync_key);
570 + * __wake_up_sync - see __wake_up_sync_key()
572 +void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
574 + __wake_up_sync_key(q, mode, nr_exclusive, NULL);
576 EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
579 diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
580 index 16eeba4e..bdf57bc 100644
581 --- a/kernel/sched_debug.c
582 +++ b/kernel/sched_debug.c
583 @@ -287,9 +287,6 @@ static void print_cpu(struct seq_file *m, int cpu)
584 #ifdef CONFIG_SCHEDSTATS
585 #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
593 @@ -314,7 +311,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
594 u64 now = ktime_to_ns(ktime_get());
597 - SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
598 + SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
599 init_utsname()->release,
600 (int)strcspn(init_utsname()->version, " "),
601 init_utsname()->version);
602 diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
603 index a8f93dd..32d2bd4 100644
604 --- a/kernel/sched_stats.h
605 +++ b/kernel/sched_stats.h
607 * bump this up when changing the output format or the meaning of an existing
608 * format, so that tools can adapt (or abort)
610 -#define SCHEDSTAT_VERSION 14
611 +#define SCHEDSTAT_VERSION 15
613 static int show_schedstat(struct seq_file *seq, void *v)
615 @@ -26,9 +26,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
617 /* runqueue-specific stats */
619 - "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
620 - cpu, rq->yld_both_empty,
621 - rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
622 + "cpu%d %u %u %u %u %u %u %llu %llu %lu",
623 + cpu, rq->yld_count,
624 rq->sched_switch, rq->sched_count, rq->sched_goidle,
625 rq->ttwu_count, rq->ttwu_local,
627 diff --git a/kernel/sys.c b/kernel/sys.c
628 index 37f458e..742cefa 100644
631 @@ -1013,10 +1013,8 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
635 - if (task_pgrp(p) != pgrp) {
636 + if (task_pgrp(p) != pgrp)
637 change_pid(p, PIDTYPE_PGID, pgrp);
638 - set_task_pgrp(p, pid_nr(pgrp));
643 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
644 index 1f0c509..6e9b6d1 100644
645 --- a/kernel/workqueue.c
646 +++ b/kernel/workqueue.c
647 @@ -972,17 +972,19 @@ undo:
650 struct work_for_cpu {
651 - struct work_struct work;
652 + struct completion completion;
658 -static void do_work_for_cpu(struct work_struct *w)
659 +static int do_work_for_cpu(void *_wfc)
661 - struct work_for_cpu *wfc = container_of(w, struct work_for_cpu, work);
662 + struct work_for_cpu *wfc = _wfc;
664 wfc->ret = wfc->fn(wfc->arg);
665 + complete(&wfc->completion);
670 @@ -996,20 +998,19 @@ static void do_work_for_cpu(struct work_
672 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
674 - struct work_for_cpu wfc;
676 - INIT_WORK(&wfc.work, do_work_for_cpu);
680 - if (unlikely(!cpu_online(cpu)))
683 - schedule_work_on(cpu, &wfc.work);
684 - flush_work(&wfc.work);
688 + struct task_struct *sub_thread;
689 + struct work_for_cpu wfc = {
690 + .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
695 + sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
696 + if (IS_ERR(sub_thread))
697 + return PTR_ERR(sub_thread);
698 + kthread_bind(sub_thread, cpu);
699 + wake_up_process(sub_thread);
700 + wait_for_completion(&wfc.completion);
703 EXPORT_SYMBOL_GPL(work_on_cpu);