Add sched_reset_on_fork patch — more of the BFS 330 -> 250 backporting work.
[kernel-bfs] / kernel-power-2.6.28 / debian / patches / sched_reset_on_fork.diff
1 Index: kernel-2.6.28/include/linux/sched.h
2 ===================================================================
3 --- kernel-2.6.28.orig/include/linux/sched.h
4 +++ kernel-2.6.28/include/linux/sched.h
5 @@ -45,6 +45,9 @@
6  #define SCHED_RANGE(policy)    ((policy) <= SCHED_MAX)
7  #endif
8  
9 +/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
10 +#define SCHED_RESET_ON_FORK     0x40000000
11 +
12  #ifdef __KERNEL__
13  
14  struct sched_param {
15 @@ -1174,6 +1177,10 @@ struct task_struct {
16         /* ??? */
17         unsigned int personality;
18         unsigned did_exec:1;
19 +
20 +       /* Revert to default priority/policy when forking */
21 +       unsigned sched_reset_on_fork:1;
22 +
23         pid_t pid;
24         pid_t tgid;
25  
26 Index: kernel-2.6.28/kernel/sched_bfs.c
27 ===================================================================
28 --- kernel-2.6.28.orig/kernel/sched_bfs.c
29 +++ kernel-2.6.28/kernel/sched_bfs.c
30 @@ -1356,6 +1356,27 @@ void sched_fork(struct task_struct *p, i
31         p->sched_time = p->stime_pc = p->utime_pc = 0;
32  
33         /*
34 +        * Revert to default priority/policy on fork if requested.
35 +        */
36 +       if (unlikely(p->sched_reset_on_fork)) {
37 +               if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
38 +                       p->policy = SCHED_NORMAL;
39 +                       p->normal_prio = normal_prio(p);
40 +               }
41 +
42 +               if (PRIO_TO_NICE(p->static_prio) < 0) {
43 +                       p->static_prio = NICE_TO_PRIO(0);
44 +                       p->normal_prio = p->static_prio;
45 +               }
46 +
47 +               /*
48 +                * We don't need the reset flag anymore after the fork. It has
49 +                * fulfilled its duty:
50 +                */
51 +               p->sched_reset_on_fork = 0;
52 +       }
53 +
54 +       /*
55          * Make sure we do not leak PI boosting priority to the child:
56          */
57         p->prio = current->normal_prio;
58 @@ -3121,6 +3142,7 @@ static int __sched_setscheduler(struct t
59         struct sched_param zero_param = { .sched_priority = 0 };
60         int queued, retval, oldpolicy = -1;
61         unsigned long flags, rlim_rtprio = 0;
62 +       int reset_on_fork;
63         struct rq *rq;
64  
65         /* may grab non-irq protected spin_locks */
66 @@ -3145,10 +3167,17 @@ static int __sched_setscheduler(struct t
67         }
68  recheck:
69         /* double check policy once rq lock held */
70 -       if (policy < 0)
71 +       if (policy < 0) {
72 +               reset_on_fork = p->sched_reset_on_fork;
73                 policy = oldpolicy = p->policy;
74 -       else if (!SCHED_RANGE(policy))
75 -               return -EINVAL;
76 +       } else {
77 +               reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
78 +               policy &= ~SCHED_RESET_ON_FORK;
79 +
80 +               if (!SCHED_RANGE(policy))
81 +                       return -EINVAL;
82 +       }
83 +
84         /*
85          * Valid priorities for SCHED_FIFO and SCHED_RR are
86          * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
87 @@ -3211,6 +3240,10 @@ recheck:
88                 if ((current->euid != p->euid) &&
89                     (current->euid != p->uid))
90                         return -EPERM;
91 +
92 +               /* Normal users shall not reset the sched_reset_on_fork flag */
93 +               if (p->sched_reset_on_fork && !reset_on_fork)
94 +                       return -EPERM;
95         }
96  
97         retval = security_task_setscheduler(p, policy, param);
98 @@ -3234,6 +3267,8 @@ recheck:
99                 goto recheck;
100         }
101         update_rq_clock(rq);
102 +       p->sched_reset_on_fork = reset_on_fork;
103 +
104         queued = task_queued(p);
105         if (queued)
106                 dequeue_task(p);