update debian/changelog
[kernel-bfs] / kernel-bfs-2.6.28 / debian / patches / ondemand-avoid.diff
1 --- kernel-power-2.6.28.orig/drivers/cpufreq/cpufreq_ondemand.c
2 +++ kernel-power-2.6.28/drivers/cpufreq/cpufreq_ondemand.c
3 @@ -57,6 +57,9 @@
4  #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER   (1000)
5  #define TRANSITION_LATENCY_LIMIT               (10 * 1000 * 1000)
6  
7 +static int avoid_frequencies_count=0;
8 +static unsigned int avoid_frequencies_table[16];
9 +
10  static void do_dbs_timer(struct work_struct *work);
11  
12  /* Sampling types */
13 @@ -105,6 +108,52 @@
14         .powersave_bias = 0,
15  };
16  
17 +static unsigned int find_min_frequency(struct cpufreq_policy *policy, 
18 +               struct cpufreq_frequency_table *table)
19 +{
20 +       int i, f;
21 +       f=policy->max;
22 +       i=0;
23 +       while(table[i].frequency!=CPUFREQ_TABLE_END) {
24 +              if((table[i].frequency<f) &&
25 +                 (table[i].frequency>=policy->min))
26 +                      f=table[i].frequency;
27 +              i++;
28 +       }
29 +       return f;
30 +}
31 +
32 +static unsigned int find_max_frequency(struct cpufreq_policy *policy, 
33 +               struct cpufreq_frequency_table *table)
34 +{
35 +       int i, f;
36 +       f=policy->min;
37 +       i=0;
38 +       while(table[i].frequency!=CPUFREQ_TABLE_END) {
39 +              if((table[i].frequency>f) &&
40 +                 (table[i].frequency<=policy->max))
41 +                      f=table[i].frequency;
42 +              i++;
43 +       }
44 +       return f;
45 +}
46 +
47 +static unsigned int find_lower_frequency(struct cpufreq_policy *policy, 
48 +               struct cpufreq_frequency_table *table,
49 +               unsigned int freq) 
50 +{
51 +       int i, f;
52 +       f=find_min_frequency(policy, table);
53 +       i=0;
54 +       while(table[i].frequency!=CPUFREQ_TABLE_END) {
55 +              if((table[i].frequency>f) &&
56 +                 (table[i].frequency<=freq))
57 +                      f=table[i].frequency;
58 +              i++;
59 +       }
60 +       return f;
61 +}
62 +
63  static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
64                                                         cputime64_t *wall)
65  {
66 @@ -218,8 +267,32 @@
67         int i;
68         for_each_online_cpu(i) {
69                 struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
70 -               dbs_info->freq_table = cpufreq_frequency_get_table(i);
71 +               struct cpufreq_frequency_table *table;
72 +               int l, k;
73 +               table = cpufreq_frequency_get_table(i);
74 +               l=0;
75 +               k=0;
76 +               while(table[k].frequency != CPUFREQ_TABLE_END) { 
77 +                       if(table[k].frequency != CPUFREQ_ENTRY_INVALID) {
78 +                               int t,j;
79 +                               t=1;
80 +                               for(j=0;j<avoid_frequencies_count;j++) if(table[k].frequency==avoid_frequencies_table[j]) t=0;
81 +                               l+=t;
82 +                       }
83 +                       k++;
84 +               }
85 +               kfree(dbs_info->freq_table);
86 +               if (!(dbs_info->freq_table = kzalloc(sizeof(struct cpufreq_frequency_table)*(l+1), GFP_KERNEL))) continue;
87 +               for(l=0,k=0; (table[l].frequency != CPUFREQ_TABLE_END); l++)
88 +                       if (table[l].frequency != CPUFREQ_ENTRY_INVALID) {
89 +                               int t,j;
90 +                               t=1;
91 +                               for(j=0;j<avoid_frequencies_count;j++) if(table[l].frequency==avoid_frequencies_table[j]) t=0;
92 +                               if(t)memcpy(&dbs_info->freq_table[k++], &table[l], sizeof(struct cpufreq_frequency_table));
93 +                       }
94 +               dbs_info->freq_table[k].frequency = CPUFREQ_TABLE_END;
95                 dbs_info->freq_lo = 0;
96 +               
97         }
98  }
99  
100 @@ -357,6 +430,49 @@
101  define_one_rw(ignore_nice_load);
102  define_one_rw(powersave_bias);
103  
104 +static ssize_t show_avoid_frequencies(struct cpufreq_policy *unused,
105 +                                        char *buf)
106 +{
107 +        int i;
108 +        char *b=buf;
109 +        for(i=0;i<avoid_frequencies_count;i++)
110 +                b+=sprintf(b, "%u ", avoid_frequencies_table[i]);
111 +        b+=sprintf(b, "\n");
112 +        return b-buf;
113 +}
114 +
115 +static ssize_t store_avoid_frequencies(struct cpufreq_policy *unused,
116 +                                        const char *buf, size_t n)
117 +{
118 +        unsigned int value[16];
119 +        int i;
120 +
121 +        i=sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u",
122 +                                &value[0], &value[1], &value[2], &value[3],
123 +                                &value[4], &value[5], &value[6], &value[7],
124 +                                &value[8], &value[9], &value[10], &value[11],
125 +                                &value[12], &value[13], &value[14], &value[15]
126 +                                );
127 +       if(i<0) {
128 +                printk(KERN_ERR "avoid_frequencies: Invalid value\n");
129 +                return -EINVAL;
130 +        }
131 +
132 +       avoid_frequencies_count=i;
133 +
134 +        for(i=0;i<avoid_frequencies_count;i++) {
135 +               avoid_frequencies_table[i]=value[i];
136 +        }
137 +
138 +       mutex_lock(&dbs_mutex);
139 +       ondemand_powersave_bias_init();
140 +       mutex_unlock(&dbs_mutex);
141 +
142 +       return n;
143 +};
144 +
145 +define_one_rw(avoid_frequencies);
146 +
147  static struct attribute * dbs_attributes[] = {
148         &sampling_rate_max.attr,
149         &sampling_rate_min.attr,
150 @@ -364,6 +480,7 @@
151         &up_threshold.attr,
152         &ignore_nice_load.attr,
153         &powersave_bias.attr,
154 +       &avoid_frequencies.attr,
155         NULL
156  };
157  
158 @@ -439,10 +556,9 @@
159         if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
160                 /* if we are already at full speed then break out early */
161                 if (!dbs_tuners_ins.powersave_bias) {
162 -                       if (policy->cur == policy->max)
163 +                       if (policy->cur == find_max_frequency(policy, this_dbs_info->freq_table))
164                                 return;
165 -
166 -                       __cpufreq_driver_target(policy, policy->max,
167 +                       __cpufreq_driver_target(policy, find_max_frequency(policy, this_dbs_info->freq_table),
168                                 CPUFREQ_RELATION_H);
169                 } else {
170                         int freq = powersave_bias_target(policy, policy->max,
171 @@ -472,7 +588,8 @@
172                                  dbs_tuners_ins.down_differential);
173  
174                 if (!dbs_tuners_ins.powersave_bias) {
175 -                       __cpufreq_driver_target(policy, freq_next,
176 +                       
177 +                       __cpufreq_driver_target(policy, find_lower_frequency(policy, this_dbs_info->freq_table, freq_next),
178                                         CPUFREQ_RELATION_L);
179                 } else {
180                         int freq = powersave_bias_target(policy, freq_next,
181 @@ -550,7 +667,7 @@
182         this_dbs_info = &per_cpu(cpu_dbs_info, 0);
183         policy = this_dbs_info->cur_policy;
184  
185 -       __cpufreq_driver_target(policy, policy->max,
186 +       __cpufreq_driver_target(policy, find_max_frequency(policy, this_dbs_info->freq_table),
187                                 CPUFREQ_RELATION_L);
188         this_dbs_info->prev_cpu_idle = get_cpu_idle_time(0,
189                         &this_dbs_info->prev_cpu_wall);