kernel-power-2.6.28: debian/patches/nokia-swapfile.diff
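
This patch re-maps swap page offsets so that writes land on the next free
slot in sequence. On the write path, map_swap_page() redirects each page to
the head of the current free "gap"; when that gap is used up, find_gap()
scans for the largest remaining run of free slots; and swap_entry_free()
drops a page's re-mapping once its last reference goes away. Each 32-bit
swap_remap[] entry appears to pack two pieces of state: the low 31 bits
hold the re-mapped page number (zero meaning "not re-mapped") and the high
bit marks a slot that is currently in use as a re-map target.

Below is a minimal userspace sketch of that encoding, for illustration
only; the helper names (remap_target, remap_set, remap_clear) are not part
of the patch.

#include <stdint.h>
#include <stdio.h>

#define REMAP_USED 0x80000000u  /* high bit: slot is in use as a re-map target */
#define REMAP_MASK 0x7FFFFFFFu  /* low 31 bits: re-mapped page number (0 = none) */

/* Read the re-map destination recorded for a logical offset (0 = not re-mapped). */
static uint32_t remap_target(const uint32_t *remap, uint32_t offset)
{
        return remap[offset] & REMAP_MASK;
}

/* Point 'offset' at free slot 'slot' and mark that slot as in use. */
static void remap_set(uint32_t *remap, uint32_t offset, uint32_t slot)
{
        remap[offset] = (remap[offset] & REMAP_USED) | slot;
        remap[slot] |= REMAP_USED;
}

/* Drop the re-mapping for 'offset' and release the slot it pointed at. */
static void remap_clear(uint32_t *remap, uint32_t offset)
{
        uint32_t old = remap_target(remap, offset);

        if (!old)
                return;
        remap[offset] &= REMAP_USED;
        remap[old] &= REMAP_MASK;
}

int main(void)
{
        uint32_t remap[8] = { 0 };

        remap_set(remap, 3, 5);         /* logical page 3 now lives in slot 5 */
        printf("page 3 -> slot %u\n", (unsigned)remap_target(remap, 3));
        remap_clear(remap, 3);          /* slot 5 is free again */
        printf("page 3 -> slot %u\n", (unsigned)remap_target(remap, 3));
        return 0;
}
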
--- kernel-maemo-2.6.28.orig/mm/swapfile.c
+++ kernel-maemo-2.6.28/mm/swapfile.c
@@ -273,22 +273,41 @@
 static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
 {
        int count = p->swap_map[offset];
+       unsigned old;
 
-       if (count < SWAP_MAP_MAX) {
-               count--;
-               p->swap_map[offset] = count;
-               if (!count) {
-                       if (offset < p->lowest_bit)
-                               p->lowest_bit = offset;
-                       if (offset > p->highest_bit)
-                               p->highest_bit = offset;
-                       if (p->prio > swap_info[swap_list.next].prio)
-                               swap_list.next = p - swap_info;
-                       nr_swap_pages++;
-                       p->inuse_pages--;
-               }
-       }
-       return count;
+       if (count >= SWAP_MAP_MAX)
+               return count;
+
+       count--;
+       p->swap_map[offset] = count;
+       if (count)
+               return count;
+
+       spin_lock(&p->remap_lock);
+
+       if (offset < p->lowest_bit)
+               p->lowest_bit = offset;
+       if (offset > p->highest_bit)
+               p->highest_bit = offset;
+       if (p->prio > swap_info[swap_list.next].prio)
+               swap_list.next = p - swap_info;
+       nr_swap_pages++;
+       p->inuse_pages--;
+
+       /* Re-map the page number */
+       old = p->swap_remap[offset] & 0x7FFFFFFF;
+       /* Zero means it was not re-mapped */
+       if (!old)
+               goto out;
+       /* Clear the re-mapping */
+       p->swap_remap[offset] &= 0x80000000;
+       /* Mark the re-mapped page as unused */
+       p->swap_remap[old] &= 0x7FFFFFFF;
+       /* Record how many free pages there are */
+       p->gaps_exist += 1;
+out:
+       spin_unlock(&p->remap_lock);
+       return 0;
 }
 
 /*
@@ -977,14 +996,123 @@
        spin_unlock(&mmlist_lock);
 }
 
+/* Find the largest sequence of free pages */
+int find_gap(struct swap_info_struct *sis)
+{
+       unsigned i, uninitialized_var(start), uninitialized_var(gap_next);
+       unsigned uninitialized_var(gap_end), gap_size = 0;
+       int in_gap = 0;
+
+       spin_unlock(&sis->remap_lock);
+       cond_resched();
+       mutex_lock(&sis->remap_mutex);
+
+       /* Check if a gap was found while we waited for the mutex */
+       spin_lock(&sis->remap_lock);
+       if (sis->gap_next <= sis->gap_end) {
+               mutex_unlock(&sis->remap_mutex);
+               return 0;
+       }
+       if (!sis->gaps_exist) {
+               mutex_unlock(&sis->remap_mutex);
+               return -1;
+       }
+       spin_unlock(&sis->remap_lock);
+
+       /*
+        * There is no current gap, so no new re-mappings can be made without
+        * going through this function (find_gap) which is protected by the
+        * remap_mutex.
+        */
+       for (i = 1; i < sis->max; i++) {
+               if (in_gap) {
+                       if (!(sis->swap_remap[i] & 0x80000000))
+                               continue;
+                       if (i - start > gap_size) {
+                               gap_next = start;
+                               gap_end = i - 1;
+                               gap_size = i - start;
+                       }
+                       in_gap = 0;
+               } else {
+                       if (sis->swap_remap[i] & 0x80000000)
+                               continue;
+                       in_gap = 1;
+                       start = i;
+               }
+               cond_resched();
+       }
+       spin_lock(&sis->remap_lock);
+       if (in_gap && i - start > gap_size) {
+               sis->gap_next = start;
+               sis->gap_end = i - 1;
+       } else {
+               sis->gap_next = gap_next;
+               sis->gap_end = gap_end;
+       }
+       mutex_unlock(&sis->remap_mutex);
+       return 0;
+}
+
 /*
  * Use this swapdev's extent info to locate the (PAGE_SIZE) block which
  * corresponds to page offset `offset'.
  */
-sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset)
+sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset, int write)
 {
        struct swap_extent *se = sis->curr_swap_extent;
        struct swap_extent *start_se = se;
+       unsigned old;
+
+       /*
+        * Instead of using the offset we are given, re-map it to the next
+        * sequential position.
+        */
+       spin_lock(&sis->remap_lock);
+       /* Get the old re-mapping */
+       old = sis->swap_remap[offset] & 0x7FFFFFFF;
+       if (write) {
+               /* See if we have free pages */
+               if (sis->gap_next > sis->gap_end) {
+                       /* The gap is used up. Find another one */
+                       if (!sis->gaps_exist || find_gap(sis) < 0) {
+                               /*
+                                * Out of space, so this page must have a
+                                * re-mapping, so use that.
+                                */
+                               BUG_ON(!old);
+                               sis->gap_next = sis->gap_end = old;
+                       }
+               }
+               /* Zero means it was not re-mapped previously */
+               if (old) {
+                       /* Clear the re-mapping */
+                       sis->swap_remap[offset] &= 0x80000000;
+                       /* Mark the re-mapped page as unused */
+                       sis->swap_remap[old] &= 0x7FFFFFFF;
+               } else {
+                       /* Record how many free pages there are */
+                       sis->gaps_exist -= 1;
+               }
+               /* Create the re-mapping to the next free page */
+               sis->swap_remap[offset] |= sis->gap_next;
+               /* Mark it as used */
+               sis->swap_remap[sis->gap_next] |= 0x80000000;
+               /* Use the re-mapped page number */
+               offset = sis->gap_next;
+               /* Update the free pages gap */
+               sis->gap_next += 1;
+       } else {
+               /*
+                * Always read from the existing re-mapping
+                * if there is one. There may not be because
+                * 'swapin_readahead()' has won a race with
+                * 'add_to_swap()'.
+                */
+               if (old)
+                       offset = old;
+       }
+       spin_unlock(&sis->remap_lock);
 
        for ( ; ; ) {
                struct list_head *lh;
@@ -1015,7 +1143,8 @@
                return 0;
 
        sis = swap_info + swap_type;
-       return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset) : 0;
+#error map_swap_page does not support hibernation
+       return (sis->flags & SWP_WRITEOK) ? map_swap_page(sis, offset, 0) : 0;
 }
 #endif /* CONFIG_HIBERNATION */
 
@@ -1342,6 +1471,7 @@
        p->flags = 0;
        spin_unlock(&swap_lock);
        mutex_unlock(&swapon_mutex);
+       vfree(p->swap_remap);
        vfree(swap_map);
        inode = mapping->host;
        if (S_ISBLK(inode->i_mode)) {
@@ -1485,6 +1615,7 @@
        unsigned long maxpages = 1;
        int swapfilesize;
        unsigned short *swap_map = NULL;
+       unsigned int *swap_remap = NULL;
        struct page *page = NULL;
        struct inode *inode = NULL;
        int did_down = 0;
@@ -1654,9 +1785,15 @@
                        error = -ENOMEM;
                        goto bad_swap;
                }
+               swap_remap = vmalloc(maxpages * sizeof(unsigned));
+               if (!swap_remap) {
+                       error = -ENOMEM;
+                       goto bad_swap;
+               }
 
                error = 0;
                memset(swap_map, 0, maxpages * sizeof(short));
+               memset(swap_remap, 0, maxpages * sizeof(unsigned));
                for (i = 0; i < swap_header->info.nr_badpages; i++) {
                        int page_nr = swap_header->info.badpages[i];
                        if (page_nr <= 0 || page_nr >= swap_header->info.last_page)
@@ -1696,6 +1833,12 @@
        else
                p->prio = --least_priority;
        p->swap_map = swap_map;
+       p->swap_remap = swap_remap;
+       p->gap_next = 1;
+       p->gap_end = p->max - 1;
+       p->gaps_exist = p->max - 1;
+       spin_lock_init(&p->remap_lock);
+       mutex_init(&p->remap_mutex);
        p->flags = SWP_ACTIVE;
        nr_swap_pages += nr_good_pages;
        total_swap_pages += nr_good_pages;
@@ -1734,6 +1877,7 @@
        p->swap_file = NULL;
        p->flags = 0;
        spin_unlock(&swap_lock);
+       vfree(swap_remap);
        vfree(swap_map);
        if (swap_file)
                filp_close(swap_file, NULL);
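
For reference, the scan in find_gap() above amounts to a single pass over
swap_remap[] that remembers the longest run of slots whose high bit is
clear, skipping slot 0 just as the patch does. A standalone sketch of that
pass, with illustrative names only (longest_free_run is not code from the
patch):

#include <stdint.h>
#include <stdio.h>

#define SLOT_USED 0x80000000u   /* high bit set: slot is in use as a re-map target */

/*
 * Return the length of the longest run of free slots in map[1..max-1],
 * storing its bounds in *first and *last. Returns 0 if no slot is free.
 */
static uint32_t longest_free_run(const uint32_t *map, uint32_t max,
                                 uint32_t *first, uint32_t *last)
{
        uint32_t i, start = 0, best = 0;
        int in_run = 0;

        for (i = 1; i < max; i++) {
                if (map[i] & SLOT_USED) {
                        /* A used slot ends the current run, if any. */
                        if (in_run && i - start > best) {
                                best = i - start;
                                *first = start;
                                *last = i - 1;
                        }
                        in_run = 0;
                } else if (!in_run) {
                        /* A free slot after a used one starts a new run. */
                        in_run = 1;
                        start = i;
                }
        }
        /* The final run may extend to the end of the map. */
        if (in_run && i - start > best) {
                best = i - start;
                *first = start;
                *last = i - 1;
        }
        return best;
}

int main(void)
{
        /* Slots 2..4 form the longest free run in this toy map. */
        uint32_t map[8] = { 0, SLOT_USED, 0, 0, 0, SLOT_USED, 0, SLOT_USED };
        uint32_t first = 0, last = 0;
        uint32_t len = longest_free_run(map, 8, &first, &last);

        printf("longest free run: %u slots (%u..%u)\n",
               (unsigned)len, (unsigned)first, (unsigned)last);
        return 0;
}

In the patch the same walk runs under remap_mutex with remap_lock dropped,
so cond_resched() can be called between slots.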