/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

/*
 * balloon_devinfo_alloc - allocates a balloon device information descriptor.
 * @balloon_dev_descriptor: pointer to reference the balloon device which
 *			    this struct balloon_dev_info will be servicing.
 *
 * Drivers must call this to properly allocate and initialize an instance of
 * struct balloon_dev_info, which will be used to reference a balloon device
 * as well as to keep track of the balloon device page list.
 */
struct balloon_dev_info *balloon_devinfo_alloc(void *balloon_dev_descriptor)
{
	struct balloon_dev_info *b_dev_info;

	b_dev_info = kmalloc(sizeof(*b_dev_info), GFP_KERNEL);
	if (!b_dev_info)
		return ERR_PTR(-ENOMEM);

	b_dev_info->balloon_device = balloon_dev_descriptor;
	b_dev_info->mapping = NULL;
	b_dev_info->isolated_pages = 0;
	spin_lock_init(&b_dev_info->pages_lock);
	INIT_LIST_HEAD(&b_dev_info->pages);

	return b_dev_info;
}
EXPORT_SYMBOL_GPL(balloon_devinfo_alloc);
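
/*
 * Example: a driver would typically call balloon_devinfo_alloc() once at
 * device setup time. A minimal sketch (my_balloon is a hypothetical driver
 * private structure), checking the ERR_PTR-encoded return value:
 *
 *	struct balloon_dev_info *b_dev_info;
 *
 *	b_dev_info = balloon_devinfo_alloc(my_balloon);
 *	if (IS_ERR(b_dev_info))
 *		return PTR_ERR(b_dev_info);
 */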

/*
 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
 *			  page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page
 *
 * Drivers must call this to properly allocate a new, enlisted balloon page
 * before definitively removing the page from the guest system.
 * This function returns the address of the newly enqueued page, or NULL if
 * page allocation failed this turn.
 */
struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
{
	unsigned long flags;
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
					__GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!page)
		return NULL;

	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point.
	 */
	BUG_ON(!trylock_page(page));
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(page, b_dev_info->mapping, &b_dev_info->pages);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	unlock_page(page);
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
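
/*
 * Example: a hypothetical inflate path would call balloon_page_enqueue()
 * repeatedly, backing off when page allocation fails and notifying the host
 * about each enlisted page (tell_host_inflate() is a made-up name):
 *
 *	while (npages > 0) {
 *		struct page *page = balloon_page_enqueue(b_dev_info);
 *
 *		if (!page)
 *			break;
 *		tell_host_inflate(my_balloon, page);
 *		npages--;
 *	}
 */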

/*
 * balloon_page_dequeue - removes a page from the balloon's page list and
 *			  returns its address so the driver can release the
 *			  page.
 * @b_dev_info: balloon device descriptor we will grab a page from.
 *
 * Drivers must call this to properly de-allocate a previously enlisted balloon
 * page before definitively releasing it back to the guest system.
 * This function returns the address of the dequeued page, or NULL if the
 * balloon's page list is temporarily empty because compaction has isolated
 * its pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	struct page *page, *tmp;
	unsigned long flags;
	bool dequeued_page;

	dequeued_page = false;
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		/*
		 * Block others from accessing the 'page' while we get around
		 * to establishing additional references and preparing the
		 * 'page' to be released by the balloon driver.
		 */
		if (trylock_page(page)) {
			spin_lock_irqsave(&b_dev_info->pages_lock, flags);
			/*
			 * Raise the page refcount here to prevent any wrong
			 * attempt to isolate this page, in case of colliding
			 * with balloon_page_isolate() just after we release
			 * the page lock.
			 *
			 * balloon_page_free() will take care of dropping
			 * this extra refcount later.
			 */
			get_page(page);
			balloon_page_delete(page);
			spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
			unlock_page(page);
			dequeued_page = true;
			break;
		}
	}

	if (!dequeued_page) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then
		 * something went wrong and some balloon pages were lost.
		 * BUG() here, otherwise the balloon driver may get stuck in
		 * an infinite loop while attempting to release all its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
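
/*
 * Example: the deflate path mirrors the inflate sketch above; once the host
 * has been told the page is no longer ballooned (tell_host_deflate() is a
 * made-up name), balloon_page_free() drops the extra refcount taken by
 * balloon_page_dequeue() and releases the page:
 *
 *	while (npages > 0) {
 *		struct page *page = balloon_page_dequeue(b_dev_info);
 *
 *		if (!page)
 *			break;
 *		tell_host_deflate(my_balloon, page);
 *		balloon_page_free(page);
 *		npages--;
 *	}
 */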

#ifdef CONFIG_BALLOON_COMPACTION
/*
 * balloon_mapping_alloc - allocates a special ->mapping for ballooned pages.
 * @b_dev_info: holds the balloon device information descriptor.
 * @a_ops: balloon_mapping address_space_operations descriptor.
 *
 * Drivers must call this to properly allocate and initialize an instance of
 * struct address_space which will be used as the special page->mapping for
 * the balloon device's enlisted page instances.
 */
struct address_space *balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
		const struct address_space_operations *a_ops)
{
	struct address_space *mapping;

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return ERR_PTR(-ENOMEM);

	/*
	 * Give a clean 'zeroed' status to all elements of this special
	 * balloon page->mapping struct address_space instance.
	 */
	address_space_init_once(mapping);

	/*
	 * Set mapping->flags appropriately, to allow balloon pages
	 * ->mapping identification.
	 */
	mapping_set_balloon(mapping);
	mapping_set_gfp_mask(mapping, balloon_mapping_gfp_mask());

	/* balloon's page->mapping->a_ops callback descriptor */
	mapping->a_ops = a_ops;

	/*
	 * Establish a pointer reference back to the balloon device descriptor
	 * this particular page->mapping will be servicing.
	 * This is used by compaction / migration procedures to identify and
	 * access the balloon device pageset while isolating / migrating pages.
	 *
	 * As some balloon drivers can register multiple balloon devices
	 * for a single guest, this also helps compaction / migration to
	 * properly deal with multiple balloon pagesets, when required.
	 */
	mapping->private_data = b_dev_info;
	b_dev_info->mapping = mapping;

	return mapping;
}
EXPORT_SYMBOL_GPL(balloon_mapping_alloc);
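
/*
 * Example: a compaction-aware driver wires balloon_mapping_alloc() up with
 * its own address_space_operations at setup time. A minimal sketch, assuming
 * a hypothetical my_balloon_migratepage() callback:
 *
 *	static const struct address_space_operations my_balloon_aops = {
 *		.migratepage = my_balloon_migratepage,
 *	};
 *
 *	struct address_space *mapping;
 *
 *	mapping = balloon_mapping_alloc(b_dev_info, &my_balloon_aops);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 */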

static inline void __isolate_balloon_page(struct page *page)
{
	struct balloon_dev_info *b_dev_info = page->mapping->private_data;
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_del(&page->lru);
	b_dev_info->isolated_pages++;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

static inline void __putback_balloon_page(struct page *page)
{
	struct balloon_dev_info *b_dev_info = page->mapping->private_data;
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_add(&page->lru, &b_dev_info->pages);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

static inline int __migrate_balloon_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return page->mapping->a_ops->migratepage(mapping, newpage, page, mode);
}

/* __isolate_lru_page() counterpart for a ballooned page */
bool balloon_page_isolate(struct page *page)
{
	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a balloon page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (likely(get_page_unless_zero(page))) {
		/*
		 * As balloon pages are not isolated from LRU lists, concurrent
		 * compaction threads can race against page migration functions
		 * as well as race against the balloon driver releasing a page.
		 *
		 * In order to avoid having an already isolated balloon page
		 * being (wrongly) re-isolated while it is under migration,
		 * or to avoid attempting to isolate pages being released by
		 * the balloon driver, let's be sure we have the page lock
		 * before proceeding with the balloon page isolation steps.
		 */
		if (likely(trylock_page(page))) {
			/*
			 * A ballooned page, by default, has just one refcount.
			 * Prevent concurrent compaction threads from isolating
			 * an already isolated balloon page by refcount check.
			 */
			if (__is_movable_balloon_page(page) &&
			    page_count(page) == 2) {
				__isolate_balloon_page(page);
				unlock_page(page);
				return true;
			}
			unlock_page(page);
		}
		put_page(page);
	}
	return false;
}
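
/*
 * Refcount arithmetic behind the page_count(page) == 2 test above, as a
 * worked example: alloc_page() in balloon_page_enqueue() leaves a ballooned
 * page with a refcount of 1, and the get_page_unless_zero() above raises it
 * to 2. A page already isolated (or being dequeued) carries one further
 * reference and would read 3 here, so it is correctly skipped.
 */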

/* putback_lru_page() counterpart for a ballooned page */
void balloon_page_putback(struct page *page)
{
	/*
	 * 'lock_page()' stabilizes the page and prevents races against
	 * concurrent isolation threads attempting to re-isolate it.
	 */
	lock_page(page);

	if (__is_movable_balloon_page(page)) {
		__putback_balloon_page(page);
		/* drop the extra ref count taken for page isolation */
		put_page(page);
	} else {
		WARN_ON(1);
		dump_page(page);
	}
	unlock_page(page);
}

/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct page *newpage,
			 struct page *page, enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;

	/*
	 * Block others from accessing the 'newpage' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'newpage' at this point.
	 */
	BUG_ON(!trylock_page(newpage));

	if (WARN_ON(!__is_movable_balloon_page(page))) {
		dump_page(page);
		unlock_page(newpage);
		return rc;
	}

	mapping = page->mapping;
	if (mapping)
		rc = __migrate_balloon_page(mapping, newpage, page, mode);

	unlock_page(newpage);
	return rc;
}
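
/*
 * Example: the ->migratepage callback reached through __migrate_balloon_page()
 * is supplied by the balloon driver via balloon_mapping_alloc(). A minimal,
 * hypothetical sketch (my_balloon_migratepage() and tell_host_swap() are
 * made-up names) would move the page accounting under pages_lock, notify the
 * host, and report MIGRATEPAGE_BALLOON_SUCCESS, leaving the old page to be
 * freed by the migration core through balloon_page_free():
 *
 *	static int my_balloon_migratepage(struct address_space *mapping,
 *			struct page *newpage, struct page *page,
 *			enum migrate_mode mode)
 *	{
 *		struct balloon_dev_info *b = mapping->private_data;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&b->pages_lock, flags);
 *		balloon_page_insert(newpage, mapping, &b->pages);
 *		b->isolated_pages--;
 *		spin_unlock_irqrestore(&b->pages_lock, flags);
 *
 *		tell_host_swap(b->balloon_device, page, newpage);
 *
 *		return MIGRATEPAGE_BALLOON_SUCCESS;
 *	}
 */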
#endif /* CONFIG_BALLOON_COMPACTION */