/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>

#include <asm/page.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/mem-reservation.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

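/*
 * Maximum time (in seconds) the initial balloon-down at boot may make no
 * progress before balloon_wait_finish() gives up and panics.
 */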
static uint __read_mostly balloon_boot_timeout = 180;
module_param(balloon_boot_timeout, uint, 0444);

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static int xen_hotplug_unpopulated;

static const struct ctl_table balloon_table[] = {
        {
                .procname       = "hotplug_unpopulated",
                .data           = &xen_hotplug_unpopulated,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
};

#else
#define xen_hotplug_unpopulated 0
#endif

/*
 * Use one extent per PAGE_SIZE to avoid breaking down the page into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)

/*
 * balloon_thread() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_WAIT: wait to be rescheduled,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

static enum bp_state {
        BP_DONE,
        BP_WAIT,
        BP_EAGAIN,
        BP_ECANCELED
} balloon_state = BP_DONE;

/* Main waiting point for xen-balloon thread. */
static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];


/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);

/* When ballooning out (allocating memory to return to Xen) we don't really
   want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
        (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
        if (!PageOffline(page))
                __SetPageOffline(page);

        /* Lowmem is re-populated first, so highmem pages go at list tail. */
        if (PageHighMem(page)) {
                list_add_tail(&page->lru, &ballooned_pages);
                balloon_stats.balloon_high++;
        } else {
                list_add(&page->lru, &ballooned_pages);
                balloon_stats.balloon_low++;
        }
        inc_node_page_state(page, NR_BALLOON_PAGES);

        wake_up(&balloon_wq);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool require_lowmem)
{
        struct page *page;

        if (list_empty(&ballooned_pages))
                return NULL;

        page = list_entry(ballooned_pages.next, struct page, lru);
        if (require_lowmem && PageHighMem(page))
                return NULL;
        list_del(&page->lru);

        if (PageHighMem(page))
                balloon_stats.balloon_high--;
        else
                balloon_stats.balloon_low--;

        __ClearPageOffline(page);
        dec_node_page_state(page, NR_BALLOON_PAGES);

        return page;
}

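/*
 * balloon_next_page: return the page following @page on the ballooned list,
 * or NULL if @page is the last entry.
 */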
static struct page *balloon_next_page(struct page *page)
{
        struct list_head *next = page->lru.next;

        if (next == &ballooned_pages)
                return NULL;
        return list_entry(next, struct page, lru);
}

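/*
 * update_schedule: adjust the retry bookkeeping after a balloon operation.
 * On success (BP_DONE) the delay and retry count are reset; on BP_EAGAIN the
 * delay is doubled (capped at max_schedule_delay) and the operation is
 * canceled once max_retry_count is exceeded.
 */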
static void update_schedule(void)
{
        if (balloon_state == BP_WAIT || balloon_state == BP_ECANCELED)
                return;

        if (balloon_state == BP_DONE) {
                balloon_stats.schedule_delay = 1;
                balloon_stats.retry_count = 1;
                return;
        }

        ++balloon_stats.retry_count;

        if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
            balloon_stats.retry_count > balloon_stats.max_retry_count) {
                balloon_stats.schedule_delay = 1;
                balloon_stats.retry_count = 1;
                balloon_state = BP_ECANCELED;
                return;
        }

        balloon_stats.schedule_delay <<= 1;

        if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
                balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

        balloon_state = BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static void release_memory_resource(struct resource *resource)
{
        if (!resource)
                return;

        /*
         * No need to reset region to identity mapped since we now
         * know that no I/O can be in this region
         */
        release_resource(resource);
        kfree(resource);
}

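/*
 * Allocate a "System RAM" resource of @size bytes from iomem_resource,
 * aligned to a memory section so it can be passed to add_memory_resource().
 */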
static struct resource *additional_memory_resource(phys_addr_t size)
{
        struct resource *res;
        int ret;

        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                return NULL;

        res->name = "System RAM";
        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

        ret = allocate_resource(&iomem_resource, res,
                                size, 0, -1,
                                PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
        if (ret < 0) {
                pr_err("Cannot allocate new System RAM resource\n");
                kfree(res);
                return NULL;
        }

        return res;
}

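/*
 * Hotplug additional memory so that more pages can later be handed back to
 * the balloon: compute how many more pages are needed beyond the currently
 * known memory (total_pages), round up to a full memory section and add the
 * range as a new "System RAM" resource. The new pages only become usable
 * once they are onlined and xen_online_page() has added them to the balloon.
 */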
static enum bp_state reserve_additional_memory(void)
{
        long credit;
        struct resource *resource;
        int nid, rc;
        unsigned long balloon_hotplug;

        credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
                - balloon_stats.total_pages;

        /*
         * Already hotplugged enough pages? Wait for them to be
         * onlined.
         */
        if (credit <= 0)
                return BP_WAIT;

        balloon_hotplug = round_up(credit, PAGES_PER_SECTION);

        resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
        if (!resource)
                goto err;

        nid = memory_add_physaddr_to_nid(resource->start);

#ifdef CONFIG_XEN_HAVE_PVMMU
        /*
         * We don't support PV MMU when Linux and Xen are using
         * different page granularities.
         */
        BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

        /*
         * add_memory() will build page tables for the new memory so
         * the p2m must contain invalid entries so the correct
         * non-present PTEs will be written.
         *
         * If a failure occurs, the original (identity) p2m entries
         * are not restored since this region is now known not to
         * conflict with any devices.
         */
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                unsigned long pfn, i;

                pfn = PFN_DOWN(resource->start);
                for (i = 0; i < balloon_hotplug; i++) {
                        if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
                                pr_warn("set_phys_to_machine() failed, no memory added\n");
                                goto err;
                        }
                }
        }
#endif

        /*
         * add_memory_resource() will call online_pages() which in its turn
         * will call xen_online_page() callback causing deadlock if we don't
         * release balloon_mutex here. Unlocking here is safe because the
         * callers drop the mutex before trying again.
         */
        mutex_unlock(&balloon_mutex);
        /* add_memory_resource() requires the device_hotplug lock */
        lock_device_hotplug();
        rc = add_memory_resource(nid, resource, MHP_MERGE_RESOURCE);
        unlock_device_hotplug();
        mutex_lock(&balloon_mutex);

        if (rc) {
                pr_warn("Cannot add additional memory (%i)\n", rc);
                goto err;
        }

        balloon_stats.total_pages += balloon_hotplug;

        return BP_WAIT;
err:
        release_memory_resource(resource);
        return BP_ECANCELED;
}

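/*
 * Callback invoked when hotplugged memory is onlined: instead of handing the
 * pages to the page allocator, add them to the balloon so they can be
 * populated on demand.
 */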
static void xen_online_page(struct page *page, unsigned int order)
{
        unsigned long i, size = (1 << order);
        unsigned long start_pfn = page_to_pfn(page);
        struct page *p;

        pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
        mutex_lock(&balloon_mutex);
        for (i = 0; i < size; i++) {
                p = pfn_to_page(start_pfn + i);
                balloon_append(p);
        }
        mutex_unlock(&balloon_mutex);
}

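/* Kick the balloon thread once newly added memory has been onlined. */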
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
        if (val == MEM_ONLINE)
                wake_up(&balloon_thread_wq);

        return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
        .notifier_call = xen_memory_notifier,
        .priority = 0
};
#else
static enum bp_state reserve_additional_memory(void)
{
        balloon_stats.target_pages = balloon_stats.current_pages +
                                     balloon_stats.target_unpopulated;
        return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

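/*
 * Number of pages the balloon driver still has to give back to the domain
 * (positive) or take away from it (negative) to reach the current target.
 */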
static long current_credit(void)
{
        return balloon_stats.target_pages - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
        return balloon_stats.balloon_low || balloon_stats.balloon_high;
}

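/*
 * Give up to @nr_pages ballooned pages back to the domain: ask Xen to
 * populate the corresponding frames, restore their mappings and release the
 * pages to the page allocator.
 */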
static enum bp_state increase_reservation(unsigned long nr_pages)
{
        int rc;
        unsigned long i;
        struct page *page;

        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);

        page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
        for (i = 0; i < nr_pages; i++) {
                if (!page) {
                        nr_pages = i;
                        break;
                }

                frame_list[i] = page_to_xen_pfn(page);
                page = balloon_next_page(page);
        }

        rc = xenmem_reservation_increase(nr_pages, frame_list);
        if (rc <= 0)
                return BP_EAGAIN;

        for (i = 0; i < rc; i++) {
                page = balloon_retrieve(false);
                BUG_ON(page == NULL);

                xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

                /*
                 * Relinquish the page back to the allocator. Note that
                 * some pages, including ones added via xen_online_page(), might
                 * not be marked reserved; free_reserved_page() will handle that.
                 */
                free_reserved_page(page);
        }

        balloon_stats.current_pages += rc;

        return BP_DONE;
}

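/*
 * Balloon out up to @nr_pages pages: allocate them from the page allocator,
 * scrub them, tear down their mappings, hand the underlying frames back to
 * Xen and park the struct pages on the ballooned list.
 */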
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
        enum bp_state state = BP_DONE;
        unsigned long i;
        struct page *page, *tmp;
        int ret;
        LIST_HEAD(pages);

        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);

        for (i = 0; i < nr_pages; i++) {
                page = alloc_page(gfp);
                if (page == NULL) {
                        nr_pages = i;
                        state = BP_EAGAIN;
                        break;
                }
                adjust_managed_page_count(page, -1);
                xenmem_reservation_scrub_page(page);
                list_add(&page->lru, &pages);
        }

        /*
         * Ensure that ballooned highmem pages don't have kmaps.
         *
         * Do this before changing the p2m as kmap_flush_unused()
         * reads PTEs to obtain pages (and hence needs the original
         * p2m entry).
         */
        kmap_flush_unused();

        /*
         * Set up the frame, update the direct mapping, invalidate the P2M,
         * and add the page to the balloon.
         */
        i = 0;
        list_for_each_entry_safe(page, tmp, &pages, lru) {
                frame_list[i++] = xen_page_to_gfn(page);

                xenmem_reservation_va_mapping_reset(1, &page);

                list_del(&page->lru);

                balloon_append(page);
        }

        flush_tlb_all();

        ret = xenmem_reservation_decrease(nr_pages, frame_list);
        BUG_ON(ret != nr_pages);

        balloon_stats.current_pages -= nr_pages;

        return state;
}

/*
 * Stop waiting if either state is BP_DONE and ballooning action is
 * needed, or if the credit has changed while state is not BP_DONE.
 */
static bool balloon_thread_cond(long credit)
{
        if (balloon_state == BP_DONE)
                credit = 0;

        return current_credit() != credit || kthread_should_stop();
}

/*
 * As this is a kthread it is guaranteed to run as a single instance only.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static int balloon_thread(void *unused)
{
        long credit;
        unsigned long timeout;

        set_freezable();
        for (;;) {
                switch (balloon_state) {
                case BP_DONE:
                case BP_ECANCELED:
                        timeout = 3600 * HZ;
                        break;
                case BP_EAGAIN:
                        timeout = balloon_stats.schedule_delay * HZ;
                        break;
                case BP_WAIT:
                        timeout = HZ;
                        break;
                }

                credit = current_credit();

                wait_event_freezable_timeout(balloon_thread_wq,
                                             balloon_thread_cond(credit), timeout);

                if (kthread_should_stop())
                        return 0;

                mutex_lock(&balloon_mutex);

                credit = current_credit();

                if (credit > 0) {
                        if (balloon_is_inflated())
                                balloon_state = increase_reservation(credit);
                        else
                                balloon_state = reserve_additional_memory();
                }

                if (credit < 0) {
                        long n_pages;

                        n_pages = min(-credit, si_mem_available());
                        balloon_state = decrease_reservation(n_pages,
                                                             GFP_BALLOON);
                        if (balloon_state == BP_DONE && n_pages != -credit &&
                            n_pages < totalreserve_pages)
                                balloon_state = BP_EAGAIN;
                }

                update_schedule();

                mutex_unlock(&balloon_mutex);

                cond_resched();
        }
}

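/*
 * The new target normally originates from the toolstack: the xenstore
 * "memory/target" watch (handled in xen-balloon.c) converts the value to
 * pages and calls balloon_set_new_target().
 */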
/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
        /* No need for lock. Not read-modify-write updates. */
        balloon_stats.target_pages = target;
        wake_up(&balloon_thread_wq);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

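/*
 * Make more ballooned pages available to xen_alloc_ballooned_pages(): either
 * hotplug unpopulated memory (if xen_hotplug_unpopulated is set) and wait for
 * pages to arrive, or balloon out further pages from the running domain.
 */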
static int add_ballooned_pages(unsigned int nr_pages)
{
        enum bp_state st;

        if (xen_hotplug_unpopulated) {
                st = reserve_additional_memory();
                if (st != BP_ECANCELED) {
                        int rc;

                        mutex_unlock(&balloon_mutex);
                        rc = wait_event_interruptible(balloon_wq,
                                        !list_empty(&ballooned_pages));
                        mutex_lock(&balloon_mutex);
                        return rc ? -ENOMEM : 0;
                }
        }

        if (si_mem_available() < nr_pages)
                return -ENOMEM;

        st = decrease_reservation(nr_pages, GFP_USER);
        if (st != BP_DONE)
                return -ENOMEM;

        return 0;
}

/**
 * xen_alloc_ballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
        unsigned int pgno = 0;
        struct page *page;
        int ret;

        mutex_lock(&balloon_mutex);

        balloon_stats.target_unpopulated += nr_pages;

        while (pgno < nr_pages) {
                page = balloon_retrieve(true);
                if (page) {
                        pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
                        /*
                         * We don't support PV MMU when Linux and Xen are using
                         * different page granularities.
                         */
                        BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

                        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                                ret = xen_alloc_p2m_entry(page_to_pfn(page));
                                if (ret < 0)
                                        goto out_undo;
                        }
#endif
                } else {
                        ret = add_ballooned_pages(nr_pages - pgno);
                        if (ret < 0)
                                goto out_undo;
                }
        }
        mutex_unlock(&balloon_mutex);
        return 0;
out_undo:
        mutex_unlock(&balloon_mutex);
        xen_free_ballooned_pages(pgno, pages);
        /*
         * NB: xen_free_ballooned_pages will only subtract pgno pages, but since
         * target_unpopulated is incremented with nr_pages at the start we need
         * to remove the remaining ones also, or accounting will be screwed.
         */
        balloon_stats.target_unpopulated -= nr_pages - pgno;
        return ret;
}
EXPORT_SYMBOL(xen_alloc_ballooned_pages);

/**
 * xen_free_ballooned_pages - return pages retrieved with get_ballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
        unsigned int i;

        mutex_lock(&balloon_mutex);

        for (i = 0; i < nr_pages; i++) {
                if (pages[i])
                        balloon_append(pages[i]);
        }

        balloon_stats.target_unpopulated -= nr_pages;

        /* The balloon may be too large now. Shrink it if needed. */
        if (current_credit())
                wake_up(&balloon_thread_wq);

        mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(xen_free_ballooned_pages);

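/*
 * Hand the extra memory regions provided by Xen at boot over to the balloon.
 * These pages are part of the physmap but not backed by memory yet, so they
 * start out ballooned and the current/target page counts are reduced
 * accordingly.
 */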
static int __init balloon_add_regions(void)
{
        unsigned long start_pfn, pages;
        unsigned long pfn, extra_pfn_end;
        unsigned int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                pages = xen_extra_mem[i].n_pfns;
                if (!pages)
                        continue;

                start_pfn = xen_extra_mem[i].start_pfn;

                /*
                 * If the amount of usable memory has been limited (e.g., with
                 * the 'mem' command line parameter), don't add pages beyond
                 * this limit.
                 */
                extra_pfn_end = min(max_pfn, start_pfn + pages);

                for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
                        balloon_append(pfn_to_page(pfn));

                /*
                 * Extra regions are accounted for in the physmap, but need
                 * decreasing from current_pages and target_pages to balloon
                 * down the initial allocation, because they are already
                 * accounted for in total_pages.
                 */
                pages = extra_pfn_end - start_pfn;
                if (pages >= balloon_stats.current_pages ||
                    pages >= balloon_stats.target_pages) {
                        WARN(1, "Extra pages underflow current target");
                        return -ERANGE;
                }
                balloon_stats.current_pages -= pages;
                balloon_stats.target_pages -= pages;
        }

        return 0;
}

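/*
 * Initialise balloon statistics, register the memory-hotplug hooks and start
 * the xen-balloon kthread.
 */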
static int __init balloon_init(void)
{
        struct task_struct *task;
        int rc;

        if (!xen_domain())
                return -ENODEV;

        pr_info("Initialising balloon driver\n");

        if (xen_released_pages >= get_num_physpages()) {
                WARN(1, "Released pages underflow current target");
                return -ERANGE;
        }

        balloon_stats.current_pages = get_num_physpages() - xen_released_pages;
        balloon_stats.target_pages  = balloon_stats.current_pages;
        balloon_stats.balloon_low   = 0;
        balloon_stats.balloon_high  = 0;
        balloon_stats.total_pages   = balloon_stats.current_pages;

        balloon_stats.schedule_delay = 1;
        balloon_stats.max_schedule_delay = 32;
        balloon_stats.retry_count = 1;
        balloon_stats.max_retry_count = 4;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
        set_online_page_callback(&xen_online_page);
        register_memory_notifier(&xen_memory_nb);
        register_sysctl_init("xen/balloon", balloon_table);
#endif

        rc = balloon_add_regions();
        if (rc)
                return rc;

        task = kthread_run(balloon_thread, NULL, "xen-balloon");
        if (IS_ERR(task)) {
                pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
                return PTR_ERR(task);
        }

        /* Init the xen-balloon driver. */
        xen_balloon_init();

        return 0;
}
subsys_initcall(balloon_init);

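/*
 * Late in boot, wait for the initial ballooning down to the toolstack-provided
 * target to finish (PV guests skip this). Panic if ballooning has failed and
 * made no progress for balloon_boot_timeout seconds.
 */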
static int __init balloon_wait_finish(void)
{
        long credit, last_credit = 0;
        unsigned long last_changed = 0;

        if (!xen_domain())
                return -ENODEV;

        /* PV guests don't need to wait. */
        if (xen_pv_domain() || !current_credit())
                return 0;

        pr_notice("Waiting for initial ballooning down having finished.\n");

        while ((credit = current_credit()) < 0) {
                if (credit != last_credit) {
                        last_changed = jiffies;
                        last_credit = credit;
                }
                if (balloon_state == BP_ECANCELED) {
                        pr_warn_once("Initial ballooning failed, %ld pages need to be freed.\n",
                                     -credit);
                        if (time_is_before_eq_jiffies(last_changed + HZ * balloon_boot_timeout))
                                panic("Initial ballooning failed!\n");
                }

                schedule_timeout_interruptible(HZ / 10);
        }

        pr_notice("Initial ballooning down finished.\n");

        return 0;
}
late_initcall_sync(balloon_wait_finish);