1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * linux/kernel/power/swap.c |
4 | * |
5 | * This file provides functions for reading the suspend image from |
6 | * and writing it to a swap partition. |
7 | * |
8 | * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> |
9 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> |
10 | * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com> |
11 | */ |
12 | |
13 | #define pr_fmt(fmt) "PM: " fmt |
14 | |
15 | #include <linux/module.h> |
16 | #include <linux/file.h> |
17 | #include <linux/delay.h> |
18 | #include <linux/bitops.h> |
19 | #include <linux/device.h> |
20 | #include <linux/bio.h> |
21 | #include <linux/blkdev.h> |
22 | #include <linux/swap.h> |
23 | #include <linux/swapops.h> |
24 | #include <linux/pm.h> |
25 | #include <linux/slab.h> |
26 | #include <linux/vmalloc.h> |
27 | #include <linux/cpumask.h> |
28 | #include <linux/atomic.h> |
29 | #include <linux/kthread.h> |
30 | #include <linux/crc32.h> |
31 | #include <linux/ktime.h> |
32 | |
33 | #include "power.h" |
34 | |
35 | #define HIBERNATE_SIG "S1SUSPEND" |
36 | |
37 | u32 swsusp_hardware_signature; |
38 | |
39 | /* |
40 | * When reading an {un,}compressed image, we may restore pages in place, |
41 | * in which case some architectures need these pages cleaning before they |
42 | * can be executed. We don't know which pages these may be, so clean the lot. |
43 | */ |
44 | static bool clean_pages_on_read; |
45 | static bool clean_pages_on_decompress; |
46 | |
47 | /* |
48 | * The swap map is a data structure used for keeping track of each page |
49 | * written to a swap partition. It consists of many swap_map_page |
50 | * structures, each of which contains an array of MAP_PAGE_ENTRIES swap |
51 | * entries. These structures are stored in the swap area and linked |
52 | * together with the help of the .next_swap member. |
53 | * |
54 | * The swap map is created during suspend. The swap map pages are |
55 | * allocated and populated one at a time, so we only need one memory |
56 | * page to set up the entire structure. |
57 | * |
58 | * During resume we read all the swap_map_page structures into a list. |
59 | */ |
60 | |
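/*
 * Note: one entry is reserved for the link to the next map page, so a
 * struct swap_map_page (entries[] plus next_swap) fills exactly one page.
 */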
61 | #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) |
62 | |
63 | /* |
64 | * Number of free pages that are not in high memory. |
65 | */ |
66 | static inline unsigned long low_free_pages(void) |
67 | { |
68 | return nr_free_pages() - nr_free_highpages(); |
69 | } |
70 | |
71 | /* |
72 | * Number of pages required to be kept free while writing the image. Always |
73 | * half of all available low pages before the writing starts. |
74 | */ |
75 | static inline unsigned long reqd_free_pages(void) |
76 | { |
77 | return low_free_pages() / 2; |
78 | } |
79 | |
80 | struct swap_map_page { |
81 | sector_t entries[MAP_PAGE_ENTRIES]; |
82 | sector_t next_swap; |
83 | }; |
84 | |
85 | struct swap_map_page_list { |
86 | struct swap_map_page *map; |
87 | struct swap_map_page_list *next; |
88 | }; |
89 | |
90 | /* |
91 | * The swap_map_handle structure is used for handling swap in |
92 | * a file-like way. |
93 | */ |
94 | |
95 | struct swap_map_handle { |
96 | struct swap_map_page *cur; |
97 | struct swap_map_page_list *maps; |
98 | sector_t cur_swap; |
99 | sector_t first_sector; |
100 | unsigned int k; |
101 | unsigned long reqd_free_pages; |
102 | u32 crc32; |
103 | }; |
104 | |
105 | struct swsusp_header { |
106 | char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) - |
107 | sizeof(u32) - sizeof(u32)]; |
108 | u32 hw_sig; |
109 | u32 crc32; |
110 | sector_t image; |
111 | unsigned int flags; /* Flags to pass to the "boot" kernel */ |
112 | char orig_sig[10]; |
113 | char sig[10]; |
114 | } __packed; |
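/*
 * The reserved[] padding sizes this structure to exactly one page, so that
 * ->sig ends up at the very end of the swap header page, overlaying the
 * signature of the swap area itself (see mark_swapfiles()).
 */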
115 | |
116 | static struct swsusp_header *swsusp_header; |
117 | |
118 | /* |
119 | * The following functions are used for tracking the allocated |
120 | * swap pages, so that they can be freed in case of an error. |
121 | */ |
122 | |
123 | struct swsusp_extent { |
124 | struct rb_node node; |
125 | unsigned long start; |
126 | unsigned long end; |
127 | }; |
128 | |
129 | static struct rb_root swsusp_extents = RB_ROOT; |
130 | |
131 | static int swsusp_extents_insert(unsigned long swap_offset) |
132 | { |
133 | struct rb_node **new = &(swsusp_extents.rb_node); |
134 | struct rb_node *parent = NULL; |
135 | struct swsusp_extent *ext; |
136 | |
137 | /* Figure out where to put the new node */ |
138 | while (*new) { |
139 | ext = rb_entry(*new, struct swsusp_extent, node); |
140 | parent = *new; |
141 | if (swap_offset < ext->start) { |
142 | /* Try to merge */ |
143 | if (swap_offset == ext->start - 1) { |
144 | ext->start--; |
145 | return 0; |
146 | } |
147 | new = &((*new)->rb_left); |
148 | } else if (swap_offset > ext->end) { |
149 | /* Try to merge */ |
150 | if (swap_offset == ext->end + 1) { |
151 | ext->end++; |
152 | return 0; |
153 | } |
154 | new = &((*new)->rb_right); |
155 | } else { |
156 | /* It already is in the tree */ |
157 | return -EINVAL; |
158 | } |
159 | } |
160 | /* Add the new node and rebalance the tree. */ |
161 | ext = kzalloc(size: sizeof(struct swsusp_extent), GFP_KERNEL); |
162 | if (!ext) |
163 | return -ENOMEM; |
164 | |
165 | ext->start = swap_offset; |
166 | ext->end = swap_offset; |
167 | rb_link_node(node: &ext->node, parent, rb_link: new); |
168 | rb_insert_color(&ext->node, &swsusp_extents); |
169 | return 0; |
170 | } |
171 | |
172 | /* |
173 | * alloc_swapdev_block - allocate a swap page and register that it has |
174 | * been allocated, so that it can be freed in case of an error. |
175 | */ |
176 | |
177 | sector_t alloc_swapdev_block(int swap) |
178 | { |
179 | unsigned long offset; |
180 | |
181 | offset = swp_offset(entry: get_swap_page_of_type(swap)); |
182 | if (offset) { |
183 | if (swsusp_extents_insert(swap_offset: offset)) |
184 | swap_free(swp_entry(type: swap, offset)); |
185 | else |
186 | return swapdev_block(swap, offset); |
187 | } |
188 | return 0; |
189 | } |
190 | |
191 | /* |
192 | * free_all_swap_pages - free swap pages allocated for saving image data. |
193 | * It also frees the extents used to register which swap entries had been |
194 | * allocated. |
195 | */ |
196 | |
197 | void free_all_swap_pages(int swap) |
198 | { |
199 | struct rb_node *node; |
200 | |
201 | while ((node = swsusp_extents.rb_node)) { |
202 | struct swsusp_extent *ext; |
203 | unsigned long offset; |
204 | |
205 | ext = rb_entry(node, struct swsusp_extent, node); |
206 | rb_erase(node, &swsusp_extents); |
207 | for (offset = ext->start; offset <= ext->end; offset++) |
208 | swap_free(swp_entry(type: swap, offset)); |
209 | |
210 | kfree(objp: ext); |
211 | } |
212 | } |
213 | |
214 | int swsusp_swap_in_use(void) |
215 | { |
216 | return (swsusp_extents.rb_node != NULL); |
217 | } |
218 | |
219 | /* |
220 | * General things |
221 | */ |
222 | |
223 | static unsigned short root_swap = 0xffff; |
224 | static struct file *hib_resume_bdev_file; |
225 | |
226 | struct hib_bio_batch { |
227 | atomic_t count; |
228 | wait_queue_head_t wait; |
229 | blk_status_t error; |
230 | struct blk_plug plug; |
231 | }; |
232 | |
233 | static void hib_init_batch(struct hib_bio_batch *hb) |
234 | { |
235 | atomic_set(v: &hb->count, i: 0); |
236 | init_waitqueue_head(&hb->wait); |
237 | hb->error = BLK_STS_OK; |
238 | blk_start_plug(&hb->plug); |
239 | } |
240 | |
241 | static void hib_finish_batch(struct hib_bio_batch *hb) |
242 | { |
243 | blk_finish_plug(&hb->plug); |
244 | } |
245 | |
246 | static void hib_end_io(struct bio *bio) |
247 | { |
248 | struct hib_bio_batch *hb = bio->bi_private; |
249 | struct page *page = bio_first_page_all(bio); |
250 | |
251 | if (bio->bi_status) { |
252 | pr_alert("Read-error on swap-device (%u:%u:%Lu)\n", |
253 | MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), |
254 | (unsigned long long)bio->bi_iter.bi_sector); |
255 | } |
256 | |
257 | if (bio_data_dir(bio) == WRITE) |
258 | put_page(page); |
259 | else if (clean_pages_on_read) |
260 | flush_icache_range(start: (unsigned long)page_address(page), |
261 | end: (unsigned long)page_address(page) + PAGE_SIZE); |
262 | |
263 | if (bio->bi_status && !hb->error) |
264 | hb->error = bio->bi_status; |
265 | if (atomic_dec_and_test(v: &hb->count)) |
266 | wake_up(&hb->wait); |
267 | |
268 | bio_put(bio); |
269 | } |
270 | |
271 | static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr, |
272 | struct hib_bio_batch *hb) |
273 | { |
274 | struct page *page = virt_to_page(addr); |
275 | struct bio *bio; |
276 | int error = 0; |
277 | |
278 | bio = bio_alloc(bdev: file_bdev(bdev_file: hib_resume_bdev_file), nr_vecs: 1, opf, |
279 | GFP_NOIO | __GFP_HIGH); |
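/* Convert the page offset into a starting 512-byte sector number. */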
280 | bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); |
281 | |
282 | if (bio_add_page(bio, page, PAGE_SIZE, off: 0) < PAGE_SIZE) { |
283 | pr_err("Adding page to bio failed at %llu\n", |
284 | (unsigned long long)bio->bi_iter.bi_sector); |
285 | bio_put(bio); |
286 | return -EFAULT; |
287 | } |
288 | |
289 | if (hb) { |
290 | bio->bi_end_io = hib_end_io; |
291 | bio->bi_private = hb; |
292 | atomic_inc(v: &hb->count); |
293 | submit_bio(bio); |
294 | } else { |
295 | error = submit_bio_wait(bio); |
296 | bio_put(bio); |
297 | } |
298 | |
299 | return error; |
300 | } |
301 | |
302 | static int hib_wait_io(struct hib_bio_batch *hb) |
303 | { |
304 | /* |
305 | * We are relying on the behavior of blk_plug that a thread with |
306 | * a plug will flush the plug list before sleeping. |
307 | */ |
308 | wait_event(hb->wait, atomic_read(&hb->count) == 0); |
309 | return blk_status_to_errno(status: hb->error); |
310 | } |
311 | |
312 | /* |
313 | * Saving part |
314 | */ |
315 | static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) |
316 | { |
317 | int error; |
318 | |
319 | hib_submit_io(opf: REQ_OP_READ, page_off: swsusp_resume_block, addr: swsusp_header, NULL); |
320 | if (!memcmp(p: "SWAP-SPACE",q: swsusp_header->sig, size: 10) || |
321 | !memcmp(p: "SWAPSPACE2",q: swsusp_header->sig, size: 10)) { |
322 | memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); |
323 | memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); |
324 | swsusp_header->image = handle->first_sector; |
325 | if (swsusp_hardware_signature) { |
326 | swsusp_header->hw_sig = swsusp_hardware_signature; |
327 | flags |= SF_HW_SIG; |
328 | } |
329 | swsusp_header->flags = flags; |
330 | if (flags & SF_CRC32_MODE) |
331 | swsusp_header->crc32 = handle->crc32; |
332 | error = hib_submit_io(opf: REQ_OP_WRITE | REQ_SYNC, |
333 | page_off: swsusp_resume_block, addr: swsusp_header, NULL); |
334 | } else { |
335 | pr_err("Swap header not found!\n"); |
336 | error = -ENODEV; |
337 | } |
338 | return error; |
339 | } |
340 | |
341 | /* |
342 | * Holds the swsusp_header flags. This is used in software_resume() in |
343 | * 'kernel/power/hibernate' to check if the image is compressed and, if |
344 | * so, to query for support of the compression algorithm. |
345 | */ |
346 | unsigned int swsusp_header_flags; |
347 | |
348 | /** |
349 | * swsusp_swap_check - check if the resume device is a swap device |
350 | * and get its index (if so) |
351 | * |
352 | * This is called before saving the image. |
353 | */ |
354 | static int swsusp_swap_check(void) |
355 | { |
356 | int res; |
357 | |
358 | if (swsusp_resume_device) |
359 | res = swap_type_of(device: swsusp_resume_device, offset: swsusp_resume_block); |
360 | else |
361 | res = find_first_swap(device: &swsusp_resume_device); |
362 | if (res < 0) |
363 | return res; |
364 | root_swap = res; |
365 | |
366 | hib_resume_bdev_file = bdev_file_open_by_dev(dev: swsusp_resume_device, |
367 | BLK_OPEN_WRITE, NULL, NULL); |
368 | if (IS_ERR(ptr: hib_resume_bdev_file)) |
369 | return PTR_ERR(ptr: hib_resume_bdev_file); |
370 | |
371 | res = set_blocksize(bdev: file_bdev(bdev_file: hib_resume_bdev_file), PAGE_SIZE); |
372 | if (res < 0) |
373 | fput(hib_resume_bdev_file); |
374 | |
375 | return res; |
376 | } |
377 | |
378 | /** |
379 | * write_page - Write one page to given swap location. |
380 | * @buf: Address we're writing. |
381 | * @offset: Offset of the swap page we're writing to. |
382 | * @hb: bio completion batch |
383 | */ |
384 | |
385 | static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb) |
386 | { |
387 | void *src; |
388 | int ret; |
389 | |
390 | if (!offset) |
391 | return -ENOSPC; |
392 | |
393 | if (hb) { |
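/*
 * Asynchronous write: work on a private copy of the page, because the
 * caller may reuse @buf before the I/O completes; hib_end_io() frees
 * the copy with put_page() when the bio finishes.
 */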
394 | src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN | |
395 | __GFP_NORETRY); |
396 | if (src) { |
397 | copy_page(to: src, from: buf); |
398 | } else { |
399 | ret = hib_wait_io(hb); /* Free pages */ |
400 | if (ret) |
401 | return ret; |
402 | src = (void *)__get_free_page(GFP_NOIO | |
403 | __GFP_NOWARN | |
404 | __GFP_NORETRY); |
405 | if (src) { |
406 | copy_page(to: src, from: buf); |
407 | } else { |
408 | WARN_ON_ONCE(1); |
409 | hb = NULL; /* Go synchronous */ |
410 | src = buf; |
411 | } |
412 | } |
413 | } else { |
414 | src = buf; |
415 | } |
416 | return hib_submit_io(opf: REQ_OP_WRITE | REQ_SYNC, page_off: offset, addr: src, hb); |
417 | } |
418 | |
419 | static void release_swap_writer(struct swap_map_handle *handle) |
420 | { |
421 | if (handle->cur) |
422 | free_page((unsigned long)handle->cur); |
423 | handle->cur = NULL; |
424 | } |
425 | |
426 | static int get_swap_writer(struct swap_map_handle *handle) |
427 | { |
428 | int ret; |
429 | |
430 | ret = swsusp_swap_check(); |
431 | if (ret) { |
432 | if (ret != -ENOSPC) |
433 | pr_err("Cannot find swap device, try swapon -a\n"); |
434 | return ret; |
435 | } |
436 | handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); |
437 | if (!handle->cur) { |
438 | ret = -ENOMEM; |
439 | goto err_close; |
440 | } |
441 | handle->cur_swap = alloc_swapdev_block(swap: root_swap); |
442 | if (!handle->cur_swap) { |
443 | ret = -ENOSPC; |
444 | goto err_rel; |
445 | } |
446 | handle->k = 0; |
447 | handle->reqd_free_pages = reqd_free_pages(); |
448 | handle->first_sector = handle->cur_swap; |
449 | return 0; |
450 | err_rel: |
451 | release_swap_writer(handle); |
452 | err_close: |
453 | swsusp_close(); |
454 | return ret; |
455 | } |
456 | |
457 | static int swap_write_page(struct swap_map_handle *handle, void *buf, |
458 | struct hib_bio_batch *hb) |
459 | { |
460 | int error; |
461 | sector_t offset; |
462 | |
463 | if (!handle->cur) |
464 | return -EINVAL; |
465 | offset = alloc_swapdev_block(swap: root_swap); |
466 | error = write_page(buf, offset, hb); |
467 | if (error) |
468 | return error; |
469 | handle->cur->entries[handle->k++] = offset; |
470 | if (handle->k >= MAP_PAGE_ENTRIES) { |
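/*
 * The current map page is full: allocate a swap block for the next
 * map page, link it via ->next_swap, flush the full map page to swap
 * and start filling a fresh one.
 */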
471 | offset = alloc_swapdev_block(swap: root_swap); |
472 | if (!offset) |
473 | return -ENOSPC; |
474 | handle->cur->next_swap = offset; |
475 | error = write_page(buf: handle->cur, offset: handle->cur_swap, hb); |
476 | if (error) |
477 | goto out; |
478 | clear_page(page: handle->cur); |
479 | handle->cur_swap = offset; |
480 | handle->k = 0; |
481 | |
482 | if (hb && low_free_pages() <= handle->reqd_free_pages) { |
483 | error = hib_wait_io(hb); |
484 | if (error) |
485 | goto out; |
486 | /* |
487 | * Recalculate the number of required free pages, to |
488 | * make sure we never take more than half. |
489 | */ |
490 | handle->reqd_free_pages = reqd_free_pages(); |
491 | } |
492 | } |
493 | out: |
494 | return error; |
495 | } |
496 | |
497 | static int flush_swap_writer(struct swap_map_handle *handle) |
498 | { |
499 | if (handle->cur && handle->cur_swap) |
500 | return write_page(buf: handle->cur, offset: handle->cur_swap, NULL); |
501 | else |
502 | return -EINVAL; |
503 | } |
504 | |
505 | static int swap_writer_finish(struct swap_map_handle *handle, |
506 | unsigned int flags, int error) |
507 | { |
508 | if (!error) { |
509 | pr_info("S"); |
510 | error = mark_swapfiles(handle, flags); |
511 | pr_cont("|\n"); |
512 | flush_swap_writer(handle); |
513 | } |
514 | |
515 | if (error) |
516 | free_all_swap_pages(swap: root_swap); |
517 | release_swap_writer(handle); |
518 | swsusp_close(); |
519 | |
520 | return error; |
521 | } |
522 | |
523 | /* |
524 | * Bytes we need for compressed data in the worst case. We assume (as a |
525 | * limitation) that this covers the worst of all the compression algorithms. |
526 | */ |
527 | #define bytes_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2) |
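/*
 * This bound appears to follow the LZO1X worst-case expansion formula
 * (x + x/16 + 64 + 3) plus a small extra margin; other algorithms are
 * assumed not to expand data beyond it.
 */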
528 | |
529 | /* We need to remember how much compressed data we need to read. */ |
530 | #define CMP_HEADER sizeof(size_t) |
531 | |
532 | /* Number of pages/bytes we'll compress at one time. */ |
533 | #define UNC_PAGES 32 |
534 | #define UNC_SIZE (UNC_PAGES * PAGE_SIZE) |
535 | |
536 | /* Number of pages we need for compressed data (worst case). */ |
537 | #define CMP_PAGES DIV_ROUND_UP(bytes_worst_compress(UNC_SIZE) + \ |
538 | CMP_HEADER, PAGE_SIZE) |
539 | #define CMP_SIZE (CMP_PAGES * PAGE_SIZE) |
540 | |
541 | /* Maximum number of threads for compression/decompression. */ |
542 | #define CMP_THREADS 3 |
543 | |
544 | /* Minimum/maximum number of pages for read buffering. */ |
545 | #define CMP_MIN_RD_PAGES 1024 |
546 | #define CMP_MAX_RD_PAGES 8192 |
547 | |
548 | /** |
549 | * save_image - save the suspend image data |
550 | */ |
551 | |
552 | static int save_image(struct swap_map_handle *handle, |
553 | struct snapshot_handle *snapshot, |
554 | unsigned int nr_to_write) |
555 | { |
556 | unsigned int m; |
557 | int ret; |
558 | int nr_pages; |
559 | int err2; |
560 | struct hib_bio_batch hb; |
561 | ktime_t start; |
562 | ktime_t stop; |
563 | |
564 | hib_init_batch(hb: &hb); |
565 | |
566 | pr_info("Saving image data pages (%u pages)...\n", |
567 | nr_to_write); |
568 | m = nr_to_write / 10; |
569 | if (!m) |
570 | m = 1; |
571 | nr_pages = 0; |
572 | start = ktime_get(); |
573 | while (1) { |
574 | ret = snapshot_read_next(handle: snapshot); |
575 | if (ret <= 0) |
576 | break; |
577 | ret = swap_write_page(handle, data_of(*snapshot), hb: &hb); |
578 | if (ret) |
579 | break; |
580 | if (!(nr_pages % m)) |
581 | pr_info("Image saving progress: %3d%%\n", |
582 | nr_pages / m * 10); |
583 | nr_pages++; |
584 | } |
585 | err2 = hib_wait_io(hb: &hb); |
586 | hib_finish_batch(hb: &hb); |
587 | stop = ktime_get(); |
588 | if (!ret) |
589 | ret = err2; |
590 | if (!ret) |
591 | pr_info("Image saving done\n"); |
592 | swsusp_show_speed(start, stop, nr_to_write, "Wrote"); |
593 | return ret; |
594 | } |
595 | |
596 | /* |
597 | * Structure used for CRC32. |
598 | */ |
599 | struct crc_data { |
600 | struct task_struct *thr; /* thread */ |
601 | atomic_t ready; /* ready to start flag */ |
602 | atomic_t stop; /* ready to stop flag */ |
603 | unsigned run_threads; /* nr current threads */ |
604 | wait_queue_head_t go; /* start crc update */ |
605 | wait_queue_head_t done; /* crc update done */ |
606 | u32 *crc32; /* points to handle's crc32 */ |
607 | size_t *unc_len[CMP_THREADS]; /* uncompressed lengths */ |
608 | unsigned char *unc[CMP_THREADS]; /* uncompressed data */ |
609 | }; |
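/*
 * The CRC and compress/decompress threads share the same handshake: the main
 * thread fills the buffers, sets ->ready (release) and wakes ->go; the worker
 * clears ->ready, does its work, sets ->stop (release) and wakes ->done; the
 * main thread then waits on ->done and clears ->stop before the next round.
 */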
610 | |
611 | /* |
612 | * CRC32 update function that runs in its own thread. |
613 | */ |
614 | static int crc32_threadfn(void *data) |
615 | { |
616 | struct crc_data *d = data; |
617 | unsigned i; |
618 | |
619 | while (1) { |
620 | wait_event(d->go, atomic_read_acquire(&d->ready) || |
621 | kthread_should_stop()); |
622 | if (kthread_should_stop()) { |
623 | d->thr = NULL; |
624 | atomic_set_release(v: &d->stop, i: 1); |
625 | wake_up(&d->done); |
626 | break; |
627 | } |
628 | atomic_set(v: &d->ready, i: 0); |
629 | |
630 | for (i = 0; i < d->run_threads; i++) |
631 | *d->crc32 = crc32_le(crc: *d->crc32, |
632 | p: d->unc[i], len: *d->unc_len[i]); |
633 | atomic_set_release(v: &d->stop, i: 1); |
634 | wake_up(&d->done); |
635 | } |
636 | return 0; |
637 | } |
638 | /* |
639 | * Structure used for data compression. |
640 | */ |
641 | struct cmp_data { |
642 | struct task_struct *thr; /* thread */ |
643 | struct crypto_comp *cc; /* crypto compressor stream */ |
644 | atomic_t ready; /* ready to start flag */ |
645 | atomic_t stop; /* ready to stop flag */ |
646 | int ret; /* return code */ |
647 | wait_queue_head_t go; /* start compression */ |
648 | wait_queue_head_t done; /* compression done */ |
649 | size_t unc_len; /* uncompressed length */ |
650 | size_t cmp_len; /* compressed length */ |
651 | unsigned char unc[UNC_SIZE]; /* uncompressed buffer */ |
652 | unsigned char cmp[CMP_SIZE]; /* compressed buffer */ |
653 | }; |
654 | |
655 | /* Indicates the image size after compression */ |
656 | static atomic_t compressed_size = ATOMIC_INIT(0); |
657 | |
658 | /* |
659 | * Compression function that runs in its own thread. |
660 | */ |
661 | static int compress_threadfn(void *data) |
662 | { |
663 | struct cmp_data *d = data; |
664 | unsigned int cmp_len = 0; |
665 | |
666 | while (1) { |
667 | wait_event(d->go, atomic_read_acquire(&d->ready) || |
668 | kthread_should_stop()); |
669 | if (kthread_should_stop()) { |
670 | d->thr = NULL; |
671 | d->ret = -1; |
672 | atomic_set_release(v: &d->stop, i: 1); |
673 | wake_up(&d->done); |
674 | break; |
675 | } |
676 | atomic_set(v: &d->ready, i: 0); |
677 | |
678 | cmp_len = CMP_SIZE - CMP_HEADER; |
679 | d->ret = crypto_comp_compress(tfm: d->cc, src: d->unc, slen: d->unc_len, |
680 | dst: d->cmp + CMP_HEADER, |
681 | dlen: &cmp_len); |
682 | d->cmp_len = cmp_len; |
683 | |
684 | atomic_set(v: &compressed_size, i: atomic_read(v: &compressed_size) + d->cmp_len); |
685 | atomic_set_release(v: &d->stop, i: 1); |
686 | wake_up(&d->done); |
687 | } |
688 | return 0; |
689 | } |
690 | |
691 | /** |
692 | * save_compressed_image - Save the suspend image data after compression. |
693 | * @handle: Swap map handle to use for saving the image. |
694 | * @snapshot: Image to read data from. |
695 | * @nr_to_write: Number of pages to save. |
696 | */ |
697 | static int save_compressed_image(struct swap_map_handle *handle, |
698 | struct snapshot_handle *snapshot, |
699 | unsigned int nr_to_write) |
700 | { |
701 | unsigned int m; |
702 | int ret = 0; |
703 | int nr_pages; |
704 | int err2; |
705 | struct hib_bio_batch hb; |
706 | ktime_t start; |
707 | ktime_t stop; |
708 | size_t off; |
709 | unsigned thr, run_threads, nr_threads; |
710 | unsigned char *page = NULL; |
711 | struct cmp_data *data = NULL; |
712 | struct crc_data *crc = NULL; |
713 | |
714 | hib_init_batch(hb: &hb); |
715 | |
716 | atomic_set(v: &compressed_size, i: 0); |
717 | |
718 | /* |
719 | * We'll limit the number of compression threads to limit the memory |
720 | * footprint. |
721 | */ |
722 | nr_threads = num_online_cpus() - 1; |
723 | nr_threads = clamp_val(nr_threads, 1, CMP_THREADS); |
724 | |
725 | page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH); |
726 | if (!page) { |
727 | pr_err("Failed to allocate %s page\n", hib_comp_algo); |
728 | ret = -ENOMEM; |
729 | goto out_clean; |
730 | } |
731 | |
732 | data = vzalloc(array_size(nr_threads, sizeof(*data))); |
733 | if (!data) { |
734 | pr_err("Failed to allocate %s data\n", hib_comp_algo); |
735 | ret = -ENOMEM; |
736 | goto out_clean; |
737 | } |
738 | |
739 | crc = kzalloc(size: sizeof(*crc), GFP_KERNEL); |
740 | if (!crc) { |
741 | pr_err("Failed to allocate crc\n"); |
742 | ret = -ENOMEM; |
743 | goto out_clean; |
744 | } |
745 | |
746 | /* |
747 | * Start the compression threads. |
748 | */ |
749 | for (thr = 0; thr < nr_threads; thr++) { |
750 | init_waitqueue_head(&data[thr].go); |
751 | init_waitqueue_head(&data[thr].done); |
752 | |
753 | data[thr].cc = crypto_alloc_comp(alg_name: hib_comp_algo, type: 0, mask: 0); |
754 | if (IS_ERR_OR_NULL(ptr: data[thr].cc)) { |
755 | pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc)); |
756 | ret = -EFAULT; |
757 | goto out_clean; |
758 | } |
759 | |
760 | data[thr].thr = kthread_run(compress_threadfn, |
761 | &data[thr], |
762 | "image_compress/%u", thr); |
763 | if (IS_ERR(ptr: data[thr].thr)) { |
764 | data[thr].thr = NULL; |
765 | pr_err("Cannot start compression threads\n"); |
766 | ret = -ENOMEM; |
767 | goto out_clean; |
768 | } |
769 | } |
770 | |
771 | /* |
772 | * Start the CRC32 thread. |
773 | */ |
774 | init_waitqueue_head(&crc->go); |
775 | init_waitqueue_head(&crc->done); |
776 | |
777 | handle->crc32 = 0; |
778 | crc->crc32 = &handle->crc32; |
779 | for (thr = 0; thr < nr_threads; thr++) { |
780 | crc->unc[thr] = data[thr].unc; |
781 | crc->unc_len[thr] = &data[thr].unc_len; |
782 | } |
783 | |
784 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); |
785 | if (IS_ERR(ptr: crc->thr)) { |
786 | crc->thr = NULL; |
787 | pr_err("Cannot start CRC32 thread\n"); |
788 | ret = -ENOMEM; |
789 | goto out_clean; |
790 | } |
791 | |
792 | /* |
793 | * Adjust the number of required free pages after all allocations have |
794 | * been done. We don't want to run out of pages when writing. |
795 | */ |
796 | handle->reqd_free_pages = reqd_free_pages(); |
797 | |
798 | pr_info("Using %u thread(s) for %s compression\n", nr_threads, hib_comp_algo); |
799 | pr_info("Compressing and saving image data (%u pages)...\n", |
800 | nr_to_write); |
801 | m = nr_to_write / 10; |
802 | if (!m) |
803 | m = 1; |
804 | nr_pages = 0; |
805 | start = ktime_get(); |
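/*
 * Main loop: read up to UNC_SIZE bytes of snapshot data into each thread's
 * buffer, kick the compressors and the CRC32 thread, then write each
 * thread's compressed output (length header included) to swap one page
 * at a time.
 */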
806 | for (;;) { |
807 | for (thr = 0; thr < nr_threads; thr++) { |
808 | for (off = 0; off < UNC_SIZE; off += PAGE_SIZE) { |
809 | ret = snapshot_read_next(handle: snapshot); |
810 | if (ret < 0) |
811 | goto out_finish; |
812 | |
813 | if (!ret) |
814 | break; |
815 | |
816 | memcpy(data[thr].unc + off, |
817 | data_of(*snapshot), PAGE_SIZE); |
818 | |
819 | if (!(nr_pages % m)) |
820 | pr_info("Image saving progress: %3d%%\n", |
821 | nr_pages / m * 10); |
822 | nr_pages++; |
823 | } |
824 | if (!off) |
825 | break; |
826 | |
827 | data[thr].unc_len = off; |
828 | |
829 | atomic_set_release(v: &data[thr].ready, i: 1); |
830 | wake_up(&data[thr].go); |
831 | } |
832 | |
833 | if (!thr) |
834 | break; |
835 | |
836 | crc->run_threads = thr; |
837 | atomic_set_release(v: &crc->ready, i: 1); |
838 | wake_up(&crc->go); |
839 | |
840 | for (run_threads = thr, thr = 0; thr < run_threads; thr++) { |
841 | wait_event(data[thr].done, |
842 | atomic_read_acquire(&data[thr].stop)); |
843 | atomic_set(v: &data[thr].stop, i: 0); |
844 | |
845 | ret = data[thr].ret; |
846 | |
847 | if (ret < 0) { |
848 | pr_err("%s compression failed\n", hib_comp_algo); |
849 | goto out_finish; |
850 | } |
851 | |
852 | if (unlikely(!data[thr].cmp_len || |
853 | data[thr].cmp_len > |
854 | bytes_worst_compress(data[thr].unc_len))) { |
855 | pr_err("Invalid %s compressed length\n", hib_comp_algo); |
856 | ret = -1; |
857 | goto out_finish; |
858 | } |
859 | |
860 | *(size_t *)data[thr].cmp = data[thr].cmp_len; |
861 | |
862 | /* |
863 | * Given we are writing one page at a time to disk, we |
864 | * copy that much from the buffer, although the last |
865 | * bit will likely be smaller than a full page. This is |
866 | * OK - we saved the length of the compressed data, so |
867 | * any garbage at the end will be discarded when we |
868 | * read it. |
869 | */ |
870 | for (off = 0; |
871 | off < CMP_HEADER + data[thr].cmp_len; |
872 | off += PAGE_SIZE) { |
873 | memcpy(page, data[thr].cmp + off, PAGE_SIZE); |
874 | |
875 | ret = swap_write_page(handle, buf: page, hb: &hb); |
876 | if (ret) |
877 | goto out_finish; |
878 | } |
879 | } |
880 | |
881 | wait_event(crc->done, atomic_read_acquire(&crc->stop)); |
882 | atomic_set(v: &crc->stop, i: 0); |
883 | } |
884 | |
885 | out_finish: |
886 | err2 = hib_wait_io(hb: &hb); |
887 | stop = ktime_get(); |
888 | if (!ret) |
889 | ret = err2; |
890 | if (!ret) |
891 | pr_info("Image saving done\n"); |
892 | swsusp_show_speed(start, stop, nr_to_write, "Wrote"); |
893 | pr_info("Image size after compression: %d kbytes\n", |
894 | (atomic_read(&compressed_size) / 1024)); |
895 | |
896 | out_clean: |
897 | hib_finish_batch(hb: &hb); |
898 | if (crc) { |
899 | if (crc->thr) |
900 | kthread_stop(k: crc->thr); |
901 | kfree(objp: crc); |
902 | } |
903 | if (data) { |
904 | for (thr = 0; thr < nr_threads; thr++) { |
905 | if (data[thr].thr) |
906 | kthread_stop(k: data[thr].thr); |
907 | if (data[thr].cc) |
908 | crypto_free_comp(tfm: data[thr].cc); |
909 | } |
910 | vfree(addr: data); |
911 | } |
912 | if (page) free_page((unsigned long)page); |
913 | |
914 | return ret; |
915 | } |
916 | |
917 | /** |
918 | * enough_swap - Make sure we have enough swap to save the image. |
919 | * |
920 | * Returns TRUE or FALSE after checking the total amount of swap |
921 | * space available from the resume partition. |
922 | */ |
923 | |
924 | static int enough_swap(unsigned int nr_pages) |
925 | { |
926 | unsigned int free_swap = count_swap_pages(root_swap, 1); |
927 | unsigned int required; |
928 | |
929 | pr_debug("Free swap pages: %u\n", free_swap); |
930 | |
931 | required = PAGES_FOR_IO + nr_pages; |
932 | return free_swap > required; |
933 | } |
934 | |
935 | /** |
936 | * swsusp_write - Write entire image and metadata. |
937 | * @flags: flags to pass to the "boot" kernel in the image header |
938 | * |
939 | * It is important _NOT_ to umount filesystems at this point. We want |
940 | * them synced (in case something goes wrong) but we DO not want to mark |
941 | * filesystem clean: it is not. (And it does not matter, if we resume |
942 | * correctly, we'll mark system clean, anyway.) |
943 | */ |
944 | |
945 | int swsusp_write(unsigned int flags) |
946 | { |
947 | struct swap_map_handle handle; |
948 | struct snapshot_handle snapshot; |
949 | struct swsusp_info *header; |
950 | unsigned long pages; |
951 | int error; |
952 | |
953 | pages = snapshot_get_image_size(); |
954 | error = get_swap_writer(handle: &handle); |
955 | if (error) { |
956 | pr_err("Cannot get swap writer\n"); |
957 | return error; |
958 | } |
959 | if (flags & SF_NOCOMPRESS_MODE) { |
960 | if (!enough_swap(nr_pages: pages)) { |
961 | pr_err("Not enough free swap\n"); |
962 | error = -ENOSPC; |
963 | goto out_finish; |
964 | } |
965 | } |
966 | memset(&snapshot, 0, sizeof(struct snapshot_handle)); |
967 | error = snapshot_read_next(handle: &snapshot); |
968 | if (error < (int)PAGE_SIZE) { |
969 | if (error >= 0) |
970 | error = -EFAULT; |
971 | |
972 | goto out_finish; |
973 | } |
974 | header = (struct swsusp_info *)data_of(snapshot); |
975 | error = swap_write_page(handle: &handle, buf: header, NULL); |
976 | if (!error) { |
977 | error = (flags & SF_NOCOMPRESS_MODE) ? |
978 | save_image(handle: &handle, snapshot: &snapshot, nr_to_write: pages - 1) : |
979 | save_compressed_image(handle: &handle, snapshot: &snapshot, nr_to_write: pages - 1); |
980 | } |
981 | out_finish: |
982 | error = swap_writer_finish(handle: &handle, flags, error); |
983 | return error; |
984 | } |
985 | |
986 | /* |
987 | * The following functions allow us to read data using a swap map |
988 | * in a file-like way. |
989 | */ |
990 | |
991 | static void release_swap_reader(struct swap_map_handle *handle) |
992 | { |
993 | struct swap_map_page_list *tmp; |
994 | |
995 | while (handle->maps) { |
996 | if (handle->maps->map) |
997 | free_page((unsigned long)handle->maps->map); |
998 | tmp = handle->maps; |
999 | handle->maps = handle->maps->next; |
1000 | kfree(objp: tmp); |
1001 | } |
1002 | handle->cur = NULL; |
1003 | } |
1004 | |
1005 | static int get_swap_reader(struct swap_map_handle *handle, |
1006 | unsigned int *flags_p) |
1007 | { |
1008 | int error; |
1009 | struct swap_map_page_list *tmp, *last; |
1010 | sector_t offset; |
1011 | |
1012 | *flags_p = swsusp_header->flags; |
1013 | |
1014 | if (!swsusp_header->image) /* how can this happen? */ |
1015 | return -EINVAL; |
1016 | |
1017 | handle->cur = NULL; |
1018 | last = handle->maps = NULL; |
1019 | offset = swsusp_header->image; |
1020 | while (offset) { |
1021 | tmp = kzalloc(size: sizeof(*handle->maps), GFP_KERNEL); |
1022 | if (!tmp) { |
1023 | release_swap_reader(handle); |
1024 | return -ENOMEM; |
1025 | } |
1026 | if (!handle->maps) |
1027 | handle->maps = tmp; |
1028 | if (last) |
1029 | last->next = tmp; |
1030 | last = tmp; |
1031 | |
1032 | tmp->map = (struct swap_map_page *) |
1033 | __get_free_page(GFP_NOIO | __GFP_HIGH); |
1034 | if (!tmp->map) { |
1035 | release_swap_reader(handle); |
1036 | return -ENOMEM; |
1037 | } |
1038 | |
1039 | error = hib_submit_io(opf: REQ_OP_READ, page_off: offset, addr: tmp->map, NULL); |
1040 | if (error) { |
1041 | release_swap_reader(handle); |
1042 | return error; |
1043 | } |
1044 | offset = tmp->map->next_swap; |
1045 | } |
1046 | handle->k = 0; |
1047 | handle->cur = handle->maps->map; |
1048 | return 0; |
1049 | } |
1050 | |
1051 | static int swap_read_page(struct swap_map_handle *handle, void *buf, |
1052 | struct hib_bio_batch *hb) |
1053 | { |
1054 | sector_t offset; |
1055 | int error; |
1056 | struct swap_map_page_list *tmp; |
1057 | |
1058 | if (!handle->cur) |
1059 | return -EINVAL; |
1060 | offset = handle->cur->entries[handle->k]; |
1061 | if (!offset) |
1062 | return -EFAULT; |
1063 | error = hib_submit_io(opf: REQ_OP_READ, page_off: offset, addr: buf, hb); |
1064 | if (error) |
1065 | return error; |
1066 | if (++handle->k >= MAP_PAGE_ENTRIES) { |
1067 | handle->k = 0; |
1068 | free_page((unsigned long)handle->maps->map); |
1069 | tmp = handle->maps; |
1070 | handle->maps = handle->maps->next; |
1071 | kfree(objp: tmp); |
1072 | if (!handle->maps) |
1073 | release_swap_reader(handle); |
1074 | else |
1075 | handle->cur = handle->maps->map; |
1076 | } |
1077 | return error; |
1078 | } |
1079 | |
1080 | static int swap_reader_finish(struct swap_map_handle *handle) |
1081 | { |
1082 | release_swap_reader(handle); |
1083 | |
1084 | return 0; |
1085 | } |
1086 | |
1087 | /** |
1088 | * load_image - load the image using the swap map handle |
1089 | * @handle and the snapshot handle @snapshot |
1090 | * (assume there are @nr_to_read pages to load) |
1091 | */ |
1092 | |
1093 | static int load_image(struct swap_map_handle *handle, |
1094 | struct snapshot_handle *snapshot, |
1095 | unsigned int nr_to_read) |
1096 | { |
1097 | unsigned int m; |
1098 | int ret = 0; |
1099 | ktime_t start; |
1100 | ktime_t stop; |
1101 | struct hib_bio_batch hb; |
1102 | int err2; |
1103 | unsigned nr_pages; |
1104 | |
1105 | hib_init_batch(hb: &hb); |
1106 | |
1107 | clean_pages_on_read = true; |
1108 | pr_info("Loading image data pages (%u pages)...\n", nr_to_read); |
1109 | m = nr_to_read / 10; |
1110 | if (!m) |
1111 | m = 1; |
1112 | nr_pages = 0; |
1113 | start = ktime_get(); |
1114 | for ( ; ; ) { |
1115 | ret = snapshot_write_next(handle: snapshot); |
1116 | if (ret <= 0) |
1117 | break; |
1118 | ret = swap_read_page(handle, data_of(*snapshot), hb: &hb); |
1119 | if (ret) |
1120 | break; |
1121 | if (snapshot->sync_read) |
1122 | ret = hib_wait_io(hb: &hb); |
1123 | if (ret) |
1124 | break; |
1125 | if (!(nr_pages % m)) |
1126 | pr_info("Image loading progress: %3d%%\n", |
1127 | nr_pages / m * 10); |
1128 | nr_pages++; |
1129 | } |
1130 | err2 = hib_wait_io(hb: &hb); |
1131 | hib_finish_batch(hb: &hb); |
1132 | stop = ktime_get(); |
1133 | if (!ret) |
1134 | ret = err2; |
1135 | if (!ret) { |
1136 | pr_info("Image loading done\n"); |
1137 | ret = snapshot_write_finalize(handle: snapshot); |
1138 | if (!ret && !snapshot_image_loaded(handle: snapshot)) |
1139 | ret = -ENODATA; |
1140 | } |
1141 | swsusp_show_speed(start, stop, nr_to_read, "Read"); |
1142 | return ret; |
1143 | } |
1144 | |
1145 | /* |
1146 | * Structure used for data decompression. |
1147 | */ |
1148 | struct dec_data { |
1149 | struct task_struct *thr; /* thread */ |
1150 | struct crypto_comp *cc; /* crypto compressor stream */ |
1151 | atomic_t ready; /* ready to start flag */ |
1152 | atomic_t stop; /* ready to stop flag */ |
1153 | int ret; /* return code */ |
1154 | wait_queue_head_t go; /* start decompression */ |
1155 | wait_queue_head_t done; /* decompression done */ |
1156 | size_t unc_len; /* uncompressed length */ |
1157 | size_t cmp_len; /* compressed length */ |
1158 | unsigned char unc[UNC_SIZE]; /* uncompressed buffer */ |
1159 | unsigned char cmp[CMP_SIZE]; /* compressed buffer */ |
1160 | }; |
1161 | |
1162 | /* |
1163 | * Decompression function that runs in its own thread. |
1164 | */ |
1165 | static int decompress_threadfn(void *data) |
1166 | { |
1167 | struct dec_data *d = data; |
1168 | unsigned int unc_len = 0; |
1169 | |
1170 | while (1) { |
1171 | wait_event(d->go, atomic_read_acquire(&d->ready) || |
1172 | kthread_should_stop()); |
1173 | if (kthread_should_stop()) { |
1174 | d->thr = NULL; |
1175 | d->ret = -1; |
1176 | atomic_set_release(v: &d->stop, i: 1); |
1177 | wake_up(&d->done); |
1178 | break; |
1179 | } |
1180 | atomic_set(v: &d->ready, i: 0); |
1181 | |
1182 | unc_len = UNC_SIZE; |
1183 | d->ret = crypto_comp_decompress(tfm: d->cc, src: d->cmp + CMP_HEADER, slen: d->cmp_len, |
1184 | dst: d->unc, dlen: &unc_len); |
1185 | d->unc_len = unc_len; |
1186 | |
1187 | if (clean_pages_on_decompress) |
1188 | flush_icache_range(start: (unsigned long)d->unc, |
1189 | end: (unsigned long)d->unc + d->unc_len); |
1190 | |
1191 | atomic_set_release(v: &d->stop, i: 1); |
1192 | wake_up(&d->done); |
1193 | } |
1194 | return 0; |
1195 | } |
1196 | |
1197 | /** |
1198 | * load_compressed_image - Load compressed image data and decompress it. |
1199 | * @handle: Swap map handle to use for loading data. |
1200 | * @snapshot: Image to copy uncompressed data into. |
1201 | * @nr_to_read: Number of pages to load. |
1202 | */ |
1203 | static int load_compressed_image(struct swap_map_handle *handle, |
1204 | struct snapshot_handle *snapshot, |
1205 | unsigned int nr_to_read) |
1206 | { |
1207 | unsigned int m; |
1208 | int ret = 0; |
1209 | int eof = 0; |
1210 | struct hib_bio_batch hb; |
1211 | ktime_t start; |
1212 | ktime_t stop; |
1213 | unsigned nr_pages; |
1214 | size_t off; |
1215 | unsigned i, thr, run_threads, nr_threads; |
1216 | unsigned ring = 0, pg = 0, ring_size = 0, |
1217 | have = 0, want, need, asked = 0; |
1218 | unsigned long read_pages = 0; |
1219 | unsigned char **page = NULL; |
1220 | struct dec_data *data = NULL; |
1221 | struct crc_data *crc = NULL; |
1222 | |
1223 | hib_init_batch(hb: &hb); |
1224 | |
1225 | /* |
1226 | * We'll limit the number of decompression threads to limit the memory |
1227 | * footprint. |
1228 | */ |
1229 | nr_threads = num_online_cpus() - 1; |
1230 | nr_threads = clamp_val(nr_threads, 1, CMP_THREADS); |
1231 | |
1232 | page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page))); |
1233 | if (!page) { |
1234 | pr_err("Failed to allocate %s page\n", hib_comp_algo); |
1235 | ret = -ENOMEM; |
1236 | goto out_clean; |
1237 | } |
1238 | |
1239 | data = vzalloc(array_size(nr_threads, sizeof(*data))); |
1240 | if (!data) { |
1241 | pr_err("Failed to allocate %s data\n", hib_comp_algo); |
1242 | ret = -ENOMEM; |
1243 | goto out_clean; |
1244 | } |
1245 | |
1246 | crc = kzalloc(size: sizeof(*crc), GFP_KERNEL); |
1247 | if (!crc) { |
1248 | pr_err("Failed to allocate crc\n"); |
1249 | ret = -ENOMEM; |
1250 | goto out_clean; |
1251 | } |
1252 | |
1253 | clean_pages_on_decompress = true; |
1254 | |
1255 | /* |
1256 | * Start the decompression threads. |
1257 | */ |
1258 | for (thr = 0; thr < nr_threads; thr++) { |
1259 | init_waitqueue_head(&data[thr].go); |
1260 | init_waitqueue_head(&data[thr].done); |
1261 | |
1262 | data[thr].cc = crypto_alloc_comp(alg_name: hib_comp_algo, type: 0, mask: 0); |
1263 | if (IS_ERR_OR_NULL(ptr: data[thr].cc)) { |
1264 | pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc)); |
1265 | ret = -EFAULT; |
1266 | goto out_clean; |
1267 | } |
1268 | |
1269 | data[thr].thr = kthread_run(decompress_threadfn, |
1270 | &data[thr], |
1271 | "image_decompress/%u", thr); |
1272 | if (IS_ERR(ptr: data[thr].thr)) { |
1273 | data[thr].thr = NULL; |
1274 | pr_err("Cannot start decompression threads\n"); |
1275 | ret = -ENOMEM; |
1276 | goto out_clean; |
1277 | } |
1278 | } |
1279 | |
1280 | /* |
1281 | * Start the CRC32 thread. |
1282 | */ |
1283 | init_waitqueue_head(&crc->go); |
1284 | init_waitqueue_head(&crc->done); |
1285 | |
1286 | handle->crc32 = 0; |
1287 | crc->crc32 = &handle->crc32; |
1288 | for (thr = 0; thr < nr_threads; thr++) { |
1289 | crc->unc[thr] = data[thr].unc; |
1290 | crc->unc_len[thr] = &data[thr].unc_len; |
1291 | } |
1292 | |
1293 | crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); |
1294 | if (IS_ERR(ptr: crc->thr)) { |
1295 | crc->thr = NULL; |
1296 | pr_err("Cannot start CRC32 thread\n"); |
1297 | ret = -ENOMEM; |
1298 | goto out_clean; |
1299 | } |
1300 | |
1301 | /* |
1302 | * Set the number of pages for read buffering. |
1303 | * This is complete guesswork, because we'll only know the real |
1304 | * picture once prepare_image() is called, which is much later on |
1305 | * during the image load phase. We'll assume the worst case and |
1306 | * say that none of the image pages are from high memory. |
1307 | */ |
1308 | if (low_free_pages() > snapshot_get_image_size()) |
1309 | read_pages = (low_free_pages() - snapshot_get_image_size()) / 2; |
1310 | read_pages = clamp_val(read_pages, CMP_MIN_RD_PAGES, CMP_MAX_RD_PAGES); |
1311 | |
1312 | for (i = 0; i < read_pages; i++) { |
1313 | page[i] = (void *)__get_free_page(i < CMP_PAGES ? |
1314 | GFP_NOIO | __GFP_HIGH : |
1315 | GFP_NOIO | __GFP_NOWARN | |
1316 | __GFP_NORETRY); |
1317 | |
1318 | if (!page[i]) { |
1319 | if (i < CMP_PAGES) { |
1320 | ring_size = i; |
1321 | pr_err("Failed to allocate %s pages\n", hib_comp_algo); |
1322 | ret = -ENOMEM; |
1323 | goto out_clean; |
1324 | } else { |
1325 | break; |
1326 | } |
1327 | } |
1328 | } |
1329 | want = ring_size = i; |
1330 | |
1331 | pr_info("Using %u thread(s) for %s decompression\n", nr_threads, hib_comp_algo); |
1332 | pr_info("Loading and decompressing image data (%u pages)...\n", |
1333 | nr_to_read); |
1334 | m = nr_to_read / 10; |
1335 | if (!m) |
1336 | m = 1; |
1337 | nr_pages = 0; |
1338 | start = ktime_get(); |
1339 | |
1340 | ret = snapshot_write_next(handle: snapshot); |
1341 | if (ret <= 0) |
1342 | goto out_finish; |
1343 | |
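/*
 * Main loop: page[] is used as a ring of read-ahead buffers. 'want' counts
 * the free ring slots we may still submit reads into, 'asked' the reads in
 * flight, and 'have' the pages whose data has arrived and is ready to be
 * handed to the decompression threads.
 */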
1344 | for(;;) { |
1345 | for (i = 0; !eof && i < want; i++) { |
1346 | ret = swap_read_page(handle, buf: page[ring], hb: &hb); |
1347 | if (ret) { |
1348 | /* |
1349 | * On real read error, finish. On end of data, |
1350 | * set EOF flag and just exit the read loop. |
1351 | */ |
1352 | if (handle->cur && |
1353 | handle->cur->entries[handle->k]) { |
1354 | goto out_finish; |
1355 | } else { |
1356 | eof = 1; |
1357 | break; |
1358 | } |
1359 | } |
1360 | if (++ring >= ring_size) |
1361 | ring = 0; |
1362 | } |
1363 | asked += i; |
1364 | want -= i; |
1365 | |
1366 | /* |
1367 | * We are out of data, wait for some more. |
1368 | */ |
1369 | if (!have) { |
1370 | if (!asked) |
1371 | break; |
1372 | |
1373 | ret = hib_wait_io(hb: &hb); |
1374 | if (ret) |
1375 | goto out_finish; |
1376 | have += asked; |
1377 | asked = 0; |
1378 | if (eof) |
1379 | eof = 2; |
1380 | } |
1381 | |
1382 | if (crc->run_threads) { |
1383 | wait_event(crc->done, atomic_read_acquire(&crc->stop)); |
1384 | atomic_set(v: &crc->stop, i: 0); |
1385 | crc->run_threads = 0; |
1386 | } |
1387 | |
1388 | for (thr = 0; have && thr < nr_threads; thr++) { |
1389 | data[thr].cmp_len = *(size_t *)page[pg]; |
1390 | if (unlikely(!data[thr].cmp_len || |
1391 | data[thr].cmp_len > |
1392 | bytes_worst_compress(UNC_SIZE))) { |
1393 | pr_err("Invalid %s compressed length\n", hib_comp_algo); |
1394 | ret = -1; |
1395 | goto out_finish; |
1396 | } |
1397 | |
1398 | need = DIV_ROUND_UP(data[thr].cmp_len + CMP_HEADER, |
1399 | PAGE_SIZE); |
1400 | if (need > have) { |
1401 | if (eof > 1) { |
1402 | ret = -1; |
1403 | goto out_finish; |
1404 | } |
1405 | break; |
1406 | } |
1407 | |
1408 | for (off = 0; |
1409 | off < CMP_HEADER + data[thr].cmp_len; |
1410 | off += PAGE_SIZE) { |
1411 | memcpy(data[thr].cmp + off, |
1412 | page[pg], PAGE_SIZE); |
1413 | have--; |
1414 | want++; |
1415 | if (++pg >= ring_size) |
1416 | pg = 0; |
1417 | } |
1418 | |
1419 | atomic_set_release(v: &data[thr].ready, i: 1); |
1420 | wake_up(&data[thr].go); |
1421 | } |
1422 | |
1423 | /* |
1424 | * Wait for more data while we are decompressing. |
1425 | */ |
1426 | if (have < CMP_PAGES && asked) { |
1427 | ret = hib_wait_io(hb: &hb); |
1428 | if (ret) |
1429 | goto out_finish; |
1430 | have += asked; |
1431 | asked = 0; |
1432 | if (eof) |
1433 | eof = 2; |
1434 | } |
1435 | |
1436 | for (run_threads = thr, thr = 0; thr < run_threads; thr++) { |
1437 | wait_event(data[thr].done, |
1438 | atomic_read_acquire(&data[thr].stop)); |
1439 | atomic_set(v: &data[thr].stop, i: 0); |
1440 | |
1441 | ret = data[thr].ret; |
1442 | |
1443 | if (ret < 0) { |
1444 | pr_err("%s decompression failed\n", hib_comp_algo); |
1445 | goto out_finish; |
1446 | } |
1447 | |
1448 | if (unlikely(!data[thr].unc_len || |
1449 | data[thr].unc_len > UNC_SIZE || |
1450 | data[thr].unc_len & (PAGE_SIZE - 1))) { |
1451 | pr_err("Invalid %s uncompressed length\n", hib_comp_algo); |
1452 | ret = -1; |
1453 | goto out_finish; |
1454 | } |
1455 | |
1456 | for (off = 0; |
1457 | off < data[thr].unc_len; off += PAGE_SIZE) { |
1458 | memcpy(data_of(*snapshot), |
1459 | data[thr].unc + off, PAGE_SIZE); |
1460 | |
1461 | if (!(nr_pages % m)) |
1462 | pr_info("Image loading progress: %3d%%\n", |
1463 | nr_pages / m * 10); |
1464 | nr_pages++; |
1465 | |
1466 | ret = snapshot_write_next(handle: snapshot); |
1467 | if (ret <= 0) { |
1468 | crc->run_threads = thr + 1; |
1469 | atomic_set_release(v: &crc->ready, i: 1); |
1470 | wake_up(&crc->go); |
1471 | goto out_finish; |
1472 | } |
1473 | } |
1474 | } |
1475 | |
1476 | crc->run_threads = thr; |
1477 | atomic_set_release(v: &crc->ready, i: 1); |
1478 | wake_up(&crc->go); |
1479 | } |
1480 | |
1481 | out_finish: |
1482 | if (crc->run_threads) { |
1483 | wait_event(crc->done, atomic_read_acquire(&crc->stop)); |
1484 | atomic_set(v: &crc->stop, i: 0); |
1485 | } |
1486 | stop = ktime_get(); |
1487 | if (!ret) { |
1488 | pr_info("Image loading done\n"); |
1489 | ret = snapshot_write_finalize(handle: snapshot); |
1490 | if (!ret && !snapshot_image_loaded(handle: snapshot)) |
1491 | ret = -ENODATA; |
1492 | if (!ret) { |
1493 | if (swsusp_header->flags & SF_CRC32_MODE) { |
1494 | if(handle->crc32 != swsusp_header->crc32) { |
1495 | pr_err("Invalid image CRC32!\n"); |
1496 | ret = -ENODATA; |
1497 | } |
1498 | } |
1499 | } |
1500 | } |
1501 | swsusp_show_speed(start, stop, nr_to_read, "Read"); |
1502 | out_clean: |
1503 | hib_finish_batch(hb: &hb); |
1504 | for (i = 0; i < ring_size; i++) |
1505 | free_page((unsigned long)page[i]); |
1506 | if (crc) { |
1507 | if (crc->thr) |
1508 | kthread_stop(k: crc->thr); |
1509 | kfree(objp: crc); |
1510 | } |
1511 | if (data) { |
1512 | for (thr = 0; thr < nr_threads; thr++) { |
1513 | if (data[thr].thr) |
1514 | kthread_stop(k: data[thr].thr); |
1515 | if (data[thr].cc) |
1516 | crypto_free_comp(tfm: data[thr].cc); |
1517 | } |
1518 | vfree(addr: data); |
1519 | } |
1520 | vfree(addr: page); |
1521 | |
1522 | return ret; |
1523 | } |
1524 | |
1525 | /** |
1526 | * swsusp_read - read the hibernation image. |
1527 | * @flags_p: flags passed by the "frozen" kernel in the image header are |
1528 | * written into this memory location. |
1529 | */ |
1530 | |
1531 | int swsusp_read(unsigned int *flags_p) |
1532 | { |
1533 | int error; |
1534 | struct swap_map_handle handle; |
1535 | struct snapshot_handle snapshot; |
1536 | struct swsusp_info *header; |
1537 | |
1538 | memset(&snapshot, 0, sizeof(struct snapshot_handle)); |
1539 | error = snapshot_write_next(handle: &snapshot); |
1540 | if (error < (int)PAGE_SIZE) |
1541 | return error < 0 ? error : -EFAULT; |
1542 | header = (struct swsusp_info *)data_of(snapshot); |
1543 | error = get_swap_reader(handle: &handle, flags_p); |
1544 | if (error) |
1545 | goto end; |
1546 | if (!error) |
1547 | error = swap_read_page(handle: &handle, buf: header, NULL); |
1548 | if (!error) { |
1549 | error = (*flags_p & SF_NOCOMPRESS_MODE) ? |
1550 | load_image(handle: &handle, snapshot: &snapshot, nr_to_read: header->pages - 1) : |
1551 | load_compressed_image(handle: &handle, snapshot: &snapshot, nr_to_read: header->pages - 1); |
1552 | } |
1553 | swap_reader_finish(handle: &handle); |
1554 | end: |
1555 | if (!error) |
1556 | pr_debug("Image successfully loaded\n"); |
1557 | else |
1558 | pr_debug("Error %d resuming\n", error); |
1559 | return error; |
1560 | } |
1561 | |
1562 | static void *swsusp_holder; |
1563 | |
1564 | /** |
1565 | * swsusp_check - Open the resume device and check for the swsusp signature. |
1566 | * @exclusive: Open the resume device exclusively. |
1567 | */ |
1568 | |
1569 | int swsusp_check(bool exclusive) |
1570 | { |
1571 | void *holder = exclusive ? &swsusp_holder : NULL; |
1572 | int error; |
1573 | |
1574 | hib_resume_bdev_file = bdev_file_open_by_dev(dev: swsusp_resume_device, |
1575 | BLK_OPEN_READ, holder, NULL); |
1576 | if (!IS_ERR(ptr: hib_resume_bdev_file)) { |
1577 | set_blocksize(bdev: file_bdev(bdev_file: hib_resume_bdev_file), PAGE_SIZE); |
1578 | clear_page(page: swsusp_header); |
1579 | error = hib_submit_io(opf: REQ_OP_READ, page_off: swsusp_resume_block, |
1580 | addr: swsusp_header, NULL); |
1581 | if (error) |
1582 | goto put; |
1583 | |
1584 | if (!memcmp(HIBERNATE_SIG, q: swsusp_header->sig, size: 10)) { |
1585 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); |
1586 | swsusp_header_flags = swsusp_header->flags; |
1587 | /* Reset swap signature now */ |
1588 | error = hib_submit_io(opf: REQ_OP_WRITE | REQ_SYNC, |
1589 | page_off: swsusp_resume_block, |
1590 | addr: swsusp_header, NULL); |
1591 | } else { |
1592 | error = -EINVAL; |
1593 | } |
1594 | if (!error && swsusp_header->flags & SF_HW_SIG && |
1595 | swsusp_header->hw_sig != swsusp_hardware_signature) { |
1596 | pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n", |
1597 | swsusp_header->hw_sig, swsusp_hardware_signature); |
1598 | error = -EINVAL; |
1599 | } |
1600 | |
1601 | put: |
1602 | if (error) |
1603 | fput(hib_resume_bdev_file); |
1604 | else |
1605 | pr_debug("Image signature found, resuming\n"); |
1606 | } else { |
1607 | error = PTR_ERR(ptr: hib_resume_bdev_file); |
1608 | } |
1609 | |
1610 | if (error) |
1611 | pr_debug("Image not found (code %d)\n", error); |
1612 | |
1613 | return error; |
1614 | } |
1615 | |
1616 | /** |
1617 | * swsusp_close - close resume device. |
1618 | */ |
1619 | |
1620 | void swsusp_close(void) |
1621 | { |
1622 | if (IS_ERR(ptr: hib_resume_bdev_file)) { |
1623 | pr_debug("Image device not initialised\n"); |
1624 | return; |
1625 | } |
1626 | |
1627 | fput(hib_resume_bdev_file); |
1628 | } |
1629 | |
1630 | /** |
1631 | * swsusp_unmark - Unmark swsusp signature in the resume device |
1632 | */ |
1633 | |
1634 | #ifdef CONFIG_SUSPEND |
1635 | int swsusp_unmark(void) |
1636 | { |
1637 | int error; |
1638 | |
1639 | hib_submit_io(opf: REQ_OP_READ, page_off: swsusp_resume_block, |
1640 | addr: swsusp_header, NULL); |
1641 | if (!memcmp(HIBERNATE_SIG,q: swsusp_header->sig, size: 10)) { |
1642 | memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10); |
1643 | error = hib_submit_io(opf: REQ_OP_WRITE | REQ_SYNC, |
1644 | page_off: swsusp_resume_block, |
1645 | addr: swsusp_header, NULL); |
1646 | } else { |
1647 | pr_err("Cannot find swsusp signature!\n"); |
1648 | error = -ENODEV; |
1649 | } |
1650 | |
1651 | /* |
1652 | * We just returned from suspend, we don't need the image any more. |
1653 | */ |
1654 | free_all_swap_pages(swap: root_swap); |
1655 | |
1656 | return error; |
1657 | } |
1658 | #endif |
1659 | |
1660 | static int __init swsusp_header_init(void) |
1661 | { |
1662 | swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL); |
1663 | if (!swsusp_header) |
1664 | panic(fmt: "Could not allocate memory for swsusp_header\n"); |
1665 | return 0; |
1666 | } |
1667 | |
1668 | core_initcall(swsusp_header_init); |
1669 |