1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * linux/kernel/power/swap.c |
4 | * |
5 | * This file provides functions for reading the suspend image from |
6 | * and writing it to a swap partition. |
7 | * |
8 | * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> |
9 | * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> |
10 | * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com> |
11 | */ |
12 | |
13 | #define pr_fmt(fmt) "PM: " fmt |
14 | |
15 | #include <linux/module.h> |
16 | #include <linux/file.h> |
17 | #include <linux/delay.h> |
18 | #include <linux/bitops.h> |
19 | #include <linux/device.h> |
20 | #include <linux/bio.h> |
21 | #include <linux/blkdev.h> |
22 | #include <linux/swap.h> |
23 | #include <linux/swapops.h> |
24 | #include <linux/pm.h> |
25 | #include <linux/slab.h> |
26 | #include <linux/lzo.h> |
27 | #include <linux/vmalloc.h> |
28 | #include <linux/cpumask.h> |
29 | #include <linux/atomic.h> |
30 | #include <linux/kthread.h> |
31 | #include <linux/crc32.h> |
32 | #include <linux/ktime.h> |
33 | |
34 | #include "power.h" |
35 | |
36 | #define HIBERNATE_SIG "S1SUSPEND" |
37 | |
38 | u32 swsusp_hardware_signature; |
39 | |
40 | /* |
41 | * When reading an {un,}compressed image, we may restore pages in place, |
42 | * in which case some architectures need these pages cleaning before they |
43 | * can be executed. We don't know which pages these may be, so clean the lot. |
44 | */ |
45 | static bool clean_pages_on_read; |
46 | static bool clean_pages_on_decompress; |
47 | |
48 | /* |
49 | * The swap map is a data structure used for keeping track of each page |
* written to a swap partition. It consists of many swap_map_page
* structures, each of which contains an array of MAP_PAGE_ENTRIES
* swap entries. These structures are stored in swap and linked
* together with the help of the .next_swap member.
54 | * |
55 | * The swap map is created during suspend. The swap map pages are |
56 | * allocated and populated one at a time, so we only need one memory |
57 | * page to set up the entire structure. |
58 | * |
* During resume we read all the swap_map_page structures into a list.
60 | */ |
61 | |
62 | #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) |
63 | |
64 | /* |
65 | * Number of free pages that are not high. |
66 | */ |
67 | static inline unsigned long low_free_pages(void) |
68 | { |
69 | return nr_free_pages() - nr_free_highpages(); |
70 | } |
71 | |
72 | /* |
73 | * Number of pages required to be kept free while writing the image. Always |
74 | * half of all available low pages before the writing starts. |
75 | */ |
76 | static inline unsigned long reqd_free_pages(void) |
77 | { |
78 | return low_free_pages() / 2; |
79 | } |
80 | |
81 | struct swap_map_page { |
82 | sector_t entries[MAP_PAGE_ENTRIES]; |
83 | sector_t next_swap; |
84 | }; |
85 | |
86 | struct swap_map_page_list { |
87 | struct swap_map_page *map; |
88 | struct swap_map_page_list *next; |
89 | }; |
90 | |
91 | /* |
92 | * The swap_map_handle structure is used for handling swap in |
* a file-like way.
94 | */ |
95 | |
96 | struct swap_map_handle { |
97 | struct swap_map_page *cur; |
98 | struct swap_map_page_list *maps; |
99 | sector_t cur_swap; |
100 | sector_t first_sector; |
101 | unsigned int k; |
102 | unsigned long reqd_free_pages; |
103 | u32 crc32; |
104 | }; |
105 | |
struct swsusp_header {
char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
sizeof(u32) - sizeof(u32)];
u32 hw_sig;
u32 crc32;
sector_t image;
unsigned int flags; /* Flags to pass to the "boot" kernel */
char orig_sig[10];
char sig[10];
} __packed;

static struct swsusp_header *swsusp_header;
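
/*
 * Note that the layout above is deliberate: sig occupies the last 10
 * bytes of the page, which is exactly where the swap signature
 * ("SWAP-SPACE" or "SWAPSPACE2") lives in the first page of a swap
 * partition. Marking the swap thus replaces the swap signature with
 * HIBERNATE_SIG while preserving the original in orig_sig:
 *
 *	suspend: orig_sig <- sig; sig <- "S1SUSPEND"  (mark_swapfiles())
 *	resume:  sig <- orig_sig  (swsusp_check()/swsusp_unmark())
 */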
118 | |
119 | /* |
120 | * The following functions are used for tracing the allocated |
121 | * swap pages, so that they can be freed in case of an error. |
122 | */ |
123 | |
124 | struct swsusp_extent { |
125 | struct rb_node node; |
126 | unsigned long start; |
127 | unsigned long end; |
128 | }; |
129 | |
130 | static struct rb_root swsusp_extents = RB_ROOT; |
131 | |
132 | static int swsusp_extents_insert(unsigned long swap_offset) |
133 | { |
134 | struct rb_node **new = &(swsusp_extents.rb_node); |
135 | struct rb_node *parent = NULL; |
136 | struct swsusp_extent *ext; |
137 | |
138 | /* Figure out where to put the new node */ |
139 | while (*new) { |
140 | ext = rb_entry(*new, struct swsusp_extent, node); |
141 | parent = *new; |
142 | if (swap_offset < ext->start) { |
143 | /* Try to merge */ |
144 | if (swap_offset == ext->start - 1) { |
145 | ext->start--; |
146 | return 0; |
147 | } |
148 | new = &((*new)->rb_left); |
149 | } else if (swap_offset > ext->end) { |
150 | /* Try to merge */ |
151 | if (swap_offset == ext->end + 1) { |
152 | ext->end++; |
153 | return 0; |
154 | } |
155 | new = &((*new)->rb_right); |
156 | } else { |
157 | /* It already is in the tree */ |
158 | return -EINVAL; |
159 | } |
160 | } |
161 | /* Add the new node and rebalance the tree. */ |
ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
163 | if (!ext) |
164 | return -ENOMEM; |
165 | |
166 | ext->start = swap_offset; |
167 | ext->end = swap_offset; |
rb_link_node(&ext->node, parent, new);
169 | rb_insert_color(&ext->node, &swsusp_extents); |
170 | return 0; |
171 | } |
172 | |
173 | /* |
174 | * alloc_swapdev_block - allocate a swap page and register that it has |
175 | * been allocated, so that it can be freed in case of an error. |
176 | */ |
177 | |
178 | sector_t alloc_swapdev_block(int swap) |
179 | { |
180 | unsigned long offset; |
181 | |
offset = swp_offset(get_swap_page_of_type(swap));
if (offset) {
if (swsusp_extents_insert(offset))
swap_free(swp_entry(swap, offset));
186 | else |
187 | return swapdev_block(swap, offset); |
188 | } |
189 | return 0; |
190 | } |
191 | |
192 | /* |
193 | * free_all_swap_pages - free swap pages allocated for saving image data. |
194 | * It also frees the extents used to register which swap entries had been |
195 | * allocated. |
196 | */ |
197 | |
198 | void free_all_swap_pages(int swap) |
199 | { |
200 | struct rb_node *node; |
201 | |
202 | while ((node = swsusp_extents.rb_node)) { |
203 | struct swsusp_extent *ext; |
204 | unsigned long offset; |
205 | |
206 | ext = rb_entry(node, struct swsusp_extent, node); |
207 | rb_erase(node, &swsusp_extents); |
208 | for (offset = ext->start; offset <= ext->end; offset++) |
swap_free(swp_entry(swap, offset));
210 | |
kfree(ext);
212 | } |
213 | } |
214 | |
215 | int swsusp_swap_in_use(void) |
216 | { |
217 | return (swsusp_extents.rb_node != NULL); |
218 | } |
219 | |
220 | /* |
221 | * General things |
222 | */ |
223 | |
224 | static unsigned short root_swap = 0xffff; |
225 | static struct bdev_handle *hib_resume_bdev_handle; |
226 | |
227 | struct hib_bio_batch { |
228 | atomic_t count; |
229 | wait_queue_head_t wait; |
230 | blk_status_t error; |
231 | struct blk_plug plug; |
232 | }; |
233 | |
234 | static void hib_init_batch(struct hib_bio_batch *hb) |
235 | { |
atomic_set(&hb->count, 0);
237 | init_waitqueue_head(&hb->wait); |
238 | hb->error = BLK_STS_OK; |
239 | blk_start_plug(&hb->plug); |
240 | } |
241 | |
242 | static void hib_finish_batch(struct hib_bio_batch *hb) |
243 | { |
244 | blk_finish_plug(&hb->plug); |
245 | } |
246 | |
247 | static void hib_end_io(struct bio *bio) |
248 | { |
249 | struct hib_bio_batch *hb = bio->bi_private; |
250 | struct page *page = bio_first_page_all(bio); |
251 | |
252 | if (bio->bi_status) { |
pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
254 | MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), |
255 | (unsigned long long)bio->bi_iter.bi_sector); |
256 | } |
257 | |
258 | if (bio_data_dir(bio) == WRITE) |
259 | put_page(page); |
260 | else if (clean_pages_on_read) |
flush_icache_range((unsigned long)page_address(page),
(unsigned long)page_address(page) + PAGE_SIZE);
263 | |
264 | if (bio->bi_status && !hb->error) |
265 | hb->error = bio->bi_status; |
if (atomic_dec_and_test(&hb->count))
267 | wake_up(&hb->wait); |
268 | |
269 | bio_put(bio); |
270 | } |
271 | |
272 | static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr, |
273 | struct hib_bio_batch *hb) |
274 | { |
275 | struct page *page = virt_to_page(addr); |
276 | struct bio *bio; |
277 | int error = 0; |
278 | |
bio = bio_alloc(hib_resume_bdev_handle->bdev, 1, opf,
GFP_NOIO | __GFP_HIGH);
281 | bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); |
282 | |
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
pr_err("Adding page to bio failed at %llu\n",
285 | (unsigned long long)bio->bi_iter.bi_sector); |
286 | bio_put(bio); |
287 | return -EFAULT; |
288 | } |
289 | |
290 | if (hb) { |
291 | bio->bi_end_io = hib_end_io; |
292 | bio->bi_private = hb; |
atomic_inc(&hb->count);
294 | submit_bio(bio); |
295 | } else { |
296 | error = submit_bio_wait(bio); |
297 | bio_put(bio); |
298 | } |
299 | |
300 | return error; |
301 | } |
302 | |
303 | static int hib_wait_io(struct hib_bio_batch *hb) |
304 | { |
305 | /* |
306 | * We are relying on the behavior of blk_plug that a thread with |
307 | * a plug will flush the plug list before sleeping. |
308 | */ |
309 | wait_event(hb->wait, atomic_read(&hb->count) == 0); |
return blk_status_to_errno(hb->error);
311 | } |
312 | |
313 | /* |
314 | * Saving part |
315 | */ |
316 | static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) |
317 | { |
318 | int error; |
319 | |
hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL);
if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
!memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
324 | memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); |
325 | swsusp_header->image = handle->first_sector; |
326 | if (swsusp_hardware_signature) { |
327 | swsusp_header->hw_sig = swsusp_hardware_signature; |
328 | flags |= SF_HW_SIG; |
329 | } |
330 | swsusp_header->flags = flags; |
331 | if (flags & SF_CRC32_MODE) |
332 | swsusp_header->crc32 = handle->crc32; |
error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
swsusp_resume_block, swsusp_header, NULL);
} else {
pr_err("Swap header not found!\n");
337 | error = -ENODEV; |
338 | } |
339 | return error; |
340 | } |
341 | |
342 | /** |
343 | * swsusp_swap_check - check if the resume device is a swap device |
344 | * and get its index (if so) |
345 | * |
* This is called before saving the image.
347 | */ |
348 | static int swsusp_swap_check(void) |
349 | { |
350 | int res; |
351 | |
352 | if (swsusp_resume_device) |
res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
else
res = find_first_swap(&swsusp_resume_device);
356 | if (res < 0) |
357 | return res; |
358 | root_swap = res; |
359 | |
hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
BLK_OPEN_WRITE, NULL, NULL);
if (IS_ERR(hib_resume_bdev_handle))
return PTR_ERR(hib_resume_bdev_handle);

res = set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
if (res < 0)
bdev_release(hib_resume_bdev_handle);
368 | |
369 | return res; |
370 | } |
371 | |
372 | /** |
373 | * write_page - Write one page to given swap location. |
374 | * @buf: Address we're writing. |
375 | * @offset: Offset of the swap page we're writing to. |
376 | * @hb: bio completion batch |
377 | */ |
378 | |
379 | static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb) |
380 | { |
381 | void *src; |
382 | int ret; |
383 | |
384 | if (!offset) |
385 | return -ENOSPC; |
386 | |
387 | if (hb) { |
388 | src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN | |
389 | __GFP_NORETRY); |
390 | if (src) { |
copy_page(src, buf);
392 | } else { |
393 | ret = hib_wait_io(hb); /* Free pages */ |
394 | if (ret) |
395 | return ret; |
396 | src = (void *)__get_free_page(GFP_NOIO | |
397 | __GFP_NOWARN | |
398 | __GFP_NORETRY); |
399 | if (src) { |
copy_page(src, buf);
401 | } else { |
402 | WARN_ON_ONCE(1); |
403 | hb = NULL; /* Go synchronous */ |
404 | src = buf; |
405 | } |
406 | } |
407 | } else { |
408 | src = buf; |
409 | } |
return hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, src, hb);
411 | } |
412 | |
413 | static void release_swap_writer(struct swap_map_handle *handle) |
414 | { |
415 | if (handle->cur) |
416 | free_page((unsigned long)handle->cur); |
417 | handle->cur = NULL; |
418 | } |
419 | |
420 | static int get_swap_writer(struct swap_map_handle *handle) |
421 | { |
422 | int ret; |
423 | |
424 | ret = swsusp_swap_check(); |
425 | if (ret) { |
426 | if (ret != -ENOSPC) |
pr_err("Cannot find swap device, try swapon -a\n");
428 | return ret; |
429 | } |
430 | handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); |
431 | if (!handle->cur) { |
432 | ret = -ENOMEM; |
433 | goto err_close; |
434 | } |
handle->cur_swap = alloc_swapdev_block(root_swap);
436 | if (!handle->cur_swap) { |
437 | ret = -ENOSPC; |
438 | goto err_rel; |
439 | } |
440 | handle->k = 0; |
441 | handle->reqd_free_pages = reqd_free_pages(); |
442 | handle->first_sector = handle->cur_swap; |
443 | return 0; |
444 | err_rel: |
445 | release_swap_writer(handle); |
446 | err_close: |
447 | swsusp_close(); |
448 | return ret; |
449 | } |
450 | |
451 | static int swap_write_page(struct swap_map_handle *handle, void *buf, |
452 | struct hib_bio_batch *hb) |
453 | { |
454 | int error = 0; |
455 | sector_t offset; |
456 | |
457 | if (!handle->cur) |
458 | return -EINVAL; |
offset = alloc_swapdev_block(root_swap);
460 | error = write_page(buf, offset, hb); |
461 | if (error) |
462 | return error; |
463 | handle->cur->entries[handle->k++] = offset; |
464 | if (handle->k >= MAP_PAGE_ENTRIES) { |
offset = alloc_swapdev_block(root_swap);
466 | if (!offset) |
467 | return -ENOSPC; |
468 | handle->cur->next_swap = offset; |
error = write_page(handle->cur, handle->cur_swap, hb);
470 | if (error) |
471 | goto out; |
clear_page(handle->cur);
473 | handle->cur_swap = offset; |
474 | handle->k = 0; |
475 | |
476 | if (hb && low_free_pages() <= handle->reqd_free_pages) { |
477 | error = hib_wait_io(hb); |
478 | if (error) |
479 | goto out; |
480 | /* |
481 | * Recalculate the number of required free pages, to |
482 | * make sure we never take more than half. |
483 | */ |
484 | handle->reqd_free_pages = reqd_free_pages(); |
485 | } |
486 | } |
487 | out: |
488 | return error; |
489 | } |
490 | |
491 | static int flush_swap_writer(struct swap_map_handle *handle) |
492 | { |
493 | if (handle->cur && handle->cur_swap) |
return write_page(handle->cur, handle->cur_swap, NULL);
495 | else |
496 | return -EINVAL; |
497 | } |
498 | |
499 | static int swap_writer_finish(struct swap_map_handle *handle, |
500 | unsigned int flags, int error) |
501 | { |
502 | if (!error) { |
pr_info("S");
error = mark_swapfiles(handle, flags);
pr_cont("|\n");
506 | flush_swap_writer(handle); |
507 | } |
508 | |
509 | if (error) |
free_all_swap_pages(root_swap);
511 | release_swap_writer(handle); |
512 | swsusp_close(); |
513 | |
514 | return error; |
515 | } |
516 | |
517 | /* We need to remember how much compressed data we need to read. */ |
#define LZO_HEADER sizeof(size_t)
519 | |
520 | /* Number of pages/bytes we'll compress at one time. */ |
521 | #define LZO_UNC_PAGES 32 |
522 | #define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE) |
523 | |
524 | /* Number of pages/bytes we need for compressed data (worst case). */ |
525 | #define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \ |
526 | LZO_HEADER, PAGE_SIZE) |
527 | #define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) |
528 | |
529 | /* Maximum number of threads for compression/decompression. */ |
530 | #define LZO_THREADS 3 |
531 | |
532 | /* Minimum/maximum number of pages for read buffering. */ |
533 | #define LZO_MIN_RD_PAGES 1024 |
534 | #define LZO_MAX_RD_PAGES 8192 |
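
/*
 * Worked example, assuming PAGE_SIZE == 4096: LZO_UNC_SIZE is 128 KB
 * (32 pages), lzo1x_worst_compress() expands that to at most
 * 131072 + 131072/16 + 64 + 3 = 139331 bytes, and with the LZO_HEADER
 * size_t prepended LZO_CMP_PAGES comes out as 35 pages. Each
 * compressed block is written out as:
 *
 *	[ size_t cmp_len | cmp_len bytes of LZO data | padding to a page ]
 */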
535 | |
536 | |
537 | /** |
538 | * save_image - save the suspend image data |
539 | */ |
540 | |
541 | static int save_image(struct swap_map_handle *handle, |
542 | struct snapshot_handle *snapshot, |
543 | unsigned int nr_to_write) |
544 | { |
545 | unsigned int m; |
546 | int ret; |
547 | int nr_pages; |
548 | int err2; |
549 | struct hib_bio_batch hb; |
550 | ktime_t start; |
551 | ktime_t stop; |
552 | |
hib_init_batch(&hb);
554 | |
pr_info("Saving image data pages (%u pages)...\n",
556 | nr_to_write); |
557 | m = nr_to_write / 10; |
558 | if (!m) |
559 | m = 1; |
560 | nr_pages = 0; |
561 | start = ktime_get(); |
562 | while (1) { |
ret = snapshot_read_next(snapshot);
564 | if (ret <= 0) |
565 | break; |
ret = swap_write_page(handle, data_of(*snapshot), &hb);
567 | if (ret) |
568 | break; |
569 | if (!(nr_pages % m)) |
pr_info("Image saving progress: %3d%%\n",
571 | nr_pages / m * 10); |
572 | nr_pages++; |
573 | } |
err2 = hib_wait_io(&hb);
hib_finish_batch(&hb);
576 | stop = ktime_get(); |
577 | if (!ret) |
578 | ret = err2; |
579 | if (!ret) |
pr_info("Image saving done\n");
swsusp_show_speed(start, stop, nr_to_write, "Wrote");
582 | return ret; |
583 | } |
584 | |
585 | /* |
586 | * Structure used for CRC32. |
587 | */ |
588 | struct crc_data { |
589 | struct task_struct *thr; /* thread */ |
590 | atomic_t ready; /* ready to start flag */ |
591 | atomic_t stop; /* ready to stop flag */ |
592 | unsigned run_threads; /* nr current threads */ |
593 | wait_queue_head_t go; /* start crc update */ |
594 | wait_queue_head_t done; /* crc update done */ |
595 | u32 *crc32; /* points to handle's crc32 */ |
596 | size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */ |
597 | unsigned char *unc[LZO_THREADS]; /* uncompressed data */ |
598 | }; |
599 | |
600 | /* |
601 | * CRC32 update function that runs in its own thread. |
602 | */ |
603 | static int crc32_threadfn(void *data) |
604 | { |
605 | struct crc_data *d = data; |
606 | unsigned i; |
607 | |
608 | while (1) { |
609 | wait_event(d->go, atomic_read(&d->ready) || |
610 | kthread_should_stop()); |
611 | if (kthread_should_stop()) { |
612 | d->thr = NULL; |
atomic_set(&d->stop, 1);
614 | wake_up(&d->done); |
615 | break; |
616 | } |
atomic_set(&d->ready, 0);
618 | |
619 | for (i = 0; i < d->run_threads; i++) |
*d->crc32 = crc32_le(*d->crc32,
d->unc[i], *d->unc_len[i]);
atomic_set(&d->stop, 1);
623 | wake_up(&d->done); |
624 | } |
625 | return 0; |
626 | } |
627 | /* |
628 | * Structure used for LZO data compression. |
629 | */ |
630 | struct cmp_data { |
631 | struct task_struct *thr; /* thread */ |
632 | atomic_t ready; /* ready to start flag */ |
633 | atomic_t stop; /* ready to stop flag */ |
634 | int ret; /* return code */ |
635 | wait_queue_head_t go; /* start compression */ |
636 | wait_queue_head_t done; /* compression done */ |
637 | size_t unc_len; /* uncompressed length */ |
638 | size_t cmp_len; /* compressed length */ |
639 | unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ |
640 | unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ |
641 | unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */ |
642 | }; |
643 | |
644 | /* |
645 | * Compression function that runs in its own thread. |
646 | */ |
647 | static int lzo_compress_threadfn(void *data) |
648 | { |
649 | struct cmp_data *d = data; |
650 | |
651 | while (1) { |
652 | wait_event(d->go, atomic_read(&d->ready) || |
653 | kthread_should_stop()); |
654 | if (kthread_should_stop()) { |
655 | d->thr = NULL; |
656 | d->ret = -1; |
atomic_set(&d->stop, 1);
658 | wake_up(&d->done); |
659 | break; |
660 | } |
atomic_set(&d->ready, 0);
662 | |
d->ret = lzo1x_1_compress(d->unc, d->unc_len,
d->cmp + LZO_HEADER, &d->cmp_len,
d->wrk);
atomic_set(&d->stop, 1);
667 | wake_up(&d->done); |
668 | } |
669 | return 0; |
670 | } |
671 | |
672 | /** |
673 | * save_image_lzo - Save the suspend image data compressed with LZO. |
674 | * @handle: Swap map handle to use for saving the image. |
675 | * @snapshot: Image to read data from. |
676 | * @nr_to_write: Number of pages to save. |
677 | */ |
678 | static int save_image_lzo(struct swap_map_handle *handle, |
679 | struct snapshot_handle *snapshot, |
680 | unsigned int nr_to_write) |
681 | { |
682 | unsigned int m; |
683 | int ret = 0; |
684 | int nr_pages; |
685 | int err2; |
686 | struct hib_bio_batch hb; |
687 | ktime_t start; |
688 | ktime_t stop; |
689 | size_t off; |
690 | unsigned thr, run_threads, nr_threads; |
691 | unsigned char *page = NULL; |
692 | struct cmp_data *data = NULL; |
693 | struct crc_data *crc = NULL; |
694 | |
hib_init_batch(&hb);
696 | |
697 | /* |
698 | * We'll limit the number of threads for compression to limit memory |
699 | * footprint. |
700 | */ |
701 | nr_threads = num_online_cpus() - 1; |
702 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); |
703 | |
704 | page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH); |
705 | if (!page) { |
pr_err("Failed to allocate LZO page\n");
707 | ret = -ENOMEM; |
708 | goto out_clean; |
709 | } |
710 | |
711 | data = vzalloc(array_size(nr_threads, sizeof(*data))); |
712 | if (!data) { |
pr_err("Failed to allocate LZO data\n");
714 | ret = -ENOMEM; |
715 | goto out_clean; |
716 | } |
717 | |
crc = kzalloc(sizeof(*crc), GFP_KERNEL);
719 | if (!crc) { |
pr_err("Failed to allocate crc\n");
721 | ret = -ENOMEM; |
722 | goto out_clean; |
723 | } |
724 | |
725 | /* |
726 | * Start the compression threads. |
727 | */ |
728 | for (thr = 0; thr < nr_threads; thr++) { |
729 | init_waitqueue_head(&data[thr].go); |
730 | init_waitqueue_head(&data[thr].done); |
731 | |
data[thr].thr = kthread_run(lzo_compress_threadfn,
&data[thr],
"image_compress/%u", thr);
if (IS_ERR(data[thr].thr)) {
data[thr].thr = NULL;
pr_err("Cannot start compression threads\n");
738 | ret = -ENOMEM; |
739 | goto out_clean; |
740 | } |
741 | } |
742 | |
743 | /* |
744 | * Start the CRC32 thread. |
745 | */ |
746 | init_waitqueue_head(&crc->go); |
747 | init_waitqueue_head(&crc->done); |
748 | |
749 | handle->crc32 = 0; |
750 | crc->crc32 = &handle->crc32; |
751 | for (thr = 0; thr < nr_threads; thr++) { |
752 | crc->unc[thr] = data[thr].unc; |
753 | crc->unc_len[thr] = &data[thr].unc_len; |
754 | } |
755 | |
crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
if (IS_ERR(crc->thr)) {
crc->thr = NULL;
pr_err("Cannot start CRC32 thread\n");
760 | ret = -ENOMEM; |
761 | goto out_clean; |
762 | } |
763 | |
764 | /* |
765 | * Adjust the number of required free pages after all allocations have |
766 | * been done. We don't want to run out of pages when writing. |
767 | */ |
768 | handle->reqd_free_pages = reqd_free_pages(); |
769 | |
pr_info("Using %u thread(s) for compression\n", nr_threads);
pr_info("Compressing and saving image data (%u pages)...\n",
772 | nr_to_write); |
773 | m = nr_to_write / 10; |
774 | if (!m) |
775 | m = 1; |
776 | nr_pages = 0; |
777 | start = ktime_get(); |
778 | for (;;) { |
779 | for (thr = 0; thr < nr_threads; thr++) { |
780 | for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { |
ret = snapshot_read_next(snapshot);
782 | if (ret < 0) |
783 | goto out_finish; |
784 | |
785 | if (!ret) |
786 | break; |
787 | |
788 | memcpy(data[thr].unc + off, |
789 | data_of(*snapshot), PAGE_SIZE); |
790 | |
791 | if (!(nr_pages % m)) |
pr_info("Image saving progress: %3d%%\n",
793 | nr_pages / m * 10); |
794 | nr_pages++; |
795 | } |
796 | if (!off) |
797 | break; |
798 | |
799 | data[thr].unc_len = off; |
800 | |
atomic_set(&data[thr].ready, 1);
802 | wake_up(&data[thr].go); |
803 | } |
804 | |
805 | if (!thr) |
806 | break; |
807 | |
808 | crc->run_threads = thr; |
atomic_set(&crc->ready, 1);
810 | wake_up(&crc->go); |
811 | |
812 | for (run_threads = thr, thr = 0; thr < run_threads; thr++) { |
813 | wait_event(data[thr].done, |
814 | atomic_read(&data[thr].stop)); |
atomic_set(&data[thr].stop, 0);
816 | |
817 | ret = data[thr].ret; |
818 | |
819 | if (ret < 0) { |
pr_err("LZO compression failed\n");
821 | goto out_finish; |
822 | } |
823 | |
824 | if (unlikely(!data[thr].cmp_len || |
825 | data[thr].cmp_len > |
826 | lzo1x_worst_compress(data[thr].unc_len))) { |
pr_err("Invalid LZO compressed length\n");
828 | ret = -1; |
829 | goto out_finish; |
830 | } |
831 | |
832 | *(size_t *)data[thr].cmp = data[thr].cmp_len; |
833 | |
834 | /* |
835 | * Given we are writing one page at a time to disk, we |
836 | * copy that much from the buffer, although the last |
837 | * bit will likely be smaller than full page. This is |
838 | * OK - we saved the length of the compressed data, so |
839 | * any garbage at the end will be discarded when we |
840 | * read it. |
841 | */ |
842 | for (off = 0; |
843 | off < LZO_HEADER + data[thr].cmp_len; |
844 | off += PAGE_SIZE) { |
845 | memcpy(page, data[thr].cmp + off, PAGE_SIZE); |
846 | |
ret = swap_write_page(handle, page, &hb);
848 | if (ret) |
849 | goto out_finish; |
850 | } |
851 | } |
852 | |
853 | wait_event(crc->done, atomic_read(&crc->stop)); |
atomic_set(&crc->stop, 0);
855 | } |
856 | |
857 | out_finish: |
err2 = hib_wait_io(&hb);
859 | stop = ktime_get(); |
860 | if (!ret) |
861 | ret = err2; |
862 | if (!ret) |
pr_info("Image saving done\n");
swsusp_show_speed(start, stop, nr_to_write, "Wrote");
865 | out_clean: |
hib_finish_batch(&hb);
867 | if (crc) { |
868 | if (crc->thr) |
kthread_stop(crc->thr);
kfree(crc);
871 | } |
872 | if (data) { |
873 | for (thr = 0; thr < nr_threads; thr++) |
874 | if (data[thr].thr) |
kthread_stop(data[thr].thr);
vfree(data);
877 | } |
if (page)
free_page((unsigned long)page);
879 | |
880 | return ret; |
881 | } |
882 | |
883 | /** |
884 | * enough_swap - Make sure we have enough swap to save the image. |
885 | * |
886 | * Returns TRUE or FALSE after checking the total amount of swap |
887 | * space available from the resume partition. |
888 | */ |
889 | |
890 | static int enough_swap(unsigned int nr_pages) |
891 | { |
892 | unsigned int free_swap = count_swap_pages(root_swap, 1); |
893 | unsigned int required; |
894 | |
pr_debug("Free swap pages: %u\n", free_swap);
896 | |
897 | required = PAGES_FOR_IO + nr_pages; |
898 | return free_swap > required; |
899 | } |
900 | |
901 | /** |
902 | * swsusp_write - Write entire image and metadata. |
903 | * @flags: flags to pass to the "boot" kernel in the image header |
904 | * |
* It is important _NOT_ to unmount filesystems at this point. We want
* them synced (in case something goes wrong) but we DO NOT want to mark
* the filesystem clean: it is not. (And it does not matter; if we resume
* correctly, we'll mark the system clean anyway.)
909 | */ |
910 | |
911 | int swsusp_write(unsigned int flags) |
912 | { |
913 | struct swap_map_handle handle; |
914 | struct snapshot_handle snapshot; |
struct swsusp_info *header;
916 | unsigned long pages; |
917 | int error; |
918 | |
919 | pages = snapshot_get_image_size(); |
error = get_swap_writer(&handle);
if (error) {
pr_err("Cannot get swap writer\n");
923 | return error; |
924 | } |
925 | if (flags & SF_NOCOMPRESS_MODE) { |
if (!enough_swap(pages)) {
pr_err("Not enough free swap\n");
928 | error = -ENOSPC; |
929 | goto out_finish; |
930 | } |
931 | } |
932 | memset(&snapshot, 0, sizeof(struct snapshot_handle)); |
error = snapshot_read_next(&snapshot);
934 | if (error < (int)PAGE_SIZE) { |
935 | if (error >= 0) |
936 | error = -EFAULT; |
937 | |
938 | goto out_finish; |
939 | } |
940 | header = (struct swsusp_info *)data_of(snapshot); |
error = swap_write_page(&handle, header, NULL);
942 | if (!error) { |
error = (flags & SF_NOCOMPRESS_MODE) ?
save_image(&handle, &snapshot, pages - 1) :
save_image_lzo(&handle, &snapshot, pages - 1);
946 | } |
947 | out_finish: |
error = swap_writer_finish(&handle, flags, error);
949 | return error; |
950 | } |
951 | |
952 | /* |
953 | * The following functions allow us to read data using a swap map |
954 | * in a file-like way. |
955 | */ |
956 | |
957 | static void release_swap_reader(struct swap_map_handle *handle) |
958 | { |
959 | struct swap_map_page_list *tmp; |
960 | |
961 | while (handle->maps) { |
962 | if (handle->maps->map) |
963 | free_page((unsigned long)handle->maps->map); |
964 | tmp = handle->maps; |
965 | handle->maps = handle->maps->next; |
kfree(tmp);
967 | } |
968 | handle->cur = NULL; |
969 | } |
970 | |
971 | static int get_swap_reader(struct swap_map_handle *handle, |
972 | unsigned int *flags_p) |
973 | { |
974 | int error; |
975 | struct swap_map_page_list *tmp, *last; |
976 | sector_t offset; |
977 | |
978 | *flags_p = swsusp_header->flags; |
979 | |
980 | if (!swsusp_header->image) /* how can this happen? */ |
981 | return -EINVAL; |
982 | |
983 | handle->cur = NULL; |
984 | last = handle->maps = NULL; |
985 | offset = swsusp_header->image; |
986 | while (offset) { |
tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
988 | if (!tmp) { |
989 | release_swap_reader(handle); |
990 | return -ENOMEM; |
991 | } |
992 | if (!handle->maps) |
993 | handle->maps = tmp; |
994 | if (last) |
995 | last->next = tmp; |
996 | last = tmp; |
997 | |
998 | tmp->map = (struct swap_map_page *) |
999 | __get_free_page(GFP_NOIO | __GFP_HIGH); |
1000 | if (!tmp->map) { |
1001 | release_swap_reader(handle); |
1002 | return -ENOMEM; |
1003 | } |
1004 | |
error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL);
1006 | if (error) { |
1007 | release_swap_reader(handle); |
1008 | return error; |
1009 | } |
1010 | offset = tmp->map->next_swap; |
1011 | } |
1012 | handle->k = 0; |
1013 | handle->cur = handle->maps->map; |
1014 | return 0; |
1015 | } |
1016 | |
1017 | static int swap_read_page(struct swap_map_handle *handle, void *buf, |
1018 | struct hib_bio_batch *hb) |
1019 | { |
1020 | sector_t offset; |
1021 | int error; |
1022 | struct swap_map_page_list *tmp; |
1023 | |
1024 | if (!handle->cur) |
1025 | return -EINVAL; |
1026 | offset = handle->cur->entries[handle->k]; |
1027 | if (!offset) |
1028 | return -EFAULT; |
error = hib_submit_io(REQ_OP_READ, offset, buf, hb);
1030 | if (error) |
1031 | return error; |
1032 | if (++handle->k >= MAP_PAGE_ENTRIES) { |
1033 | handle->k = 0; |
1034 | free_page((unsigned long)handle->maps->map); |
1035 | tmp = handle->maps; |
1036 | handle->maps = handle->maps->next; |
kfree(tmp);
1038 | if (!handle->maps) |
1039 | release_swap_reader(handle); |
1040 | else |
1041 | handle->cur = handle->maps->map; |
1042 | } |
1043 | return error; |
1044 | } |
1045 | |
1046 | static int swap_reader_finish(struct swap_map_handle *handle) |
1047 | { |
1048 | release_swap_reader(handle); |
1049 | |
1050 | return 0; |
1051 | } |
1052 | |
1053 | /** |
1054 | * load_image - load the image using the swap map handle |
1055 | * @handle and the snapshot handle @snapshot |
* (assume there are @nr_to_read pages to load)
1057 | */ |
1058 | |
1059 | static int load_image(struct swap_map_handle *handle, |
1060 | struct snapshot_handle *snapshot, |
1061 | unsigned int nr_to_read) |
1062 | { |
1063 | unsigned int m; |
1064 | int ret = 0; |
1065 | ktime_t start; |
1066 | ktime_t stop; |
1067 | struct hib_bio_batch hb; |
1068 | int err2; |
1069 | unsigned nr_pages; |
1070 | |
hib_init_batch(&hb);
1072 | |
1073 | clean_pages_on_read = true; |
pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
1075 | m = nr_to_read / 10; |
1076 | if (!m) |
1077 | m = 1; |
1078 | nr_pages = 0; |
1079 | start = ktime_get(); |
1080 | for ( ; ; ) { |
ret = snapshot_write_next(snapshot);
1082 | if (ret <= 0) |
1083 | break; |
ret = swap_read_page(handle, data_of(*snapshot), &hb);
1085 | if (ret) |
1086 | break; |
1087 | if (snapshot->sync_read) |
ret = hib_wait_io(&hb);
1089 | if (ret) |
1090 | break; |
1091 | if (!(nr_pages % m)) |
pr_info("Image loading progress: %3d%%\n",
1093 | nr_pages / m * 10); |
1094 | nr_pages++; |
1095 | } |
err2 = hib_wait_io(&hb);
hib_finish_batch(&hb);
1098 | stop = ktime_get(); |
1099 | if (!ret) |
1100 | ret = err2; |
1101 | if (!ret) { |
pr_info("Image loading done\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
1105 | ret = -ENODATA; |
1106 | } |
swsusp_show_speed(start, stop, nr_to_read, "Read");
1108 | return ret; |
1109 | } |
1110 | |
1111 | /* |
1112 | * Structure used for LZO data decompression. |
1113 | */ |
1114 | struct dec_data { |
1115 | struct task_struct *thr; /* thread */ |
1116 | atomic_t ready; /* ready to start flag */ |
1117 | atomic_t stop; /* ready to stop flag */ |
1118 | int ret; /* return code */ |
1119 | wait_queue_head_t go; /* start decompression */ |
1120 | wait_queue_head_t done; /* decompression done */ |
1121 | size_t unc_len; /* uncompressed length */ |
1122 | size_t cmp_len; /* compressed length */ |
1123 | unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ |
1124 | unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ |
1125 | }; |
1126 | |
1127 | /* |
1128 | * Decompression function that runs in its own thread. |
1129 | */ |
1130 | static int lzo_decompress_threadfn(void *data) |
1131 | { |
1132 | struct dec_data *d = data; |
1133 | |
1134 | while (1) { |
1135 | wait_event(d->go, atomic_read(&d->ready) || |
1136 | kthread_should_stop()); |
1137 | if (kthread_should_stop()) { |
1138 | d->thr = NULL; |
1139 | d->ret = -1; |
atomic_set(&d->stop, 1);
1141 | wake_up(&d->done); |
1142 | break; |
1143 | } |
atomic_set(&d->ready, 0);
1145 | |
1146 | d->unc_len = LZO_UNC_SIZE; |
d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
d->unc, &d->unc_len);
1149 | if (clean_pages_on_decompress) |
flush_icache_range((unsigned long)d->unc,
(unsigned long)d->unc + d->unc_len);
1152 | |
atomic_set(&d->stop, 1);
1154 | wake_up(&d->done); |
1155 | } |
1156 | return 0; |
1157 | } |
1158 | |
1159 | /** |
* load_image_lzo - Load compressed image data and decompress it with LZO.
1161 | * @handle: Swap map handle to use for loading data. |
1162 | * @snapshot: Image to copy uncompressed data into. |
1163 | * @nr_to_read: Number of pages to load. |
1164 | */ |
1165 | static int load_image_lzo(struct swap_map_handle *handle, |
1166 | struct snapshot_handle *snapshot, |
1167 | unsigned int nr_to_read) |
1168 | { |
1169 | unsigned int m; |
1170 | int ret = 0; |
1171 | int eof = 0; |
1172 | struct hib_bio_batch hb; |
1173 | ktime_t start; |
1174 | ktime_t stop; |
1175 | unsigned nr_pages; |
1176 | size_t off; |
1177 | unsigned i, thr, run_threads, nr_threads; |
1178 | unsigned ring = 0, pg = 0, ring_size = 0, |
1179 | have = 0, want, need, asked = 0; |
1180 | unsigned long read_pages = 0; |
1181 | unsigned char **page = NULL; |
1182 | struct dec_data *data = NULL; |
1183 | struct crc_data *crc = NULL; |
1184 | |
hib_init_batch(&hb);
1186 | |
1187 | /* |
1188 | * We'll limit the number of threads for decompression to limit memory |
1189 | * footprint. |
1190 | */ |
1191 | nr_threads = num_online_cpus() - 1; |
1192 | nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); |
1193 | |
1194 | page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page))); |
1195 | if (!page) { |
pr_err("Failed to allocate LZO page\n");
1197 | ret = -ENOMEM; |
1198 | goto out_clean; |
1199 | } |
1200 | |
1201 | data = vzalloc(array_size(nr_threads, sizeof(*data))); |
1202 | if (!data) { |
pr_err("Failed to allocate LZO data\n");
1204 | ret = -ENOMEM; |
1205 | goto out_clean; |
1206 | } |
1207 | |
crc = kzalloc(sizeof(*crc), GFP_KERNEL);
1209 | if (!crc) { |
pr_err("Failed to allocate crc\n");
1211 | ret = -ENOMEM; |
1212 | goto out_clean; |
1213 | } |
1214 | |
1215 | clean_pages_on_decompress = true; |
1216 | |
1217 | /* |
1218 | * Start the decompression threads. |
1219 | */ |
1220 | for (thr = 0; thr < nr_threads; thr++) { |
1221 | init_waitqueue_head(&data[thr].go); |
1222 | init_waitqueue_head(&data[thr].done); |
1223 | |
data[thr].thr = kthread_run(lzo_decompress_threadfn,
&data[thr],
"image_decompress/%u", thr);
if (IS_ERR(data[thr].thr)) {
data[thr].thr = NULL;
pr_err("Cannot start decompression threads\n");
1230 | ret = -ENOMEM; |
1231 | goto out_clean; |
1232 | } |
1233 | } |
1234 | |
1235 | /* |
1236 | * Start the CRC32 thread. |
1237 | */ |
1238 | init_waitqueue_head(&crc->go); |
1239 | init_waitqueue_head(&crc->done); |
1240 | |
1241 | handle->crc32 = 0; |
1242 | crc->crc32 = &handle->crc32; |
1243 | for (thr = 0; thr < nr_threads; thr++) { |
1244 | crc->unc[thr] = data[thr].unc; |
1245 | crc->unc_len[thr] = &data[thr].unc_len; |
1246 | } |
1247 | |
crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
if (IS_ERR(crc->thr)) {
crc->thr = NULL;
pr_err("Cannot start CRC32 thread\n");
1252 | ret = -ENOMEM; |
1253 | goto out_clean; |
1254 | } |
1255 | |
1256 | /* |
1257 | * Set the number of pages for read buffering. |
1258 | * This is complete guesswork, because we'll only know the real |
1259 | * picture once prepare_image() is called, which is much later on |
1260 | * during the image load phase. We'll assume the worst case and |
1261 | * say that none of the image pages are from high memory. |
1262 | */ |
1263 | if (low_free_pages() > snapshot_get_image_size()) |
1264 | read_pages = (low_free_pages() - snapshot_get_image_size()) / 2; |
1265 | read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES); |
1266 | |
1267 | for (i = 0; i < read_pages; i++) { |
1268 | page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ? |
1269 | GFP_NOIO | __GFP_HIGH : |
1270 | GFP_NOIO | __GFP_NOWARN | |
1271 | __GFP_NORETRY); |
1272 | |
1273 | if (!page[i]) { |
1274 | if (i < LZO_CMP_PAGES) { |
1275 | ring_size = i; |
pr_err("Failed to allocate LZO pages\n");
1277 | ret = -ENOMEM; |
1278 | goto out_clean; |
1279 | } else { |
1280 | break; |
1281 | } |
1282 | } |
1283 | } |
1284 | want = ring_size = i; |
1285 | |
pr_info("Using %u thread(s) for decompression\n", nr_threads);
pr_info("Loading and decompressing image data (%u pages)...\n",
1288 | nr_to_read); |
1289 | m = nr_to_read / 10; |
1290 | if (!m) |
1291 | m = 1; |
1292 | nr_pages = 0; |
1293 | start = ktime_get(); |
1294 | |
ret = snapshot_write_next(snapshot);
1296 | if (ret <= 0) |
1297 | goto out_finish; |
1298 | |
for (;;) {
1300 | for (i = 0; !eof && i < want; i++) { |
ret = swap_read_page(handle, page[ring], &hb);
1302 | if (ret) { |
1303 | /* |
1304 | * On real read error, finish. On end of data, |
1305 | * set EOF flag and just exit the read loop. |
1306 | */ |
1307 | if (handle->cur && |
1308 | handle->cur->entries[handle->k]) { |
1309 | goto out_finish; |
1310 | } else { |
1311 | eof = 1; |
1312 | break; |
1313 | } |
1314 | } |
1315 | if (++ring >= ring_size) |
1316 | ring = 0; |
1317 | } |
1318 | asked += i; |
1319 | want -= i; |
1320 | |
1321 | /* |
1322 | * We are out of data, wait for some more. |
1323 | */ |
1324 | if (!have) { |
1325 | if (!asked) |
1326 | break; |
1327 | |
ret = hib_wait_io(&hb);
1329 | if (ret) |
1330 | goto out_finish; |
1331 | have += asked; |
1332 | asked = 0; |
1333 | if (eof) |
1334 | eof = 2; |
1335 | } |
1336 | |
1337 | if (crc->run_threads) { |
1338 | wait_event(crc->done, atomic_read(&crc->stop)); |
atomic_set(&crc->stop, 0);
1340 | crc->run_threads = 0; |
1341 | } |
1342 | |
1343 | for (thr = 0; have && thr < nr_threads; thr++) { |
1344 | data[thr].cmp_len = *(size_t *)page[pg]; |
1345 | if (unlikely(!data[thr].cmp_len || |
1346 | data[thr].cmp_len > |
1347 | lzo1x_worst_compress(LZO_UNC_SIZE))) { |
pr_err("Invalid LZO compressed length\n");
1349 | ret = -1; |
1350 | goto out_finish; |
1351 | } |
1352 | |
1353 | need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER, |
1354 | PAGE_SIZE); |
1355 | if (need > have) { |
1356 | if (eof > 1) { |
1357 | ret = -1; |
1358 | goto out_finish; |
1359 | } |
1360 | break; |
1361 | } |
1362 | |
1363 | for (off = 0; |
1364 | off < LZO_HEADER + data[thr].cmp_len; |
1365 | off += PAGE_SIZE) { |
1366 | memcpy(data[thr].cmp + off, |
1367 | page[pg], PAGE_SIZE); |
1368 | have--; |
1369 | want++; |
1370 | if (++pg >= ring_size) |
1371 | pg = 0; |
1372 | } |
1373 | |
atomic_set(&data[thr].ready, 1);
1375 | wake_up(&data[thr].go); |
1376 | } |
1377 | |
1378 | /* |
1379 | * Wait for more data while we are decompressing. |
1380 | */ |
1381 | if (have < LZO_CMP_PAGES && asked) { |
ret = hib_wait_io(&hb);
1383 | if (ret) |
1384 | goto out_finish; |
1385 | have += asked; |
1386 | asked = 0; |
1387 | if (eof) |
1388 | eof = 2; |
1389 | } |
1390 | |
1391 | for (run_threads = thr, thr = 0; thr < run_threads; thr++) { |
1392 | wait_event(data[thr].done, |
1393 | atomic_read(&data[thr].stop)); |
atomic_set(&data[thr].stop, 0);
1395 | |
1396 | ret = data[thr].ret; |
1397 | |
1398 | if (ret < 0) { |
pr_err("LZO decompression failed\n");
1400 | goto out_finish; |
1401 | } |
1402 | |
1403 | if (unlikely(!data[thr].unc_len || |
1404 | data[thr].unc_len > LZO_UNC_SIZE || |
1405 | data[thr].unc_len & (PAGE_SIZE - 1))) { |
pr_err("Invalid LZO uncompressed length\n");
1407 | ret = -1; |
1408 | goto out_finish; |
1409 | } |
1410 | |
1411 | for (off = 0; |
1412 | off < data[thr].unc_len; off += PAGE_SIZE) { |
1413 | memcpy(data_of(*snapshot), |
1414 | data[thr].unc + off, PAGE_SIZE); |
1415 | |
1416 | if (!(nr_pages % m)) |
pr_info("Image loading progress: %3d%%\n",
1418 | nr_pages / m * 10); |
1419 | nr_pages++; |
1420 | |
ret = snapshot_write_next(snapshot);
1422 | if (ret <= 0) { |
1423 | crc->run_threads = thr + 1; |
atomic_set(&crc->ready, 1);
1425 | wake_up(&crc->go); |
1426 | goto out_finish; |
1427 | } |
1428 | } |
1429 | } |
1430 | |
1431 | crc->run_threads = thr; |
atomic_set(&crc->ready, 1);
1433 | wake_up(&crc->go); |
1434 | } |
1435 | |
1436 | out_finish: |
1437 | if (crc->run_threads) { |
1438 | wait_event(crc->done, atomic_read(&crc->stop)); |
atomic_set(&crc->stop, 0);
1440 | } |
1441 | stop = ktime_get(); |
1442 | if (!ret) { |
pr_info("Image loading done\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
1446 | ret = -ENODATA; |
1447 | if (!ret) { |
1448 | if (swsusp_header->flags & SF_CRC32_MODE) { |
if (handle->crc32 != swsusp_header->crc32) {
pr_err("Invalid image CRC32!\n");
1451 | ret = -ENODATA; |
1452 | } |
1453 | } |
1454 | } |
1455 | } |
swsusp_show_speed(start, stop, nr_to_read, "Read");
1457 | out_clean: |
hib_finish_batch(&hb);
1459 | for (i = 0; i < ring_size; i++) |
1460 | free_page((unsigned long)page[i]); |
1461 | if (crc) { |
1462 | if (crc->thr) |
kthread_stop(crc->thr);
kfree(crc);
1465 | } |
1466 | if (data) { |
1467 | for (thr = 0; thr < nr_threads; thr++) |
1468 | if (data[thr].thr) |
kthread_stop(data[thr].thr);
vfree(data);
}
vfree(page);
1473 | |
1474 | return ret; |
1475 | } |
1476 | |
1477 | /** |
1478 | * swsusp_read - read the hibernation image. |
1479 | * @flags_p: flags passed by the "frozen" kernel in the image header should |
1480 | * be written into this memory location |
1481 | */ |
1482 | |
1483 | int swsusp_read(unsigned int *flags_p) |
1484 | { |
1485 | int error; |
1486 | struct swap_map_handle handle; |
1487 | struct snapshot_handle snapshot; |
struct swsusp_info *header;
1489 | |
1490 | memset(&snapshot, 0, sizeof(struct snapshot_handle)); |
error = snapshot_write_next(&snapshot);
1492 | if (error < (int)PAGE_SIZE) |
1493 | return error < 0 ? error : -EFAULT; |
1494 | header = (struct swsusp_info *)data_of(snapshot); |
error = get_swap_reader(&handle, flags_p);
1496 | if (error) |
1497 | goto end; |
1498 | if (!error) |
error = swap_read_page(&handle, header, NULL);
1500 | if (!error) { |
error = (*flags_p & SF_NOCOMPRESS_MODE) ?
load_image(&handle, &snapshot, header->pages - 1) :
load_image_lzo(&handle, &snapshot, header->pages - 1);
1504 | } |
swap_reader_finish(&handle);
1506 | end: |
1507 | if (!error) |
pr_debug("Image successfully loaded\n");
1509 | else |
pr_debug("Error %d resuming\n", error);
1511 | return error; |
1512 | } |
1513 | |
1514 | static void *swsusp_holder; |
1515 | |
1516 | /** |
1517 | * swsusp_check - Open the resume device and check for the swsusp signature. |
1518 | * @exclusive: Open the resume device exclusively. |
1519 | */ |
1520 | |
1521 | int swsusp_check(bool exclusive) |
1522 | { |
1523 | void *holder = exclusive ? &swsusp_holder : NULL; |
1524 | int error; |
1525 | |
hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
BLK_OPEN_READ, holder, NULL);
if (!IS_ERR(hib_resume_bdev_handle)) {
set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
clear_page(swsusp_header);
error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
swsusp_header, NULL);
1533 | if (error) |
1534 | goto put; |
1535 | |
if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1537 | memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); |
1538 | /* Reset swap signature now */ |
error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
swsusp_resume_block,
swsusp_header, NULL);
1542 | } else { |
1543 | error = -EINVAL; |
1544 | } |
1545 | if (!error && swsusp_header->flags & SF_HW_SIG && |
1546 | swsusp_header->hw_sig != swsusp_hardware_signature) { |
pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
1548 | swsusp_header->hw_sig, swsusp_hardware_signature); |
1549 | error = -EINVAL; |
1550 | } |
1551 | |
1552 | put: |
1553 | if (error) |
bdev_release(hib_resume_bdev_handle);
1555 | else |
pr_debug("Image signature found, resuming\n");
1557 | } else { |
error = PTR_ERR(hib_resume_bdev_handle);
1559 | } |
1560 | |
1561 | if (error) |
1562 | pr_debug("Image not found (code %d)\n" , error); |
1563 | |
1564 | return error; |
1565 | } |
1566 | |
1567 | /** |
1568 | * swsusp_close - close resume device. |
1569 | * @exclusive: Close the resume device which is exclusively opened. |
1570 | */ |
1571 | |
1572 | void swsusp_close(void) |
1573 | { |
if (IS_ERR(hib_resume_bdev_handle)) {
pr_debug("Image device not initialised\n");
1576 | return; |
1577 | } |
1578 | |
bdev_release(hib_resume_bdev_handle);
1580 | } |
1581 | |
1582 | /** |
1583 | * swsusp_unmark - Unmark swsusp signature in the resume device |
1584 | */ |
1585 | |
1586 | #ifdef CONFIG_SUSPEND |
1587 | int swsusp_unmark(void) |
1588 | { |
1589 | int error; |
1590 | |
hib_submit_io(REQ_OP_READ, swsusp_resume_block,
swsusp_header, NULL);
if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
swsusp_resume_block,
swsusp_header, NULL);
1598 | } else { |
pr_err("Cannot find swsusp signature!\n");
1600 | error = -ENODEV; |
1601 | } |
1602 | |
1603 | /* |
1604 | * We just returned from suspend, we don't need the image any more. |
1605 | */ |
free_all_swap_pages(root_swap);
1607 | |
1608 | return error; |
1609 | } |
1610 | #endif |
1611 | |
static int __init swsusp_header_init(void)
{
swsusp_header = (struct swsusp_header *) __get_free_page(GFP_KERNEL);
if (!swsusp_header)
panic("Could not allocate memory for swsusp_header\n");
1617 | return 0; |
1618 | } |
1619 | |
1620 | core_initcall(swsusp_header_init); |
1621 | |