/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/cpuhotplug.h>
#include <linux/part_stat.h>

#include "zram_drv.h"

static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);

static int zram_major;
static const char *default_compressor = CONFIG_ZRAM_DEF_COMP;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;
/*
 * Pages that compress to a size equal to or greater than this are stored
 * uncompressed in memory.
 */
static size_t huge_class_size;

static const struct block_device_operations zram_devops;

static void zram_free_page(struct zram *zram, size_t index);
static int zram_read_page(struct zram *zram, struct page *page, u32 index,
			  struct bio *parent);

static int zram_slot_trylock(struct zram *zram, u32 index)
{
	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}

static inline bool init_done(struct zram *zram)
{
	return zram->disksize;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static unsigned long zram_get_handle(struct zram *zram, u32 index)
{
	return zram->table[index].handle;
}

static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
{
	zram->table[index].handle = handle;
}

/* flag operations require table entry bit_spin_lock() being held */
static bool zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static inline void zram_set_element(struct zram *zram, u32 index,
			unsigned long element)
{
	zram->table[index].element = element;
}

static unsigned long zram_get_element(struct zram *zram, u32 index)
{
	return zram->table[index].element;
}

static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
	return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram *zram,
					u32 index, size_t size)
{
	unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;

	zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
}
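
/*
 * Illustrative note (not from the original source): the per-slot ->flags
 * word doubles as storage for the compressed object size. With a
 * hypothetical ZRAM_FLAG_SHIFT of 24, a slot holding a 513-byte object
 * would look like:
 *
 *   bits  0..23  object size (513)
 *   bits 24..    zram_pageflags bits and compression priority
 *
 * which is why zram_get_obj_size() masks with BIT(ZRAM_FLAG_SHIFT) - 1
 * and zram_set_obj_size() shifts the flags out of the way first.
 */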

static inline bool zram_allocated(struct zram *zram, u32 index)
{
	return zram_get_obj_size(zram, index) ||
			zram_test_flag(zram, index, ZRAM_SAME) ||
			zram_test_flag(zram, index, ZRAM_WB);
}

#if PAGE_SIZE != 4096
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
#define ZRAM_PARTIAL_IO		1
#else
static inline bool is_partial_io(struct bio_vec *bvec)
{
	return false;
}
#endif

static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
{
	prio &= ZRAM_COMP_PRIORITY_MASK;
	/*
	 * Clear the previous priority value first, in case we recompress
	 * an already recompressed page.
	 */
	zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
				      ZRAM_COMP_PRIORITY_BIT1);
	zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
}

static inline u32 zram_get_priority(struct zram *zram, u32 index)
{
	u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;

	return prio & ZRAM_COMP_PRIORITY_MASK;
}

static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		if (cur_max >= pages)
			return;
	} while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
					  &cur_max, pages));
}

static inline void zram_fill_page(void *ptr, unsigned long len,
					unsigned long value)
{
	WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
	memset_l(ptr, value, len / sizeof(unsigned long));
}

static bool page_same_filled(void *ptr, unsigned long *element)
{
	unsigned long *page;
	unsigned long val;
	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;

	page = (unsigned long *)ptr;
	val = page[0];

	if (val != page[last_pos])
		return false;

	for (pos = 1; pos < last_pos; pos++) {
		if (val != page[pos])
			return false;
	}

	*element = val;

	return true;
}
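
/*
 * Illustrative note (not in the original source): a page that is entirely
 * zero-filled, or filled with any single repeating unsigned long pattern
 * (e.g. 0xdeadbeefdeadbeef), is not compressed at all. Only the pattern
 * is kept in the slot's ->element and the page is reconstructed on read
 * via zram_fill_page(), so such pages consume no zsmalloc memory.
 */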

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}
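
/*
 * Example usage, a sketch based on the zram admin documentation (device
 * name and size are assumptions):
 *
 *   echo 1G > /sys/block/zram0/mem_limit   # cap zsmalloc usage at 1 GiB
 *   echo 0 > /sys/block/zram0/mem_limit    # 0 disables the limit again
 *
 * memparse() accepts the usual K/M/G suffixes.
 */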

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);

	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);

	return len;
}
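
/*
 * Example usage (illustrative; zram0 is an assumption): only "0" is a
 * valid write, and it resets the high watermark to the current usage:
 *
 *   echo 0 > /sys/block/zram0/mem_used_max
 */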

/*
 * Mark all pages which are older than or equal to cutoff as IDLE.
 * Callers should hold the zram init lock in read mode.
 */
static void mark_idle(struct zram *zram, ktime_t cutoff)
{
	int is_idle = 1;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	int index;

	for (index = 0; index < nr_pages; index++) {
		/*
		 * Do not mark ZRAM_UNDER_WB slots as ZRAM_IDLE, to close a
		 * race. See the comment in writeback_store.
		 */
		zram_slot_lock(zram, index);
		if (zram_allocated(zram, index) &&
				!zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
			is_idle = !cutoff || ktime_after(cutoff, zram->table[index].ac_time);
#endif
			if (is_idle)
				zram_set_flag(zram, index, ZRAM_IDLE);
		}
		zram_slot_unlock(zram, index);
	}
}

static ssize_t idle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	ktime_t cutoff_time = 0;
	ssize_t rv = -EINVAL;

	if (!sysfs_streq(buf, "all")) {
		/*
		 * If it did not parse as 'all', try to treat it as an integer
		 * when we have memory tracking enabled.
		 */
		u64 age_sec;

		if (IS_ENABLED(CONFIG_ZRAM_MEMORY_TRACKING) && !kstrtoull(buf, 0, &age_sec))
			cutoff_time = ktime_sub(ktime_get_boottime(),
					ns_to_ktime(age_sec * NSEC_PER_SEC));
		else
			goto out;
	}

	down_read(&zram->init_lock);
	if (!init_done(zram))
		goto out_unlock;

	/*
	 * A cutoff_time of 0 marks everything as idle, this is the
	 * "all" behavior.
	 */
	mark_idle(zram, cutoff_time);
	rv = len;

out_unlock:
	up_read(&zram->init_lock);
out:
	return rv;
}
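
/*
 * Example usage, following the zram admin documentation (device name is
 * an assumption):
 *
 *   echo all > /sys/block/zram0/idle    # mark every allocated slot idle
 *   echo 300 > /sys/block/zram0/idle    # only slots untouched for >= 300s
 *
 * The numeric form needs CONFIG_ZRAM_MEMORY_TRACKING, since it relies on
 * the per-slot ac_time stamp.
 */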

#ifdef CONFIG_ZRAM_WRITEBACK
static ssize_t writeback_limit_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->wb_limit_enable = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	bool val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->wb_limit_enable;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t writeback_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	u64 val;
	ssize_t ret = -EINVAL;

	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->bd_wb_limit = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
	ret = len;

	return ret;
}

static ssize_t writeback_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->bd_wb_limit;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}
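
/*
 * Example usage, a sketch based on the zram admin documentation (device
 * name and budget are assumptions). The limit is accounted in 4KiB units,
 * matching the bd_wb_limit decrement in writeback_store():
 *
 *   echo 1 > /sys/block/zram0/writeback_limit_enable
 *   echo 25600 > /sys/block/zram0/writeback_limit  # allow ~100 MiB
 */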

static void reset_bdev(struct zram *zram)
{
	if (!zram->backing_dev)
		return;

	bdev_release(zram->bdev_handle);
	/* hope filp_close() flushes all of the IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->bdev_handle = NULL;
	zram->disk->fops = &zram_devops;
	kvfree(zram->bitmap);
	zram->bitmap = NULL;
}

static ssize_t backing_dev_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct file *file;
	struct zram *zram = dev_to_zram(dev);
	char *p;
	ssize_t ret;

	down_read(&zram->init_lock);
	file = zram->backing_dev;
	if (!file) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
		goto out;
	}

	ret = strlen(p);
	memmove(buf, p, ret);
	buf[ret++] = '\n';
out:
	up_read(&zram->init_lock);
	return ret;
}

static ssize_t backing_dev_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	char *file_name;
	size_t sz;
	struct file *backing_dev = NULL;
	struct inode *inode;
	struct address_space *mapping;
	unsigned int bitmap_sz;
	unsigned long nr_pages, *bitmap = NULL;
	struct bdev_handle *bdev_handle = NULL;
	int err;
	struct zram *zram = dev_to_zram(dev);

	file_name = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
		err = -EBUSY;
		goto out;
	}

	strscpy(file_name, buf, PATH_MAX);
	/* ignore trailing newline */
	sz = strlen(file_name);
	if (sz > 0 && file_name[sz - 1] == '\n')
		file_name[sz - 1] = 0x00;

	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
		backing_dev = NULL;
		goto out;
	}

	mapping = backing_dev->f_mapping;
	inode = mapping->host;

	/* Only block devices are supported at the moment */
	if (!S_ISBLK(inode->i_mode)) {
		err = -ENOTBLK;
		goto out;
	}

	bdev_handle = bdev_open_by_dev(inode->i_rdev,
			BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL);
	if (IS_ERR(bdev_handle)) {
		err = PTR_ERR(bdev_handle);
		bdev_handle = NULL;
		goto out;
	}

	nr_pages = i_size_read(inode) >> PAGE_SHIFT;
	bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
	if (!bitmap) {
		err = -ENOMEM;
		goto out;
	}

	reset_bdev(zram);

	zram->bdev_handle = bdev_handle;
	zram->backing_dev = backing_dev;
	zram->bitmap = bitmap;
	zram->nr_pages = nr_pages;
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);

	return len;
out:
	kvfree(bitmap);

	if (bdev_handle)
		bdev_release(bdev_handle);

	if (backing_dev)
		filp_close(backing_dev, NULL);

	up_write(&zram->init_lock);

	kfree(file_name);

	return err;
}
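
/*
 * Example usage, per the zram admin documentation (device and partition
 * names are assumptions). The backing device must be configured before
 * disksize is set, i.e. while the device is still uninitialized:
 *
 *   echo /dev/sda5 > /sys/block/zram0/backing_dev
 *   echo 1G > /sys/block/zram0/disksize
 */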

static unsigned long alloc_block_bdev(struct zram *zram)
{
	unsigned long blk_idx = 1;
retry:
	/* skip bit 0, so a block index is never confused with zram.handle == 0 */
	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
	if (blk_idx == zram->nr_pages)
		return 0;

	if (test_and_set_bit(blk_idx, zram->bitmap))
		goto retry;

	atomic64_inc(&zram->stats.bd_count);
	return blk_idx;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
{
	int was_set;

	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
	WARN_ON_ONCE(!was_set);
	atomic64_dec(&zram->stats.bd_count);
}

static void read_from_bdev_async(struct zram *zram, struct page *page,
			unsigned long entry, struct bio *parent)
{
	struct bio *bio;

	bio = bio_alloc(zram->bdev_handle->bdev, 1, parent->bi_opf, GFP_NOIO);
	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_chain(bio, parent);
	submit_bio(bio);
}

#define PAGE_WB_SIG "page_index="

#define PAGE_WRITEBACK			0
#define HUGE_WRITEBACK			(1<<0)
#define IDLE_WRITEBACK			(1<<1)
#define INCOMPRESSIBLE_WRITEBACK	(1<<2)

static ssize_t writeback_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	unsigned long index = 0;
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	ssize_t ret = len;
	int mode, err;
	unsigned long blk_idx = 0;

	if (sysfs_streq(buf, "idle"))
		mode = IDLE_WRITEBACK;
	else if (sysfs_streq(buf, "huge"))
		mode = HUGE_WRITEBACK;
	else if (sysfs_streq(buf, "huge_idle"))
		mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
	else if (sysfs_streq(buf, "incompressible"))
		mode = INCOMPRESSIBLE_WRITEBACK;
	else {
		if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
			return -EINVAL;

		if (kstrtol(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
				index >= nr_pages)
			return -EINVAL;

		nr_pages = 1;
		mode = PAGE_WRITEBACK;
	}

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		ret = -EINVAL;
		goto release_init_lock;
	}

	if (!zram->backing_dev) {
		ret = -ENODEV;
		goto release_init_lock;
	}

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto release_init_lock;
	}

	for (; nr_pages != 0; index++, nr_pages--) {
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
			spin_unlock(&zram->wb_limit_lock);
			ret = -EIO;
			break;
		}
		spin_unlock(&zram->wb_limit_lock);

		if (!blk_idx) {
			blk_idx = alloc_block_bdev(zram);
			if (!blk_idx) {
				ret = -ENOSPC;
				break;
			}
		}

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		if (zram_test_flag(zram, index, ZRAM_WB) ||
				zram_test_flag(zram, index, ZRAM_SAME) ||
				zram_test_flag(zram, index, ZRAM_UNDER_WB))
			goto next;

		if (mode & IDLE_WRITEBACK &&
		    !zram_test_flag(zram, index, ZRAM_IDLE))
			goto next;
		if (mode & HUGE_WRITEBACK &&
		    !zram_test_flag(zram, index, ZRAM_HUGE))
			goto next;
		if (mode & INCOMPRESSIBLE_WRITEBACK &&
		    !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
			goto next;

		/*
		 * Clearing ZRAM_UNDER_WB is the caller's duty.
		 * IOW, zram_free_page() never clears it.
		 */
		zram_set_flag(zram, index, ZRAM_UNDER_WB);
		/* Needed to detect a race with hugepage writeback */
		zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
		if (zram_read_page(zram, page, index, NULL)) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			continue;
		}

		bio_init(&bio, zram->bdev_handle->bdev, &bio_vec, 1,
			 REQ_OP_WRITE | REQ_SYNC);
		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
		__bio_add_page(&bio, page, PAGE_SIZE, 0);

		/*
		 * XXX: A single page IO would be inefficient for write,
		 * but it is not a bad starting point.
		 */
		err = submit_bio_wait(&bio);
		if (err) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			zram_slot_unlock(zram, index);
			/*
			 * BIO errors are not fatal, we continue and simply
			 * attempt to writeback the remaining objects (pages).
			 * At the same time we need to signal user-space that
			 * some writes (at least one, but also could be all of
			 * them) were not successful and we do so by returning
			 * the most recent BIO error.
			 */
			ret = err;
			continue;
		}

		atomic64_inc(&zram->stats.bd_writes);
		/*
		 * We released the slot lock, so we need to check whether the
		 * slot was changed. If the slot was freed, we can catch that
		 * easily via zram_allocated. A subtle case is when the slot
		 * is freed, reallocated and marked as ZRAM_IDLE again. To
		 * close that race, idle_store doesn't mark ZRAM_IDLE once it
		 * finds the slot is ZRAM_UNDER_WB. Thus, we can close the
		 * race by checking the ZRAM_IDLE bit.
		 */
		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index) ||
			  !zram_test_flag(zram, index, ZRAM_IDLE)) {
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			goto next;
		}

		zram_free_page(zram, index);
		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
		zram_set_flag(zram, index, ZRAM_WB);
		zram_set_element(zram, index, blk_idx);
		blk_idx = 0;
		atomic64_inc(&zram->stats.pages_stored);
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
			zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
		spin_unlock(&zram->wb_limit_lock);
next:
		zram_slot_unlock(zram, index);
	}

	if (blk_idx)
		free_block_bdev(zram, blk_idx);
	__free_page(page);
release_init_lock:
	up_read(&zram->init_lock);

	return ret;
}
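
/*
 * Example usage, per the zram admin documentation (device name and page
 * index are assumptions):
 *
 *   echo idle > /sys/block/zram0/writeback            # all idle slots
 *   echo huge_idle > /sys/block/zram0/writeback       # idle huge slots
 *   echo page_index=1251 > /sys/block/zram0/writeback # one specific slot
 */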

struct zram_work {
	struct work_struct work;
	struct zram *zram;
	unsigned long entry;
	struct page *page;
	int error;
};

static void zram_sync_read(struct work_struct *work)
{
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, zw->zram->bdev_handle->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
	__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
	zw->error = submit_bio_wait(&bio);
}

/*
 * The block layer wants one ->submit_bio to be active at a time, so if we
 * use chained IO with the parent IO in the same context, we get a deadlock.
 * To avoid that, use a worker thread context.
 */
static int read_from_bdev_sync(struct zram *zram, struct page *page,
				unsigned long entry)
{
	struct zram_work work;

	work.page = page;
	work.zram = zram;
	work.entry = entry;

	INIT_WORK_ONSTACK(&work.work, zram_sync_read);
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);

	return work.error;
}

static int read_from_bdev(struct zram *zram, struct page *page,
			unsigned long entry, struct bio *parent)
{
	atomic64_inc(&zram->stats.bd_reads);
	if (!parent) {
		if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO)))
			return -EIO;
		return read_from_bdev_sync(zram, page, entry);
	}
	read_from_bdev_async(zram, page, entry, parent);
	return 0;
}
#else
static inline void reset_bdev(struct zram *zram) {};
static int read_from_bdev(struct zram *zram, struct page *page,
			unsigned long entry, struct bio *parent)
{
	return -EIO;
}

static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
#endif

#ifdef CONFIG_ZRAM_MEMORY_TRACKING

static struct dentry *zram_debugfs_root;

static void zram_debugfs_create(void)
{
	zram_debugfs_root = debugfs_create_dir("zram", NULL);
}

static void zram_debugfs_destroy(void)
{
	debugfs_remove_recursive(zram_debugfs_root);
}

static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
	zram->table[index].ac_time = ktime_get_boottime();
}

static ssize_t read_block_state(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	char *kbuf;
	ssize_t index, written = 0;
	struct zram *zram = file->private_data;
	unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
	struct timespec64 ts;

	kbuf = kvmalloc(count, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		kvfree(kbuf);
		return -EINVAL;
	}

	for (index = *ppos; index < nr_pages; index++) {
		int copied;

		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index))
			goto next;

		ts = ktime_to_timespec64(zram->table[index].ac_time);
		copied = snprintf(kbuf + written, count,
			"%12zd %12lld.%06lu %c%c%c%c%c%c\n",
			index, (s64)ts.tv_sec,
			ts.tv_nsec / NSEC_PER_USEC,
			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
			zram_get_priority(zram, index) ? 'r' : '.',
			zram_test_flag(zram, index,
				       ZRAM_INCOMPRESSIBLE) ? 'n' : '.');

		if (count <= copied) {
			zram_slot_unlock(zram, index);
			break;
		}
		written += copied;
		count -= copied;
next:
		zram_slot_unlock(zram, index);
		*ppos += 1;
	}

	up_read(&zram->init_lock);
	if (copy_to_user(buf, kbuf, written))
		written = -EFAULT;
	kvfree(kbuf);

	return written;
}
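
/*
 * Illustrative sample of the resulting debugfs output (the values are
 * invented; the format is <index> <access-time> <flags>, with the flag
 * letters s/w/h/i/r/n emitted exactly as printed above):
 *
 *   cat /sys/kernel/debug/zram/zram0/block_state
 *            300    75.033841 .wh...
 *            301    63.806904 s.....
 *            302    63.806919 ..hi..
 */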

static const struct file_operations proc_zram_block_state_op = {
	.open = simple_open,
	.read = read_block_state,
	.llseek = default_llseek,
};

static void zram_debugfs_register(struct zram *zram)
{
	if (!zram_debugfs_root)
		return;

	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
						zram_debugfs_root);
	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
				zram, &proc_zram_block_state_op);
}

static void zram_debugfs_unregister(struct zram *zram)
{
	debugfs_remove_recursive(zram->debugfs_dir);
}
#else
static void zram_debugfs_create(void) {};
static void zram_debugfs_destroy(void) {};
static void zram_accessed(struct zram *zram, u32 index)
{
	zram_clear_flag(zram, index, ZRAM_IDLE);
};
static void zram_debugfs_register(struct zram *zram) {};
static void zram_debugfs_unregister(struct zram *zram) {};
#endif

/*
 * We switched to per-cpu streams and this attr is not needed anymore.
 * However, we will keep it around for some time, because:
 * a) we may revert per-cpu streams in the future
 * b) it's visible to user space and we need to follow our 2 years
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
 */
static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	return len;
}

static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
{
	/* Do not free statically defined compression algorithms */
	if (zram->comp_algs[prio] != default_compressor)
		kfree(zram->comp_algs[prio]);

	zram->comp_algs[prio] = alg;
}

static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
{
	ssize_t sz;

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->comp_algs[prio], buf);
	up_read(&zram->init_lock);

	return sz;
}

static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
{
	char *compressor;
	size_t sz;

	sz = strlen(buf);
	if (sz >= CRYPTO_MAX_ALG_NAME)
		return -E2BIG;

	compressor = kstrdup(buf, GFP_KERNEL);
	if (!compressor)
		return -ENOMEM;

	/* ignore trailing newline */
	if (sz > 0 && compressor[sz - 1] == '\n')
		compressor[sz - 1] = 0x00;

	if (!zcomp_available_algorithm(compressor)) {
		kfree(compressor);
		return -EINVAL;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		kfree(compressor);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	comp_algorithm_set(zram, prio, compressor);
	up_write(&zram->init_lock);
	return 0;
}

static ssize_t comp_algorithm_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
}

static ssize_t comp_algorithm_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf);
	return ret ? ret : len;
}
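
/*
 * Example usage, per the zram admin documentation (device name is an
 * assumption; the available list depends on the kernel config):
 *
 *   cat /sys/block/zram0/comp_algorithm    # e.g. "lzo [lzo-rle] lz4 zstd"
 *   echo zstd > /sys/block/zram0/comp_algorithm
 *
 * The algorithm can only be changed while the device is uninitialized.
 */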

#ifdef CONFIG_ZRAM_MULTI_COMP
static ssize_t recomp_algorithm_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t sz = 0;
	u32 prio;

	for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
		if (!zram->comp_algs[prio])
			continue;

		sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio);
		sz += __comp_algorithm_show(zram, prio, buf + sz);
	}

	return sz;
}

static ssize_t recomp_algorithm_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	int prio = ZRAM_SECONDARY_COMP;
	char *args, *param, *val;
	char *alg = NULL;
	int ret;

	args = skip_spaces(buf);
	while (*args) {
		args = next_arg(args, &param, &val);

		if (!val || !*val)
			return -EINVAL;

		if (!strcmp(param, "algo")) {
			alg = val;
			continue;
		}

		if (!strcmp(param, "priority")) {
			ret = kstrtoint(val, 10, &prio);
			if (ret)
				return ret;
			continue;
		}
	}

	if (!alg)
		return -EINVAL;

	if (prio < ZRAM_SECONDARY_COMP || prio >= ZRAM_MAX_COMPS)
		return -EINVAL;

	ret = __comp_algorithm_store(zram, prio, alg);
	return ret ? ret : len;
}
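
/*
 * Example usage, per the zram admin documentation (device name and
 * algorithm choice are assumptions):
 *
 *   echo "algo=zstd priority=1" > /sys/block/zram0/recomp_algorithm
 *
 * registers zstd as the secondary (priority 1) algorithm used by the
 * recompress knob below.
 */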
#endif

static ssize_t compact_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);

	return len;
}

static ssize_t io_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu 0 %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);

	return ret;
}

static ssize_t mm_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	struct zs_pool_stats pool_stats;
	u64 orig_size, mem_used = 0;
	long max_used;
	ssize_t ret;

	memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			atomic_long_read(&pool_stats.pages_compacted),
			(u64)atomic64_read(&zram->stats.huge_pages),
			(u64)atomic64_read(&zram->stats.huge_pages_since));
	up_read(&zram->init_lock);

	return ret;
}
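
/*
 * Column legend for mm_stat, matching the format string above (see the
 * zram admin documentation): orig_data_size compr_data_size mem_used_total
 * mem_limit mem_used_max same_pages pages_compacted huge_pages
 * huge_pages_since.
 */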

#ifdef CONFIG_ZRAM_WRITEBACK
#define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12)))
static ssize_t bd_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
		"%8llu %8llu %8llu\n",
			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
	up_read(&zram->init_lock);

	return ret;
}
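
/*
 * Column legend for bd_stat (per the zram admin documentation): bd_count
 * bd_reads bd_writes, all reported in units of 4KiB pages via FOUR_K().
 */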
#endif

static ssize_t debug_stat_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int version = 1;
	struct zram *zram = dev_to_zram(dev);
	ssize_t ret;

	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu %8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall),
			(u64)atomic64_read(&zram->stats.miss_free));
	up_read(&zram->init_lock);

	return ret;
}

static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RO(bd_stat);
#endif
static DEVICE_ATTR_RO(debug_stat);

static void zram_meta_free(struct zram *zram, u64 disksize)
{
	size_t num_pages = disksize >> PAGE_SHIFT;
	size_t index;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < num_pages; index++)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
}

static bool zram_meta_alloc(struct zram *zram, u64 disksize)
{
	size_t num_pages;

	num_pages = disksize >> PAGE_SHIFT;
	zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
	if (!zram->table)
		return false;

	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	if (!huge_class_size)
		huge_class_size = zs_huge_class_size(zram->mem_pool);
	return true;
}

/*
 * To protect concurrent access to the same index entry, the caller
 * should hold this table index entry's bit_spinlock, to indicate that
 * this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle;

#ifdef CONFIG_ZRAM_MEMORY_TRACKING
	zram->table[index].ac_time = 0;
#endif
	if (zram_test_flag(zram, index, ZRAM_IDLE))
		zram_clear_flag(zram, index, ZRAM_IDLE);

	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
		zram_clear_flag(zram, index, ZRAM_HUGE);
		atomic64_dec(&zram->stats.huge_pages);
	}

	if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
		zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);

	zram_set_priority(zram, index, 0);

	if (zram_test_flag(zram, index, ZRAM_WB)) {
		zram_clear_flag(zram, index, ZRAM_WB);
		free_block_bdev(zram, zram_get_element(zram, index));
		goto out;
	}

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		atomic64_dec(&zram->stats.same_pages);
		goto out;
	}

	handle = zram_get_handle(zram, index);
	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
		     &zram->stats.compr_data_size);
out:
	atomic64_dec(&zram->stats.pages_stored);
	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
	WARN_ON_ONCE(zram->table[index].flags &
		~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB));
}

/*
 * Reads (decompresses if needed) a page from zspool (zsmalloc).
 * Corresponding ZRAM slot should be locked.
 */
static int zram_read_from_zspool(struct zram *zram, struct page *page,
				 u32 index)
{
	struct zcomp_strm *zstrm;
	unsigned long handle;
	unsigned int size;
	void *src, *dst;
	u32 prio;
	int ret;

	handle = zram_get_handle(zram, index);
	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
		unsigned long value;
		void *mem;

		value = handle ? zram_get_element(zram, index) : 0;
		mem = kmap_atomic(page);
		zram_fill_page(mem, PAGE_SIZE, value);
		kunmap_atomic(mem);
		return 0;
	}

	size = zram_get_obj_size(zram, index);

	if (size != PAGE_SIZE) {
		prio = zram_get_priority(zram, index);
		zstrm = zcomp_stream_get(zram->comps[prio]);
	}

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		kunmap_atomic(dst);
		ret = 0;
	} else {
		dst = kmap_atomic(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_atomic(dst);
		zcomp_stream_put(zram->comps[prio]);
	}
	zs_unmap_object(zram->mem_pool, handle);
	return ret;
}

static int zram_read_page(struct zram *zram, struct page *page, u32 index,
			  struct bio *parent)
{
	int ret;

	zram_slot_lock(zram, index);
	if (!zram_test_flag(zram, index, ZRAM_WB)) {
		/* The slot should be locked throughout the function call */
		ret = zram_read_from_zspool(zram, page, index);
		zram_slot_unlock(zram, index);
	} else {
		/*
		 * The slot should be unlocked before reading from the backing
		 * device.
		 */
		zram_slot_unlock(zram, index);

		ret = read_from_bdev(zram, page, zram_get_element(zram, index),
				     parent);
	}

	/* Should NEVER happen. Return bio error if it does. */
	if (WARN_ON(ret < 0))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);

	return ret;
}

/*
 * Use a temporary buffer to decompress the page, as the decompressor
 * always expects a full page for the output.
 */
static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
				  u32 index, int offset)
{
	struct page *page = alloc_page(GFP_NOIO);
	int ret;

	if (!page)
		return -ENOMEM;
	ret = zram_read_page(zram, page, index, NULL);
	if (likely(!ret))
		memcpy_to_bvec(bvec, page_address(page) + offset);
	__free_page(page);
	return ret;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	if (is_partial_io(bvec))
		return zram_bvec_read_partial(zram, bvec, index, offset);
	return zram_read_page(zram, bvec->bv_page, index, bio);
}

static int zram_write_page(struct zram *zram, struct page *page, u32 index)
{
	int ret = 0;
	unsigned long alloced_pages;
	unsigned long handle = -ENOMEM;
	unsigned int comp_len = 0;
	void *src, *dst, *mem;
	struct zcomp_strm *zstrm;
	unsigned long element = 0;
	enum zram_pageflags flags = 0;

	mem = kmap_atomic(page);
	if (page_same_filled(mem, &element)) {
		kunmap_atomic(mem);
		/* Free memory associated with this sector now. */
		flags = ZRAM_SAME;
		atomic64_inc(&zram->stats.same_pages);
		goto out;
	}
	kunmap_atomic(mem);

compress_again:
	zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
	src = kmap_atomic(page);
	ret = zcomp_compress(zstrm, src, &comp_len);
	kunmap_atomic(src);

	if (unlikely(ret)) {
		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
		pr_err("Compression failed! err=%d\n", ret);
		zs_free(zram->mem_pool, handle);
		return ret;
	}

	if (comp_len >= huge_class_size)
		comp_len = PAGE_SIZE;
	/*
	 * handle allocation has 2 paths:
	 * a) fast path is executed with preemption disabled (for
	 *    per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
	 *    since we can't sleep;
	 * b) slow path enables preemption and attempts to allocate
	 *    the page with __GFP_DIRECT_RECLAIM bit set. we have to
	 *    put per-cpu compression stream and, thus, to re-do
	 *    the compression once handle is allocated.
	 *
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (IS_ERR_VALUE(handle))
		handle = zs_malloc(zram->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (IS_ERR_VALUE(handle)) {
		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		if (IS_ERR_VALUE(handle))
			return PTR_ERR((void *)handle);

		if (comp_len != PAGE_SIZE)
			goto compress_again;
		/*
		 * If the page is not compressible, we need to re-acquire
		 * the stream and execute the code below. The
		 * zcomp_stream_get() call is needed to disable CPU hotplug
		 * and to grab the zstrm buffer back: it makes sure that the
		 * dereference of zstrm below is valid.
		 */
		zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	src = zstrm->buffer;
	if (comp_len == PAGE_SIZE)
		src = kmap_atomic(page);
	memcpy(dst, src, comp_len);
	if (comp_len == PAGE_SIZE)
		kunmap_atomic(src);

	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
	zs_unmap_object(zram->mem_pool, handle);
	atomic64_add(comp_len, &zram->stats.compr_data_size);
out:
	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);

	if (comp_len == PAGE_SIZE) {
		zram_set_flag(zram, index, ZRAM_HUGE);
		atomic64_inc(&zram->stats.huge_pages);
		atomic64_inc(&zram->stats.huge_pages_since);
	}

	if (flags) {
		zram_set_flag(zram, index, flags);
		zram_set_element(zram, index, element);
	} else {
		zram_set_handle(zram, index, handle);
		zram_set_obj_size(zram, index, comp_len);
	}
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_inc(&zram->stats.pages_stored);
	return ret;
}

/*
 * This is a partial IO. Read the full page before writing the changes.
 */
static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
				   u32 index, int offset, struct bio *bio)
{
	struct page *page = alloc_page(GFP_NOIO);
	int ret;

	if (!page)
		return -ENOMEM;

	ret = zram_read_page(zram, page, index, bio);
	if (!ret) {
		memcpy_from_bvec(page_address(page) + offset, bvec);
		ret = zram_write_page(zram, page, index);
	}
	__free_page(page);
	return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
			   u32 index, int offset, struct bio *bio)
{
	if (is_partial_io(bvec))
		return zram_bvec_write_partial(zram, bvec, index, offset, bio);
	return zram_write_page(zram, bvec->bv_page, index);
}

#ifdef CONFIG_ZRAM_MULTI_COMP
/*
 * This function will decompress (unless it's ZRAM_HUGE) the page and then
 * attempt to compress it using the provided compression algorithm priority
 * (which is potentially more effective).
 *
 * Corresponding ZRAM slot should be locked.
 */
static int zram_recompress(struct zram *zram, u32 index, struct page *page,
			   u32 threshold, u32 prio, u32 prio_max)
{
	struct zcomp_strm *zstrm = NULL;
	unsigned long handle_old;
	unsigned long handle_new;
	unsigned int comp_len_old;
	unsigned int comp_len_new;
	unsigned int class_index_old;
	unsigned int class_index_new;
	u32 num_recomps = 0;
	void *src, *dst;
	int ret;

	handle_old = zram_get_handle(zram, index);
	if (!handle_old)
		return -EINVAL;

	comp_len_old = zram_get_obj_size(zram, index);
	/*
	 * Do not recompress objects that are already "small enough".
	 */
	if (comp_len_old < threshold)
		return 0;

	ret = zram_read_from_zspool(zram, page, index);
	if (ret)
		return ret;

	class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
	/*
	 * Iterate the secondary comp algorithms list (in order of priority)
	 * and try to recompress the page.
	 */
	for (; prio < prio_max; prio++) {
		if (!zram->comps[prio])
			continue;

		/*
		 * Skip if the object is already re-compressed with a higher
		 * priority algorithm (or same algorithm).
		 */
		if (prio <= zram_get_priority(zram, index))
			continue;

		num_recomps++;
		zstrm = zcomp_stream_get(zram->comps[prio]);
		src = kmap_atomic(page);
		ret = zcomp_compress(zstrm, src, &comp_len_new);
		kunmap_atomic(src);

		if (ret) {
			zcomp_stream_put(zram->comps[prio]);
			return ret;
		}

		class_index_new = zs_lookup_class_index(zram->mem_pool,
							comp_len_new);

		/* Continue until we make progress */
		if (class_index_new >= class_index_old ||
		    (threshold && comp_len_new >= threshold)) {
			zcomp_stream_put(zram->comps[prio]);
			continue;
		}

		/* Recompression was successful so break out */
		break;
	}

	/*
	 * We did not try to recompress, e.g. when we have only one
	 * secondary algorithm and the page is already recompressed
	 * using that algorithm.
	 */
	if (!zstrm)
		return 0;

	if (class_index_new >= class_index_old) {
		/*
		 * Secondary algorithms failed to re-compress the page
		 * in a way that would save memory, mark the object as
		 * incompressible so that we will not try to compress
		 * it again.
		 *
		 * We need to make sure that all secondary algorithms have
		 * failed, so we test if the number of recompressions matches
		 * the number of active secondary algorithms.
		 */
		if (num_recomps == zram->num_active_comps - 1)
			zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
		return 0;
	}

	/* Successful recompression but above threshold */
	if (threshold && comp_len_new >= threshold)
		return 0;

	/*
	 * No direct reclaim (slow path) for handle allocation and no
	 * re-compression attempt (unlike in zram_write_page()) since
	 * we already have stored that object in zsmalloc. If we cannot
	 * alloc memory for recompressed object then we bail out and
	 * simply keep the old (existing) object in zsmalloc.
	 */
	handle_new = zs_malloc(zram->mem_pool, comp_len_new,
			       __GFP_KSWAPD_RECLAIM |
			       __GFP_NOWARN |
			       __GFP_HIGHMEM |
			       __GFP_MOVABLE);
	if (IS_ERR_VALUE(handle_new)) {
		zcomp_stream_put(zram->comps[prio]);
		return PTR_ERR((void *)handle_new);
	}

	dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
	memcpy(dst, zstrm->buffer, comp_len_new);
	zcomp_stream_put(zram->comps[prio]);

	zs_unmap_object(zram->mem_pool, handle_new);

	zram_free_page(zram, index);
	zram_set_handle(zram, index, handle_new);
	zram_set_obj_size(zram, index, comp_len_new);
	zram_set_priority(zram, index, prio);

	atomic64_add(comp_len_new, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);

	return 0;
}
1702 | |
1703 | #define RECOMPRESS_IDLE (1 << 0) |
1704 | #define RECOMPRESS_HUGE (1 << 1) |
1705 | |
1706 | static ssize_t recompress_store(struct device *dev, |
1707 | struct device_attribute *attr, |
1708 | const char *buf, size_t len) |
1709 | { |
1710 | u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS; |
1711 | struct zram *zram = dev_to_zram(dev); |
1712 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; |
1713 | char *args, *param, *val, *algo = NULL; |
1714 | u32 mode = 0, threshold = 0; |
1715 | unsigned long index; |
1716 | struct page *page; |
1717 | ssize_t ret; |
1718 | |
1719 | args = skip_spaces(buf); |
1720 | while (*args) { |
1721 | args = next_arg(args, param: ¶m, val: &val); |
1722 | |
1723 | if (!val || !*val) |
1724 | return -EINVAL; |
1725 | |
1726 | if (!strcmp(param, "type" )) { |
1727 | if (!strcmp(val, "idle" )) |
1728 | mode = RECOMPRESS_IDLE; |
1729 | if (!strcmp(val, "huge" )) |
1730 | mode = RECOMPRESS_HUGE; |
1731 | if (!strcmp(val, "huge_idle" )) |
1732 | mode = RECOMPRESS_IDLE | RECOMPRESS_HUGE; |
1733 | continue; |
1734 | } |
1735 | |
1736 | if (!strcmp(param, "threshold" )) { |
1737 | /* |
1738 | * We will re-compress only idle objects equal or |
1739 | * greater in size than watermark. |
1740 | */ |
1741 | ret = kstrtouint(s: val, base: 10, res: &threshold); |
1742 | if (ret) |
1743 | return ret; |
1744 | continue; |
1745 | } |
1746 | |
1747 | if (!strcmp(param, "algo" )) { |
1748 | algo = val; |
1749 | continue; |
1750 | } |
1751 | } |
1752 | |
1753 | if (threshold >= huge_class_size) |
1754 | return -EINVAL; |
1755 | |
down_read(&zram->init_lock);
1757 | if (!init_done(zram)) { |
1758 | ret = -EINVAL; |
1759 | goto release_init_lock; |
1760 | } |
1761 | |
1762 | if (algo) { |
1763 | bool found = false; |
1764 | |
1765 | for (; prio < ZRAM_MAX_COMPS; prio++) { |
1766 | if (!zram->comp_algs[prio]) |
1767 | continue; |
1768 | |
1769 | if (!strcmp(zram->comp_algs[prio], algo)) { |
1770 | prio_max = min(prio + 1, ZRAM_MAX_COMPS); |
1771 | found = true; |
1772 | break; |
1773 | } |
1774 | } |
1775 | |
1776 | if (!found) { |
1777 | ret = -EINVAL; |
1778 | goto release_init_lock; |
1779 | } |
1780 | } |
1781 | |
1782 | page = alloc_page(GFP_KERNEL); |
1783 | if (!page) { |
1784 | ret = -ENOMEM; |
1785 | goto release_init_lock; |
1786 | } |
1787 | |
1788 | ret = len; |
1789 | for (index = 0; index < nr_pages; index++) { |
1790 | int err = 0; |
1791 | |
1792 | zram_slot_lock(zram, index); |
1793 | |
1794 | if (!zram_allocated(zram, index)) |
1795 | goto next; |
1796 | |
1797 | if (mode & RECOMPRESS_IDLE && |
1798 | !zram_test_flag(zram, index, flag: ZRAM_IDLE)) |
1799 | goto next; |
1800 | |
1801 | if (mode & RECOMPRESS_HUGE && |
1802 | !zram_test_flag(zram, index, flag: ZRAM_HUGE)) |
1803 | goto next; |
1804 | |
1805 | if (zram_test_flag(zram, index, flag: ZRAM_WB) || |
1806 | zram_test_flag(zram, index, flag: ZRAM_UNDER_WB) || |
1807 | zram_test_flag(zram, index, flag: ZRAM_SAME) || |
1808 | zram_test_flag(zram, index, flag: ZRAM_INCOMPRESSIBLE)) |
1809 | goto next; |
1810 | |
1811 | err = zram_recompress(zram, index, page, threshold, |
1812 | prio, prio_max); |
1813 | next: |
1814 | zram_slot_unlock(zram, index); |
1815 | if (err) { |
1816 | ret = err; |
1817 | break; |
1818 | } |
1819 | |
1820 | cond_resched(); |
1821 | } |
1822 | |
1823 | __free_page(page); |
1824 | |
1825 | release_init_lock: |
up_read(&zram->init_lock);
1827 | return ret; |
1828 | } |
1829 | #endif |
1830 | |
1831 | static void zram_bio_discard(struct zram *zram, struct bio *bio) |
1832 | { |
1833 | size_t n = bio->bi_iter.bi_size; |
1834 | u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
1835 | u32 offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) << |
1836 | SECTOR_SHIFT; |
1837 | |
1838 | /* |
1839 | * zram manages data in physical block size units. Because logical block |
1840 | * size isn't identical with physical block size on some arch, we |
1841 | * could get a discard request pointing to a specific offset within a |
1842 | * certain physical block. Although we can handle this request by |
1843 | * reading that physiclal block and decompressing and partially zeroing |
1844 | * and re-compressing and then re-storing it, this isn't reasonable |
1845 | * because our intent with a discard request is to save memory. So |
1846 | * skipping this logical block is appropriate here. |
1847 | */ |
1848 | if (offset) { |
1849 | if (n <= (PAGE_SIZE - offset)) |
1850 | return; |
1851 | |
1852 | n -= (PAGE_SIZE - offset); |
1853 | index++; |
1854 | } |
1855 | |
1856 | while (n >= PAGE_SIZE) { |
1857 | zram_slot_lock(zram, index); |
1858 | zram_free_page(zram, index); |
1859 | zram_slot_unlock(zram, index); |
1860 | atomic64_inc(v: &zram->stats.notify_free); |
1861 | index++; |
1862 | n -= PAGE_SIZE; |
1863 | } |
1864 | |
1865 | bio_endio(bio); |
1866 | } |
1867 | |
1868 | static void zram_bio_read(struct zram *zram, struct bio *bio) |
1869 | { |
1870 | unsigned long start_time = bio_start_io_acct(bio); |
1871 | struct bvec_iter iter = bio->bi_iter; |
1872 | |
1873 | do { |
1874 | u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
1875 | u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) << |
1876 | SECTOR_SHIFT; |
1877 | struct bio_vec bv = bio_iter_iovec(bio, iter); |
1878 | |
1879 | bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset); |
1880 | |
if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_reads);
bio->bi_status = BLK_STS_IOERR;
break;
}
flush_dcache_page(bv.bv_page);
1887 | |
1888 | zram_slot_lock(zram, index); |
1889 | zram_accessed(zram, index); |
1890 | zram_slot_unlock(zram, index); |
1891 | |
bio_advance_iter_single(bio, &iter, bv.bv_len);
1893 | } while (iter.bi_size); |
1894 | |
1895 | bio_end_io_acct(bio, start_time); |
1896 | bio_endio(bio); |
1897 | } |
1898 | |
1899 | static void zram_bio_write(struct zram *zram, struct bio *bio) |
1900 | { |
1901 | unsigned long start_time = bio_start_io_acct(bio); |
1902 | struct bvec_iter iter = bio->bi_iter; |
1903 | |
1904 | do { |
1905 | u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT; |
1906 | u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) << |
1907 | SECTOR_SHIFT; |
1908 | struct bio_vec bv = bio_iter_iovec(bio, iter); |
1909 | |
1910 | bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset); |
1911 | |
if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
atomic64_inc(&zram->stats.failed_writes);
1914 | bio->bi_status = BLK_STS_IOERR; |
1915 | break; |
1916 | } |
1917 | |
1918 | zram_slot_lock(zram, index); |
1919 | zram_accessed(zram, index); |
1920 | zram_slot_unlock(zram, index); |
1921 | |
bio_advance_iter_single(bio, &iter, bv.bv_len);
1923 | } while (iter.bi_size); |
1924 | |
1925 | bio_end_io_acct(bio, start_time); |
1926 | bio_endio(bio); |
1927 | } |
1928 | |
1929 | /* |
1930 | * Handler function for all zram I/O requests. |
1931 | */ |
1932 | static void zram_submit_bio(struct bio *bio) |
1933 | { |
1934 | struct zram *zram = bio->bi_bdev->bd_disk->private_data; |
1935 | |
1936 | switch (bio_op(bio)) { |
1937 | case REQ_OP_READ: |
1938 | zram_bio_read(zram, bio); |
1939 | break; |
1940 | case REQ_OP_WRITE: |
1941 | zram_bio_write(zram, bio); |
1942 | break; |
1943 | case REQ_OP_DISCARD: |
1944 | case REQ_OP_WRITE_ZEROES: |
1945 | zram_bio_discard(zram, bio); |
1946 | break; |
1947 | default: |
1948 | WARN_ON_ONCE(1); |
1949 | bio_endio(bio); |
1950 | } |
1951 | } |
1952 | |
1953 | static void zram_slot_free_notify(struct block_device *bdev, |
1954 | unsigned long index) |
1955 | { |
1956 | struct zram *zram; |
1957 | |
1958 | zram = bdev->bd_disk->private_data; |
1959 | |
1960 | atomic64_inc(v: &zram->stats.notify_free); |
1961 | if (!zram_slot_trylock(zram, index)) { |
1962 | atomic64_inc(v: &zram->stats.miss_free); |
1963 | return; |
1964 | } |
1965 | |
1966 | zram_free_page(zram, index); |
1967 | zram_slot_unlock(zram, index); |
1968 | } |
1969 | |
1970 | static void zram_destroy_comps(struct zram *zram) |
1971 | { |
1972 | u32 prio; |
1973 | |
1974 | for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) { |
1975 | struct zcomp *comp = zram->comps[prio]; |
1976 | |
1977 | zram->comps[prio] = NULL; |
1978 | if (!comp) |
1979 | continue; |
1980 | zcomp_destroy(comp); |
1981 | zram->num_active_comps--; |
1982 | } |
1983 | } |
1984 | |
1985 | static void zram_reset_device(struct zram *zram) |
1986 | { |
down_write(&zram->init_lock);

zram->limit_pages = 0;

if (!init_done(zram)) {
up_write(&zram->init_lock);
return;
}

set_capacity_and_notify(zram->disk, 0);
part_stat_set_all(zram->disk->part0, 0);

/* All pending I/O on all CPUs has completed, so it's safe to free */
zram_meta_free(zram, zram->disksize);
zram->disksize = 0;
zram_destroy_comps(zram);
memset(&zram->stats, 0, sizeof(zram->stats));
reset_bdev(zram);

comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
up_write(&zram->init_lock);
2008 | } |
2009 | |
2010 | static ssize_t disksize_store(struct device *dev, |
2011 | struct device_attribute *attr, const char *buf, size_t len) |
2012 | { |
2013 | u64 disksize; |
2014 | struct zcomp *comp; |
2015 | struct zram *zram = dev_to_zram(dev); |
2016 | int err; |
2017 | u32 prio; |
2018 | |
disksize = memparse(buf, NULL);
2020 | if (!disksize) |
2021 | return -EINVAL; |
2022 | |
down_write(&zram->init_lock);
if (init_done(zram)) {
pr_info("Cannot change disksize for initialized device\n");
2026 | err = -EBUSY; |
2027 | goto out_unlock; |
2028 | } |
2029 | |
2030 | disksize = PAGE_ALIGN(disksize); |
2031 | if (!zram_meta_alloc(zram, disksize)) { |
2032 | err = -ENOMEM; |
2033 | goto out_unlock; |
2034 | } |
2035 | |
2036 | for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) { |
2037 | if (!zram->comp_algs[prio]) |
2038 | continue; |
2039 | |
comp = zcomp_create(zram->comp_algs[prio]);
if (IS_ERR(comp)) {
pr_err("Cannot initialise %s compressing backend\n",
zram->comp_algs[prio]);
err = PTR_ERR(comp);
2045 | goto out_free_comps; |
2046 | } |
2047 | |
2048 | zram->comps[prio] = comp; |
2049 | zram->num_active_comps++; |
2050 | } |
2051 | zram->disksize = disksize; |
set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
up_write(&zram->init_lock);
2054 | |
2055 | return len; |
2056 | |
2057 | out_free_comps: |
2058 | zram_destroy_comps(zram); |
2059 | zram_meta_free(zram, disksize); |
2060 | out_unlock: |
up_write(&zram->init_lock);
2062 | return err; |
2063 | } |
2064 | |
2065 | static ssize_t reset_store(struct device *dev, |
2066 | struct device_attribute *attr, const char *buf, size_t len) |
2067 | { |
2068 | int ret; |
2069 | unsigned short do_reset; |
2070 | struct zram *zram; |
2071 | struct gendisk *disk; |
2072 | |
ret = kstrtou16(buf, 10, &do_reset);
2074 | if (ret) |
2075 | return ret; |
2076 | |
2077 | if (!do_reset) |
2078 | return -EINVAL; |
2079 | |
2080 | zram = dev_to_zram(dev); |
2081 | disk = zram->disk; |
2082 | |
2083 | mutex_lock(&disk->open_mutex); |
2084 | /* Do not reset an active device or claimed device */ |
2085 | if (disk_openers(disk) || zram->claim) { |
mutex_unlock(&disk->open_mutex);
2087 | return -EBUSY; |
2088 | } |
2089 | |
/* From now on, no one can open /dev/zram[0-9] */
zram->claim = true;
mutex_unlock(&disk->open_mutex);
2093 | |
/* Make sure all pending I/O is finished */
sync_blockdev(disk->part0);
2096 | zram_reset_device(zram); |
2097 | |
2098 | mutex_lock(&disk->open_mutex); |
2099 | zram->claim = false; |
mutex_unlock(&disk->open_mutex);
2101 | |
2102 | return len; |
2103 | } |
2104 | |
2105 | static int zram_open(struct gendisk *disk, blk_mode_t mode) |
2106 | { |
2107 | struct zram *zram = disk->private_data; |
2108 | |
2109 | WARN_ON(!mutex_is_locked(&disk->open_mutex)); |
2110 | |
2111 | /* zram was claimed to reset so open request fails */ |
2112 | if (zram->claim) |
2113 | return -EBUSY; |
2114 | return 0; |
2115 | } |
2116 | |
2117 | static const struct block_device_operations zram_devops = { |
2118 | .open = zram_open, |
2119 | .submit_bio = zram_submit_bio, |
2120 | .swap_slot_free_notify = zram_slot_free_notify, |
2121 | .owner = THIS_MODULE |
2122 | }; |
2123 | |
2124 | static DEVICE_ATTR_WO(compact); |
2125 | static DEVICE_ATTR_RW(disksize); |
2126 | static DEVICE_ATTR_RO(initstate); |
2127 | static DEVICE_ATTR_WO(reset); |
2128 | static DEVICE_ATTR_WO(mem_limit); |
2129 | static DEVICE_ATTR_WO(mem_used_max); |
2130 | static DEVICE_ATTR_WO(idle); |
2131 | static DEVICE_ATTR_RW(max_comp_streams); |
2132 | static DEVICE_ATTR_RW(comp_algorithm); |
2133 | #ifdef CONFIG_ZRAM_WRITEBACK |
2134 | static DEVICE_ATTR_RW(backing_dev); |
2135 | static DEVICE_ATTR_WO(writeback); |
2136 | static DEVICE_ATTR_RW(writeback_limit); |
2137 | static DEVICE_ATTR_RW(writeback_limit_enable); |
2138 | #endif |
2139 | #ifdef CONFIG_ZRAM_MULTI_COMP |
2140 | static DEVICE_ATTR_RW(recomp_algorithm); |
2141 | static DEVICE_ATTR_WO(recompress); |
2142 | #endif |
2143 | |
2144 | static struct attribute *zram_disk_attrs[] = { |
2145 | &dev_attr_disksize.attr, |
2146 | &dev_attr_initstate.attr, |
2147 | &dev_attr_reset.attr, |
2148 | &dev_attr_compact.attr, |
2149 | &dev_attr_mem_limit.attr, |
2150 | &dev_attr_mem_used_max.attr, |
2151 | &dev_attr_idle.attr, |
2152 | &dev_attr_max_comp_streams.attr, |
2153 | &dev_attr_comp_algorithm.attr, |
2154 | #ifdef CONFIG_ZRAM_WRITEBACK |
2155 | &dev_attr_backing_dev.attr, |
2156 | &dev_attr_writeback.attr, |
2157 | &dev_attr_writeback_limit.attr, |
2158 | &dev_attr_writeback_limit_enable.attr, |
2159 | #endif |
2160 | &dev_attr_io_stat.attr, |
2161 | &dev_attr_mm_stat.attr, |
2162 | #ifdef CONFIG_ZRAM_WRITEBACK |
2163 | &dev_attr_bd_stat.attr, |
2164 | #endif |
2165 | &dev_attr_debug_stat.attr, |
2166 | #ifdef CONFIG_ZRAM_MULTI_COMP |
2167 | &dev_attr_recomp_algorithm.attr, |
2168 | &dev_attr_recompress.attr, |
2169 | #endif |
2170 | NULL, |
2171 | }; |
2172 | |
2173 | ATTRIBUTE_GROUPS(zram_disk); |
2174 | |
2175 | /* |
2176 | * Allocate and initialize new zram device. the function returns |
2177 | * '>= 0' device_id upon success, and negative value otherwise. |
2178 | */ |
2179 | static int zram_add(void) |
2180 | { |
2181 | struct zram *zram; |
2182 | int ret, device_id; |
2183 | |
zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
2185 | if (!zram) |
2186 | return -ENOMEM; |
2187 | |
2188 | ret = idr_alloc(&zram_index_idr, ptr: zram, start: 0, end: 0, GFP_KERNEL); |
2189 | if (ret < 0) |
2190 | goto out_free_dev; |
2191 | device_id = ret; |
2192 | |
2193 | init_rwsem(&zram->init_lock); |
2194 | #ifdef CONFIG_ZRAM_WRITEBACK |
2195 | spin_lock_init(&zram->wb_limit_lock); |
2196 | #endif |
2197 | |
2198 | /* gendisk structure */ |
2199 | zram->disk = blk_alloc_disk(NUMA_NO_NODE); |
2200 | if (!zram->disk) { |
2201 | pr_err("Error allocating disk structure for device %d\n" , |
2202 | device_id); |
2203 | ret = -ENOMEM; |
2204 | goto out_free_idr; |
2205 | } |
2206 | |
2207 | zram->disk->major = zram_major; |
2208 | zram->disk->first_minor = device_id; |
2209 | zram->disk->minors = 1; |
2210 | zram->disk->flags |= GENHD_FL_NO_PART; |
2211 | zram->disk->fops = &zram_devops; |
2212 | zram->disk->private_data = zram; |
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
set_capacity(zram->disk, 0);
/* zram devices sort of resemble non-rotational disks */
blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
2220 | |
2221 | /* |
2222 | * To ensure that we always get PAGE_SIZE aligned |
2223 | * and n*PAGE_SIZED sized I/O requests. |
2224 | */ |
2225 | blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); |
2226 | blk_queue_logical_block_size(zram->disk->queue, |
2227 | ZRAM_LOGICAL_BLOCK_SIZE); |
2228 | blk_queue_io_min(q: zram->disk->queue, PAGE_SIZE); |
2229 | blk_queue_io_opt(q: zram->disk->queue, PAGE_SIZE); |
2230 | zram->disk->queue->limits.discard_granularity = PAGE_SIZE; |
2231 | blk_queue_max_discard_sectors(q: zram->disk->queue, UINT_MAX); |
2232 | |
2233 | /* |
2234 | * zram_bio_discard() will clear all logical blocks if logical block |
2235 | * size is identical with physical block size(PAGE_SIZE). But if it is |
2236 | * different, we will skip discarding some parts of logical blocks in |
2237 | * the part of the request range which isn't aligned to physical block |
2238 | * size. So we can't ensure that all discarded logical blocks are |
2239 | * zeroed. |
2240 | */ |
2241 | if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE) |
2242 | blk_queue_max_write_zeroes_sectors(q: zram->disk->queue, UINT_MAX); |
2243 | |
2244 | blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q: zram->disk->queue); |
2245 | ret = device_add_disk(NULL, disk: zram->disk, groups: zram_disk_groups); |
2246 | if (ret) |
2247 | goto out_cleanup_disk; |
2248 | |
comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
2250 | |
2251 | zram_debugfs_register(zram); |
2252 | pr_info("Added device: %s\n" , zram->disk->disk_name); |
2253 | return device_id; |
2254 | |
2255 | out_cleanup_disk: |
put_disk(zram->disk);
out_free_idr:
idr_remove(&zram_index_idr, device_id);
out_free_dev:
kfree(zram);
2261 | return ret; |
2262 | } |
2263 | |
2264 | static int zram_remove(struct zram *zram) |
2265 | { |
2266 | bool claimed; |
2267 | |
2268 | mutex_lock(&zram->disk->open_mutex); |
if (disk_openers(zram->disk)) {
mutex_unlock(&zram->disk->open_mutex);
2271 | return -EBUSY; |
2272 | } |
2273 | |
2274 | claimed = zram->claim; |
2275 | if (!claimed) |
2276 | zram->claim = true; |
mutex_unlock(&zram->disk->open_mutex);
2278 | |
2279 | zram_debugfs_unregister(zram); |
2280 | |
if (claimed) {
/*
 * If we were claimed by reset_store(), del_gendisk() will
 * wait until reset_store() is done, so nothing needs to be
 * done here.
 */
;
} else {
/* Make sure all pending I/O is finished */
sync_blockdev(zram->disk->part0);
zram_reset_device(zram);
}
2292 | |
2293 | pr_info("Removed device: %s\n" , zram->disk->disk_name); |
2294 | |
del_gendisk(zram->disk);
2296 | |
2297 | /* del_gendisk drains pending reset_store */ |
2298 | WARN_ON_ONCE(claimed && zram->claim); |
2299 | |
2300 | /* |
2301 | * disksize_store() may be called in between zram_reset_device() |
2302 | * and del_gendisk(), so run the last reset to avoid leaking |
2303 | * anything allocated with disksize_store() |
2304 | */ |
2305 | zram_reset_device(zram); |
2306 | |
put_disk(zram->disk);
kfree(zram);
2309 | return 0; |
2310 | } |
2311 | |
2312 | /* zram-control sysfs attributes */ |
2313 | |
2314 | /* |
2315 | * NOTE: hot_add attribute is not the usual read-only sysfs attribute. In a |
2316 | * sense that reading from this file does alter the state of your system -- it |
2317 | * creates a new un-initialized zram device and returns back this device's |
2318 | * device_id (or an error code if it fails to create a new device). |
2319 | */ |
2320 | static ssize_t hot_add_show(const struct class *class, |
2321 | const struct class_attribute *attr, |
2322 | char *buf) |
2323 | { |
2324 | int ret; |
2325 | |
2326 | mutex_lock(&zram_index_mutex); |
2327 | ret = zram_add(); |
mutex_unlock(&zram_index_mutex);
2329 | |
2330 | if (ret < 0) |
2331 | return ret; |
return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
2333 | } |
/* This attribute must be set to 0400, so CLASS_ATTR_RO() cannot be used */
2335 | static struct class_attribute class_attr_hot_add = |
2336 | __ATTR(hot_add, 0400, hot_add_show, NULL); |
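
/*
 * Usage sketch: write the id of an existing, unused device:
 *
 *   echo 1 > /sys/class/zram-control/hot_remove
 */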
2337 | |
2338 | static ssize_t hot_remove_store(const struct class *class, |
2339 | const struct class_attribute *attr, |
2340 | const char *buf, |
2341 | size_t count) |
2342 | { |
2343 | struct zram *zram; |
2344 | int ret, dev_id; |
2345 | |
2346 | /* dev_id is gendisk->first_minor, which is `int' */ |
ret = kstrtoint(buf, 10, &dev_id);
2348 | if (ret) |
2349 | return ret; |
2350 | if (dev_id < 0) |
2351 | return -EINVAL; |
2352 | |
2353 | mutex_lock(&zram_index_mutex); |
2354 | |
2355 | zram = idr_find(&zram_index_idr, id: dev_id); |
2356 | if (zram) { |
2357 | ret = zram_remove(zram); |
2358 | if (!ret) |
2359 | idr_remove(&zram_index_idr, id: dev_id); |
2360 | } else { |
2361 | ret = -ENODEV; |
2362 | } |
2363 | |
mutex_unlock(&zram_index_mutex);
2365 | return ret ? ret : count; |
2366 | } |
2367 | static CLASS_ATTR_WO(hot_remove); |
2368 | |
2369 | static struct attribute *zram_control_class_attrs[] = { |
2370 | &class_attr_hot_add.attr, |
2371 | &class_attr_hot_remove.attr, |
2372 | NULL, |
2373 | }; |
2374 | ATTRIBUTE_GROUPS(zram_control_class); |
2375 | |
2376 | static struct class zram_control_class = { |
2377 | .name = "zram-control" , |
2378 | .class_groups = zram_control_class_groups, |
2379 | }; |
2380 | |
2381 | static int zram_remove_cb(int id, void *ptr, void *data) |
2382 | { |
2383 | WARN_ON_ONCE(zram_remove(ptr)); |
2384 | return 0; |
2385 | } |
2386 | |
2387 | static void destroy_devices(void) |
2388 | { |
class_unregister(&zram_control_class);
idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
zram_debugfs_destroy();
idr_destroy(&zram_index_idr);
unregister_blkdev(zram_major, "zram");
cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2395 | } |
2396 | |
2397 | static int __init zram_init(void) |
2398 | { |
2399 | int ret; |
2400 | |
2401 | BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG); |
2402 | |
ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
zcomp_cpu_up_prepare, zcomp_cpu_dead);
2405 | if (ret < 0) |
2406 | return ret; |
2407 | |
ret = class_register(&zram_control_class);
if (ret) {
pr_err("Unable to register zram-control class\n");
cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2412 | return ret; |
2413 | } |
2414 | |
2415 | zram_debugfs_create(); |
zram_major = register_blkdev(0, "zram");
if (zram_major <= 0) {
pr_err("Unable to get major number\n");
class_unregister(&zram_control_class);
cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
2421 | return -EBUSY; |
2422 | } |
2423 | |
2424 | while (num_devices != 0) { |
2425 | mutex_lock(&zram_index_mutex); |
2426 | ret = zram_add(); |
mutex_unlock(&zram_index_mutex);
2428 | if (ret < 0) |
2429 | goto out_error; |
2430 | num_devices--; |
2431 | } |
2432 | |
2433 | return 0; |
2434 | |
2435 | out_error: |
2436 | destroy_devices(); |
2437 | return ret; |
2438 | } |
2439 | |
2440 | static void __exit zram_exit(void) |
2441 | { |
2442 | destroy_devices(); |
2443 | } |
2444 | |
2445 | module_init(zram_init); |
2446 | module_exit(zram_exit); |
2447 | |
2448 | module_param(num_devices, uint, 0); |
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
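
/*
 * Example (assuming zram is built as a module; a built-in driver would
 * take the parameter on the kernel command line instead): pre-create
 * four devices at load time with
 *
 *   modprobe zram num_devices=4
 */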
2450 | |
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
2454 | |