1 | /* |
2 | * Compressed RAM block device |
3 | * |
4 | * Copyright (C) 2008, 2009, 2010 Nitin Gupta |
5 | * 2012, 2013 Minchan Kim |
6 | * |
7 | * This code is released using a dual license strategy: BSD/GPL |
8 | * You can choose the licence that better fits your requirements. |
9 | * |
10 | * Released under the terms of 3-clause BSD License |
11 | * Released under the terms of GNU General Public License Version 2.0 |
12 | * |
13 | */ |
14 | |
15 | #define KMSG_COMPONENT "zram" |
16 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt |
17 | |
18 | #include <linux/module.h> |
19 | #include <linux/kernel.h> |
20 | #include <linux/bio.h> |
21 | #include <linux/bitops.h> |
22 | #include <linux/blkdev.h> |
23 | #include <linux/buffer_head.h> |
24 | #include <linux/device.h> |
25 | #include <linux/highmem.h> |
26 | #include <linux/slab.h> |
27 | #include <linux/backing-dev.h> |
28 | #include <linux/string.h> |
29 | #include <linux/vmalloc.h> |
30 | #include <linux/err.h> |
31 | #include <linux/idr.h> |
32 | #include <linux/sysfs.h> |
33 | #include <linux/debugfs.h> |
34 | #include <linux/cpuhotplug.h> |
35 | #include <linux/part_stat.h> |
36 | |
37 | #include "zram_drv.h" |
38 | |
39 | static DEFINE_IDR(zram_index_idr); |
40 | /* idr index must be protected */ |
41 | static DEFINE_MUTEX(zram_index_mutex); |
42 | |
43 | static int zram_major; |
44 | static const char *default_compressor = CONFIG_ZRAM_DEF_COMP; |
45 | |
46 | /* Module params (documentation at end) */ |
47 | static unsigned int num_devices = 1; |
48 | /* |
 * Pages that compress to sizes equal to or greater than this are stored
 * uncompressed in memory.
51 | */ |
52 | static size_t huge_class_size; |
53 | |
54 | static const struct block_device_operations zram_devops; |
55 | |
56 | static void zram_free_page(struct zram *zram, size_t index); |
57 | static int zram_read_page(struct zram *zram, struct page *page, u32 index, |
58 | struct bio *parent); |
59 | |
60 | static int zram_slot_trylock(struct zram *zram, u32 index) |
61 | { |
	return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
73 | } |
74 | |
75 | static inline bool init_done(struct zram *zram) |
76 | { |
77 | return zram->disksize; |
78 | } |
79 | |
80 | static inline struct zram *dev_to_zram(struct device *dev) |
81 | { |
82 | return (struct zram *)dev_to_disk(dev)->private_data; |
83 | } |
84 | |
85 | static unsigned long zram_get_handle(struct zram *zram, u32 index) |
86 | { |
87 | return zram->table[index].handle; |
88 | } |
89 | |
90 | static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle) |
91 | { |
92 | zram->table[index].handle = handle; |
93 | } |
94 | |
/* flag operations require the table entry's bit_spin_lock() to be held */
96 | static bool zram_test_flag(struct zram *zram, u32 index, |
97 | enum zram_pageflags flag) |
98 | { |
99 | return zram->table[index].flags & BIT(flag); |
100 | } |
101 | |
102 | static void zram_set_flag(struct zram *zram, u32 index, |
103 | enum zram_pageflags flag) |
104 | { |
105 | zram->table[index].flags |= BIT(flag); |
106 | } |
107 | |
108 | static void zram_clear_flag(struct zram *zram, u32 index, |
109 | enum zram_pageflags flag) |
110 | { |
111 | zram->table[index].flags &= ~BIT(flag); |
112 | } |
113 | |
114 | static inline void zram_set_element(struct zram *zram, u32 index, |
115 | unsigned long element) |
116 | { |
117 | zram->table[index].element = element; |
118 | } |
119 | |
120 | static unsigned long zram_get_element(struct zram *zram, u32 index) |
121 | { |
122 | return zram->table[index].element; |
123 | } |
124 | |
125 | static size_t zram_get_obj_size(struct zram *zram, u32 index) |
126 | { |
127 | return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1); |
128 | } |
129 | |
130 | static void zram_set_obj_size(struct zram *zram, |
131 | u32 index, size_t size) |
132 | { |
133 | unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT; |
134 | |
135 | zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size; |
136 | } |
137 | |
138 | static inline bool zram_allocated(struct zram *zram, u32 index) |
139 | { |
140 | return zram_get_obj_size(zram, index) || |
			zram_test_flag(zram, index, ZRAM_SAME) ||
			zram_test_flag(zram, index, ZRAM_WB);
143 | } |
144 | |
145 | #if PAGE_SIZE != 4096 |
146 | static inline bool is_partial_io(struct bio_vec *bvec) |
147 | { |
148 | return bvec->bv_len != PAGE_SIZE; |
149 | } |
150 | #define ZRAM_PARTIAL_IO 1 |
151 | #else |
152 | static inline bool is_partial_io(struct bio_vec *bvec) |
153 | { |
154 | return false; |
155 | } |
156 | #endif |
157 | |
158 | static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio) |
159 | { |
160 | prio &= ZRAM_COMP_PRIORITY_MASK; |
161 | /* |
162 | * Clear previous priority value first, in case if we recompress |
163 | * further an already recompressed page |
164 | */ |
165 | zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK << |
166 | ZRAM_COMP_PRIORITY_BIT1); |
167 | zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1); |
168 | } |
169 | |
170 | static inline u32 zram_get_priority(struct zram *zram, u32 index) |
171 | { |
172 | u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1; |
173 | |
174 | return prio & ZRAM_COMP_PRIORITY_MASK; |
175 | } |
176 | |
177 | static void zram_accessed(struct zram *zram, u32 index) |
178 | { |
	zram_clear_flag(zram, index, ZRAM_IDLE);
180 | #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME |
181 | zram->table[index].ac_time = ktime_get_boottime(); |
182 | #endif |
183 | } |
184 | |
185 | static inline void update_used_max(struct zram *zram, |
186 | const unsigned long pages) |
187 | { |
	unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		if (cur_max >= pages)
			return;
	} while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
					  &cur_max, pages));
195 | } |
196 | |
197 | static inline void zram_fill_page(void *ptr, unsigned long len, |
198 | unsigned long value) |
199 | { |
200 | WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long))); |
	memset_l(ptr, value, len / sizeof(unsigned long));
202 | } |
203 | |
204 | static bool page_same_filled(void *ptr, unsigned long *element) |
205 | { |
206 | unsigned long *page; |
207 | unsigned long val; |
208 | unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1; |
209 | |
210 | page = (unsigned long *)ptr; |
211 | val = page[0]; |
212 | |
213 | if (val != page[last_pos]) |
214 | return false; |
215 | |
216 | for (pos = 1; pos < last_pos; pos++) { |
217 | if (val != page[pos]) |
218 | return false; |
219 | } |
220 | |
221 | *element = val; |
222 | |
223 | return true; |
224 | } |
225 | |
226 | static ssize_t initstate_show(struct device *dev, |
227 | struct device_attribute *attr, char *buf) |
228 | { |
229 | u32 val; |
230 | struct zram *zram = dev_to_zram(dev); |
231 | |
	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
237 | } |
238 | |
239 | static ssize_t disksize_show(struct device *dev, |
240 | struct device_attribute *attr, char *buf) |
241 | { |
242 | struct zram *zram = dev_to_zram(dev); |
243 | |
	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
245 | } |
246 | |
247 | static ssize_t mem_limit_store(struct device *dev, |
248 | struct device_attribute *attr, const char *buf, size_t len) |
249 | { |
250 | u64 limit; |
251 | char *tmp; |
252 | struct zram *zram = dev_to_zram(dev); |
253 | |
	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);
261 | |
262 | return len; |
263 | } |
264 | |
265 | static ssize_t mem_used_max_store(struct device *dev, |
266 | struct device_attribute *attr, const char *buf, size_t len) |
267 | { |
268 | int err; |
269 | unsigned long val; |
270 | struct zram *zram = dev_to_zram(dev); |
271 | |
	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram)) {
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(zram->mem_pool));
	}
	up_read(&zram->init_lock);
282 | |
283 | return len; |
284 | } |
285 | |
286 | /* |
287 | * Mark all pages which are older than or equal to cutoff as IDLE. |
288 | * Callers should hold the zram init lock in read mode |
289 | */ |
290 | static void mark_idle(struct zram *zram, ktime_t cutoff) |
291 | { |
292 | int is_idle = 1; |
293 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; |
294 | int index; |
295 | |
296 | for (index = 0; index < nr_pages; index++) { |
297 | /* |
298 | * Do not mark ZRAM_UNDER_WB slot as ZRAM_IDLE to close race. |
299 | * See the comment in writeback_store. |
300 | */ |
301 | zram_slot_lock(zram, index); |
		if (zram_allocated(zram, index) &&
		    !zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
			is_idle = !cutoff || ktime_after(cutoff,
					zram->table[index].ac_time);
#endif
			if (is_idle)
				zram_set_flag(zram, index, ZRAM_IDLE);
310 | } |
311 | zram_slot_unlock(zram, index); |
312 | } |
313 | } |
314 | |
315 | static ssize_t idle_store(struct device *dev, |
316 | struct device_attribute *attr, const char *buf, size_t len) |
317 | { |
318 | struct zram *zram = dev_to_zram(dev); |
319 | ktime_t cutoff_time = 0; |
320 | ssize_t rv = -EINVAL; |
321 | |
	if (!sysfs_streq(buf, "all")) {
323 | /* |
324 | * If it did not parse as 'all' try to treat it as an integer |
325 | * when we have memory tracking enabled. |
326 | */ |
327 | u64 age_sec; |
328 | |
		if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec))
330 | cutoff_time = ktime_sub(ktime_get_boottime(), |
331 | ns_to_ktime(age_sec * NSEC_PER_SEC)); |
332 | else |
333 | goto out; |
334 | } |
335 | |
	down_read(&zram->init_lock);
	if (!init_done(zram))
		goto out_unlock;

	/*
	 * A cutoff_time of 0 marks everything as idle, this is the
	 * "all" behavior.
	 */
	mark_idle(zram, cutoff_time);
	rv = len;

out_unlock:
	up_read(&zram->init_lock);
349 | out: |
350 | return rv; |
351 | } |
352 | |
353 | #ifdef CONFIG_ZRAM_WRITEBACK |
354 | static ssize_t writeback_limit_enable_store(struct device *dev, |
355 | struct device_attribute *attr, const char *buf, size_t len) |
356 | { |
357 | struct zram *zram = dev_to_zram(dev); |
358 | u64 val; |
359 | ssize_t ret = -EINVAL; |
360 | |
	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->wb_limit_enable = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
369 | ret = len; |
370 | |
371 | return ret; |
372 | } |
373 | |
374 | static ssize_t writeback_limit_enable_show(struct device *dev, |
375 | struct device_attribute *attr, char *buf) |
376 | { |
377 | bool val; |
378 | struct zram *zram = dev_to_zram(dev); |
379 | |
	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->wb_limit_enable;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
387 | } |
388 | |
389 | static ssize_t writeback_limit_store(struct device *dev, |
390 | struct device_attribute *attr, const char *buf, size_t len) |
391 | { |
392 | struct zram *zram = dev_to_zram(dev); |
393 | u64 val; |
394 | ssize_t ret = -EINVAL; |
395 | |
	if (kstrtoull(buf, 10, &val))
		return ret;

	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	zram->bd_wb_limit = val;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);
404 | ret = len; |
405 | |
406 | return ret; |
407 | } |
408 | |
409 | static ssize_t writeback_limit_show(struct device *dev, |
410 | struct device_attribute *attr, char *buf) |
411 | { |
412 | u64 val; |
413 | struct zram *zram = dev_to_zram(dev); |
414 | |
	down_read(&zram->init_lock);
	spin_lock(&zram->wb_limit_lock);
	val = zram->bd_wb_limit;
	spin_unlock(&zram->wb_limit_lock);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
422 | } |
423 | |
424 | static void reset_bdev(struct zram *zram) |
425 | { |
426 | if (!zram->backing_dev) |
427 | return; |
428 | |
429 | fput(zram->bdev_file); |
	/* hope filp_close() flushes all outstanding IO */
	filp_close(zram->backing_dev, NULL);
	zram->backing_dev = NULL;
	zram->bdev_file = NULL;
	zram->disk->fops = &zram_devops;
	kvfree(zram->bitmap);
436 | zram->bitmap = NULL; |
437 | } |
438 | |
439 | static ssize_t backing_dev_show(struct device *dev, |
440 | struct device_attribute *attr, char *buf) |
441 | { |
442 | struct file *file; |
443 | struct zram *zram = dev_to_zram(dev); |
444 | char *p; |
445 | ssize_t ret; |
446 | |
	down_read(&zram->init_lock);
	file = zram->backing_dev;
	if (!file) {
		memcpy(buf, "none\n", 5);
		up_read(&zram->init_lock);
		return 5;
	}

	p = file_path(file, buf, PAGE_SIZE - 1);
	if (IS_ERR(p)) {
		ret = PTR_ERR(p);
458 | goto out; |
459 | } |
460 | |
461 | ret = strlen(p); |
462 | memmove(buf, p, ret); |
463 | buf[ret++] = '\n'; |
464 | out: |
	up_read(&zram->init_lock);
466 | return ret; |
467 | } |
468 | |
469 | static ssize_t backing_dev_store(struct device *dev, |
470 | struct device_attribute *attr, const char *buf, size_t len) |
471 | { |
472 | char *file_name; |
473 | size_t sz; |
474 | struct file *backing_dev = NULL; |
475 | struct inode *inode; |
476 | struct address_space *mapping; |
477 | unsigned int bitmap_sz; |
478 | unsigned long nr_pages, *bitmap = NULL; |
479 | struct file *bdev_file = NULL; |
480 | int err; |
481 | struct zram *zram = dev_to_zram(dev); |
482 | |
483 | file_name = kmalloc(PATH_MAX, GFP_KERNEL); |
484 | if (!file_name) |
485 | return -ENOMEM; |
486 | |
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Can't setup backing device for initialized device\n");
490 | err = -EBUSY; |
491 | goto out; |
492 | } |
493 | |
494 | strscpy(file_name, buf, PATH_MAX); |
495 | /* ignore trailing newline */ |
496 | sz = strlen(file_name); |
497 | if (sz > 0 && file_name[sz - 1] == '\n') |
498 | file_name[sz - 1] = 0x00; |
499 | |
500 | backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0); |
	if (IS_ERR(backing_dev)) {
		err = PTR_ERR(backing_dev);
503 | backing_dev = NULL; |
504 | goto out; |
505 | } |
506 | |
507 | mapping = backing_dev->f_mapping; |
508 | inode = mapping->host; |
509 | |
	/* Only block devices are supported at the moment */
511 | if (!S_ISBLK(inode->i_mode)) { |
512 | err = -ENOTBLK; |
513 | goto out; |
514 | } |
515 | |
	bdev_file = bdev_file_open_by_dev(inode->i_rdev,
				BLK_OPEN_READ | BLK_OPEN_WRITE, zram, NULL);
	if (IS_ERR(bdev_file)) {
		err = PTR_ERR(bdev_file);
520 | bdev_file = NULL; |
521 | goto out; |
522 | } |
523 | |
524 | nr_pages = i_size_read(inode) >> PAGE_SHIFT; |
525 | bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long); |
	bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
527 | if (!bitmap) { |
528 | err = -ENOMEM; |
529 | goto out; |
530 | } |
531 | |
532 | reset_bdev(zram); |
533 | |
534 | zram->bdev_file = bdev_file; |
535 | zram->backing_dev = backing_dev; |
536 | zram->bitmap = bitmap; |
537 | zram->nr_pages = nr_pages; |
	up_write(&zram->init_lock);

	pr_info("setup backing device %s\n", file_name);
	kfree(file_name);
542 | |
543 | return len; |
544 | out: |
	kvfree(bitmap);
546 | |
547 | if (bdev_file) |
548 | fput(bdev_file); |
549 | |
550 | if (backing_dev) |
551 | filp_close(backing_dev, NULL); |
552 | |
	up_write(&zram->init_lock);

	kfree(file_name);
556 | |
557 | return err; |
558 | } |
559 | |
560 | static unsigned long alloc_block_bdev(struct zram *zram) |
561 | { |
562 | unsigned long blk_idx = 1; |
563 | retry: |
	/* skip bit 0 to avoid confusion with zram.handle == 0 */
	blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
	if (blk_idx == zram->nr_pages)
		return 0;

	if (test_and_set_bit(blk_idx, zram->bitmap))
		goto retry;

	atomic64_inc(&zram->stats.bd_count);
573 | return blk_idx; |
574 | } |
575 | |
576 | static void free_block_bdev(struct zram *zram, unsigned long blk_idx) |
577 | { |
578 | int was_set; |
579 | |
	was_set = test_and_clear_bit(blk_idx, zram->bitmap);
	WARN_ON_ONCE(!was_set);
	atomic64_dec(&zram->stats.bd_count);
583 | } |
584 | |
585 | static void read_from_bdev_async(struct zram *zram, struct page *page, |
586 | unsigned long entry, struct bio *parent) |
587 | { |
588 | struct bio *bio; |
589 | |
	bio = bio_alloc(file_bdev(zram->bdev_file), 1, parent->bi_opf, GFP_NOIO);
	bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
	__bio_add_page(bio, page, PAGE_SIZE, 0);
593 | bio_chain(bio, parent); |
594 | submit_bio(bio); |
595 | } |
596 | |
597 | #define PAGE_WB_SIG "page_index=" |
598 | |
599 | #define PAGE_WRITEBACK 0 |
600 | #define HUGE_WRITEBACK (1<<0) |
601 | #define IDLE_WRITEBACK (1<<1) |
602 | #define INCOMPRESSIBLE_WRITEBACK (1<<2) |
603 | |
604 | static ssize_t writeback_store(struct device *dev, |
605 | struct device_attribute *attr, const char *buf, size_t len) |
606 | { |
607 | struct zram *zram = dev_to_zram(dev); |
608 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; |
609 | unsigned long index = 0; |
610 | struct bio bio; |
611 | struct bio_vec bio_vec; |
612 | struct page *page; |
613 | ssize_t ret = len; |
614 | int mode, err; |
615 | unsigned long blk_idx = 0; |
616 | |
	if (sysfs_streq(buf, "idle"))
		mode = IDLE_WRITEBACK;
	else if (sysfs_streq(buf, "huge"))
		mode = HUGE_WRITEBACK;
	else if (sysfs_streq(buf, "huge_idle"))
		mode = IDLE_WRITEBACK | HUGE_WRITEBACK;
	else if (sysfs_streq(buf, "incompressible"))
		mode = INCOMPRESSIBLE_WRITEBACK;
	else {
		if (strncmp(buf, PAGE_WB_SIG, sizeof(PAGE_WB_SIG) - 1))
			return -EINVAL;

		if (kstrtoul(buf + sizeof(PAGE_WB_SIG) - 1, 10, &index) ||
630 | index >= nr_pages) |
631 | return -EINVAL; |
632 | |
633 | nr_pages = 1; |
634 | mode = PAGE_WRITEBACK; |
635 | } |
636 | |
	down_read(&zram->init_lock);
638 | if (!init_done(zram)) { |
639 | ret = -EINVAL; |
640 | goto release_init_lock; |
641 | } |
642 | |
643 | if (!zram->backing_dev) { |
644 | ret = -ENODEV; |
645 | goto release_init_lock; |
646 | } |
647 | |
648 | page = alloc_page(GFP_KERNEL); |
649 | if (!page) { |
650 | ret = -ENOMEM; |
651 | goto release_init_lock; |
652 | } |
653 | |
654 | for (; nr_pages != 0; index++, nr_pages--) { |
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && !zram->bd_wb_limit) {
			spin_unlock(&zram->wb_limit_lock);
			ret = -EIO;
			break;
		}
		spin_unlock(&zram->wb_limit_lock);
662 | |
663 | if (!blk_idx) { |
664 | blk_idx = alloc_block_bdev(zram); |
665 | if (!blk_idx) { |
666 | ret = -ENOSPC; |
667 | break; |
668 | } |
669 | } |
670 | |
671 | zram_slot_lock(zram, index); |
672 | if (!zram_allocated(zram, index)) |
673 | goto next; |
674 | |
		if (zram_test_flag(zram, index, ZRAM_WB) ||
		    zram_test_flag(zram, index, ZRAM_SAME) ||
		    zram_test_flag(zram, index, ZRAM_UNDER_WB))
			goto next;

		if (mode & IDLE_WRITEBACK &&
		    !zram_test_flag(zram, index, ZRAM_IDLE))
			goto next;
		if (mode & HUGE_WRITEBACK &&
		    !zram_test_flag(zram, index, ZRAM_HUGE))
			goto next;
		if (mode & INCOMPRESSIBLE_WRITEBACK &&
		    !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
			goto next;

		/*
		 * Clearing ZRAM_UNDER_WB is the caller's duty.
		 * IOW, zram_free_page() never clears it.
		 */
		zram_set_flag(zram, index, ZRAM_UNDER_WB);
		/* Needed to handle the hugepage writeback race */
		zram_set_flag(zram, index, ZRAM_IDLE);
		zram_slot_unlock(zram, index);
		if (zram_read_page(zram, page, index, NULL)) {
			zram_slot_lock(zram, index);
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
702 | zram_slot_unlock(zram, index); |
703 | continue; |
704 | } |
705 | |
		bio_init(&bio, file_bdev(zram->bdev_file), &bio_vec, 1,
			 REQ_OP_WRITE | REQ_SYNC);
		bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
		__bio_add_page(&bio, page, PAGE_SIZE, 0);
710 | |
711 | /* |
712 | * XXX: A single page IO would be inefficient for write |
713 | * but it would be not bad as starter. |
714 | */ |
715 | err = submit_bio_wait(bio: &bio); |
716 | if (err) { |
717 | zram_slot_lock(zram, index); |
718 | zram_clear_flag(zram, index, flag: ZRAM_UNDER_WB); |
719 | zram_clear_flag(zram, index, flag: ZRAM_IDLE); |
720 | zram_slot_unlock(zram, index); |
721 | /* |
722 | * BIO errors are not fatal, we continue and simply |
723 | * attempt to writeback the remaining objects (pages). |
724 | * At the same time we need to signal user-space that |
725 | * some writes (at least one, but also could be all of |
726 | * them) were not successful and we do so by returning |
727 | * the most recent BIO error. |
728 | */ |
729 | ret = err; |
730 | continue; |
731 | } |
732 | |
		atomic64_inc(&zram->stats.bd_writes);
		/*
		 * We released zram_slot_lock so we need to check if the
		 * slot was changed. If the slot was freed, we can catch
		 * that easily via zram_allocated().
		 * A subtle case is when the slot is freed/reallocated/marked
		 * as ZRAM_IDLE again. To close that race, idle_store doesn't
		 * mark ZRAM_IDLE once it finds the slot is ZRAM_UNDER_WB.
		 * Thus, we can close the race by checking the ZRAM_IDLE bit.
		 */
		zram_slot_lock(zram, index);
		if (!zram_allocated(zram, index) ||
		    !zram_test_flag(zram, index, ZRAM_IDLE)) {
			zram_clear_flag(zram, index, ZRAM_UNDER_WB);
			zram_clear_flag(zram, index, ZRAM_IDLE);
			goto next;
		}

		zram_free_page(zram, index);
		zram_clear_flag(zram, index, ZRAM_UNDER_WB);
		zram_set_flag(zram, index, ZRAM_WB);
		zram_set_element(zram, index, blk_idx);
		blk_idx = 0;
		atomic64_inc(&zram->stats.pages_stored);
		spin_lock(&zram->wb_limit_lock);
		if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
			zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
		spin_unlock(&zram->wb_limit_lock);
761 | next: |
762 | zram_slot_unlock(zram, index); |
763 | } |
764 | |
765 | if (blk_idx) |
766 | free_block_bdev(zram, blk_idx); |
767 | __free_page(page); |
768 | release_init_lock: |
	up_read(&zram->init_lock);
770 | |
771 | return ret; |
772 | } |
773 | |
774 | struct zram_work { |
775 | struct work_struct work; |
776 | struct zram *zram; |
777 | unsigned long entry; |
778 | struct page *page; |
779 | int error; |
780 | }; |
781 | |
782 | static void zram_sync_read(struct work_struct *work) |
783 | { |
784 | struct zram_work *zw = container_of(work, struct zram_work, work); |
785 | struct bio_vec bv; |
786 | struct bio bio; |
787 | |
	bio_init(&bio, file_bdev(zw->zram->bdev_file), &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = zw->entry * (PAGE_SIZE >> 9);
	__bio_add_page(&bio, zw->page, PAGE_SIZE, 0);
	zw->error = submit_bio_wait(&bio);
792 | } |
793 | |
794 | /* |
795 | * Block layer want one ->submit_bio to be active at a time, so if we use |
796 | * chained IO with parent IO in same context, it's a deadlock. To avoid that, |
797 | * use a worker thread context. |
798 | */ |
799 | static int read_from_bdev_sync(struct zram *zram, struct page *page, |
800 | unsigned long entry) |
801 | { |
802 | struct zram_work work; |
803 | |
804 | work.page = page; |
805 | work.zram = zram; |
806 | work.entry = entry; |
807 | |
808 | INIT_WORK_ONSTACK(&work.work, zram_sync_read); |
	queue_work(system_unbound_wq, &work.work);
	flush_work(&work.work);
	destroy_work_on_stack(&work.work);
812 | |
813 | return work.error; |
814 | } |
815 | |
816 | static int read_from_bdev(struct zram *zram, struct page *page, |
817 | unsigned long entry, struct bio *parent) |
818 | { |
	atomic64_inc(&zram->stats.bd_reads);
820 | if (!parent) { |
821 | if (WARN_ON_ONCE(!IS_ENABLED(ZRAM_PARTIAL_IO))) |
822 | return -EIO; |
823 | return read_from_bdev_sync(zram, page, entry); |
824 | } |
825 | read_from_bdev_async(zram, page, entry, parent); |
826 | return 0; |
827 | } |
828 | #else |
829 | static inline void reset_bdev(struct zram *zram) {}; |
830 | static int read_from_bdev(struct zram *zram, struct page *page, |
831 | unsigned long entry, struct bio *parent) |
832 | { |
833 | return -EIO; |
834 | } |
835 | |
836 | static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {}; |
837 | #endif |
838 | |
839 | #ifdef CONFIG_ZRAM_MEMORY_TRACKING |
840 | |
841 | static struct dentry *zram_debugfs_root; |
842 | |
843 | static void zram_debugfs_create(void) |
844 | { |
	zram_debugfs_root = debugfs_create_dir("zram", NULL);
846 | } |
847 | |
848 | static void zram_debugfs_destroy(void) |
849 | { |
	debugfs_remove_recursive(zram_debugfs_root);
851 | } |
852 | |
853 | static ssize_t read_block_state(struct file *file, char __user *buf, |
854 | size_t count, loff_t *ppos) |
855 | { |
856 | char *kbuf; |
857 | ssize_t index, written = 0; |
858 | struct zram *zram = file->private_data; |
859 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; |
860 | struct timespec64 ts; |
861 | |
	kbuf = kvmalloc(count, GFP_KERNEL);
863 | if (!kbuf) |
864 | return -ENOMEM; |
865 | |
	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		kvfree(kbuf);
870 | return -EINVAL; |
871 | } |
872 | |
873 | for (index = *ppos; index < nr_pages; index++) { |
874 | int copied; |
875 | |
876 | zram_slot_lock(zram, index); |
877 | if (!zram_allocated(zram, index)) |
878 | goto next; |
879 | |
880 | ts = ktime_to_timespec64(zram->table[index].ac_time); |
		copied = snprintf(kbuf + written, count,
			"%12zd %12lld.%06lu %c%c%c%c%c%c\n",
			index, (s64)ts.tv_sec,
			ts.tv_nsec / NSEC_PER_USEC,
			zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
			zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
			zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
			zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
			zram_get_priority(zram, index) ? 'r' : '.',
			zram_test_flag(zram, index,
				ZRAM_INCOMPRESSIBLE) ? 'n' : '.');
892 | |
893 | if (count <= copied) { |
894 | zram_slot_unlock(zram, index); |
895 | break; |
896 | } |
897 | written += copied; |
898 | count -= copied; |
899 | next: |
900 | zram_slot_unlock(zram, index); |
901 | *ppos += 1; |
902 | } |
903 | |
	up_read(&zram->init_lock);
	if (copy_to_user(buf, kbuf, written))
		written = -EFAULT;
	kvfree(kbuf);
908 | |
909 | return written; |
910 | } |
911 | |
912 | static const struct file_operations proc_zram_block_state_op = { |
913 | .open = simple_open, |
914 | .read = read_block_state, |
915 | .llseek = default_llseek, |
916 | }; |
917 | |
918 | static void zram_debugfs_register(struct zram *zram) |
919 | { |
920 | if (!zram_debugfs_root) |
921 | return; |
922 | |
	zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
						zram_debugfs_root);
	debugfs_create_file("block_state", 0400, zram->debugfs_dir,
				zram, &proc_zram_block_state_op);
927 | } |
928 | |
929 | static void zram_debugfs_unregister(struct zram *zram) |
930 | { |
	debugfs_remove_recursive(zram->debugfs_dir);
932 | } |
933 | #else |
934 | static void zram_debugfs_create(void) {}; |
935 | static void zram_debugfs_destroy(void) {}; |
936 | static void zram_debugfs_register(struct zram *zram) {}; |
937 | static void zram_debugfs_unregister(struct zram *zram) {}; |
938 | #endif |
939 | |
940 | /* |
941 | * We switched to per-cpu streams and this attr is not needed anymore. |
942 | * However, we will keep it around for some time, because: |
943 | * a) we may revert per-cpu streams in the future |
 * b) it's visible to user space and we need to follow our 2-year
 *    retirement rule; but we already have a number of 'soon to be
 *    altered' attrs, so max_comp_streams needs to wait for the next
 *    layoff cycle.
948 | */ |
949 | static ssize_t max_comp_streams_show(struct device *dev, |
950 | struct device_attribute *attr, char *buf) |
951 | { |
	return scnprintf(buf, PAGE_SIZE, "%d\n", num_online_cpus());
953 | } |
954 | |
955 | static ssize_t max_comp_streams_store(struct device *dev, |
956 | struct device_attribute *attr, const char *buf, size_t len) |
957 | { |
958 | return len; |
959 | } |
960 | |
961 | static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg) |
962 | { |
963 | /* Do not free statically defined compression algorithms */ |
964 | if (zram->comp_algs[prio] != default_compressor) |
		kfree(zram->comp_algs[prio]);
966 | |
967 | zram->comp_algs[prio] = alg; |
968 | } |
969 | |
970 | static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf) |
971 | { |
972 | ssize_t sz; |
973 | |
	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->comp_algs[prio], buf);
	up_read(&zram->init_lock);
977 | |
978 | return sz; |
979 | } |
980 | |
981 | static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf) |
982 | { |
983 | char *compressor; |
984 | size_t sz; |
985 | |
986 | sz = strlen(buf); |
987 | if (sz >= CRYPTO_MAX_ALG_NAME) |
988 | return -E2BIG; |
989 | |
	compressor = kstrdup(buf, GFP_KERNEL);
991 | if (!compressor) |
992 | return -ENOMEM; |
993 | |
994 | /* ignore trailing newline */ |
995 | if (sz > 0 && compressor[sz - 1] == '\n') |
996 | compressor[sz - 1] = 0x00; |
997 | |
	if (!zcomp_available_algorithm(compressor)) {
		kfree(compressor);
1000 | return -EINVAL; |
1001 | } |
1002 | |
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		kfree(compressor);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}

	comp_algorithm_set(zram, prio, compressor);
	up_write(&zram->init_lock);
1013 | return 0; |
1014 | } |
1015 | |
1016 | static ssize_t comp_algorithm_show(struct device *dev, |
1017 | struct device_attribute *attr, |
1018 | char *buf) |
1019 | { |
1020 | struct zram *zram = dev_to_zram(dev); |
1021 | |
1022 | return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf); |
1023 | } |
1024 | |
1025 | static ssize_t comp_algorithm_store(struct device *dev, |
1026 | struct device_attribute *attr, |
1027 | const char *buf, |
1028 | size_t len) |
1029 | { |
1030 | struct zram *zram = dev_to_zram(dev); |
1031 | int ret; |
1032 | |
1033 | ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf); |
1034 | return ret ? ret : len; |
1035 | } |
1036 | |
1037 | #ifdef CONFIG_ZRAM_MULTI_COMP |
1038 | static ssize_t recomp_algorithm_show(struct device *dev, |
1039 | struct device_attribute *attr, |
1040 | char *buf) |
1041 | { |
1042 | struct zram *zram = dev_to_zram(dev); |
1043 | ssize_t sz = 0; |
1044 | u32 prio; |
1045 | |
1046 | for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) { |
1047 | if (!zram->comp_algs[prio]) |
1048 | continue; |
1049 | |
		sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio);
		sz += __comp_algorithm_show(zram, prio, buf + sz);
1052 | } |
1053 | |
1054 | return sz; |
1055 | } |
1056 | |
1057 | static ssize_t recomp_algorithm_store(struct device *dev, |
1058 | struct device_attribute *attr, |
1059 | const char *buf, |
1060 | size_t len) |
1061 | { |
1062 | struct zram *zram = dev_to_zram(dev); |
1063 | int prio = ZRAM_SECONDARY_COMP; |
1064 | char *args, *param, *val; |
1065 | char *alg = NULL; |
1066 | int ret; |
1067 | |
1068 | args = skip_spaces(buf); |
1069 | while (*args) { |
		args = next_arg(args, &param, &val);

		if (!val || !*val)
			return -EINVAL;

		if (!strcmp(param, "algo")) {
			alg = val;
			continue;
		}

		if (!strcmp(param, "priority")) {
			ret = kstrtoint(val, 10, &prio);
1082 | if (ret) |
1083 | return ret; |
1084 | continue; |
1085 | } |
1086 | } |
1087 | |
1088 | if (!alg) |
1089 | return -EINVAL; |
1090 | |
1091 | if (prio < ZRAM_SECONDARY_COMP || prio >= ZRAM_MAX_COMPS) |
1092 | return -EINVAL; |
1093 | |
	ret = __comp_algorithm_store(zram, prio, alg);
1095 | return ret ? ret : len; |
1096 | } |
1097 | #endif |
1098 | |
1099 | static ssize_t compact_store(struct device *dev, |
1100 | struct device_attribute *attr, const char *buf, size_t len) |
1101 | { |
1102 | struct zram *zram = dev_to_zram(dev); |
1103 | |
	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		up_read(&zram->init_lock);
		return -EINVAL;
	}

	zs_compact(zram->mem_pool);
	up_read(&zram->init_lock);
1112 | |
1113 | return len; |
1114 | } |
1115 | |
1116 | static ssize_t io_stat_show(struct device *dev, |
1117 | struct device_attribute *attr, char *buf) |
1118 | { |
1119 | struct zram *zram = dev_to_zram(dev); |
1120 | ssize_t ret; |
1121 | |
	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu 0 %8llu\n",
			(u64)atomic64_read(&zram->stats.failed_reads),
			(u64)atomic64_read(&zram->stats.failed_writes),
			(u64)atomic64_read(&zram->stats.notify_free));
	up_read(&zram->init_lock);
1129 | |
1130 | return ret; |
1131 | } |
1132 | |
1133 | static ssize_t mm_stat_show(struct device *dev, |
1134 | struct device_attribute *attr, char *buf) |
1135 | { |
1136 | struct zram *zram = dev_to_zram(dev); |
1137 | struct zs_pool_stats pool_stats; |
1138 | u64 orig_size, mem_used = 0; |
1139 | long max_used; |
1140 | ssize_t ret; |
1141 | |
1142 | memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats)); |
1143 | |
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		mem_used = zs_get_total_pages(zram->mem_pool);
		zs_pool_stats(zram->mem_pool, &pool_stats);
	}

	orig_size = atomic64_read(&zram->stats.pages_stored);
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
			zram->limit_pages << PAGE_SHIFT,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			atomic_long_read(&pool_stats.pages_compacted),
			(u64)atomic64_read(&zram->stats.huge_pages),
			(u64)atomic64_read(&zram->stats.huge_pages_since));
	up_read(&zram->init_lock);
1165 | |
1166 | return ret; |
1167 | } |
1168 | |
1169 | #ifdef CONFIG_ZRAM_WRITEBACK |
1170 | #define FOUR_K(x) ((x) * (1 << (PAGE_SHIFT - 12))) |
1171 | static ssize_t bd_stat_show(struct device *dev, |
1172 | struct device_attribute *attr, char *buf) |
1173 | { |
1174 | struct zram *zram = dev_to_zram(dev); |
1175 | ssize_t ret; |
1176 | |
	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu\n",
			FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
			FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
	up_read(&zram->init_lock);
1184 | |
1185 | return ret; |
1186 | } |
1187 | #endif |
1188 | |
1189 | static ssize_t debug_stat_show(struct device *dev, |
1190 | struct device_attribute *attr, char *buf) |
1191 | { |
1192 | int version = 1; |
1193 | struct zram *zram = dev_to_zram(dev); |
1194 | ssize_t ret; |
1195 | |
	down_read(&zram->init_lock);
	ret = scnprintf(buf, PAGE_SIZE,
			"version: %d\n%8llu %8llu\n",
			version,
			(u64)atomic64_read(&zram->stats.writestall),
			(u64)atomic64_read(&zram->stats.miss_free));
	up_read(&zram->init_lock);
1203 | |
1204 | return ret; |
1205 | } |
1206 | |
1207 | static DEVICE_ATTR_RO(io_stat); |
1208 | static DEVICE_ATTR_RO(mm_stat); |
1209 | #ifdef CONFIG_ZRAM_WRITEBACK |
1210 | static DEVICE_ATTR_RO(bd_stat); |
1211 | #endif |
1212 | static DEVICE_ATTR_RO(debug_stat); |
1213 | |
1214 | static void zram_meta_free(struct zram *zram, u64 disksize) |
1215 | { |
1216 | size_t num_pages = disksize >> PAGE_SHIFT; |
1217 | size_t index; |
1218 | |
1219 | /* Free all pages that are still in this zram device */ |
1220 | for (index = 0; index < num_pages; index++) |
1221 | zram_free_page(zram, index); |
1222 | |
	zs_destroy_pool(zram->mem_pool);
	vfree(zram->table);
1225 | } |
1226 | |
1227 | static bool zram_meta_alloc(struct zram *zram, u64 disksize) |
1228 | { |
1229 | size_t num_pages; |
1230 | |
1231 | num_pages = disksize >> PAGE_SHIFT; |
1232 | zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table))); |
1233 | if (!zram->table) |
1234 | return false; |
1235 | |
	zram->mem_pool = zs_create_pool(zram->disk->disk_name);
	if (!zram->mem_pool) {
		vfree(zram->table);
		return false;
	}

	if (!huge_class_size)
		huge_class_size = zs_huge_class_size(zram->mem_pool);
1244 | return true; |
1245 | } |
1246 | |
1247 | /* |
1248 | * To protect concurrent access to the same index entry, |
1249 | * caller should hold this table index entry's bit_spinlock to |
1250 | * indicate this index entry is accessing. |
1251 | */ |
1252 | static void zram_free_page(struct zram *zram, size_t index) |
1253 | { |
1254 | unsigned long handle; |
1255 | |
1256 | #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME |
1257 | zram->table[index].ac_time = 0; |
1258 | #endif |
	if (zram_test_flag(zram, index, ZRAM_IDLE))
		zram_clear_flag(zram, index, ZRAM_IDLE);

	if (zram_test_flag(zram, index, ZRAM_HUGE)) {
		zram_clear_flag(zram, index, ZRAM_HUGE);
		atomic64_dec(&zram->stats.huge_pages);
	}

	if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
		zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);

	zram_set_priority(zram, index, 0);

	if (zram_test_flag(zram, index, ZRAM_WB)) {
		zram_clear_flag(zram, index, ZRAM_WB);
		free_block_bdev(zram, zram_get_element(zram, index));
		goto out;
	}

	/*
	 * No memory is allocated for same element filled pages.
	 * Simply clear same page flag.
	 */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		zram_clear_flag(zram, index, ZRAM_SAME);
		atomic64_dec(&zram->stats.same_pages);
		goto out;
	}

	handle = zram_get_handle(zram, index);
	if (!handle)
		return;

	zs_free(zram->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(zram, index),
		     &zram->stats.compr_data_size);
out:
	atomic64_dec(&zram->stats.pages_stored);
	zram_set_handle(zram, index, 0);
	zram_set_obj_size(zram, index, 0);
1300 | WARN_ON_ONCE(zram->table[index].flags & |
1301 | ~(1UL << ZRAM_LOCK | 1UL << ZRAM_UNDER_WB)); |
1302 | } |
1303 | |
1304 | /* |
1305 | * Reads (decompresses if needed) a page from zspool (zsmalloc). |
1306 | * Corresponding ZRAM slot should be locked. |
1307 | */ |
1308 | static int zram_read_from_zspool(struct zram *zram, struct page *page, |
1309 | u32 index) |
1310 | { |
1311 | struct zcomp_strm *zstrm; |
1312 | unsigned long handle; |
1313 | unsigned int size; |
1314 | void *src, *dst; |
1315 | u32 prio; |
1316 | int ret; |
1317 | |
1318 | handle = zram_get_handle(zram, index); |
	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
		unsigned long value;
		void *mem;

		value = handle ? zram_get_element(zram, index) : 0;
		mem = kmap_local_page(page);
		zram_fill_page(mem, PAGE_SIZE, value);
		kunmap_local(mem);
		return 0;
	}

	size = zram_get_obj_size(zram, index);

	if (size != PAGE_SIZE) {
		prio = zram_get_priority(zram, index);
		zstrm = zcomp_stream_get(zram->comps[prio]);
	}

	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE) {
		dst = kmap_local_page(page);
		copy_page(dst, src);
		kunmap_local(dst);
		ret = 0;
	} else {
		dst = kmap_local_page(page);
		ret = zcomp_decompress(zstrm, src, size, dst);
		kunmap_local(dst);
		zcomp_stream_put(zram->comps[prio]);
	}
	zs_unmap_object(zram->mem_pool, handle);
1350 | return ret; |
1351 | } |
1352 | |
1353 | static int zram_read_page(struct zram *zram, struct page *page, u32 index, |
1354 | struct bio *parent) |
1355 | { |
1356 | int ret; |
1357 | |
1358 | zram_slot_lock(zram, index); |
	if (!zram_test_flag(zram, index, ZRAM_WB)) {
		/* The slot should be locked throughout the function call */
		ret = zram_read_from_zspool(zram, page, index);
		zram_slot_unlock(zram, index);
	} else {
		/*
		 * The slot should be unlocked before reading from the
		 * backing device.
		 */
		zram_slot_unlock(zram, index);

		ret = read_from_bdev(zram, page, zram_get_element(zram, index),
				     parent);
	}

	/* Should NEVER happen. Return bio error if it does. */
	if (WARN_ON(ret < 0))
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
1377 | |
1378 | return ret; |
1379 | } |
1380 | |
1381 | /* |
1382 | * Use a temporary buffer to decompress the page, as the decompressor |
1383 | * always expects a full page for the output. |
1384 | */ |
1385 | static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec, |
1386 | u32 index, int offset) |
1387 | { |
1388 | struct page *page = alloc_page(GFP_NOIO); |
1389 | int ret; |
1390 | |
1391 | if (!page) |
1392 | return -ENOMEM; |
1393 | ret = zram_read_page(zram, page, index, NULL); |
1394 | if (likely(!ret)) |
1395 | memcpy_to_bvec(bvec, page_address(page) + offset); |
1396 | __free_page(page); |
1397 | return ret; |
1398 | } |
1399 | |
1400 | static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, |
1401 | u32 index, int offset, struct bio *bio) |
1402 | { |
1403 | if (is_partial_io(bvec)) |
1404 | return zram_bvec_read_partial(zram, bvec, index, offset); |
	return zram_read_page(zram, bvec->bv_page, index, bio);
1406 | } |
1407 | |
1408 | static int zram_write_page(struct zram *zram, struct page *page, u32 index) |
1409 | { |
1410 | int ret = 0; |
1411 | unsigned long alloced_pages; |
1412 | unsigned long handle = -ENOMEM; |
1413 | unsigned int comp_len = 0; |
1414 | void *src, *dst, *mem; |
1415 | struct zcomp_strm *zstrm; |
1416 | unsigned long element = 0; |
1417 | enum zram_pageflags flags = 0; |
1418 | |
1419 | mem = kmap_local_page(page); |
	if (page_same_filled(mem, &element)) {
		kunmap_local(mem);
		/* Free memory associated with this sector now. */
		flags = ZRAM_SAME;
		atomic64_inc(&zram->stats.same_pages);
1425 | goto out; |
1426 | } |
1427 | kunmap_local(mem); |
1428 | |
1429 | compress_again: |
	zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
	src = kmap_local_page(page);
	ret = zcomp_compress(zstrm, src, &comp_len);
1433 | kunmap_local(src); |
1434 | |
1435 | if (unlikely(ret)) { |
		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
		pr_err("Compression failed! err=%d\n", ret);
		zs_free(zram->mem_pool, handle);
1439 | return ret; |
1440 | } |
1441 | |
1442 | if (comp_len >= huge_class_size) |
1443 | comp_len = PAGE_SIZE; |
1444 | /* |
1445 | * handle allocation has 2 paths: |
1446 | * a) fast path is executed with preemption disabled (for |
1447 | * per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear, |
1448 | * since we can't sleep; |
1449 | * b) slow path enables preemption and attempts to allocate |
1450 | * the page with __GFP_DIRECT_RECLAIM bit set. we have to |
1451 | * put per-cpu compression stream and, thus, to re-do |
1452 | * the compression once handle is allocated. |
1453 | * |
1454 | * if we have a 'non-null' handle here then we are coming |
1455 | * from the slow path and handle has already been allocated. |
1456 | */ |
	if (IS_ERR_VALUE(handle))
		handle = zs_malloc(zram->mem_pool, comp_len,
				   __GFP_KSWAPD_RECLAIM |
				   __GFP_NOWARN |
				   __GFP_HIGHMEM |
				   __GFP_MOVABLE);
	if (IS_ERR_VALUE(handle)) {
		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				   GFP_NOIO | __GFP_HIGHMEM |
				   __GFP_MOVABLE);
		if (IS_ERR_VALUE(handle))
			return PTR_ERR((void *)handle);
1471 | |
1472 | if (comp_len != PAGE_SIZE) |
1473 | goto compress_again; |
1474 | /* |
1475 | * If the page is not compressible, you need to acquire the |
1476 | * lock and execute the code below. The zcomp_stream_get() |
1477 | * call is needed to disable the cpu hotplug and grab the |
1478 | * zstrm buffer back. It is necessary that the dereferencing |
1479 | * of the zstrm variable below occurs correctly. |
1480 | */ |
1481 | zstrm = zcomp_stream_get(comp: zram->comps[ZRAM_PRIMARY_COMP]); |
1482 | } |
1483 | |
	alloced_pages = zs_get_total_pages(zram->mem_pool);
	update_used_max(zram, alloced_pages);

	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
		zs_free(zram->mem_pool, handle);
		return -ENOMEM;
	}

	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
1494 | |
1495 | src = zstrm->buffer; |
1496 | if (comp_len == PAGE_SIZE) |
1497 | src = kmap_local_page(page); |
1498 | memcpy(dst, src, comp_len); |
1499 | if (comp_len == PAGE_SIZE) |
1500 | kunmap_local(src); |
1501 | |
	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
	zs_unmap_object(zram->mem_pool, handle);
	atomic64_add(comp_len, &zram->stats.compr_data_size);
1505 | out: |
1506 | /* |
1507 | * Free memory associated with this sector |
1508 | * before overwriting unused sectors. |
1509 | */ |
1510 | zram_slot_lock(zram, index); |
1511 | zram_free_page(zram, index); |
1512 | |
	if (comp_len == PAGE_SIZE) {
		zram_set_flag(zram, index, ZRAM_HUGE);
		atomic64_inc(&zram->stats.huge_pages);
		atomic64_inc(&zram->stats.huge_pages_since);
	}

	if (flags) {
		zram_set_flag(zram, index, flags);
		zram_set_element(zram, index, element);
	} else {
		zram_set_handle(zram, index, handle);
		zram_set_obj_size(zram, index, comp_len);
	}
	zram_slot_unlock(zram, index);

	/* Update stats */
	atomic64_inc(&zram->stats.pages_stored);
1530 | return ret; |
1531 | } |
1532 | |
1533 | /* |
1534 | * This is a partial IO. Read the full page before writing the changes. |
1535 | */ |
1536 | static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec, |
1537 | u32 index, int offset, struct bio *bio) |
1538 | { |
1539 | struct page *page = alloc_page(GFP_NOIO); |
1540 | int ret; |
1541 | |
1542 | if (!page) |
1543 | return -ENOMEM; |
1544 | |
	ret = zram_read_page(zram, page, index, bio);
1546 | if (!ret) { |
1547 | memcpy_from_bvec(page_address(page) + offset, bvec); |
1548 | ret = zram_write_page(zram, page, index); |
1549 | } |
1550 | __free_page(page); |
1551 | return ret; |
1552 | } |
1553 | |
1554 | static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, |
1555 | u32 index, int offset, struct bio *bio) |
1556 | { |
1557 | if (is_partial_io(bvec)) |
1558 | return zram_bvec_write_partial(zram, bvec, index, offset, bio); |
	return zram_write_page(zram, bvec->bv_page, index);
1560 | } |
1561 | |
1562 | #ifdef CONFIG_ZRAM_MULTI_COMP |
1563 | /* |
 * This function will decompress (unless it's ZRAM_HUGE) the page and then
 * attempt to compress it using the provided compression algorithm priority
 * (which is potentially more effective).
1567 | * |
1568 | * Corresponding ZRAM slot should be locked. |
1569 | */ |
1570 | static int zram_recompress(struct zram *zram, u32 index, struct page *page, |
1571 | u32 threshold, u32 prio, u32 prio_max) |
1572 | { |
1573 | struct zcomp_strm *zstrm = NULL; |
1574 | unsigned long handle_old; |
1575 | unsigned long handle_new; |
1576 | unsigned int comp_len_old; |
1577 | unsigned int comp_len_new; |
1578 | unsigned int class_index_old; |
1579 | unsigned int class_index_new; |
1580 | u32 num_recomps = 0; |
1581 | void *src, *dst; |
1582 | int ret; |
1583 | |
1584 | handle_old = zram_get_handle(zram, index); |
1585 | if (!handle_old) |
1586 | return -EINVAL; |
1587 | |
1588 | comp_len_old = zram_get_obj_size(zram, index); |
1589 | /* |
1590 | * Do not recompress objects that are already "small enough". |
1591 | */ |
1592 | if (comp_len_old < threshold) |
1593 | return 0; |
1594 | |
1595 | ret = zram_read_from_zspool(zram, page, index); |
1596 | if (ret) |
1597 | return ret; |
1598 | |
	class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
1600 | /* |
1601 | * Iterate the secondary comp algorithms list (in order of priority) |
1602 | * and try to recompress the page. |
1603 | */ |
1604 | for (; prio < prio_max; prio++) { |
1605 | if (!zram->comps[prio]) |
1606 | continue; |
1607 | |
1608 | /* |
1609 | * Skip if the object is already re-compressed with a higher |
1610 | * priority algorithm (or same algorithm). |
1611 | */ |
1612 | if (prio <= zram_get_priority(zram, index)) |
1613 | continue; |
1614 | |
1615 | num_recomps++; |
		zstrm = zcomp_stream_get(zram->comps[prio]);
		src = kmap_local_page(page);
		ret = zcomp_compress(zstrm, src, &comp_len_new);
		kunmap_local(src);

		if (ret) {
			zcomp_stream_put(zram->comps[prio]);
			return ret;
		}

		class_index_new = zs_lookup_class_index(zram->mem_pool,
							comp_len_new);
1628 | |
1629 | /* Continue until we make progress */ |
1630 | if (class_index_new >= class_index_old || |
1631 | (threshold && comp_len_new >= threshold)) { |
			zcomp_stream_put(zram->comps[prio]);
1633 | continue; |
1634 | } |
1635 | |
1636 | /* Recompression was successful so break out */ |
1637 | break; |
1638 | } |
1639 | |
1640 | /* |
1641 | * We did not try to recompress, e.g. when we have only one |
1642 | * secondary algorithm and the page is already recompressed |
1643 | * using that algorithm |
1644 | */ |
1645 | if (!zstrm) |
1646 | return 0; |
1647 | |
1648 | if (class_index_new >= class_index_old) { |
1649 | /* |
1650 | * Secondary algorithms failed to re-compress the page |
1651 | * in a way that would save memory, mark the object as |
1652 | * incompressible so that we will not try to compress |
1653 | * it again. |
1654 | * |
1655 | * We need to make sure that all secondary algorithms have |
1656 | * failed, so we test if the number of recompressions matches |
1657 | * the number of active secondary algorithms. |
1658 | */ |
1659 | if (num_recomps == zram->num_active_comps - 1) |
			zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1661 | return 0; |
1662 | } |
1663 | |
1664 | /* Successful recompression but above threshold */ |
1665 | if (threshold && comp_len_new >= threshold) |
1666 | return 0; |
1667 | |
1668 | /* |
1669 | * No direct reclaim (slow path) for handle allocation and no |
1670 | * re-compression attempt (unlike in zram_write_bvec()) since |
1671 | * we already have stored that object in zsmalloc. If we cannot |
1672 | * alloc memory for recompressed object then we bail out and |
1673 | * simply keep the old (existing) object in zsmalloc. |
1674 | */ |
	handle_new = zs_malloc(zram->mem_pool, comp_len_new,
			       __GFP_KSWAPD_RECLAIM |
			       __GFP_NOWARN |
			       __GFP_HIGHMEM |
			       __GFP_MOVABLE);
	if (IS_ERR_VALUE(handle_new)) {
		zcomp_stream_put(zram->comps[prio]);
		return PTR_ERR((void *)handle_new);
1683 | } |
1684 | |
	dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
	memcpy(dst, zstrm->buffer, comp_len_new);
	zcomp_stream_put(zram->comps[prio]);

	zs_unmap_object(zram->mem_pool, handle_new);

	zram_free_page(zram, index);
	zram_set_handle(zram, index, handle_new);
	zram_set_obj_size(zram, index, comp_len_new);
	zram_set_priority(zram, index, prio);

	atomic64_add(comp_len_new, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
1698 | |
1699 | return 0; |
1700 | } |
1701 | |
1702 | #define RECOMPRESS_IDLE (1 << 0) |
1703 | #define RECOMPRESS_HUGE (1 << 1) |
1704 | |
1705 | static ssize_t recompress_store(struct device *dev, |
1706 | struct device_attribute *attr, |
1707 | const char *buf, size_t len) |
1708 | { |
1709 | u32 prio = ZRAM_SECONDARY_COMP, prio_max = ZRAM_MAX_COMPS; |
1710 | struct zram *zram = dev_to_zram(dev); |
1711 | unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; |
1712 | char *args, *param, *val, *algo = NULL; |
1713 | u32 mode = 0, threshold = 0; |
1714 | unsigned long index; |
1715 | struct page *page; |
1716 | ssize_t ret; |
1717 | |
1718 | args = skip_spaces(buf); |
1719 | while (*args) { |
		args = next_arg(args, &param, &val);

		if (!val || !*val)
			return -EINVAL;

		if (!strcmp(param, "type")) {
			if (!strcmp(val, "idle"))
				mode = RECOMPRESS_IDLE;
			if (!strcmp(val, "huge"))
				mode = RECOMPRESS_HUGE;
			if (!strcmp(val, "huge_idle"))
				mode = RECOMPRESS_IDLE | RECOMPRESS_HUGE;
			continue;
		}

		if (!strcmp(param, "threshold")) {
			/*
			 * We will re-compress only idle objects equal to or
			 * greater in size than the watermark.
			 */
			ret = kstrtouint(val, 10, &threshold);
			if (ret)
				return ret;
			continue;
		}

		if (!strcmp(param, "algo")) {
1747 | algo = val; |
1748 | continue; |
1749 | } |
1750 | } |
1751 | |
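	/*
	 * Pages that compress to huge_class_size or larger are stored
	 * uncompressed, so a threshold at or above that size could never
	 * match a compressed object; reject it up front.
	 */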
	if (threshold >= huge_class_size)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (!init_done(zram)) {
		ret = -EINVAL;
		goto release_init_lock;
	}

	if (algo) {
		bool found = false;

		for (; prio < ZRAM_MAX_COMPS; prio++) {
			if (!zram->comp_algs[prio])
				continue;

			if (!strcmp(zram->comp_algs[prio], algo)) {
				prio_max = min(prio + 1, ZRAM_MAX_COMPS);
				found = true;
				break;
			}
		}

		if (!found) {
			ret = -EINVAL;
			goto release_init_lock;
		}
	}

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto release_init_lock;
	}

	ret = len;
	for (index = 0; index < nr_pages; index++) {
		int err = 0;

		zram_slot_lock(zram, index);

		if (!zram_allocated(zram, index))
			goto next;

		if (mode & RECOMPRESS_IDLE &&
		    !zram_test_flag(zram, index, ZRAM_IDLE))
			goto next;

		if (mode & RECOMPRESS_HUGE &&
		    !zram_test_flag(zram, index, ZRAM_HUGE))
			goto next;

		if (zram_test_flag(zram, index, ZRAM_WB) ||
		    zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
		    zram_test_flag(zram, index, ZRAM_SAME) ||
		    zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
			goto next;

		err = zram_recompress(zram, index, page, threshold,
				      prio, prio_max);
next:
		zram_slot_unlock(zram, index);
		if (err) {
			ret = err;
			break;
		}

		cond_resched();
	}

	__free_page(page);

release_init_lock:
	up_read(&zram->init_lock);
	return ret;
}
#endif

static void zram_bio_discard(struct zram *zram, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	u32 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	u32 offset = (bio->bi_iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
			SECTOR_SHIFT;

	/*
	 * zram manages data in physical block size units. Because the logical
	 * block size isn't identical to the physical block size on some
	 * architectures, we could get a discard request pointing to a specific
	 * offset within a certain physical block. Although we could handle
	 * such a request by reading that physical block, decompressing it,
	 * partially zeroing it, and then re-compressing and re-storing it,
	 * doing so isn't reasonable because our intent with a discard request
	 * is to save memory. So skipping the partial logical blocks is
	 * appropriate here.
	 */
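	/*
	 * Worked example (assuming 4 KiB pages; the numbers are hypothetical):
	 * a discard of 8192 bytes starting 512 bytes into a page skips the
	 * remaining 3584 bytes of that page, frees the following page in full,
	 * and leaves the leading 512 bytes of the page after that untouched.
	 */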
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		zram_slot_lock(zram, index);
		zram_free_page(zram, index);
		zram_slot_unlock(zram, index);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}

	bio_endio(bio);
}

static void zram_bio_read(struct zram *zram, struct bio *bio)
{
	unsigned long start_time = bio_start_io_acct(bio);
	struct bvec_iter iter = bio->bi_iter;

	do {
		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
				SECTOR_SHIFT;
		struct bio_vec bv = bio_iter_iovec(bio, iter);

		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);

		if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
			atomic64_inc(&zram->stats.failed_reads);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}
		flush_dcache_page(bv.bv_page);

		zram_slot_lock(zram, index);
		zram_accessed(zram, index);
		zram_slot_unlock(zram, index);

		bio_advance_iter_single(bio, &iter, bv.bv_len);
	} while (iter.bi_size);

	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
}

static void zram_bio_write(struct zram *zram, struct bio *bio)
{
	unsigned long start_time = bio_start_io_acct(bio);
	struct bvec_iter iter = bio->bi_iter;

	do {
		u32 index = iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
		u32 offset = (iter.bi_sector & (SECTORS_PER_PAGE - 1)) <<
				SECTOR_SHIFT;
		struct bio_vec bv = bio_iter_iovec(bio, iter);

		bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);

		if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
			atomic64_inc(&zram->stats.failed_writes);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		zram_slot_lock(zram, index);
		zram_accessed(zram, index);
		zram_slot_unlock(zram, index);

		bio_advance_iter_single(bio, &iter, bv.bv_len);
	} while (iter.bi_size);

	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_submit_bio(struct bio *bio)
{
	struct zram *zram = bio->bi_bdev->bd_disk->private_data;

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		zram_bio_read(zram, bio);
		break;
	case REQ_OP_WRITE:
		zram_bio_write(zram, bio);
		break;
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		zram_bio_discard(zram, bio);
		break;
	default:
		WARN_ON_ONCE(1);
		bio_endio(bio);
	}
}

static void zram_slot_free_notify(struct block_device *bdev,
				  unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;

	atomic64_inc(&zram->stats.notify_free);
	if (!zram_slot_trylock(zram, index)) {
		atomic64_inc(&zram->stats.miss_free);
		return;
	}

	zram_free_page(zram, index);
	zram_slot_unlock(zram, index);
}

static void zram_destroy_comps(struct zram *zram)
{
	u32 prio;

	for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
		struct zcomp *comp = zram->comps[prio];

		zram->comps[prio] = NULL;
		if (!comp)
			continue;
		zcomp_destroy(comp);
		zram->num_active_comps--;
	}
}

static void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	set_capacity_and_notify(zram->disk, 0);
	part_stat_set_all(zram->disk->part0, 0);

	/* I/O operations on all CPUs are done, so it is safe to free */
	zram_meta_free(zram, zram->disksize);
	zram->disksize = 0;
	zram_destroy_comps(zram);
	memset(&zram->stats, 0, sizeof(zram->stats));
	reset_bdev(zram);

	comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
	up_write(&zram->init_lock);
}

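/*
 * Illustrative setup sequence (a sketch; the device name and size are
 * hypothetical, and disksize accepts memparse() suffixes such as K, M, G):
 *
 *	echo zstd > /sys/block/zram0/comp_algorithm
 *	echo 1G > /sys/block/zram0/disksize
 */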
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram *zram = dev_to_zram(dev);
	int err;
	u32 prio;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_unlock;
	}

	disksize = PAGE_ALIGN(disksize);
	if (!zram_meta_alloc(zram, disksize)) {
		err = -ENOMEM;
		goto out_unlock;
	}

	for (prio = 0; prio < ZRAM_MAX_COMPS; prio++) {
		if (!zram->comp_algs[prio])
			continue;

		comp = zcomp_create(zram->comp_algs[prio]);
		if (IS_ERR(comp)) {
			pr_err("Cannot initialise %s compressing backend\n",
			       zram->comp_algs[prio]);
			err = PTR_ERR(comp);
			goto out_free_comps;
		}

		zram->comps[prio] = comp;
		zram->num_active_comps++;
	}
	zram->disksize = disksize;
	set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	return len;

out_free_comps:
	zram_destroy_comps(zram);
	zram_meta_free(zram, disksize);
out_unlock:
	up_write(&zram->init_lock);
	return err;
}

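/*
 * Illustrative reset sequence (a sketch with a hypothetical device name);
 * writing a non-zero value frees all device memory and returns the device
 * to the uninitialized state:
 *
 *	swapoff /dev/zram0	(or umount, if used as a plain block device)
 *	echo 1 > /sys/block/zram0/reset
 */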
static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct gendisk *disk;

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		return ret;

	if (!do_reset)
		return -EINVAL;

	zram = dev_to_zram(dev);
	disk = zram->disk;

	mutex_lock(&disk->open_mutex);
	/* Do not reset an active device or claimed device */
	if (disk_openers(disk) || zram->claim) {
		mutex_unlock(&disk->open_mutex);
		return -EBUSY;
	}

	/* From now on, no one can open /dev/zram[0-9] */
	zram->claim = true;
	mutex_unlock(&disk->open_mutex);

	/* Make sure all the pending I/O are finished */
	sync_blockdev(disk->part0);
	zram_reset_device(zram);

	mutex_lock(&disk->open_mutex);
	zram->claim = false;
	mutex_unlock(&disk->open_mutex);

	return len;
}

static int zram_open(struct gendisk *disk, blk_mode_t mode)
{
	struct zram *zram = disk->private_data;

	WARN_ON(!mutex_is_locked(&disk->open_mutex));

	/* zram was claimed to reset so open request fails */
	if (zram->claim)
		return -EBUSY;
	return 0;
}

static const struct block_device_operations zram_devops = {
	.open = zram_open,
	.submit_bio = zram_submit_bio,
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_WO(idle);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
#ifdef CONFIG_ZRAM_WRITEBACK
static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
#endif
#ifdef CONFIG_ZRAM_MULTI_COMP
static DEVICE_ATTR_RW(recomp_algorithm);
static DEVICE_ATTR_WO(recompress);
#endif

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_compact.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_idle.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_backing_dev.attr,
	&dev_attr_writeback.attr,
	&dev_attr_writeback_limit.attr,
	&dev_attr_writeback_limit_enable.attr,
#endif
	&dev_attr_io_stat.attr,
	&dev_attr_mm_stat.attr,
#ifdef CONFIG_ZRAM_WRITEBACK
	&dev_attr_bd_stat.attr,
#endif
	&dev_attr_debug_stat.attr,
#ifdef CONFIG_ZRAM_MULTI_COMP
	&dev_attr_recomp_algorithm.attr,
	&dev_attr_recompress.attr,
#endif
	NULL,
};

ATTRIBUTE_GROUPS(zram_disk);

/*
 * Allocate and initialize a new zram device. The function returns
 * a device_id (>= 0) upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
	struct queue_limits lim = {
		.logical_block_size = ZRAM_LOGICAL_BLOCK_SIZE,
		/*
		 * To ensure that we always get PAGE_SIZE aligned and
		 * n*PAGE_SIZE sized I/O requests.
		 */
		.physical_block_size = PAGE_SIZE,
		.io_min = PAGE_SIZE,
		.io_opt = PAGE_SIZE,
		.max_hw_discard_sectors = UINT_MAX,
		/*
		 * zram_bio_discard() will clear all logical blocks if the
		 * logical block size is identical to the physical block size
		 * (PAGE_SIZE). But if it is different, we will skip discarding
		 * the parts of logical blocks in the part of the request range
		 * which isn't aligned to the physical block size. So we can't
		 * ensure that all discarded logical blocks are zeroed.
		 */
#if ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE
		.max_write_zeroes_sectors = UINT_MAX,
#endif
	};
	struct zram *zram;
	int ret, device_id;

	zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
	if (!zram)
		return -ENOMEM;

	ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto out_free_dev;
	device_id = ret;

	init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
	spin_lock_init(&zram->wb_limit_lock);
#endif

	/* gendisk structure */
	zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(zram->disk)) {
		pr_err("Error allocating disk structure for device %d\n",
		       device_id);
		ret = PTR_ERR(zram->disk);
		goto out_free_idr;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->minors = 1;
	zram->disk->flags |= GENHD_FL_NO_PART;
	zram->disk->fops = &zram_devops;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
	ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
	if (ret)
		goto out_cleanup_disk;

	comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);

	zram_debugfs_register(zram);
	pr_info("Added device: %s\n", zram->disk->disk_name);
	return device_id;

out_cleanup_disk:
	put_disk(zram->disk);
out_free_idr:
	idr_remove(&zram_index_idr, device_id);
out_free_dev:
	kfree(zram);
	return ret;
}

static int zram_remove(struct zram *zram)
{
	bool claimed;

	mutex_lock(&zram->disk->open_mutex);
	if (disk_openers(zram->disk)) {
		mutex_unlock(&zram->disk->open_mutex);
		return -EBUSY;
	}

	claimed = zram->claim;
	if (!claimed)
		zram->claim = true;
	mutex_unlock(&zram->disk->open_mutex);

	zram_debugfs_unregister(zram);

	if (claimed) {
		/*
		 * If we were claimed by reset_store(), del_gendisk() will
		 * wait until reset_store() is done, so there is nothing
		 * to do here.
		 */
		;
	} else {
		/* Make sure all the pending I/O are finished */
		sync_blockdev(zram->disk->part0);
		zram_reset_device(zram);
	}

	pr_info("Removed device: %s\n", zram->disk->disk_name);

	del_gendisk(zram->disk);

	/* del_gendisk drains pending reset_store */
	WARN_ON_ONCE(claimed && zram->claim);

	/*
	 * disksize_store() may be called in between zram_reset_device()
	 * and del_gendisk(), so run the last reset to avoid leaking
	 * anything allocated with disksize_store()
	 */
	zram_reset_device(zram);

	put_disk(zram->disk);
	kfree(zram);
	return 0;
}

/* zram-control sysfs attributes */

/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute, in the
 * sense that reading from this file does alter the state of your system -- it
 * creates a new un-initialized zram device and returns back this device's
 * device_id (or an error code if it fails to create a new device).
 */
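/*
 * Illustrative usage (a sketch; the returned id is hypothetical):
 *
 *	cat /sys/class/zram-control/hot_add
 *	2	<- a new, un-initialized /dev/zram2 now exists
 */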
static ssize_t hot_add_show(const struct class *class,
			    const struct class_attribute *attr,
			    char *buf)
{
	int ret;

	mutex_lock(&zram_index_mutex);
	ret = zram_add();
	mutex_unlock(&zram_index_mutex);

	if (ret < 0)
		return ret;
	return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
/* This attribute must be set to 0400, so CLASS_ATTR_RO() can not be used */
static struct class_attribute class_attr_hot_add =
	__ATTR(hot_add, 0400, hot_add_show, NULL);

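/*
 * Illustrative usage (a sketch; the device id is hypothetical, and the
 * device must not be in use or -EBUSY is returned):
 *
 *	echo 2 > /sys/class/zram-control/hot_remove
 */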
static ssize_t hot_remove_store(const struct class *class,
				const struct class_attribute *attr,
				const char *buf,
				size_t count)
{
	struct zram *zram;
	int ret, dev_id;

	/* dev_id is gendisk->first_minor, which is `int' */
	ret = kstrtoint(buf, 10, &dev_id);
	if (ret)
		return ret;
	if (dev_id < 0)
		return -EINVAL;

	mutex_lock(&zram_index_mutex);

	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}

	mutex_unlock(&zram_index_mutex);
	return ret ? ret : count;
}
static CLASS_ATTR_WO(hot_remove);

static struct attribute *zram_control_class_attrs[] = {
	&class_attr_hot_add.attr,
	&class_attr_hot_remove.attr,
	NULL,
};
ATTRIBUTE_GROUPS(zram_control_class);

static struct class zram_control_class = {
	.name		= "zram-control",
	.class_groups	= zram_control_class_groups,
};

static int zram_remove_cb(int id, void *ptr, void *data)
{
	WARN_ON_ONCE(zram_remove(ptr));
	return 0;
}

static void destroy_devices(void)
{
	class_unregister(&zram_control_class);
	idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
	zram_debugfs_destroy();
	idr_destroy(&zram_index_idr);
	unregister_blkdev(zram_major, "zram");
	cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
}

static int __init zram_init(void)
{
	int ret;

	BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG);

	ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
				      zcomp_cpu_up_prepare, zcomp_cpu_dead);
	if (ret < 0)
		return ret;

	ret = class_register(&zram_control_class);
	if (ret) {
		pr_err("Unable to register zram-control class\n");
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return ret;
	}

	zram_debugfs_create();
	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_err("Unable to get major number\n");
		class_unregister(&zram_control_class);
		cpuhp_remove_multi_state(CPUHP_ZCOMP_PREPARE);
		return -EBUSY;
	}

	while (num_devices != 0) {
		mutex_lock(&zram_index_mutex);
		ret = zram_add();
		mutex_unlock(&zram_index_mutex);
		if (ret < 0)
			goto out_error;
		num_devices--;
	}

	return 0;

out_error:
	destroy_devices();
	return ret;
}

static void __exit zram_exit(void)
{
	destroy_devices();
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
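
/*
 * Illustrative module load (a sketch; assumes zram is built as a module,
 * and the device count is hypothetical):
 *
 *	modprobe zram num_devices=4
 */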

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");