Lines Matching refs:zram

15 #define KMSG_COMPONENT "zram"
56 static void zram_free_page(struct zram *zram, size_t index);
57 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
60 static int zram_slot_trylock(struct zram *zram, u32 index)
62 return bit_spin_trylock(ZRAM_LOCK, &zram->table[index].flags);
65 static void zram_slot_lock(struct zram *zram, u32 index)
67 bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
70 static void zram_slot_unlock(struct zram *zram, u32 index)
72 bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
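
The three helpers above implement a per-slot lock by dedicating one bit (ZRAM_LOCK) of each table entry's flags word to a bit spinlock, so a device with millions of slots needs no separate lock array. A minimal userspace sketch of the same idea, using C11 atomics instead of the kernel's bit_spin_* API (names here are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define SLOT_LOCK_BIT 0                 /* stands in for ZRAM_LOCK */

    struct slot { _Atomic unsigned long flags; };

    static bool slot_trylock(struct slot *s)
    {
            unsigned long bit = 1UL << SLOT_LOCK_BIT;
            /* fetch_or sets the bit; if it was clear before, we own the lock */
            return !(atomic_fetch_or(&s->flags, bit) & bit);
    }

    static void slot_lock(struct slot *s)
    {
            while (!slot_trylock(s))
                    ;                       /* spin until the holder unlocks */
    }

    static void slot_unlock(struct slot *s)
    {
            atomic_fetch_and(&s->flags, ~(1UL << SLOT_LOCK_BIT));
    }

Because the lock lives in the same word as the flags, the flag and size updates that follow are only safe while this bit is held.
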
75 static inline bool init_done(struct zram *zram)
77 return zram->disksize;
80 static inline struct zram *dev_to_zram(struct device *dev)
82 return (struct zram *)dev_to_disk(dev)->private_data;
85 static unsigned long zram_get_handle(struct zram *zram, u32 index)
87 return zram->table[index].handle;
90 static void zram_set_handle(struct zram *zram, u32 index, unsigned long handle)
92 zram->table[index].handle = handle;
96 static bool zram_test_flag(struct zram *zram, u32 index,
99 return zram->table[index].flags & BIT(flag);
102 static void zram_set_flag(struct zram *zram, u32 index,
105 zram->table[index].flags |= BIT(flag);
108 static void zram_clear_flag(struct zram *zram, u32 index,
111 zram->table[index].flags &= ~BIT(flag);
114 static inline void zram_set_element(struct zram *zram, u32 index,
117 zram->table[index].element = element;
120 static unsigned long zram_get_element(struct zram *zram, u32 index)
122 return zram->table[index].element;
125 static size_t zram_get_obj_size(struct zram *zram, u32 index)
127 return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
130 static void zram_set_obj_size(struct zram *zram,
133 unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
135 zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
138 static inline bool zram_allocated(struct zram *zram, u32 index)
140 return zram_get_obj_size(zram, index) ||
141 zram_test_flag(zram, index, ZRAM_SAME) ||
142 zram_test_flag(zram, index, ZRAM_WB);
158 static inline void zram_set_priority(struct zram *zram, u32 index, u32 prio)
165 zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
167 zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
170 static inline u32 zram_get_priority(struct zram *zram, u32 index)
172 u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;
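
Lines 96-172 all manipulate a single unsigned long per slot: the low ZRAM_FLAG_SHIFT bits store the compressed object size (zram_get/set_obj_size), the bits above store boolean flags, and two of those flag positions together encode a small compression priority (zram_get/set_priority). A sketch of that packing with made-up widths and positions:

    #include <assert.h>
    #include <stdio.h>

    #define SIZE_BITS 24                        /* stands in for ZRAM_FLAG_SHIFT */
    #define SIZE_MASK ((1UL << SIZE_BITS) - 1)
    #define PRIO_BIT1 (SIZE_BITS + 4)           /* hypothetical ZRAM_COMP_PRIORITY_BIT1 */
    #define PRIO_MASK 0x3UL                     /* two bits: priorities 0..3 */

    static unsigned long set_obj_size(unsigned long w, unsigned long size)
    {
            assert(size <= SIZE_MASK);
            return (w & ~SIZE_MASK) | size;     /* keep flag bits, replace size */
    }

    static unsigned long set_priority(unsigned long w, unsigned long prio)
    {
            w &= ~(PRIO_MASK << PRIO_BIT1);     /* clear the old priority */
            return w | ((prio & PRIO_MASK) << PRIO_BIT1);
    }

    int main(void)
    {
            unsigned long w = set_priority(set_obj_size(0, 1100), 2);
            printf("size=%lu prio=%lu\n",
                   w & SIZE_MASK, (w >> PRIO_BIT1) & PRIO_MASK);
            return 0;                           /* prints: size=1100 prio=2 */
    }
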
177 static void zram_accessed(struct zram *zram, u32 index)
179 zram_clear_flag(zram, index, ZRAM_IDLE);
181 zram->table[index].ac_time = ktime_get_boottime();
185 static inline void update_used_max(struct zram *zram,
188 unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
193 } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
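
update_used_max() (lines 185-193) maintains a high-watermark without taking a lock: read the current max, then retry a compare-and-swap until either another CPU has already published a larger value or our store wins. The same loop with C11 atomics, as a sketch:

    #include <stdatomic.h>

    static void update_max(_Atomic long *max, long pages)
    {
            long cur = atomic_load(max);

            do {
                    if (pages <= cur)
                            return;     /* watermark is already >= pages */
                    /* on CAS failure, cur is reloaded with the current value */
            } while (!atomic_compare_exchange_weak(max, &cur, pages));
    }
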
230 struct zram *zram = dev_to_zram(dev);
232 down_read(&zram->init_lock);
233 val = init_done(zram);
234 up_read(&zram->init_lock);
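
Nearly every sysfs handler in this file brackets its work with down_read(&zram->init_lock)/up_read(): readers take the semaphore shared so that a concurrent disksize change or reset, which takes it exclusive with down_write(), cannot free zram->table or the memory pool underneath them. A userspace analog of the pattern, assuming a pthread rwlock:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t init_lock = PTHREAD_RWLOCK_INITIALIZER;
    static unsigned long long disksize;  /* nonzero once the device is set up */

    static bool show_initstate(void)
    {
            bool val;

            pthread_rwlock_rdlock(&init_lock);
            val = disksize != 0;         /* mirrors init_done() */
            pthread_rwlock_unlock(&init_lock);
            return val;
    }
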
242 struct zram *zram = dev_to_zram(dev);
244 return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
252 struct zram *zram = dev_to_zram(dev);
258 down_write(&zram->init_lock);
259 zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
260 up_write(&zram->init_lock);
270 struct zram *zram = dev_to_zram(dev);
276 down_read(&zram->init_lock);
277 if (init_done(zram)) {
278 atomic_long_set(&zram->stats.max_used_pages,
279 zs_get_total_pages(zram->mem_pool));
281 up_read(&zram->init_lock);
288 * Callers should hold the zram init lock in read mode
290 static void mark_idle(struct zram *zram, ktime_t cutoff)
293 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
301 zram_slot_lock(zram, index);
302 if (zram_allocated(zram, index) &&
303 !zram_test_flag(zram, index, ZRAM_UNDER_WB)) {
306 zram->table[index].ac_time);
309 zram_set_flag(zram, index, ZRAM_IDLE);
311 zram_slot_unlock(zram, index);
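
mark_idle() walks every slot and, under the slot lock, flags as ZRAM_IDLE any allocated entry that is not mid-writeback and has not been accessed since the cutoff; a zero cutoff marks everything. The age test alone, as an illustrative sketch where times are plain numbers:

    /* Idle if no cutoff was given, or the last access predates the cutoff. */
    static int slot_is_idle(long long ac_time, long long cutoff)
    {
            return !cutoff || ac_time < cutoff;
    }
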
318 struct zram *zram = dev_to_zram(dev);
336 down_read(&zram->init_lock);
337 if (!init_done(zram))
344 mark_idle(zram, cutoff_time);
348 up_read(&zram->init_lock);
357 struct zram *zram = dev_to_zram(dev);
364 down_read(&zram->init_lock);
365 spin_lock(&zram->wb_limit_lock);
366 zram->wb_limit_enable = val;
367 spin_unlock(&zram->wb_limit_lock);
368 up_read(&zram->init_lock);
378 struct zram *zram = dev_to_zram(dev);
380 down_read(&zram->init_lock);
381 spin_lock(&zram->wb_limit_lock);
382 val = zram->wb_limit_enable;
383 spin_unlock(&zram->wb_limit_lock);
384 up_read(&zram->init_lock);
392 struct zram *zram = dev_to_zram(dev);
399 down_read(&zram->init_lock);
400 spin_lock(&zram->wb_limit_lock);
401 zram->bd_wb_limit = val;
402 spin_unlock(&zram->wb_limit_lock);
403 up_read(&zram->init_lock);
413 struct zram *zram = dev_to_zram(dev);
415 down_read(&zram->init_lock);
416 spin_lock(&zram->wb_limit_lock);
417 val = zram->bd_wb_limit;
418 spin_unlock(&zram->wb_limit_lock);
419 up_read(&zram->init_lock);
424 static void reset_bdev(struct zram *zram)
426 if (!zram->backing_dev)
430 filp_close(zram->backing_dev, NULL);
431 zram->backing_dev = NULL;
432 zram->bdev = NULL;
433 zram->disk->fops = &zram_devops;
434 kvfree(zram->bitmap);
435 zram->bitmap = NULL;
442 struct zram *zram = dev_to_zram(dev);
446 down_read(&zram->init_lock);
447 file = zram->backing_dev;
450 up_read(&zram->init_lock);
464 up_read(&zram->init_lock);
478 struct zram *zram = dev_to_zram(dev);
484 down_write(&zram->init_lock);
485 if (init_done(zram)) {
520 reset_bdev(zram);
522 zram->bdev = I_BDEV(inode);
523 zram->backing_dev = backing_dev;
524 zram->bitmap = bitmap;
525 zram->nr_pages = nr_pages;
526 up_write(&zram->init_lock);
538 up_write(&zram->init_lock);
545 static unsigned long alloc_block_bdev(struct zram *zram)
549 /* skip bit 0 so a block index of 0 is never confused with zram.handle == 0 */
550 blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
551 if (blk_idx == zram->nr_pages)
554 if (test_and_set_bit(blk_idx, zram->bitmap))
557 atomic64_inc(&zram->stats.bd_count);
561 static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
565 was_set = test_and_clear_bit(blk_idx, zram->bitmap);
567 atomic64_dec(&zram->stats.bd_count);
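
alloc_block_bdev() (lines 545-557) hands out backing-device blocks from a bitmap: find a zero bit, then claim it with an atomic test-and-set, retrying if another CPU won the race; bit 0 is deliberately never handed out so a stored block index of 0 can't be mistaken for an empty handle. A compacted userspace sketch, where a linear scan stands in for find_next_zero_bit():

    #include <limits.h>
    #include <stdatomic.h>

    #define NR_BLOCKS 1024
    #define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

    static _Atomic unsigned long bitmap[NR_BLOCKS / BITS_PER_WORD];

    static int test_and_set(unsigned long idx)
    {
            unsigned long bit = 1UL << (idx % BITS_PER_WORD);
            return !!(atomic_fetch_or(&bitmap[idx / BITS_PER_WORD], bit) & bit);
    }

    static unsigned long alloc_block(void)
    {
            for (unsigned long idx = 1; idx < NR_BLOCKS; idx++) /* skip bit 0 */
                    if (!test_and_set(idx))
                            return idx;  /* claimed atomically */
            return 0;                    /* 0 doubles as "no block free" */
    }

    static void free_block(unsigned long idx)
    {
            unsigned long bit = 1UL << (idx % BITS_PER_WORD);
            atomic_fetch_and(&bitmap[idx / BITS_PER_WORD], ~bit);
    }
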
570 static void read_from_bdev_async(struct zram *zram, struct page *page,
575 bio = bio_alloc(zram->bdev, 1, parent->bi_opf, GFP_NOIO);
592 struct zram *zram = dev_to_zram(dev);
593 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
622 down_read(&zram->init_lock);
623 if (!init_done(zram)) {
628 if (!zram->backing_dev) {
640 spin_lock(&zram->wb_limit_lock);
641 if (zram->wb_limit_enable && !zram->bd_wb_limit) {
642 spin_unlock(&zram->wb_limit_lock);
646 spin_unlock(&zram->wb_limit_lock);
649 blk_idx = alloc_block_bdev(zram);
656 zram_slot_lock(zram, index);
657 if (!zram_allocated(zram, index))
660 if (zram_test_flag(zram, index, ZRAM_WB) ||
661 zram_test_flag(zram, index, ZRAM_SAME) ||
662 zram_test_flag(zram, index, ZRAM_UNDER_WB))
666 !zram_test_flag(zram, index, ZRAM_IDLE))
669 !zram_test_flag(zram, index, ZRAM_HUGE))
672 !zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
679 zram_set_flag(zram, index, ZRAM_UNDER_WB);
681 zram_set_flag(zram, index, ZRAM_IDLE);
682 zram_slot_unlock(zram, index);
683 if (zram_read_page(zram, page, index, NULL)) {
684 zram_slot_lock(zram, index);
685 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
686 zram_clear_flag(zram, index, ZRAM_IDLE);
687 zram_slot_unlock(zram, index);
691 bio_init(&bio, zram->bdev, &bio_vec, 1,
702 zram_slot_lock(zram, index);
703 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
704 zram_clear_flag(zram, index, ZRAM_IDLE);
705 zram_slot_unlock(zram, index);
718 atomic64_inc(&zram->stats.bd_writes);
728 zram_slot_lock(zram, index);
729 if (!zram_allocated(zram, index) ||
730 !zram_test_flag(zram, index, ZRAM_IDLE)) {
731 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
732 zram_clear_flag(zram, index, ZRAM_IDLE);
736 zram_free_page(zram, index);
737 zram_clear_flag(zram, index, ZRAM_UNDER_WB);
738 zram_set_flag(zram, index, ZRAM_WB);
739 zram_set_element(zram, index, blk_idx);
741 atomic64_inc(&zram->stats.pages_stored);
742 spin_lock(&zram->wb_limit_lock);
743 if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
744 zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
745 spin_unlock(&zram->wb_limit_lock);
747 zram_slot_unlock(zram, index);
751 free_block_bdev(zram, blk_idx);
754 up_read(&zram->init_lock);
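
The core trick of writeback_store is visible in lines 679-747: the slot is marked ZRAM_UNDER_WB and ZRAM_IDLE under the slot lock, the lock is dropped for the page read and the bio write, and after the I/O the slot is re-locked and the result committed only if the entry is still allocated and still idle; if it was rewritten in the meantime, the just-written disk copy is stale and the block is released instead. Note also line 744: bd_wb_limit is accounted in 4 KiB units, so each written-back page costs 1UL << (PAGE_SHIFT - 12) units. A tiny program makes the arithmetic concrete:

    #include <stdio.h>

    int main(void)
    {
            /* one page's cost against bd_wb_limit, in 4 KiB units */
            for (int page_shift = 12; page_shift <= 16; page_shift += 2)
                    printf("PAGE_SHIFT=%d -> %lu unit(s)\n",
                           page_shift, 1UL << (page_shift - 12));
            return 0;   /* prints: 12 -> 1, 14 -> 4, 16 -> 16 */
    }
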
761 struct zram *zram;
773 bio_init(&bio, zw->zram->bdev, &bv, 1, REQ_OP_READ);
784 static int read_from_bdev_sync(struct zram *zram, struct page *page,
790 work.zram = zram;
801 static int read_from_bdev(struct zram *zram, struct page *page,
804 atomic64_inc(&zram->stats.bd_reads);
808 return read_from_bdev_sync(zram, page, entry);
810 read_from_bdev_async(zram, page, entry, parent);
814 static inline void reset_bdev(struct zram *zram) {}
815 static int read_from_bdev(struct zram *zram, struct page *page,
821 static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {}
830 zram_debugfs_root = debugfs_create_dir("zram", NULL);
843 struct zram *zram = file->private_data;
844 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
851 down_read(&zram->init_lock);
852 if (!init_done(zram)) {
853 up_read(&zram->init_lock);
861 zram_slot_lock(zram, index);
862 if (!zram_allocated(zram, index))
865 ts = ktime_to_timespec64(zram->table[index].ac_time);
870 zram_test_flag(zram, index, ZRAM_SAME) ? 's' : '.',
871 zram_test_flag(zram, index, ZRAM_WB) ? 'w' : '.',
872 zram_test_flag(zram, index, ZRAM_HUGE) ? 'h' : '.',
873 zram_test_flag(zram, index, ZRAM_IDLE) ? 'i' : '.',
874 zram_get_priority(zram, index) ? 'r' : '.',
875 zram_test_flag(zram, index,
879 zram_slot_unlock(zram, index);
885 zram_slot_unlock(zram, index);
889 up_read(&zram->init_lock);
903 static void zram_debugfs_register(struct zram *zram)
908 zram->debugfs_dir = debugfs_create_dir(zram->disk->disk_name,
910 debugfs_create_file("block_state", 0400, zram->debugfs_dir,
911 zram, &proc_zram_block_state_op);
914 static void zram_debugfs_unregister(struct zram *zram)
916 debugfs_remove_recursive(zram->debugfs_dir);
921 static void zram_debugfs_register(struct zram *zram) {}
922 static void zram_debugfs_unregister(struct zram *zram) {}
946 static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
949 if (zram->comp_algs[prio] != default_compressor)
950 kfree(zram->comp_algs[prio]);
952 zram->comp_algs[prio] = alg;
955 static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
959 down_read(&zram->init_lock);
960 sz = zcomp_available_show(zram->comp_algs[prio], buf);
961 up_read(&zram->init_lock);
966 static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
988 down_write(&zram->init_lock);
989 if (init_done(zram)) {
990 up_write(&zram->init_lock);
996 comp_algorithm_set(zram, prio, compressor);
997 up_write(&zram->init_lock);
1005 struct zram *zram = dev_to_zram(dev);
1007 return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
1015 struct zram *zram = dev_to_zram(dev);
1018 ret = __comp_algorithm_store(zram, ZRAM_PRIMARY_COMP, buf);
1027 struct zram *zram = dev_to_zram(dev);
1032 if (!zram->comp_algs[prio])
1036 sz += __comp_algorithm_show(zram, prio, buf + sz);
1047 struct zram *zram = dev_to_zram(dev);
1079 ret = __comp_algorithm_store(zram, prio, alg);
1087 struct zram *zram = dev_to_zram(dev);
1089 down_read(&zram->init_lock);
1090 if (!init_done(zram)) {
1091 up_read(&zram->init_lock);
1095 zs_compact(zram->mem_pool);
1096 up_read(&zram->init_lock);
1104 struct zram *zram = dev_to_zram(dev);
1107 down_read(&zram->init_lock);
1110 (u64)atomic64_read(&zram->stats.failed_reads),
1111 (u64)atomic64_read(&zram->stats.failed_writes),
1112 (u64)atomic64_read(&zram->stats.notify_free));
1113 up_read(&zram->init_lock);
1121 struct zram *zram = dev_to_zram(dev);
1129 down_read(&zram->init_lock);
1130 if (init_done(zram)) {
1131 mem_used = zs_get_total_pages(zram->mem_pool);
1132 zs_pool_stats(zram->mem_pool, &pool_stats);
1135 orig_size = atomic64_read(&zram->stats.pages_stored);
1136 max_used = atomic_long_read(&zram->stats.max_used_pages);
1141 (u64)atomic64_read(&zram->stats.compr_data_size),
1143 zram->limit_pages << PAGE_SHIFT,
1145 (u64)atomic64_read(&zram->stats.same_pages),
1147 (u64)atomic64_read(&zram->stats.huge_pages),
1148 (u64)atomic64_read(&zram->stats.huge_pages_since));
1149 up_read(&zram->init_lock);
1159 struct zram *zram = dev_to_zram(dev);
1162 down_read(&zram->init_lock);
1165 FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
1166 FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
1167 FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
1168 up_read(&zram->init_lock);
1178 struct zram *zram = dev_to_zram(dev);
1181 down_read(&zram->init_lock);
1185 (u64)atomic64_read(&zram->stats.writestall),
1186 (u64)atomic64_read(&zram->stats.miss_free));
1187 up_read(&zram->init_lock);
1199 static void zram_meta_free(struct zram *zram, u64 disksize)
1204 /* Free all pages that are still in this zram device */
1206 zram_free_page(zram, index);
1208 zs_destroy_pool(zram->mem_pool);
1209 vfree(zram->table);
1212 static bool zram_meta_alloc(struct zram *zram, u64 disksize)
1217 zram->table = vzalloc(array_size(num_pages, sizeof(*zram->table)));
1218 if (!zram->table)
1221 zram->mem_pool = zs_create_pool(zram->disk->disk_name);
1222 if (!zram->mem_pool) {
1223 vfree(zram->table);
1228 huge_class_size = zs_huge_class_size(zram->mem_pool);
1237 static void zram_free_page(struct zram *zram, size_t index)
1242 zram->table[index].ac_time = 0;
1244 if (zram_test_flag(zram, index, ZRAM_IDLE))
1245 zram_clear_flag(zram, index, ZRAM_IDLE);
1247 if (zram_test_flag(zram, index, ZRAM_HUGE)) {
1248 zram_clear_flag(zram, index, ZRAM_HUGE);
1249 atomic64_dec(&zram->stats.huge_pages);
1252 if (zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1253 zram_clear_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1255 zram_set_priority(zram, index, 0);
1257 if (zram_test_flag(zram, index, ZRAM_WB)) {
1258 zram_clear_flag(zram, index, ZRAM_WB);
1259 free_block_bdev(zram, zram_get_element(zram, index));
1267 if (zram_test_flag(zram, index, ZRAM_SAME)) {
1268 zram_clear_flag(zram, index, ZRAM_SAME);
1269 atomic64_dec(&zram->stats.same_pages);
1273 handle = zram_get_handle(zram, index);
1277 zs_free(zram->mem_pool, handle);
1279 atomic64_sub(zram_get_obj_size(zram, index),
1280 &zram->stats.compr_data_size);
1282 atomic64_dec(&zram->stats.pages_stored);
1283 zram_set_handle(zram, index, 0);
1284 zram_set_obj_size(zram, index, 0);
1285 WARN_ON_ONCE(zram->table[index].flags &
1293 static int zram_read_from_zspool(struct zram *zram, struct page *page,
1303 handle = zram_get_handle(zram, index);
1304 if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
1308 value = handle ? zram_get_element(zram, index) : 0;
1315 size = zram_get_obj_size(zram, index);
1318 prio = zram_get_priority(zram, index);
1319 zstrm = zcomp_stream_get(zram->comps[prio]);
1322 src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
1332 zcomp_stream_put(zram->comps[prio]);
1334 zs_unmap_object(zram->mem_pool, handle);
1338 static int zram_read_page(struct zram *zram, struct page *page, u32 index,
1343 zram_slot_lock(zram, index);
1344 if (!zram_test_flag(zram, index, ZRAM_WB)) {
1346 ret = zram_read_from_zspool(zram, page, index);
1347 zram_slot_unlock(zram, index);
1353 zram_slot_unlock(zram, index);
1355 ret = read_from_bdev(zram, page, zram_get_element(zram, index),
1370 static int zram_bvec_read_partial(struct zram *zram, struct bio_vec *bvec,
1378 ret = zram_read_page(zram, page, index, NULL);
1385 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
1389 return zram_bvec_read_partial(zram, bvec, index, offset);
1390 return zram_read_page(zram, bvec->bv_page, index, bio);
1393 static int zram_write_page(struct zram *zram, struct page *page, u32 index)
1409 atomic64_inc(&zram->stats.same_pages);
1415 zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
1421 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1423 zs_free(zram->mem_pool, handle);
1443 handle = zs_malloc(zram->mem_pool, comp_len,
1449 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1450 atomic64_inc(&zram->stats.writestall);
1451 handle = zs_malloc(zram->mem_pool, comp_len,
1466 zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
1469 alloced_pages = zs_get_total_pages(zram->mem_pool);
1470 update_used_max(zram, alloced_pages);
1472 if (zram->limit_pages && alloced_pages > zram->limit_pages) {
1473 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1474 zs_free(zram->mem_pool, handle);
1478 dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
1487 zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
1488 zs_unmap_object(zram->mem_pool, handle);
1489 atomic64_add(comp_len, &zram->stats.compr_data_size);
1495 zram_slot_lock(zram, index);
1496 zram_free_page(zram, index);
1499 zram_set_flag(zram, index, ZRAM_HUGE);
1500 atomic64_inc(&zram->stats.huge_pages);
1501 atomic64_inc(&zram->stats.huge_pages_since);
1505 zram_set_flag(zram, index, flags);
1506 zram_set_element(zram, index, element);
1508 zram_set_handle(zram, index, handle);
1509 zram_set_obj_size(zram, index, comp_len);
1511 zram_slot_unlock(zram, index);
1514 atomic64_inc(&zram->stats.pages_stored);
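
Before compressing anything, zram_write_page() checks for a "same-filled" page (see the same_pages counter at line 1409): a page that is one machine word repeated is stored as just that word, with ZRAM_SAME set and no zsmalloc object at all, which is how all-zero swap pages become nearly free. A hedged sketch of such a check; the kernel has its own helper for this, and the details here are illustrative:

    #include <stdbool.h>

    #define MY_PAGE_SIZE 4096

    static bool page_same_filled(const void *mem, unsigned long *element)
    {
            const unsigned long *word = mem;
            unsigned long n = MY_PAGE_SIZE / sizeof(*word);

            for (unsigned long i = 1; i < n; i++)
                    if (word[i] != word[0])
                            return false;
            *element = word[0];          /* 0 for the common all-zero page */
            return true;
    }
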
1521 static int zram_bvec_write_partial(struct zram *zram, struct bio_vec *bvec,
1530 ret = zram_read_page(zram, page, index, bio);
1533 ret = zram_write_page(zram, page, index);
1539 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
1543 return zram_bvec_write_partial(zram, bvec, index, offset, bio);
1544 return zram_write_page(zram, bvec->bv_page, index);
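
Partial writes (lines 1521-1533) cannot go straight to the pool because the device's storage unit is a whole compressed page, so zram does a read-modify-write: decompress the old page, patch the affected bytes, then compress and store the whole page again. A self-contained sketch against a plain in-memory page array standing in for the pool:

    #include <string.h>

    #define MY_PAGE_SIZE 4096
    #define NR_PAGES 64

    static char store[NR_PAGES][MY_PAGE_SIZE];      /* stands in for the pool */

    static void write_partial(unsigned int index, const void *src,
                              size_t offset, size_t len)
    {
            char page[MY_PAGE_SIZE];

            memcpy(page, store[index], sizeof(page));   /* read   */
            memcpy(page + offset, src, len);            /* modify */
            memcpy(store[index], page, sizeof(page));   /* write  */
    }
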
1555 static int zram_recompress(struct zram *zram, u32 index, struct page *page,
1570 handle_old = zram_get_handle(zram, index);
1574 comp_len_old = zram_get_obj_size(zram, index);
1581 ret = zram_read_from_zspool(zram, page, index);
1585 class_index_old = zs_lookup_class_index(zram->mem_pool, comp_len_old);
1591 if (!zram->comps[prio])
1598 if (prio <= zram_get_priority(zram, index))
1602 zstrm = zcomp_stream_get(zram->comps[prio]);
1608 zcomp_stream_put(zram->comps[prio]);
1612 class_index_new = zs_lookup_class_index(zram->mem_pool,
1618 zcomp_stream_put(zram->comps[prio]);
1654 if (num_recomps == zram->num_active_comps - 1)
1655 zram_set_flag(zram, index, ZRAM_INCOMPRESSIBLE);
1670 handle_new = zs_malloc(zram->mem_pool, comp_len_new,
1676 zcomp_stream_put(zram->comps[prio]);
1680 dst = zs_map_object(zram->mem_pool, handle_new, ZS_MM_WO);
1682 zcomp_stream_put(zram->comps[prio]);
1684 zs_unmap_object(zram->mem_pool, handle_new);
1686 zram_free_page(zram, index);
1687 zram_set_handle(zram, index, handle_new);
1688 zram_set_obj_size(zram, index, comp_len_new);
1689 zram_set_priority(zram, index, prio);
1691 atomic64_add(comp_len_new, &zram->stats.compr_data_size);
1692 atomic64_inc(&zram->stats.pages_stored);
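
zram_recompress() compares zsmalloc class indexes (lines 1585 and 1612) rather than raw byte counts because zsmalloc rounds every allocation up to a size class: a recompression that shaves a few bytes but stays in the same class frees no memory, so it is rejected. Illustrated with hypothetical classes spaced 64 bytes apart:

    /* zsmalloc-style rounding with hypothetical 64-byte class spacing */
    static unsigned int class_index(unsigned long bytes)
    {
            return (bytes + 63) / 64;
    }
    /* class_index(1100) == 18 and class_index(1090) == 18: same class, so
     * that recompression gained nothing.  class_index(1024) == 16: a
     * smaller class, so the smaller object is worth keeping. */
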
1705 struct zram *zram = dev_to_zram(dev);
1706 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
1762 down_read(&zram->init_lock);
1763 if (!init_done(zram)) {
1772 if (!zram->comp_algs[prio])
1775 if (!strcmp(zram->comp_algs[prio], algo)) {
1801 zram_slot_lock(zram, index);
1803 if (!zram_allocated(zram, index))
1807 !zram_test_flag(zram, index, ZRAM_IDLE))
1811 !zram_test_flag(zram, index, ZRAM_HUGE))
1814 if (zram_test_flag(zram, index, ZRAM_WB) ||
1815 zram_test_flag(zram, index, ZRAM_UNDER_WB) ||
1816 zram_test_flag(zram, index, ZRAM_SAME) ||
1817 zram_test_flag(zram, index, ZRAM_INCOMPRESSIBLE))
1820 err = zram_recompress(zram, index, page, &num_recomp_pages,
1823 zram_slot_unlock(zram, index);
1835 up_read(&zram->init_lock);
1840 static void zram_bio_discard(struct zram *zram, struct bio *bio)
1848 * zram manages data in physical block size units. Because logical block
1866 zram_slot_lock(zram, index);
1867 zram_free_page(zram, index);
1868 zram_slot_unlock(zram, index);
1869 atomic64_inc(&zram->stats.notify_free);
1877 static void zram_bio_read(struct zram *zram, struct bio *bio)
1890 if (zram_bvec_read(zram, &bv, index, offset, bio) < 0) {
1891 atomic64_inc(&zram->stats.failed_reads);
1897 zram_slot_lock(zram, index);
1898 zram_accessed(zram, index);
1899 zram_slot_unlock(zram, index);
1908 static void zram_bio_write(struct zram *zram, struct bio *bio)
1921 if (zram_bvec_write(zram, &bv, index, offset, bio) < 0) {
1922 atomic64_inc(&zram->stats.failed_writes);
1927 zram_slot_lock(zram, index);
1928 zram_accessed(zram, index);
1929 zram_slot_unlock(zram, index);
1939 * Handler function for all zram I/O requests.
1943 struct zram *zram = bio->bi_bdev->bd_disk->private_data;
1947 zram_bio_read(zram, bio);
1950 zram_bio_write(zram, bio);
1954 zram_bio_discard(zram, bio);
1965 struct zram *zram;
1967 zram = bdev->bd_disk->private_data;
1969 atomic64_inc(&zram->stats.notify_free);
1970 if (!zram_slot_trylock(zram, index)) {
1971 atomic64_inc(&zram->stats.miss_free);
1975 zram_free_page(zram, index);
1976 zram_slot_unlock(zram, index);
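
zram_slot_free_notify() (lines 1965-1976) runs in the swap free path, where it must not sleep or spin on the slot lock, so it only trylocks: on contention it bumps miss_free and returns, leaving the slot to be reclaimed later when it is overwritten or freed again. Reusing struct slot and slot_trylock() from the slot-lock sketch near the top:

    #include <stdatomic.h>

    static void slot_free_notify(struct slot *s, _Atomic long *miss_free)
    {
            if (!slot_trylock(s)) {      /* must not block in this context */
                    atomic_fetch_add(miss_free, 1);
                    return;
            }
            /* ... release the slot's storage, as zram_free_page() does ... */
            slot_unlock(s);
    }
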
1979 static void zram_destroy_comps(struct zram *zram)
1984 struct zcomp *comp = zram->comps[prio];
1986 zram->comps[prio] = NULL;
1990 zram->num_active_comps--;
1994 static void zram_reset_device(struct zram *zram)
1996 down_write(&zram->init_lock);
1998 zram->limit_pages = 0;
2000 if (!init_done(zram)) {
2001 up_write(&zram->init_lock);
2005 set_capacity_and_notify(zram->disk, 0);
2006 part_stat_set_all(zram->disk->part0, 0);
2009 zram_meta_free(zram, zram->disksize);
2010 zram->disksize = 0;
2011 zram_destroy_comps(zram);
2012 memset(&zram->stats, 0, sizeof(zram->stats));
2013 reset_bdev(zram);
2015 comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
2016 up_write(&zram->init_lock);
2024 struct zram *zram = dev_to_zram(dev);
2032 down_write(&zram->init_lock);
2033 if (init_done(zram)) {
2040 if (!zram_meta_alloc(zram, disksize)) {
2046 if (!zram->comp_algs[prio])
2049 comp = zcomp_create(zram->comp_algs[prio]);
2052 zram->comp_algs[prio]);
2057 zram->comps[prio] = comp;
2058 zram->num_active_comps++;
2060 zram->disksize = disksize;
2061 set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
2062 up_write(&zram->init_lock);
2067 zram_destroy_comps(zram);
2068 zram_meta_free(zram, disksize);
2070 up_write(&zram->init_lock);
2079 struct zram *zram;
2089 zram = dev_to_zram(dev);
2090 disk = zram->disk;
2094 if (disk_openers(disk) || zram->claim) {
2099 /* From now on, no one can open /dev/zram[0-9] */
2100 zram->claim = true;
2105 zram_reset_device(zram);
2108 zram->claim = false;
2116 struct zram *zram = disk->private_data;
2120 /* zram was claimed for reset, so the open request fails */
2121 if (zram->claim)
2185 * Allocate and initialize a new zram device. The function returns
2212 struct zram *zram;
2215 zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
2216 if (!zram)
2219 ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
2224 init_rwsem(&zram->init_lock);
2226 spin_lock_init(&zram->wb_limit_lock);
2230 zram->disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
2231 if (IS_ERR(zram->disk)) {
2234 ret = PTR_ERR(zram->disk);
2238 zram->disk->major = zram_major;
2239 zram->disk->first_minor = device_id;
2240 zram->disk->minors = 1;
2241 zram->disk->flags |= GENHD_FL_NO_PART;
2242 zram->disk->fops = &zram_devops;
2243 zram->disk->private_data = zram;
2244 snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
2246 /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
2247 set_capacity(zram->disk, 0);
2248 /* zram devices resemble non-rotational disks */
2249 blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
2250 blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
2251 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
2252 ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
2256 comp_algorithm_set(zram, ZRAM_PRIMARY_COMP, default_compressor);
2258 zram_debugfs_register(zram);
2259 pr_info("Added device: %s\n", zram->disk->disk_name);
2263 put_disk(zram->disk);
2267 kfree(zram);
2271 static int zram_remove(struct zram *zram)
2275 mutex_lock(&zram->disk->open_mutex);
2276 if (disk_openers(zram->disk)) {
2277 mutex_unlock(&zram->disk->open_mutex);
2281 claimed = zram->claim;
2283 zram->claim = true;
2284 mutex_unlock(&zram->disk->open_mutex);
2286 zram_debugfs_unregister(zram);
2296 sync_blockdev(zram->disk->part0);
2297 zram_reset_device(zram);
2300 pr_info("Removed device: %s\n", zram->disk->disk_name);
2302 del_gendisk(zram->disk);
2305 WARN_ON_ONCE(claimed && zram->claim);
2312 zram_reset_device(zram);
2314 put_disk(zram->disk);
2315 kfree(zram);
2319 /* zram-control sysfs attributes */
2324 * creates a new uninitialized zram device and returns this device's
2350 struct zram *zram;
2362 zram = idr_find(&zram_index_idr, dev_id);
2363 if (zram) {
2364 ret = zram_remove(zram);
2384 .name = "zram-control",
2400 unregister_blkdev(zram_major, "zram");
2410 ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
2417 pr_err("Unable to register zram-control class\n");
2423 zram_major = register_blkdev(0, "zram");
2456 MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");