Lines matching refs: zram (cross-reference listing from the zram block driver, zram_drv.c)
59 struct zram *zram = dev_to_zram(d); \
63 (u64)atomic64_read(&zram->stats.name)); \
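The two backslash-continued fragments above (source lines 59 and 63) come from the driver's stats-attribute macro: each statistic gets a generated sysfs show() that prints one 64-bit counter from zram->stats. A minimal sketch of how that macro plausibly expands around the two matched lines; the macro name ZRAM_ATTR_RO matches this era of the driver, but the exact body is a reconstruction:

/* Assumed reconstruction of the stats attribute macro. */
#define ZRAM_ATTR_RO(name)						\
static ssize_t name##_show(struct device *d,				\
			struct device_attribute *attr, char *b)		\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static DEVICE_ATTR_RO(name);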
67 static inline bool init_done(struct zram *zram) in init_done() argument
69 return zram->disksize; in init_done()
72 static inline struct zram *dev_to_zram(struct device *dev) in dev_to_zram()
74 return (struct zram *)dev_to_disk(dev)->private_data; in dev_to_zram()
117 static inline bool valid_io_request(struct zram *zram, in valid_io_request() argument
129 bound = zram->disksize >> SECTOR_SHIFT; in valid_io_request()
145 static inline void update_used_max(struct zram *zram, in update_used_max() argument
150 old_max = atomic_long_read(&zram->stats.max_used_pages); in update_used_max()
156 &zram->stats.max_used_pages, cur_max, pages); in update_used_max()
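The two matched lines in update_used_max() belong to a lock-free maximum tracker: the current watermark is read once, then atomic_long_cmpxchg() retries until either the new value is installed or a concurrent writer has already raised the watermark past it. A sketch of the full loop, with the retry logic reconstructed around the matched lines:

/* Sketch: monotonically raise stats.max_used_pages to 'pages'
 * without a lock, retrying on concurrent updates.
 */
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);
	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages,
				cur_max, pages);
	} while (old_max != cur_max);
}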
194 struct zram *zram = dev_to_zram(dev); in initstate_show() local
196 down_read(&zram->init_lock); in initstate_show()
197 val = init_done(zram); in initstate_show()
198 up_read(&zram->init_lock); in initstate_show()
206 struct zram *zram = dev_to_zram(dev); in disksize_show() local
208 return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize); in disksize_show()
214 struct zram *zram = dev_to_zram(dev); in orig_data_size_show() local
218 (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT); in orig_data_size_show()
225 struct zram *zram = dev_to_zram(dev); in mem_used_total_show() local
228 down_read(&zram->init_lock); in mem_used_total_show()
229 if (init_done(zram)) { in mem_used_total_show()
230 struct zram_meta *meta = zram->meta; in mem_used_total_show()
233 up_read(&zram->init_lock); in mem_used_total_show()
242 struct zram *zram = dev_to_zram(dev); in mem_limit_show() local
245 down_read(&zram->init_lock); in mem_limit_show()
246 val = zram->limit_pages; in mem_limit_show()
247 up_read(&zram->init_lock); in mem_limit_show()
257 struct zram *zram = dev_to_zram(dev); in mem_limit_store() local
263 down_write(&zram->init_lock); in mem_limit_store()
264 zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT; in mem_limit_store()
265 up_write(&zram->init_lock); in mem_limit_store()
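mem_limit_store() accepts a human-readable size (memparse() understands suffixes such as K, M, and G), rounds it up to whole pages, and publishes it under the writer side of init_lock. A sketch with the parsing step reconstructed around the matched lines:

/* Sketch: parse a size string ("256M", "1G", ...) and store it as a
 * page count; a limit of 0 disables enforcement.
 */
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp)		/* no digits were consumed */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}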
274 struct zram *zram = dev_to_zram(dev); in mem_used_max_show() local
277 down_read(&zram->init_lock); in mem_used_max_show()
278 if (init_done(zram)) in mem_used_max_show()
279 val = atomic_long_read(&zram->stats.max_used_pages); in mem_used_max_show()
280 up_read(&zram->init_lock); in mem_used_max_show()
290 struct zram *zram = dev_to_zram(dev); in mem_used_max_store() local
296 down_read(&zram->init_lock); in mem_used_max_store()
297 if (init_done(zram)) { in mem_used_max_store()
298 struct zram_meta *meta = zram->meta; in mem_used_max_store()
299 atomic_long_set(&zram->stats.max_used_pages, in mem_used_max_store()
302 up_read(&zram->init_lock); in mem_used_max_store()
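Writing to mem_used_max does not set an arbitrary value; it resets the watermark to the pool's current consumption. The elided middle of the matched lines plausibly reads the live page count from zsmalloc; zs_get_total_pages() is the allocator call of this driver era, though its exact placement here is reconstructed:

	/* Sketch: reset the high-water mark to what the pool uses now. */
	down_read(&zram->init_lock);
	if (init_done(zram)) {
		struct zram_meta *meta = zram->meta;

		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	}
	up_read(&zram->init_lock);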
311 struct zram *zram = dev_to_zram(dev); in max_comp_streams_show() local
313 down_read(&zram->init_lock); in max_comp_streams_show()
314 val = zram->max_comp_streams; in max_comp_streams_show()
315 up_read(&zram->init_lock); in max_comp_streams_show()
324 struct zram *zram = dev_to_zram(dev); in max_comp_streams_store() local
333 down_write(&zram->init_lock); in max_comp_streams_store()
334 if (init_done(zram)) { in max_comp_streams_store()
335 if (!zcomp_set_max_streams(zram->comp, num)) { in max_comp_streams_store()
342 zram->max_comp_streams = num; in max_comp_streams_store()
345 up_write(&zram->init_lock); in max_comp_streams_store()
353 struct zram *zram = dev_to_zram(dev); in comp_algorithm_show() local
355 down_read(&zram->init_lock); in comp_algorithm_show()
356 sz = zcomp_available_show(zram->compressor, buf); in comp_algorithm_show()
357 up_read(&zram->init_lock); in comp_algorithm_show()
365 struct zram *zram = dev_to_zram(dev); in comp_algorithm_store() local
371 down_write(&zram->init_lock); in comp_algorithm_store()
372 if (init_done(zram)) { in comp_algorithm_store()
373 up_write(&zram->init_lock); in comp_algorithm_store()
377 strlcpy(zram->compressor, buf, sizeof(zram->compressor)); in comp_algorithm_store()
380 sz = strlen(zram->compressor); in comp_algorithm_store()
381 if (sz > 0 && zram->compressor[sz - 1] == '\n') in comp_algorithm_store()
382 zram->compressor[sz - 1] = 0x00; in comp_algorithm_store()
384 up_write(&zram->init_lock); in comp_algorithm_store()
391 struct zram *zram = dev_to_zram(dev); in compact_store() local
394 down_read(&zram->init_lock); in compact_store()
395 if (!init_done(zram)) { in compact_store()
396 up_read(&zram->init_lock); in compact_store()
400 meta = zram->meta; in compact_store()
402 up_read(&zram->init_lock); in compact_store()
410 struct zram *zram = dev_to_zram(dev); in io_stat_show() local
413 down_read(&zram->init_lock); in io_stat_show()
416 (u64)atomic64_read(&zram->stats.failed_reads), in io_stat_show()
417 (u64)atomic64_read(&zram->stats.failed_writes), in io_stat_show()
418 (u64)atomic64_read(&zram->stats.invalid_io), in io_stat_show()
419 (u64)atomic64_read(&zram->stats.notify_free)); in io_stat_show()
420 up_read(&zram->init_lock); in io_stat_show()
428 struct zram *zram = dev_to_zram(dev); in mm_stat_show() local
436 down_read(&zram->init_lock); in mm_stat_show()
437 if (init_done(zram)) { in mm_stat_show()
438 mem_used = zs_get_total_pages(zram->meta->mem_pool); in mm_stat_show()
439 zs_pool_stats(zram->meta->mem_pool, &pool_stats); in mm_stat_show()
442 orig_size = atomic64_read(&zram->stats.pages_stored); in mm_stat_show()
443 max_used = atomic_long_read(&zram->stats.max_used_pages); in mm_stat_show()
448 (u64)atomic64_read(&zram->stats.compr_data_size), in mm_stat_show()
450 zram->limit_pages << PAGE_SHIFT, in mm_stat_show()
452 (u64)atomic64_read(&zram->stats.zero_pages), in mm_stat_show()
454 up_read(&zram->init_lock); in mm_stat_show()
470 static inline bool zram_meta_get(struct zram *zram) in zram_meta_get() argument
472 if (atomic_inc_not_zero(&zram->refcount)) in zram_meta_get()
477 static inline void zram_meta_put(struct zram *zram) in zram_meta_put() argument
479 atomic_dec(&zram->refcount); in zram_meta_put()
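zram_meta_get()/zram_meta_put() implement a drain-style refcount: the count starts at 1 when the device is initialized (see the atomic_set in disksize_store() below), I/O paths may proceed only if atomic_inc_not_zero() succeeds, and it stops succeeding once reset has dropped the initial reference. Note that the driver's own put (source line 479 above) is a bare atomic_dec(); the wake_up() in this sketch is an assumption added to illustrate how the reset-side wait_event() on zram->io_done gets satisfied, not code copied from the driver:

/* Sketch of the drain-style refcounting around zram->meta. */
static inline bool zram_meta_get(struct zram *zram)
{
	/* Fails once reset has dropped the initial reference. */
	if (atomic_inc_not_zero(&zram->refcount))
		return true;
	return false;
}

static inline void zram_meta_put(struct zram *zram)
{
	/* Illustrative: let the reset path observe the drain. */
	if (atomic_dec_and_test(&zram->refcount))
		wake_up(&zram->io_done);
}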
536 static void zram_free_page(struct zram *zram, size_t index) in zram_free_page() argument
538 struct zram_meta *meta = zram->meta; in zram_free_page()
548 atomic64_dec(&zram->stats.zero_pages); in zram_free_page()
556 &zram->stats.compr_data_size); in zram_free_page()
557 atomic64_dec(&zram->stats.pages_stored); in zram_free_page()
563 static int zram_decompress_page(struct zram *zram, char *mem, u32 index) in zram_decompress_page() argument
567 struct zram_meta *meta = zram->meta; in zram_decompress_page()
585 ret = zcomp_decompress(zram->comp, cmem, size, mem); in zram_decompress_page()
598 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, in zram_bvec_read() argument
604 struct zram_meta *meta = zram->meta; in zram_bvec_read()
630 ret = zram_decompress_page(zram, uncmem, index); in zram_bvec_read()
648 static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, in zram_bvec_write() argument
656 struct zram_meta *meta = zram->meta; in zram_bvec_write()
671 ret = zram_decompress_page(zram, uncmem, index); in zram_bvec_write()
676 zstrm = zcomp_strm_find(zram->comp); in zram_bvec_write()
693 zram_free_page(zram, index); in zram_bvec_write()
697 atomic64_inc(&zram->stats.zero_pages); in zram_bvec_write()
702 ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen); in zram_bvec_write()
729 update_used_max(zram, alloced_pages); in zram_bvec_write()
731 if (zram->limit_pages && alloced_pages > zram->limit_pages) { in zram_bvec_write()
747 zcomp_strm_release(zram->comp, zstrm); in zram_bvec_write()
756 zram_free_page(zram, index); in zram_bvec_write()
763 atomic64_add(clen, &zram->stats.compr_data_size); in zram_bvec_write()
764 atomic64_inc(&zram->stats.pages_stored); in zram_bvec_write()
767 zcomp_strm_release(zram->comp, zstrm); in zram_bvec_write()
778 static void zram_bio_discard(struct zram *zram, u32 index, in zram_bio_discard() argument
782 struct zram_meta *meta = zram->meta; in zram_bio_discard()
804 zram_free_page(zram, index); in zram_bio_discard()
806 atomic64_inc(&zram->stats.notify_free); in zram_bio_discard()
812 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, in zram_bvec_rw() argument
819 &zram->disk->part0); in zram_bvec_rw()
822 atomic64_inc(&zram->stats.num_reads); in zram_bvec_rw()
823 ret = zram_bvec_read(zram, bvec, index, offset); in zram_bvec_rw()
825 atomic64_inc(&zram->stats.num_writes); in zram_bvec_rw()
826 ret = zram_bvec_write(zram, bvec, index, offset); in zram_bvec_rw()
829 generic_end_io_acct(rw, &zram->disk->part0, start_time); in zram_bvec_rw()
833 atomic64_inc(&zram->stats.failed_reads); in zram_bvec_rw()
835 atomic64_inc(&zram->stats.failed_writes); in zram_bvec_rw()
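zram_bvec_rw() brackets each page-sized operation with block-layer I/O accounting and dispatches it to the read or write path, counting failures separately. A sketch reconstructed around the matched lines; the generic_start_io_acct()/generic_end_io_acct() calls use the pre-4.14 three-argument forms that match this driver era:

/* Sketch: account, dispatch, and tally failures for one bvec. */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec,
			u32 index, int offset, int rw)
{
	unsigned long start_time = jiffies;
	int ret;

	generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(rw, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}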
841 static void __zram_make_request(struct zram *zram, struct bio *bio) in __zram_make_request() argument
853 zram_bio_discard(zram, index, offset, bio); in __zram_make_request()
873 if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0) in __zram_make_request()
878 if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0) in __zram_make_request()
881 if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0) in __zram_make_request()
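zram operates on exactly one zram page at a time, so a bio segment that straddles a page boundary is split: the first zram_bvec_rw() call covers the tail of the current page, the second covers the remainder starting at offset 0 of the next page. A sketch of the per-segment loop reconstructed around the three matched calls; SECTORS_PER_PAGE(_SHIFT) come from the driver's header and update_position() is the driver's helper that advances index/offset past the segment just handled:

	/* Sketch: index is the zram page, offset the byte offset in it. */
	u32 index;
	int offset;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/* Segment crosses a page boundary: split it. */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_offset = bvec.bv_offset;
			bv.bv_len = max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
				goto out;

			bv.bv_offset += max_transfer_size;
			bv.bv_len = bvec.bv_len - max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
				goto out;
		} else {
			if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
				goto out;
		}

		update_position(&index, &offset, &bvec);
	}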
899 struct zram *zram = queue->queuedata; in zram_make_request() local
901 if (unlikely(!zram_meta_get(zram))) in zram_make_request()
906 if (!valid_io_request(zram, bio->bi_iter.bi_sector, in zram_make_request()
908 atomic64_inc(&zram->stats.invalid_io); in zram_make_request()
912 __zram_make_request(zram, bio); in zram_make_request()
913 zram_meta_put(zram); in zram_make_request()
916 zram_meta_put(zram); in zram_make_request()
925 struct zram *zram; in zram_slot_free_notify() local
928 zram = bdev->bd_disk->private_data; in zram_slot_free_notify()
929 meta = zram->meta; in zram_slot_free_notify()
932 zram_free_page(zram, index); in zram_slot_free_notify()
934 atomic64_inc(&zram->stats.notify_free); in zram_slot_free_notify()
942 struct zram *zram; in zram_rw_page() local
945 zram = bdev->bd_disk->private_data; in zram_rw_page()
946 if (unlikely(!zram_meta_get(zram))) in zram_rw_page()
949 if (!valid_io_request(zram, sector, PAGE_SIZE)) { in zram_rw_page()
950 atomic64_inc(&zram->stats.invalid_io); in zram_rw_page()
962 err = zram_bvec_rw(zram, &bv, index, offset, rw); in zram_rw_page()
964 zram_meta_put(zram); in zram_rw_page()
979 static void zram_reset_device(struct zram *zram) in zram_reset_device() argument
985 down_write(&zram->init_lock); in zram_reset_device()
987 zram->limit_pages = 0; in zram_reset_device()
989 if (!init_done(zram)) { in zram_reset_device()
990 up_write(&zram->init_lock); in zram_reset_device()
994 meta = zram->meta; in zram_reset_device()
995 comp = zram->comp; in zram_reset_device()
996 disksize = zram->disksize; in zram_reset_device()
1002 zram_meta_put(zram); in zram_reset_device()
1007 wait_event(zram->io_done, atomic_read(&zram->refcount) == 0); in zram_reset_device()
1010 memset(&zram->stats, 0, sizeof(zram->stats)); in zram_reset_device()
1011 zram->disksize = 0; in zram_reset_device()
1012 zram->max_comp_streams = 1; in zram_reset_device()
1014 set_capacity(zram->disk, 0); in zram_reset_device()
1015 part_stat_set_all(&zram->disk->part0, 0); in zram_reset_device()
1017 up_write(&zram->init_lock); in zram_reset_device()
1029 struct zram *zram = dev_to_zram(dev); in disksize_store() local
1037 meta = zram_meta_alloc(zram->disk->disk_name, disksize); in disksize_store()
1041 comp = zcomp_create(zram->compressor, zram->max_comp_streams); in disksize_store()
1044 zram->compressor); in disksize_store()
1049 down_write(&zram->init_lock); in disksize_store()
1050 if (init_done(zram)) { in disksize_store()
1056 init_waitqueue_head(&zram->io_done); in disksize_store()
1057 atomic_set(&zram->refcount, 1); in disksize_store()
1058 zram->meta = meta; in disksize_store()
1059 zram->comp = comp; in disksize_store()
1060 zram->disksize = disksize; in disksize_store()
1061 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); in disksize_store()
1062 up_write(&zram->init_lock); in disksize_store()
1069 revalidate_disk(zram->disk); in disksize_store()
1074 up_write(&zram->init_lock); in disksize_store()
1086 struct zram *zram; in reset_store() local
1096 zram = dev_to_zram(dev); in reset_store()
1097 bdev = bdget_disk(zram->disk, 0); in reset_store()
1103 if (bdev->bd_openers || zram->claim) { in reset_store()
1110 zram->claim = true; in reset_store()
1115 zram_reset_device(zram); in reset_store()
1116 revalidate_disk(zram->disk); in reset_store()
1120 zram->claim = false; in reset_store()
1129 struct zram *zram; in zram_open() local
1133 zram = bdev->bd_disk->private_data; in zram_open()
1135 if (zram->claim) in zram_open()
1193 struct zram *zram; in zram_add() local
1197 zram = kzalloc(sizeof(struct zram), GFP_KERNEL); in zram_add()
1198 if (!zram) in zram_add()
1201 ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL); in zram_add()
1206 init_rwsem(&zram->init_lock); in zram_add()
1219 zram->disk = alloc_disk(1); in zram_add()
1220 if (!zram->disk) { in zram_add()
1227 zram->disk->major = zram_major; in zram_add()
1228 zram->disk->first_minor = device_id; in zram_add()
1229 zram->disk->fops = &zram_devops; in zram_add()
1230 zram->disk->queue = queue; in zram_add()
1231 zram->disk->queue->queuedata = zram; in zram_add()
1232 zram->disk->private_data = zram; in zram_add()
1233 snprintf(zram->disk->disk_name, 16, "zram%d", device_id); in zram_add()
1236 set_capacity(zram->disk, 0); in zram_add()
1238 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); in zram_add()
1239 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); in zram_add()
1244 blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE); in zram_add()
1245 blk_queue_logical_block_size(zram->disk->queue, in zram_add()
1247 blk_queue_io_min(zram->disk->queue, PAGE_SIZE); in zram_add()
1248 blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); in zram_add()
1249 zram->disk->queue->limits.discard_granularity = PAGE_SIZE; in zram_add()
1250 blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); in zram_add()
1260 zram->disk->queue->limits.discard_zeroes_data = 1; in zram_add()
1262 zram->disk->queue->limits.discard_zeroes_data = 0; in zram_add()
1263 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue); in zram_add()
1265 add_disk(zram->disk); in zram_add()
1267 ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj, in zram_add()
1274 strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor)); in zram_add()
1275 zram->meta = NULL; in zram_add()
1276 zram->max_comp_streams = 1; in zram_add()
1278 pr_info("Added device: %s\n", zram->disk->disk_name); in zram_add()
1282 del_gendisk(zram->disk); in zram_add()
1283 put_disk(zram->disk); in zram_add()
1289 kfree(zram); in zram_add()
1293 static int zram_remove(struct zram *zram) in zram_remove() argument
1297 bdev = bdget_disk(zram->disk, 0); in zram_remove()
1302 if (bdev->bd_openers || zram->claim) { in zram_remove()
1308 zram->claim = true; in zram_remove()
1318 sysfs_remove_group(&disk_to_dev(zram->disk)->kobj, in zram_remove()
1323 zram_reset_device(zram); in zram_remove()
1326 pr_info("Removed device: %s\n", zram->disk->disk_name); in zram_remove()
1328 blk_cleanup_queue(zram->disk->queue); in zram_remove()
1329 del_gendisk(zram->disk); in zram_remove()
1330 put_disk(zram->disk); in zram_remove()
1331 kfree(zram); in zram_remove()
1356 struct zram *zram; in hot_remove_store() local
1368 zram = idr_find(&zram_index_idr, dev_id); in hot_remove_store()
1369 if (zram) { in hot_remove_store()
1370 ret = zram_remove(zram); in hot_remove_store()
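hot_remove_store() resolves the device id to a struct zram through the driver's IDR and drops the IDR slot only if zram_remove() succeeded, since removal fails with -EBUSY while the disk is open or claimed. A sketch of the lookup-and-remove step; the zram_index_mutex serializing IDR access is assumed from this driver era:

	/* Sketch: map dev_id to a device, tear it down, and free the
	 * IDR slot only on success so a busy device stays addressable.
	 */
	mutex_lock(&zram_index_mutex);
	zram = idr_find(&zram_index_idr, dev_id);
	if (zram) {
		ret = zram_remove(zram);
		if (!ret)
			idr_remove(&zram_index_idr, dev_id);
	} else {
		ret = -ENODEV;
	}
	mutex_unlock(&zram_index_mutex);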