Lines Matching refs:meta — references to the variable `meta` (a `struct zram_meta *`) in the zram driver, drivers/block/zram/zram_drv.c. Each entry lists the source line number, the matching code, the enclosing function, and whether `meta` is a function argument or a local variable there.

78 static int zram_test_flag(struct zram_meta *meta, u32 index,  in zram_test_flag()  argument
81 return meta->table[index].value & BIT(flag); in zram_test_flag()
84 static void zram_set_flag(struct zram_meta *meta, u32 index, in zram_set_flag() argument
87 meta->table[index].value |= BIT(flag); in zram_set_flag()
90 static void zram_clear_flag(struct zram_meta *meta, u32 index, in zram_clear_flag() argument
93 meta->table[index].value &= ~BIT(flag); in zram_clear_flag()
96 static size_t zram_get_obj_size(struct zram_meta *meta, u32 index) in zram_get_obj_size() argument
98 return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1); in zram_get_obj_size()
101 static void zram_set_obj_size(struct zram_meta *meta, in zram_set_obj_size() argument
104 unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT; in zram_set_obj_size()
106 meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size; in zram_set_obj_size()
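The five accessors above (lines 78-106) show that table[index].value is a single word carrying both the compressed object size (the low ZRAM_FLAG_SHIFT bits) and the per-slot flags (the bits above them). Below is a minimal, user-space C sketch of that packing; the ZRAM_FLAG_SHIFT value and the ZRAM_ZERO bit position are assumptions chosen for illustration, not copied from zram_drv.h.

    /* Sketch of the size/flag packing used by the accessors above.
     * The constants are assumed values, not the driver's definitions. */
    #include <stdio.h>

    #define BIT(n)          (1UL << (n))
    #define ZRAM_FLAG_SHIFT 24                      /* assumed: low 24 bits hold the size */
    #define ZRAM_ZERO       (ZRAM_FLAG_SHIFT + 1)   /* assumed flag bit position */

    static unsigned long set_obj_size(unsigned long value, size_t size)
    {
        unsigned long flags = value >> ZRAM_FLAG_SHIFT;

        return (flags << ZRAM_FLAG_SHIFT) | size;   /* keep flags, replace size */
    }

    static size_t get_obj_size(unsigned long value)
    {
        return value & (BIT(ZRAM_FLAG_SHIFT) - 1);  /* mask off the flag bits */
    }

    int main(void)
    {
        unsigned long value = 0;

        value = set_obj_size(value, 3172);          /* store a compressed length */
        value |= BIT(ZRAM_ZERO);                    /* what zram_set_flag(meta, index, ZRAM_ZERO) does */

        printf("size=%zu zero_flag=%d\n",
               get_obj_size(value), !!(value & BIT(ZRAM_ZERO)));
        return 0;
    }

Because one word carries size, flags, and (as later entries show) the ZRAM_ACCESS lock bit, the per-slot table entry stays compact: two unsigned longs per page.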
230 struct zram_meta *meta = zram->meta; in mem_used_total_show() local
231 val = zs_get_total_pages(meta->mem_pool); in mem_used_total_show()
298 struct zram_meta *meta = zram->meta; in mem_used_max_store() local
300 zs_get_total_pages(meta->mem_pool)); in mem_used_max_store()
392 struct zram_meta *meta; in compact_store() local
400 meta = zram->meta; in compact_store()
401 zs_compact(meta->mem_pool); in compact_store()
438 mem_used = zs_get_total_pages(zram->meta->mem_pool); in mm_stat_show()
439 zs_pool_stats(zram->meta->mem_pool, &pool_stats); in mm_stat_show()
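The four attribute handlers above (lines 230-439) all derive their numbers from the zsmalloc pool held in meta->mem_pool. A minimal kernel-style sketch of that pattern follows; zram_pool_used_bytes() is a hypothetical helper name, not a driver function.

    /* Sketch: express the zsmalloc pool footprint in bytes, the way the
     * mem_used_total / mem_used_max / mm_stat attributes above derive
     * their numbers. zram_pool_used_bytes() is a hypothetical helper. */
    #include <linux/types.h>
    #include <linux/mm.h>
    #include <linux/zsmalloc.h>

    static u64 zram_pool_used_bytes(struct zs_pool *pool)
    {
        /* zs_get_total_pages() reports the pool's footprint in pages */
        return (u64)zs_get_total_pages(pool) << PAGE_SHIFT;
    }

compact_store() (line 401) only triggers zs_compact() on the same pool, and mm_stat_show() (line 439) additionally pulls pool counters via zs_pool_stats().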
482 static void zram_meta_free(struct zram_meta *meta, u64 disksize) in zram_meta_free() argument
489 unsigned long handle = meta->table[index].handle; in zram_meta_free()
494 zs_free(meta->mem_pool, handle); in zram_meta_free()
497 zs_destroy_pool(meta->mem_pool); in zram_meta_free()
498 vfree(meta->table); in zram_meta_free()
499 kfree(meta); in zram_meta_free()
505 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL); in zram_meta_alloc() local
507 if (!meta) in zram_meta_alloc()
511 meta->table = vzalloc(num_pages * sizeof(*meta->table)); in zram_meta_alloc()
512 if (!meta->table) { in zram_meta_alloc()
517 meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM); in zram_meta_alloc()
518 if (!meta->mem_pool) { in zram_meta_alloc()
523 return meta; in zram_meta_alloc()
526 vfree(meta->table); in zram_meta_alloc()
527 kfree(meta); in zram_meta_alloc()
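zram_meta_alloc() (lines 505-527) builds the two structures every other function in this listing touches — the per-slot table and the zsmalloc pool — and zram_meta_free() (lines 482-499) tears them down, releasing any handle still live in the table. A condensed sketch of the pair, assuming the struct layout implied by the accessors above; the two-argument zs_create_pool() matches the line shown here (later kernels changed that API).

    /* Condensed sketch of the allocation/teardown pair above. The struct
     * layouts are assumptions reconstructed from this listing. */
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <linux/zsmalloc.h>

    struct zram_table_entry {
        unsigned long handle;   /* zsmalloc handle, 0 if the slot is empty */
        unsigned long value;    /* object size + flags, see the accessors above */
    };

    struct zram_meta {
        struct zram_table_entry *table;
        struct zs_pool *mem_pool;
    };

    static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
    {
        size_t num_pages = disksize >> PAGE_SHIFT;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

        if (!meta)
            return NULL;

        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table)
            goto out_free_meta;

        meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool)
            goto out_free_table;

        return meta;

    out_free_table:
        vfree(meta->table);
    out_free_meta:
        kfree(meta);
        return NULL;
    }

    static void zram_meta_free(struct zram_meta *meta, u64 disksize)
    {
        size_t num_pages = disksize >> PAGE_SHIFT;
        size_t index;

        /* release every slot that still owns a compressed object */
        for (index = 0; index < num_pages; index++) {
            unsigned long handle = meta->table[index].handle;

            if (handle)
                zs_free(meta->mem_pool, handle);
        }

        zs_destroy_pool(meta->mem_pool);
        vfree(meta->table);
        kfree(meta);
    }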
538 struct zram_meta *meta = zram->meta; in zram_free_page() local
539 unsigned long handle = meta->table[index].handle; in zram_free_page()
546 if (zram_test_flag(meta, index, ZRAM_ZERO)) { in zram_free_page()
547 zram_clear_flag(meta, index, ZRAM_ZERO); in zram_free_page()
553 zs_free(meta->mem_pool, handle); in zram_free_page()
555 atomic64_sub(zram_get_obj_size(meta, index), in zram_free_page()
559 meta->table[index].handle = 0; in zram_free_page()
560 zram_set_obj_size(meta, index, 0); in zram_free_page()
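zram_free_page() (lines 538-560) distinguishes the two states a slot can be in: a ZRAM_ZERO slot, which only needs its flag cleared, and a slot holding a real zsmalloc handle, which must be freed and have its size accounting rolled back before the slot is zeroed. A sketch follows, built on the driver's own accessors and struct zram (not redefined here); the listing truncates the atomic64_sub() line, so the stats field name below is an assumption.

    /* Sketch of the slot-free path above. zram->stats.compr_data_size is
     * an assumed field name; the listing cuts that line short. */
    static void zram_free_page(struct zram *zram, size_t index)
    {
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (zram_test_flag(meta, index, ZRAM_ZERO)) {
            /* nothing was stored in the pool for an all-zero page */
            zram_clear_flag(meta, index, ZRAM_ZERO);
            return;
        }

        if (!handle)
            return;

        zs_free(meta->mem_pool, handle);
        atomic64_sub(zram_get_obj_size(meta, index),
                     &zram->stats.compr_data_size);   /* assumed field name */

        meta->table[index].handle = 0;
        zram_set_obj_size(meta, index, 0);
    }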
567 struct zram_meta *meta = zram->meta; in zram_decompress_page() local
571 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_decompress_page()
572 handle = meta->table[index].handle; in zram_decompress_page()
573 size = zram_get_obj_size(meta, index); in zram_decompress_page()
575 if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { in zram_decompress_page()
576 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_decompress_page()
581 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); in zram_decompress_page()
586 zs_unmap_object(meta->mem_pool, handle); in zram_decompress_page()
587 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_decompress_page()
604 struct zram_meta *meta = zram->meta; in zram_bvec_read() local
607 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_read()
608 if (unlikely(!meta->table[index].handle) || in zram_bvec_read()
609 zram_test_flag(meta, index, ZRAM_ZERO)) { in zram_bvec_read()
610 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_read()
614 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_read()
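zram_decompress_page() (lines 567-587) and zram_bvec_read() (lines 604-614) show the read-side locking convention: ZRAM_ACCESS is taken as a bit spin lock directly on table[index].value, so the same word holds the size, the flags, and the lock bit. A sketch of that pattern follows; the decompression call itself is outside this listing and is reduced to a comment, and zram_read_slot() is a hypothetical name.

    /* Sketch of the locked read path above; builds on the driver's
     * accessors and struct zram_meta. zram_read_slot() is hypothetical. */
    #include <linux/bit_spinlock.h>
    #include <linux/string.h>
    #include <linux/zsmalloc.h>

    static int zram_read_slot(struct zram_meta *meta, u32 index, void *dst)
    {
        unsigned long handle;
        size_t size;
        void *cmem;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        handle = meta->table[index].handle;
        size = zram_get_obj_size(meta, index);

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
            bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
            memset(dst, 0, PAGE_SIZE);   /* empty or zero-filled slot */
            return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        /* ... decompress 'size' bytes from cmem into dst here ... */
        zs_unmap_object(meta->mem_pool, handle);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        return 0;
    }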
656 struct zram_meta *meta = zram->meta; in zram_bvec_write() local
692 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_write()
694 zram_set_flag(meta, index, ZRAM_ZERO); in zram_bvec_write()
695 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_write()
720 handle = zs_malloc(meta->mem_pool, clen); in zram_bvec_write()
728 alloced_pages = zs_get_total_pages(meta->mem_pool); in zram_bvec_write()
732 zs_free(meta->mem_pool, handle); in zram_bvec_write()
737 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO); in zram_bvec_write()
749 zs_unmap_object(meta->mem_pool, handle); in zram_bvec_write()
755 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_write()
758 meta->table[index].handle = handle; in zram_bvec_write()
759 zram_set_obj_size(meta, index, clen); in zram_bvec_write()
760 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_write()
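zram_bvec_write() (lines 656-760) is the sole writer: an all-zero page merely sets ZRAM_ZERO under the slot lock, while compressed data is allocated with zs_malloc(), written through a ZS_MM_WO mapping, and only then published into table[index] under the same ZRAM_ACCESS lock. A sketch of that publish step follows; the compression itself and the memory-limit handling around lines 720-732 are reduced to comments, and zram_store_slot() is a hypothetical name.

    /* Sketch of the publish step in the write path above; 'clen' is the
     * compressed length produced earlier and 'src' the compressed data. */
    #include <linux/errno.h>
    #include <linux/string.h>

    static int zram_store_slot(struct zram *zram, u32 index,
                               const void *src, unsigned int clen)
    {
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        void *cmem;

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle)
            return -ENOMEM;   /* the driver also enforces a memory limit here */

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
        memcpy(cmem, src, clen);
        zs_unmap_object(meta->mem_pool, handle);

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);          /* drop whatever the slot held before */
        meta->table[index].handle = handle;
        zram_set_obj_size(meta, index, clen);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

        return 0;
    }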
782 struct zram_meta *meta = zram->meta; in zram_bio_discard() local
803 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_bio_discard()
805 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_bio_discard()
926 struct zram_meta *meta; in zram_slot_free_notify() local
929 meta = zram->meta; in zram_slot_free_notify()
931 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_slot_free_notify()
933 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_slot_free_notify()
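zram_bio_discard() (lines 782-805) and zram_slot_free_notify() (lines 926-933) reuse the same pattern to drop slots: take the per-slot ZRAM_ACCESS lock, free the page, unlock, so they cannot race with the read and write paths above. A sketch of the swap-notify hook; the body between lock and unlock is not shown in the listing, so the zram_free_page() call here reflects the driver's pattern rather than a quoted line, and the stats update is omitted.

    /* Sketch of the swap slot-free hook above: drop the slot's backing
     * object under the per-slot lock. */
    #include <linux/blkdev.h>

    static void zram_slot_free_notify(struct block_device *bdev,
                                      unsigned long index)
    {
        struct zram *zram = bdev->bd_disk->private_data;
        struct zram_meta *meta = zram->meta;

        bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
        zram_free_page(zram, index);
        bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
    }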
981 struct zram_meta *meta; in zram_reset_device() local
994 meta = zram->meta; in zram_reset_device()
1019 zram_meta_free(meta, disksize); in zram_reset_device()
1028 struct zram_meta *meta; in disksize_store() local
1037 meta = zram_meta_alloc(zram->disk->disk_name, disksize); in disksize_store()
1038 if (!meta) in disksize_store()
1058 zram->meta = meta; in disksize_store()
1077 zram_meta_free(meta, disksize); in disksize_store()
1275 zram->meta = NULL; in zram_add()
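The last entries tie the meta lifetime to the configured disk size: disksize_store() (lines 1028-1077) allocates it and publishes it as zram->meta, zram_reset_device() (lines 981-1019) frees it again, and zram_add() (line 1275) starts each device with meta set to NULL. A sketch of the setup half; zram_setup() is a hypothetical helper, and the real disksize_store() also creates the compression backend and holds the device's init lock while publishing, which is omitted here.

    /* Sketch of the setup half of the lifecycle above. zram_setup() is a
     * hypothetical helper name. */
    static int zram_setup(struct zram *zram, u64 disksize)
    {
        struct zram_meta *meta;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(zram->disk->disk_name, disksize);
        if (!meta)
            return -ENOMEM;

        zram->meta = meta;
        zram->disksize = disksize;
        /* the driver then sets the gendisk capacity from disksize */
        return 0;
    }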