Lines matching refs: meta

82 struct zram_meta *meta; in compact_store() local
90 meta = zram->meta; in compact_store()
91 nr_migrated = zs_compact(meta->mem_pool); in compact_store()
138 struct zram_meta *meta = zram->meta; in mem_used_total_show() local
139 val = zs_get_total_pages(meta->mem_pool); in mem_used_total_show()
219 struct zram_meta *meta = zram->meta; in mem_used_max_store() local
221 zs_get_total_pages(meta->mem_pool)); in mem_used_max_store()
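A quick sketch of the accounting behind these attributes: zs_get_total_pages() reports zsmalloc pool usage in pages, so the sysfs handlers shift by PAGE_SHIFT to report bytes, while compact_store() simply forwards to zs_compact() on the same pool. The helper name below is hypothetical; only the zs_*() calls are taken from the entries above.

    #include <linux/zsmalloc.h>
    #include "zram_drv.h"            /* struct zram_meta */

    /* Pool usage in bytes, as computed by mem_used_total_show() above. */
    static u64 zram_meta_bytes_used(struct zram_meta *meta)
    {
            return (u64)zs_get_total_pages(meta->mem_pool) << PAGE_SHIFT;
    }
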
286 static int zram_test_flag(struct zram_meta *meta, u32 index, in zram_test_flag() argument
289 return meta->table[index].value & BIT(flag); in zram_test_flag()
292 static void zram_set_flag(struct zram_meta *meta, u32 index, in zram_set_flag() argument
295 meta->table[index].value |= BIT(flag); in zram_set_flag()
298 static void zram_clear_flag(struct zram_meta *meta, u32 index, in zram_clear_flag() argument
301 meta->table[index].value &= ~BIT(flag); in zram_clear_flag()
304 static size_t zram_get_obj_size(struct zram_meta *meta, u32 index) in zram_get_obj_size() argument
306 return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1); in zram_get_obj_size()
309 static void zram_set_obj_size(struct zram_meta *meta, in zram_set_obj_size() argument
312 unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT; in zram_set_obj_size()
314 meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size; in zram_set_obj_size()
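These accessors encode each slot's metadata in a single word: the low ZRAM_FLAG_SHIFT bits of table[index].value hold the compressed object size, and the bits above hold the zram_pageflags (ZRAM_ZERO, ZRAM_ACCESS). A self-contained sketch of that packing; the constants mirror zram_drv.h of this era (ZRAM_FLAG_SHIFT assumed to be 24) and the test values are made up.

    #include <assert.h>
    #include <stdio.h>

    /* Mirrors the zram_drv.h layout: size in the low ZRAM_FLAG_SHIFT bits,
     * page flags in the bits above. */
    #define ZRAM_FLAG_SHIFT 24
    #define BIT(n)          (1UL << (n))

    enum zram_pageflags {
            ZRAM_ZERO = ZRAM_FLAG_SHIFT,    /* page was entirely zero-filled */
            ZRAM_ACCESS,                    /* per-entry lock bit */
    };

    int main(void)
    {
            unsigned long value = 0;

            /* zram_set_obj_size(): keep the flags, replace the size bits */
            unsigned long flags = value >> ZRAM_FLAG_SHIFT;
            value = (flags << ZRAM_FLAG_SHIFT) | 1234;      /* compressed size */

            /* zram_set_flag() / zram_test_flag() */
            value |= BIT(ZRAM_ZERO);
            assert(value & BIT(ZRAM_ZERO));

            /* zram_get_obj_size(): mask off everything above the size field */
            printf("size=%lu zero=%d\n",
                   value & (BIT(ZRAM_FLAG_SHIFT) - 1),
                   !!(value & BIT(ZRAM_ZERO)));
            return 0;
    }
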
346 static void zram_meta_free(struct zram_meta *meta, u64 disksize) in zram_meta_free() argument
353 unsigned long handle = meta->table[index].handle; in zram_meta_free()
358 zs_free(meta->mem_pool, handle); in zram_meta_free()
361 zs_destroy_pool(meta->mem_pool); in zram_meta_free()
362 vfree(meta->table); in zram_meta_free()
363 kfree(meta); in zram_meta_free()
370 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL); in zram_meta_alloc() local
372 if (!meta) in zram_meta_alloc()
376 meta->table = vzalloc(num_pages * sizeof(*meta->table)); in zram_meta_alloc()
377 if (!meta->table) { in zram_meta_alloc()
383 meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM); in zram_meta_alloc()
384 if (!meta->mem_pool) { in zram_meta_alloc()
389 return meta; in zram_meta_alloc()
392 vfree(meta->table); in zram_meta_alloc()
393 kfree(meta); in zram_meta_alloc()
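zram_meta_alloc() and zram_meta_free() bracket the lifetime of the per-device metadata: one table entry per disk page plus one zsmalloc pool. The pairing below is reconstructed from the entries above as a sketch; pool-name construction and the pr_err() messages are abbreviated or assumed.

    static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
    {
            char pool_name[16];
            size_t num_pages = disksize >> PAGE_SHIFT;
            struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

            if (!meta)
                    return NULL;

            /* One table entry (handle + packed size/flags) per disk page. */
            meta->table = vzalloc(num_pages * sizeof(*meta->table));
            if (!meta->table)
                    goto out_free_meta;

            snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
            meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
            if (!meta->mem_pool)
                    goto out_free_table;

            return meta;

    out_free_table:
            vfree(meta->table);
    out_free_meta:
            kfree(meta);
            return NULL;
    }

    static void zram_meta_free(struct zram_meta *meta, u64 disksize)
    {
            size_t num_pages = disksize >> PAGE_SHIFT;
            size_t index;

            /* Release every object still held in the pool ... */
            for (index = 0; index < num_pages; index++) {
                    unsigned long handle = meta->table[index].handle;

                    if (!handle)
                            continue;
                    zs_free(meta->mem_pool, handle);
            }

            /* ... then the pool, the table, and the container itself. */
            zs_destroy_pool(meta->mem_pool);
            vfree(meta->table);
            kfree(meta);
    }
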
454 struct zram_meta *meta = zram->meta; in zram_free_page() local
455 unsigned long handle = meta->table[index].handle; in zram_free_page()
462 if (zram_test_flag(meta, index, ZRAM_ZERO)) { in zram_free_page()
463 zram_clear_flag(meta, index, ZRAM_ZERO); in zram_free_page()
469 zs_free(meta->mem_pool, handle); in zram_free_page()
471 atomic64_sub(zram_get_obj_size(meta, index), in zram_free_page()
475 meta->table[index].handle = 0; in zram_free_page()
476 zram_set_obj_size(meta, index, 0); in zram_free_page()
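zram_free_page() is the single teardown path for a slot: a zero-filled page only needs its ZRAM_ZERO flag cleared, anything else returns its handle to the pool before the table entry is wiped. A sketch of that flow; the zram->stats field names are assumptions, since they do not appear in the entries above.

    static void zram_free_page(struct zram *zram, size_t index)
    {
            struct zram_meta *meta = zram->meta;
            unsigned long handle = meta->table[index].handle;

            if (unlikely(!handle)) {
                    /* No backing object: at most a ZRAM_ZERO flag to drop. */
                    if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                            zram_clear_flag(meta, index, ZRAM_ZERO);
                            atomic64_dec(&zram->stats.zero_pages);  /* assumed */
                    }
                    return;
            }

            zs_free(meta->mem_pool, handle);

            atomic64_sub(zram_get_obj_size(meta, index),
                         &zram->stats.compr_data_size);             /* assumed */
            atomic64_dec(&zram->stats.pages_stored);                /* assumed */

            meta->table[index].handle = 0;
            zram_set_obj_size(meta, index, 0);
    }
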
483 struct zram_meta *meta = zram->meta; in zram_decompress_page() local
487 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_decompress_page()
488 handle = meta->table[index].handle; in zram_decompress_page()
489 size = zram_get_obj_size(meta, index); in zram_decompress_page()
491 if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { in zram_decompress_page()
492 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_decompress_page()
497 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); in zram_decompress_page()
502 zs_unmap_object(meta->mem_pool, handle); in zram_decompress_page()
503 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_decompress_page()
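zram_decompress_page() shows the read-side locking pattern: the ZRAM_ACCESS bit inside table[index].value doubles as a per-entry spinlock, held around the handle/size lookup and the mapped-object access. A sketch of the flow; the zcomp_decompress() call and the error handling are simplified and partly assumed.

    static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
    {
            int ret = 0;
            unsigned long handle;
            size_t size;
            unsigned char *cmem;
            struct zram_meta *meta = zram->meta;

            bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
            handle = meta->table[index].handle;
            size = zram_get_obj_size(meta, index);

            if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                    /* Unbacked or zero-filled slot: reads see zeros. */
                    bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                    memset(mem, 0, PAGE_SIZE);
                    return 0;
            }

            cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
            if (size == PAGE_SIZE)
                    memcpy(mem, cmem, PAGE_SIZE);   /* stored uncompressed */
            else
                    ret = zcomp_decompress(zram->comp, cmem, size, mem); /* assumed */
            zs_unmap_object(meta->mem_pool, handle);
            bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

            return ret;
    }
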
520 struct zram_meta *meta = zram->meta; in zram_bvec_read() local
523 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_read()
524 if (unlikely(!meta->table[index].handle) || in zram_bvec_read()
525 zram_test_flag(meta, index, ZRAM_ZERO)) { in zram_bvec_read()
526 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_read()
530 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_read()
587 struct zram_meta *meta = zram->meta; in zram_bvec_write() local
625 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_write()
627 zram_set_flag(meta, index, ZRAM_ZERO); in zram_bvec_write()
628 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_write()
653 handle = zs_malloc(meta->mem_pool, clen); in zram_bvec_write()
661 alloced_pages = zs_get_total_pages(meta->mem_pool); in zram_bvec_write()
663 zs_free(meta->mem_pool, handle); in zram_bvec_write()
670 cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO); in zram_bvec_write()
682 zs_unmap_object(meta->mem_pool, handle); in zram_bvec_write()
688 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_write()
691 meta->table[index].handle = handle; in zram_bvec_write()
692 zram_set_obj_size(meta, index, clen); in zram_bvec_write()
693 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_bvec_write()
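The write path is the mirror image: a zero-filled page is recorded with just the ZRAM_ZERO flag, anything else is compressed, stored through zs_malloc()/zs_map_object(), checked against the optional memory limit via zs_get_total_pages(), and only then published into the table under the ZRAM_ACCESS lock. A condensed sketch of that sequence; the helper name is hypothetical, and compression, partial-write handling and stat updates are left out.

    /* Condensed store path for one page at slot @index (sketch).
     * @src holds the already-compressed data of length @clen. */
    static int zram_store_page(struct zram *zram, u32 index,
                               unsigned char *src, unsigned int clen,
                               bool is_zero_page)
    {
            struct zram_meta *meta = zram->meta;
            unsigned long handle, alloced_pages;
            unsigned char *cmem;

            if (is_zero_page) {
                    /* No object at all: free the old one, record the flag. */
                    bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
                    zram_free_page(zram, index);
                    zram_set_flag(meta, index, ZRAM_ZERO);
                    bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
                    return 0;
            }

            handle = zs_malloc(meta->mem_pool, clen);
            if (!handle)
                    return -ENOMEM;

            /* Enforce the optional mem_limit before accepting the object. */
            alloced_pages = zs_get_total_pages(meta->mem_pool);
            if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                    zs_free(meta->mem_pool, handle);
                    return -ENOMEM;
            }

            cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
            memcpy(cmem, src, clen);
            zs_unmap_object(meta->mem_pool, handle);

            /* Publish: drop the old object, then install handle and size. */
            bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
            zram_free_page(zram, index);
            meta->table[index].handle = handle;
            zram_set_obj_size(meta, index, clen);
            bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

            return 0;
    }
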
744 struct zram_meta *meta = zram->meta; in zram_bio_discard() local
765 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_bio_discard()
767 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_bio_discard()
776 struct zram_meta *meta; in zram_reset_device() local
789 meta = zram->meta; in zram_reset_device()
814 zram_meta_free(meta, disksize); in zram_reset_device()
823 struct zram_meta *meta; in disksize_store() local
832 meta = zram_meta_alloc(zram->disk->first_minor, disksize); in disksize_store()
833 if (!meta) in disksize_store()
853 zram->meta = meta; in disksize_store()
872 zram_meta_free(meta, disksize); in disksize_store()
1005 struct zram_meta *meta; in zram_slot_free_notify() local
1008 meta = zram->meta; in zram_slot_free_notify()
1010 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); in zram_slot_free_notify()
1012 bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); in zram_slot_free_notify()
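zram_slot_free_notify() and zram_bio_discard() reuse the same per-entry lock: each slot covered by a swap-free or discard is released individually under its ZRAM_ACCESS bit rather than under one global lock. A sketch of the swap hook; the private_data lookup and the stat name are assumed.

    static void zram_slot_free_notify(struct block_device *bdev,
                                      unsigned long index)
    {
            struct zram *zram = bdev->bd_disk->private_data;   /* assumed */
            struct zram_meta *meta = zram->meta;

            /* Same per-entry lock as the read/write paths. */
            bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
            zram_free_page(zram, index);
            bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
            atomic64_inc(&zram->stats.notify_free);            /* assumed */
    }
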
1103 mem_used = zs_get_total_pages(zram->meta->mem_pool); in mm_stat_show()
1233 zram->meta = NULL; in create_device()