Lines Matching refs: va

299 struct vmap_area *va; in __find_vmap_area() local
301 va = rb_entry(n, struct vmap_area, rb_node); in __find_vmap_area()
302 if (addr < va->va_start) in __find_vmap_area()
304 else if (addr >= va->va_end) in __find_vmap_area()
307 return va; in __find_vmap_area()
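
The four fragments above are the rb-tree walk in __find_vmap_area(). A minimal sketch of the full lookup, reconstructed on the assumption that the function follows the usual shape of this file (the vmap_area_root starting point and the loop framing are inferred, not shown in the listing):

	static struct vmap_area *__find_vmap_area(unsigned long addr)
	{
		struct rb_node *n = vmap_area_root.rb_node;

		while (n) {
			struct vmap_area *va;

			va = rb_entry(n, struct vmap_area, rb_node);
			if (addr < va->va_start)
				n = n->rb_left;		/* addr lies below this area */
			else if (addr >= va->va_end)
				n = n->rb_right;	/* addr lies above this area */
			else
				return va;		/* va_start <= addr < va_end */
		}

		return NULL;
	}
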
313 static void __insert_vmap_area(struct vmap_area *va) in __insert_vmap_area() argument
324 if (va->va_start < tmp_va->va_end) in __insert_vmap_area()
326 else if (va->va_end > tmp_va->va_start) in __insert_vmap_area()
332 rb_link_node(&va->rb_node, parent, p); in __insert_vmap_area()
333 rb_insert_color(&va->rb_node, &vmap_area_root); in __insert_vmap_area()
336 tmp = rb_prev(&va->rb_node); in __insert_vmap_area()
340 list_add_rcu(&va->list, &prev->list); in __insert_vmap_area()
342 list_add_rcu(&va->list, &vmap_area_list); in __insert_vmap_area()
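
__insert_vmap_area() appears here as two pieces: an rb-tree descent that positions the new area by its [va_start, va_end) range, and the list_add_rcu() calls that keep vmap_area_list sorted by address. A hedged sketch of how the excerpted lines connect; the parent/p/tmp_va declarations and the overlap BUG() are assumptions based on the usual layout of this function:

	static void __insert_vmap_area(struct vmap_area *va)
	{
		struct rb_node **p = &vmap_area_root.rb_node;
		struct rb_node *parent = NULL;
		struct rb_node *tmp;

		while (*p) {
			struct vmap_area *tmp_va;

			parent = *p;
			tmp_va = rb_entry(parent, struct vmap_area, rb_node);
			if (va->va_start < tmp_va->va_end)
				p = &(*p)->rb_left;	/* new area sits below tmp_va */
			else if (va->va_end > tmp_va->va_start)
				p = &(*p)->rb_right;	/* new area sits above tmp_va */
			else
				BUG();			/* ranges must never overlap */
		}

		rb_link_node(&va->rb_node, parent, p);
		rb_insert_color(&va->rb_node, &vmap_area_root);

		/* keep vmap_area_list sorted by address */
		tmp = rb_prev(&va->rb_node);
		if (tmp) {
			struct vmap_area *prev;

			prev = rb_entry(tmp, struct vmap_area, rb_node);
			list_add_rcu(&va->list, &prev->list);
		} else
			list_add_rcu(&va->list, &vmap_area_list);
	}
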
356 struct vmap_area *va; in alloc_vmap_area() local
366 va = kmalloc_node(sizeof(struct vmap_area), in alloc_vmap_area()
368 if (unlikely(!va)) in alloc_vmap_area()
375 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK); in alloc_vmap_area()
452 va->va_start = addr; in alloc_vmap_area()
453 va->va_end = addr + size; in alloc_vmap_area()
454 va->flags = 0; in alloc_vmap_area()
455 __insert_vmap_area(va); in alloc_vmap_area()
456 free_vmap_cache = &va->rb_node; in alloc_vmap_area()
459 BUG_ON(va->va_start & (align-1)); in alloc_vmap_area()
460 BUG_ON(va->va_start < vstart); in alloc_vmap_area()
461 BUG_ON(va->va_end > vend); in alloc_vmap_area()
463 return va; in alloc_vmap_area()
475 kfree(va); in alloc_vmap_area()
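
The alloc_vmap_area() fragments cover only the descriptor allocation, the kmemleak hint, and the success path; the search over [vstart, vend) that computes addr (source lines 376 through 451) is elided by this listing because those lines do not mention va. A sketch of the visible pieces, with the elided search left as a comment and the spin_unlock assumed from the usual vmap_area_lock discipline:

	va = kmalloc_node(sizeof(struct vmap_area),
			  gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/* only the rb_node linkage holds pointers worth scanning */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

	/*
	 * ... search [vstart, vend) for a hole of 'size' bytes, yielding addr;
	 * on failure the descriptor is released via the kfree(va) at line 475 ...
	 */

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;		/* remember where the search ended */
	spin_unlock(&vmap_area_lock);		/* assumed: lock taken before the search */

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;
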
479 static void __free_vmap_area(struct vmap_area *va) in __free_vmap_area() argument
481 BUG_ON(RB_EMPTY_NODE(&va->rb_node)); in __free_vmap_area()
484 if (va->va_end < cached_vstart) { in __free_vmap_area()
489 if (va->va_start <= cache->va_start) { in __free_vmap_area()
490 free_vmap_cache = rb_prev(&va->rb_node); in __free_vmap_area()
498 rb_erase(&va->rb_node, &vmap_area_root); in __free_vmap_area()
499 RB_CLEAR_NODE(&va->rb_node); in __free_vmap_area()
500 list_del_rcu(&va->list); in __free_vmap_area()
508 if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END) in __free_vmap_area()
509 vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end); in __free_vmap_area()
511 kfree_rcu(va, rcu_head); in __free_vmap_area()
517 static void free_vmap_area(struct vmap_area *va) in free_vmap_area() argument
520 __free_vmap_area(va); in free_vmap_area()
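
free_vmap_area() at line 517 shows up only as a single call into __free_vmap_area(); a plausible reading, assuming it is just the locked wrapper:

	static void free_vmap_area(struct vmap_area *va)
	{
		spin_lock(&vmap_area_lock);
		__free_vmap_area(va);	/* __free_vmap_area() expects the lock held */
		spin_unlock(&vmap_area_lock);
	}
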
527 static void unmap_vmap_area(struct vmap_area *va) in unmap_vmap_area() argument
529 vunmap_page_range(va->va_start, va->va_end); in unmap_vmap_area()
607 struct vmap_area *va; in __purge_vmap_area_lazy() local
626 list_for_each_entry_rcu(va, &vmap_area_list, list) { in __purge_vmap_area_lazy()
627 if (va->flags & VM_LAZY_FREE) { in __purge_vmap_area_lazy()
628 if (va->va_start < *start) in __purge_vmap_area_lazy()
629 *start = va->va_start; in __purge_vmap_area_lazy()
630 if (va->va_end > *end) in __purge_vmap_area_lazy()
631 *end = va->va_end; in __purge_vmap_area_lazy()
632 nr += (va->va_end - va->va_start) >> PAGE_SHIFT; in __purge_vmap_area_lazy()
633 list_add_tail(&va->purge_list, &valist); in __purge_vmap_area_lazy()
634 va->flags |= VM_LAZY_FREEING; in __purge_vmap_area_lazy()
635 va->flags &= ~VM_LAZY_FREE; in __purge_vmap_area_lazy()
648 list_for_each_entry_safe(va, n_va, &valist, purge_list) in __purge_vmap_area_lazy()
649 __free_vmap_area(va); in __purge_vmap_area_lazy()
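
The __purge_vmap_area_lazy() fragments show the collection loop: every area marked VM_LAZY_FREE widens the [*start, *end) flush range, contributes its page count to nr, and moves onto the local valist. A hedged sketch of what presumably follows the loop, based on the list_for_each_entry_safe() teardown at lines 648-649; the atomic_sub and flush_tlb_kernel_range calls are assumptions about this version:

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);		/* pages no longer pending */

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);	/* one flush for the whole batch */

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);		/* finally give back the KVA */
		spin_unlock(&vmap_area_lock);
	}
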
681 static void free_vmap_area_noflush(struct vmap_area *va) in free_vmap_area_noflush() argument
683 va->flags |= VM_LAZY_FREE; in free_vmap_area_noflush()
684 atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr); in free_vmap_area_noflush()
693 static void free_unmap_vmap_area_noflush(struct vmap_area *va) in free_unmap_vmap_area_noflush() argument
695 unmap_vmap_area(va); in free_unmap_vmap_area_noflush()
696 free_vmap_area_noflush(va); in free_unmap_vmap_area_noflush()
702 static void free_unmap_vmap_area(struct vmap_area *va) in free_unmap_vmap_area() argument
704 flush_cache_vunmap(va->va_start, va->va_end); in free_unmap_vmap_area()
705 free_unmap_vmap_area_noflush(va); in free_unmap_vmap_area()
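
These three helpers layer the teardown: free_vmap_area_noflush() only marks the area lazy and accounts its pages, free_unmap_vmap_area_noflush() additionally tears down the page tables, and free_unmap_vmap_area() flushes the cache first. A sketch of the composition; the lazy_max_pages() threshold check is an assumption about how the deferred purge is normally triggered:

	static void free_vmap_area_noflush(struct vmap_area *va)
	{
		va->flags |= VM_LAZY_FREE;
		atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
		if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
			try_purge_vmap_area_lazy();	/* assumed trigger for the batch purge */
	}

	static void free_unmap_vmap_area_noflush(struct vmap_area *va)
	{
		unmap_vmap_area(va);		/* vunmap_page_range(va_start, va_end) */
		free_vmap_area_noflush(va);	/* defer the KVA release to the purge */
	}

	static void free_unmap_vmap_area(struct vmap_area *va)
	{
		flush_cache_vunmap(va->va_start, va->va_end);
		free_unmap_vmap_area_noflush(va);
	}
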
710 struct vmap_area *va; in find_vmap_area() local
713 va = __find_vmap_area(addr); in find_vmap_area()
716 return va; in find_vmap_area()
721 struct vmap_area *va; in free_unmap_vmap_area_addr() local
723 va = find_vmap_area(addr); in free_unmap_vmap_area_addr()
724 BUG_ON(!va); in free_unmap_vmap_area_addr()
725 free_unmap_vmap_area(va); in free_unmap_vmap_area_addr()
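
find_vmap_area() wraps the rb-tree lookup, and free_unmap_vmap_area_addr() is its error-intolerant user here. A short sketch, assuming the lookup is serialized by vmap_area_lock (the lock/unlock lines do not contain "va" and so are missing from this listing):

	static struct vmap_area *find_vmap_area(unsigned long addr)
	{
		struct vmap_area *va;

		spin_lock(&vmap_area_lock);
		va = __find_vmap_area(addr);
		spin_unlock(&vmap_area_lock);

		return va;
	}

	static void free_unmap_vmap_area_addr(unsigned long addr)
	{
		struct vmap_area *va;

		va = find_vmap_area(addr);
		BUG_ON(!va);			/* caller must pass a mapped address */
		free_unmap_vmap_area(va);
	}
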
768 struct vmap_area *va; member
822 struct vmap_area *va; in new_vmap_block() local
834 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, in new_vmap_block()
837 if (IS_ERR(va)) { in new_vmap_block()
839 return ERR_CAST(va); in new_vmap_block()
845 free_vmap_area(va); in new_vmap_block()
849 vaddr = vmap_block_vaddr(va->va_start, 0); in new_vmap_block()
851 vb->va = va; in new_vmap_block()
860 vb_idx = addr_to_vb_idx(va->va_start); in new_vmap_block()
881 vb_idx = addr_to_vb_idx(vb->va->va_start); in free_vmap_block()
887 free_vmap_area_noflush(vb->va); in free_vmap_block()
965 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
1057 unsigned long va_start = vb->va->va_start; in vm_unmap_aliases()
1129 struct vmap_area *va; in vm_map_ram() local
1130 va = alloc_vmap_area(size, PAGE_SIZE, in vm_map_ram()
1132 if (IS_ERR(va)) in vm_map_ram()
1135 addr = va->va_start; in vm_map_ram()
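
The vmap-block lines show how the per-CPU fast path hangs off a vmap_area: struct vmap_block keeps a *va member (line 768), new_vmap_block() carves one VMAP_BLOCK_SIZE area for it, and vm_map_ram() only falls back to a private vmap_area when the request is too large for a block. A hedged fragment of that branch in vm_map_ram(); the vb_alloc() fast path, the size computation and the VMAP_MAX_ALLOC cutoff are assumptions:

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);	/* per-CPU block fast path */
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;

		/* too big for a block: grab a dedicated vmap_area */
		va = alloc_vmap_area(size, PAGE_SIZE,
				     VMALLOC_START, VMALLOC_END,
				     node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
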
1200 struct vmap_area *va; in vmalloc_init() local
1218 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT); in vmalloc_init()
1219 va->flags = VM_VM_AREA; in vmalloc_init()
1220 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
1221 va->va_end = va->va_start + tmp->size; in vmalloc_init()
1222 va->vm = tmp; in vmalloc_init()
1223 __insert_vmap_area(va); in vmalloc_init()
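
vmalloc_init() imports the boot-time vm_struct entries into the vmap_area tree; the excerpt is one loop iteration. A sketch of the surrounding loop, assuming the early entries are chained through a vmlist pointer as in this era of the file:

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}
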
1306 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, in setup_vmalloc_vm() argument
1311 vm->addr = (void *)va->va_start; in setup_vmalloc_vm()
1312 vm->size = va->va_end - va->va_start; in setup_vmalloc_vm()
1314 va->vm = vm; in setup_vmalloc_vm()
1315 va->flags |= VM_VM_AREA; in setup_vmalloc_vm()
1334 struct vmap_area *va; in __get_vm_area_node() local
1353 va = alloc_vmap_area(size, align, start, end, node, gfp_mask); in __get_vm_area_node()
1354 if (IS_ERR(va)) { in __get_vm_area_node()
1359 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
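
setup_vmalloc_vm() copies the vmap_area geometry into the vm_struct and ties the two objects together; __get_vm_area_node() allocates both and calls it. A sketch of setup_vmalloc_vm(), with the flags/caller assignments and the locking assumed from the usual shape of this helper:

	static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
				     unsigned long flags, const void *caller)
	{
		spin_lock(&vmap_area_lock);
		vm->flags = flags;
		vm->addr = (void *)va->va_start;
		vm->size = va->va_end - va->va_start;
		vm->caller = caller;
		va->vm = vm;			/* vmap_area now knows its vm_struct */
		va->flags |= VM_VM_AREA;	/* marks a fully set up vmalloc area */
		spin_unlock(&vmap_area_lock);
	}
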
1413 struct vmap_area *va; in find_vm_area() local
1415 va = find_vmap_area((unsigned long)addr); in find_vm_area()
1416 if (va && va->flags & VM_VM_AREA) in find_vm_area()
1417 return va->vm; in find_vm_area()
1432 struct vmap_area *va; in remove_vm_area() local
1434 va = find_vmap_area((unsigned long)addr); in remove_vm_area()
1435 if (va && va->flags & VM_VM_AREA) { in remove_vm_area()
1436 struct vm_struct *vm = va->vm; in remove_vm_area()
1439 va->vm = NULL; in remove_vm_area()
1440 va->flags &= ~VM_VM_AREA; in remove_vm_area()
1443 vmap_debug_free_range(va->va_start, va->va_end); in remove_vm_area()
1445 free_unmap_vmap_area(va); in remove_vm_area()
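
find_vm_area() and remove_vm_area() share the same pattern: look the address up in the vmap_area tree and check VM_VM_AREA before trusting va->vm. remove_vm_area() then detaches the vm_struct and releases the mapping. A hedged sketch of that path, with the locking and the return values assumed:

	struct vm_struct *remove_vm_area(const void *addr)
	{
		struct vmap_area *va;

		va = find_vmap_area((unsigned long)addr);
		if (va && va->flags & VM_VM_AREA) {
			struct vm_struct *vm = va->vm;

			spin_lock(&vmap_area_lock);	/* assumed: detach under the lock */
			va->vm = NULL;
			va->flags &= ~VM_VM_AREA;
			spin_unlock(&vmap_area_lock);

			vmap_debug_free_range(va->va_start, va->va_end);
			free_unmap_vmap_area(va);	/* flush, unmap, lazy-free the KVA */

			return vm;
		}
		return NULL;
	}
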
2002 struct vmap_area *va; in vread() local
2013 list_for_each_entry(va, &vmap_area_list, list) { in vread()
2017 if (!(va->flags & VM_VM_AREA)) in vread()
2020 vm = va->vm; in vread()
2083 struct vmap_area *va; in vwrite() local
2095 list_for_each_entry(va, &vmap_area_list, list) { in vwrite()
2099 if (!(va->flags & VM_VM_AREA)) in vwrite()
2102 vm = va->vm; in vwrite()
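
vread() and vwrite() walk vmap_area_list directly rather than a vm_struct list; both skip entries that are not VM_VM_AREA (vmap blocks, areas mid-teardown). A skeleton of the shared iteration, with the copy and count bookkeeping reduced to a comment and the locking assumed:

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;	/* vm_map_ram area or being torn down */

		vm = va->vm;
		/* ... copy up to 'count' bytes into/out of [vm->addr, vm->addr + vm->size) ... */
	}
	spin_unlock(&vmap_area_lock);
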
2298 struct vmap_area *va = NULL; in pvm_find_next_prev() local
2301 va = rb_entry(n, struct vmap_area, rb_node); in pvm_find_next_prev()
2302 if (end < va->va_end) in pvm_find_next_prev()
2304 else if (end > va->va_end) in pvm_find_next_prev()
2310 if (!va) in pvm_find_next_prev()
2313 if (va->va_end > end) { in pvm_find_next_prev()
2314 *pnext = va; in pvm_find_next_prev()
2317 *pprev = va; in pvm_find_next_prev()
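
pvm_find_next_prev() serves the per-CPU allocator: given an end address it returns the vmap_area at or after it (*pnext) and the one before it (*pprev). A sketch of the full helper, reconstructed around the excerpted comparisons; node_to_va() is assumed to be the file's local rb_node-to-vmap_area converter:

	static bool pvm_find_next_prev(unsigned long end,
				       struct vmap_area **pnext,
				       struct vmap_area **pprev)
	{
		struct rb_node *n = vmap_area_root.rb_node;
		struct vmap_area *va = NULL;

		while (n) {
			va = rb_entry(n, struct vmap_area, rb_node);
			if (end < va->va_end)
				n = n->rb_left;
			else if (end > va->va_end)
				n = n->rb_right;
			else
				break;			/* exact hit on va_end */
		}

		if (!va)
			return false;			/* tree is empty */

		if (va->va_end > end) {
			*pnext = va;
			*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
		} else {
			*pprev = va;
			*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
		}
		return true;
	}
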
2507 struct vmap_area *va = vas[area]; in pcpu_get_vm_areas() local
2509 va->va_start = base + offsets[area]; in pcpu_get_vm_areas()
2510 va->va_end = va->va_start + sizes[area]; in pcpu_get_vm_areas()
2511 __insert_vmap_area(va); in pcpu_get_vm_areas()
2559 struct vmap_area *va; in s_start() local
2562 va = list_entry((&vmap_area_list)->next, typeof(*va), list); in s_start()
2563 while (n > 0 && &va->list != &vmap_area_list) { in s_start()
2565 va = list_entry(va->list.next, typeof(*va), list); in s_start()
2567 if (!n && &va->list != &vmap_area_list) in s_start()
2568 return va; in s_start()
2576 struct vmap_area *va = p, *next; in s_next() local
2579 next = list_entry(va->list.next, typeof(*va), list); in s_next()
2618 struct vmap_area *va = p; in s_show() local
2625 if (!(va->flags & VM_VM_AREA)) in s_show()
2628 v = va->vm; in s_show()
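
s_start(), s_next() and s_show() are the seq_file callbacks behind /proc/vmallocinfo: s_start skips *pos entries of vmap_area_list, s_next advances one entry, and s_show prints one vm_struct, again skipping anything without VM_VM_AREA. A sketch of s_next matching the open-coded list walk visible in s_start; the position bookkeeping is assumed:

	static void *s_next(struct seq_file *m, void *p, loff_t *pos)
	{
		struct vmap_area *va = p, *next;

		++(*pos);
		next = list_entry(va->list.next, typeof(*va), list);
		if (&next->list != &vmap_area_list)
			return next;	/* more areas to show */

		return NULL;		/* end of the list */
	}
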