Lines matching references to the identifier area (cross-reference of mm/vmalloc.c)

1294 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)  in map_vm_area()  argument
1296 unsigned long addr = (unsigned long)area->addr; in map_vm_area()
1297 unsigned long end = addr + get_vm_area_size(area); in map_vm_area()

1335 struct vm_struct *area; in __get_vm_area_node() local
1346 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
1347 if (unlikely(!area)) in __get_vm_area_node()
1355 kfree(area); in __get_vm_area_node()
1359 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
1361 return area; in __get_vm_area_node()

1454 struct vm_struct *area; in __vunmap() local
1463 area = remove_vm_area(addr); in __vunmap()
1464 if (unlikely(!area)) { in __vunmap()
1470 debug_check_no_locks_freed(addr, get_vm_area_size(area)); in __vunmap()
1471 debug_check_no_obj_freed(addr, get_vm_area_size(area)); in __vunmap()
1476 for (i = 0; i < area->nr_pages; i++) { in __vunmap()
1477 struct page *page = area->pages[i]; in __vunmap()
1483 if (area->flags & VM_VPAGES) in __vunmap()
1484 vfree(area->pages); in __vunmap()
1486 kfree(area->pages); in __vunmap()
1489 kfree(area); in __vunmap()

1555 struct vm_struct *area; in vmap() local
1562 area = get_vm_area_caller((count << PAGE_SHIFT), flags, in vmap()
1564 if (!area) in vmap()
1567 if (map_vm_area(area, prot, pages)) { in vmap()
1568 vunmap(area->addr); in vmap()
1572 return area->addr; in vmap()
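
The vmap() hits above trace the usual caller-visible pattern: reserve a vm_struct with get_vm_area_caller(), install the caller's pages with map_vm_area(), and tear the range back down with vunmap() if the mapping fails. A minimal caller-side sketch, assuming hypothetical names (NR_EXAMPLE_PAGES, map_example_pages()):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define NR_EXAMPLE_PAGES 4

static void *map_example_pages(struct page **pages)
{
	void *vaddr;
	int i;

	for (i = 0; i < NR_EXAMPLE_PAGES; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto fail;
	}

	/* vmap() goes through get_vm_area_caller() and map_vm_area() above */
	vaddr = vmap(pages, NR_EXAMPLE_PAGES, VM_MAP, PAGE_KERNEL);
	if (vaddr)
		return vaddr;		/* release later with vunmap(vaddr) */
fail:
	while (i--)
		__free_page(pages[i]);
	return NULL;
}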

1579 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
1588 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; in __vmalloc_area_node()
1591 area->nr_pages = nr_pages; in __vmalloc_area_node()
1595 PAGE_KERNEL, node, area->caller); in __vmalloc_area_node()
1596 area->flags |= VM_VPAGES; in __vmalloc_area_node()
1600 area->pages = pages; in __vmalloc_area_node()
1601 if (!area->pages) { in __vmalloc_area_node()
1602 remove_vm_area(area->addr); in __vmalloc_area_node()
1603 kfree(area); in __vmalloc_area_node()
1607 for (i = 0; i < area->nr_pages; i++) { in __vmalloc_area_node()
1617 area->nr_pages = i; in __vmalloc_area_node()
1620 area->pages[i] = page; in __vmalloc_area_node()
1625 if (map_vm_area(area, prot, pages)) in __vmalloc_area_node()
1627 return area->addr; in __vmalloc_area_node()
1632 (area->nr_pages*PAGE_SIZE), area->size); in __vmalloc_area_node()
1633 vfree(area->addr); in __vmalloc_area_node()

1658 struct vm_struct *area; in __vmalloc_node_range() local
1666 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | in __vmalloc_node_range()
1668 if (!area) in __vmalloc_node_range()
1671 addr = __vmalloc_area_node(area, gfp_mask, prot, node); in __vmalloc_node_range()
1680 clear_vm_uninitialized_flag(area); in __vmalloc_node_range()
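
For orientation, a plain vmalloc() call reaches the lines above through __vmalloc_node_range(), which pairs __get_vm_area_node() with __vmalloc_area_node() and clears VM_UNINITIALIZED once the pages are in place. A trivial sketch with a made-up wrapper name (alloc_example_buffer()):

#include <linux/string.h>
#include <linux/vmalloc.h>

static void *alloc_example_buffer(size_t size)
{
	void *buf = vmalloc(size);	/* __vmalloc_node_range() under the hood */

	if (buf)
		memset(buf, 0, size);	/* vzalloc() would do the zeroing for us */
	return buf;			/* freed with vfree(buf), i.e. __vunmap() */
}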

1775 struct vm_struct *area; in vmalloc_user() local
1783 area = find_vm_area(ret); in vmalloc_user()
1784 area->flags |= VM_USERMAP; in vmalloc_user()

1880 struct vm_struct *area; in vmalloc_32_user() local
1886 area = find_vm_area(ret); in vmalloc_32_user()
1887 area->flags |= VM_USERMAP; in vmalloc_32_user()

2150 struct vm_struct *area; in remap_vmalloc_range_partial() local
2157 area = find_vm_area(kaddr); in remap_vmalloc_range_partial()
2158 if (!area) in remap_vmalloc_range_partial()
2161 if (!(area->flags & VM_USERMAP)) in remap_vmalloc_range_partial()
2164 if (kaddr + size > area->addr + area->size) in remap_vmalloc_range_partial()
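
vmalloc_user() and vmalloc_32_user() set VM_USERMAP precisely so that remap_vmalloc_range_partial() will accept the area later; without the flag it fails at the check listed above. A driver-style sketch, assuming a buffer already allocated with vmalloc_user() (example_buf and example_mmap() are illustrative names):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_buf;	/* assumed: allocated with vmalloc_user() */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* fails with -EINVAL unless the backing area carries VM_USERMAP */
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}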

2245 struct vm_struct *area; in alloc_vm_area() local
2247 area = get_vm_area_caller(size, VM_IOREMAP, in alloc_vm_area()
2249 if (area == NULL) in alloc_vm_area()
2256 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in alloc_vm_area()
2258 free_vm_area(area); in alloc_vm_area()
2262 return area; in alloc_vm_area()

2266 void free_vm_area(struct vm_struct *area) in free_vm_area() argument
2269 ret = remove_vm_area(area->addr); in free_vm_area()
2270 BUG_ON(ret != area); in free_vm_area()
2271 kfree(area); in free_vm_area()
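
alloc_vm_area() and free_vm_area() bracket the same vm_struct lifetime: get_vm_area_caller() plus apply_to_page_range() on the way in, remove_vm_area() plus kfree() on the way out. A sketch assuming the two-argument alloc_vm_area() of this kernel generation, with made-up wrapper names (reserve_example_area(), release_example_area()):

#include <linux/printk.h>
#include <linux/vmalloc.h>

static struct vm_struct *reserve_example_area(size_t size)
{
	/* NULL ptes: populate page tables and reserve the range, map no pages */
	struct vm_struct *area = alloc_vm_area(size, NULL);

	if (area)
		pr_debug("reserved %zu bytes at %p\n", size, area->addr);
	return area;
}

static void release_example_area(struct vm_struct *area)
{
	free_vm_area(area);	/* remove_vm_area() + kfree(area), as listed above */
}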

2391 int area, area2, last_area, term_area; in pcpu_get_vm_areas() local
2397 for (last_area = 0, area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
2398 start = offsets[area]; in pcpu_get_vm_areas()
2399 end = start + sizes[area]; in pcpu_get_vm_areas()
2402 BUG_ON(!IS_ALIGNED(offsets[area], align)); in pcpu_get_vm_areas()
2403 BUG_ON(!IS_ALIGNED(sizes[area], align)); in pcpu_get_vm_areas()
2407 last_area = area; in pcpu_get_vm_areas()
2413 if (area2 == area) in pcpu_get_vm_areas()
2432 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
2433 vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); in pcpu_get_vm_areas()
2434 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); in pcpu_get_vm_areas()
2435 if (!vas[area] || !vms[area]) in pcpu_get_vm_areas()
2442 area = term_area = last_area; in pcpu_get_vm_areas()
2443 start = offsets[area]; in pcpu_get_vm_areas()
2444 end = start + sizes[area]; in pcpu_get_vm_areas()
2476 term_area = area; in pcpu_get_vm_areas()
2489 term_area = area; in pcpu_get_vm_areas()
2497 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
2498 if (area == term_area) in pcpu_get_vm_areas()
2500 start = offsets[area]; in pcpu_get_vm_areas()
2501 end = start + sizes[area]; in pcpu_get_vm_areas()
2506 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
2507 struct vmap_area *va = vas[area]; in pcpu_get_vm_areas()
2509 va->va_start = base + offsets[area]; in pcpu_get_vm_areas()
2510 va->va_end = va->va_start + sizes[area]; in pcpu_get_vm_areas()
2519 for (area = 0; area < nr_vms; area++) in pcpu_get_vm_areas()
2520 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, in pcpu_get_vm_areas()
2527 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
2528 kfree(vas[area]); in pcpu_get_vm_areas()
2529 kfree(vms[area]); in pcpu_get_vm_areas()