Lines matching refs: area (identifier cross-reference; all hits below are in mm/vmalloc.c)
1292 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages) in map_vm_area() argument
1294 unsigned long addr = (unsigned long)area->addr; in map_vm_area()
1295 unsigned long end = addr + get_vm_area_size(area); in map_vm_area()
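Note: map_vm_area() is the primitive that wires an existing page array into the page tables covering a vm_struct; vmap() further down is essentially get_vm_area() plus map_vm_area(). A minimal sketch of direct use for this kernel's three-argument signature, assuming the caller already holds the pages (function and variable names are illustrative):

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    static void *map_two_pages(struct page **pages)
    {
            struct vm_struct *area;

            /* reserves two pages of vmalloc space plus the guard page */
            area = get_vm_area(2 * PAGE_SIZE, VM_MAP);
            if (!area)
                    return NULL;

            /* fill the ptes for the whole area with the given protections */
            if (map_vm_area(area, PAGE_KERNEL, pages)) {
                    free_vm_area(area);     /* also unmaps anything partial */
                    return NULL;
            }
            return area->addr;      /* kernel virtual address of the mapping */
    }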
1333 struct vm_struct *area; in __get_vm_area_node() local
1344 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
1345 if (unlikely(!area)) in __get_vm_area_node()
1353 kfree(area); in __get_vm_area_node()
1357 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
1359 return area; in __get_vm_area_node()
1453 struct vm_struct *area; in __vunmap() local
1462 area = remove_vm_area(addr); in __vunmap()
1463 if (unlikely(!area)) { in __vunmap()
1469 debug_check_no_locks_freed(addr, area->size); in __vunmap()
1470 debug_check_no_obj_freed(addr, area->size); in __vunmap()
1475 for (i = 0; i < area->nr_pages; i++) { in __vunmap()
1476 struct page *page = area->pages[i]; in __vunmap()
1482 if (area->flags & VM_VPAGES) in __vunmap()
1483 vfree(area->pages); in __vunmap()
1485 kfree(area->pages); in __vunmap()
1488 kfree(area); in __vunmap()
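Note: __vunmap() is the shared teardown behind both vfree() and vunmap(): it removes the area, frees the backing pages only on the vfree() path, releases the pages[] array (itself vfree()d when VM_VPAGES records that it was vmalloc()ed because it was too big for kmalloc()), and finally kfree()s the vm_struct. The caller-visible pairing rule, as a sketch:

    #include <linux/vmalloc.h>

    static int vfree_demo(void)
    {
            void *buf = vmalloc(64 * 1024);
            if (!buf)
                    return -ENOMEM;
            /* ... use buf ... */
            vfree(buf);     /* ends in __vunmap(addr, 1): unmap and free pages */
            return 0;
    }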
1554 struct vm_struct *area; in vmap() local
1561 area = get_vm_area_caller((count << PAGE_SHIFT), flags, in vmap()
1563 if (!area) in vmap()
1566 if (map_vm_area(area, prot, pages)) { in vmap()
1567 vunmap(area->addr); in vmap()
1571 return area->addr; in vmap()
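Note: vmap() gives one virtually contiguous kernel window over scattered physical pages, and on the map_vm_area() failure quoted above it cleans up after itself with vunmap(). A usage sketch, with abbreviated error handling and illustrative names:

    #include <linux/vmalloc.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    #define NPAGES 4

    static void *vmap_demo(void)
    {
            struct page *pages[NPAGES];
            void *virt;
            int i;

            for (i = 0; i < NPAGES; i++) {
                    pages[i] = alloc_page(GFP_KERNEL);
                    if (!pages[i])
                            goto undo;
            }

            virt = vmap(pages, NPAGES, VM_MAP, PAGE_KERNEL);
            if (!virt)
                    goto undo;
            return virt;    /* tear down with vunmap(virt), then __free_page() */

    undo:
            while (i--)
                    __free_page(pages[i]);
            return NULL;
    }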
1578 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
1587 nr_pages = get_vm_area_size(area) >> PAGE_SHIFT; in __vmalloc_area_node()
1590 area->nr_pages = nr_pages; in __vmalloc_area_node()
1594 PAGE_KERNEL, node, area->caller); in __vmalloc_area_node()
1595 area->flags |= VM_VPAGES; in __vmalloc_area_node()
1599 area->pages = pages; in __vmalloc_area_node()
1600 if (!area->pages) { in __vmalloc_area_node()
1601 remove_vm_area(area->addr); in __vmalloc_area_node()
1602 kfree(area); in __vmalloc_area_node()
1606 for (i = 0; i < area->nr_pages; i++) { in __vmalloc_area_node()
1616 area->nr_pages = i; in __vmalloc_area_node()
1619 area->pages[i] = page; in __vmalloc_area_node()
1624 if (map_vm_area(area, prot, pages)) in __vmalloc_area_node()
1626 return area->addr; in __vmalloc_area_node()
1631 (area->nr_pages*PAGE_SIZE), area->size); in __vmalloc_area_node()
1632 vfree(area->addr); in __vmalloc_area_node()
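Note: __vmalloc_area_node() populates an area one order-0 page at a time: it first allocates the pages[] array (recursing into vmalloc and setting VM_VPAGES when that array is too large for kmalloc()), then the pages themselves, then calls map_vm_area(); on partial failure it records how far it got in area->nr_pages and lets vfree() (quoted above) release exactly those pages. Callers reach it through __vmalloc() and friends, e.g. this sketch:

    #include <linux/vmalloc.h>
    #include <linux/mm.h>
    #include <linux/sizes.h>

    static void *one_meg_zeroed(void)
    {
            /* 1 MiB needs no high-order pages: the area is built page by page */
            return __vmalloc(SZ_1M, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
    }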
1657 struct vm_struct *area; in __vmalloc_node_range() local
1665 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | in __vmalloc_node_range()
1667 if (!area) in __vmalloc_node_range()
1670 addr = __vmalloc_area_node(area, gfp_mask, prot, node); in __vmalloc_node_range()
1679 clear_vm_uninitialized_flag(area); in __vmalloc_node_range()
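Note: __vmalloc_node_range() is the low-level entry point that also takes the [start, end) window to allocate in; the VM_UNINITIALIZED flag set above is cleared only once the area is fully set up, so concurrent /proc/vmallocinfo readers skip half-built areas. A sketch of a module_alloc()-style caller (this is how several arches place module text; the constants are arch-specific and the function is not exported to modules):

    static void *module_alloc_like(unsigned long size)
    {
            return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
                                        GFP_KERNEL, PAGE_KERNEL_EXEC,
                                        NUMA_NO_NODE,
                                        __builtin_return_address(0));
    }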
1774 struct vm_struct *area; in vmalloc_user() local
1782 area = find_vm_area(ret); in vmalloc_user()
1783 area->flags |= VM_USERMAP; in vmalloc_user()
1879 struct vm_struct *area; in vmalloc_32_user() local
1885 area = find_vm_area(ret); in vmalloc_32_user()
1886 area->flags |= VM_USERMAP; in vmalloc_32_user()
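Note: vmalloc_user() and vmalloc_32_user() both zero the buffer and, as quoted, look the area back up with find_vm_area() to tag it VM_USERMAP so remap_vmalloc_range() will accept it later; the _32_ variant additionally keeps the backing pages 32-bit addressable. Sketch (helper name is illustrative):

    #include <linux/types.h>
    #include <linux/vmalloc.h>

    static void *make_mmapable_buf(size_t len, bool need_32bit)
    {
            /* both zero the memory and set VM_USERMAP on the area */
            return need_32bit ? vmalloc_32_user(len) : vmalloc_user(len);
    }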
2149 struct vm_struct *area; in remap_vmalloc_range_partial() local
2156 area = find_vm_area(kaddr); in remap_vmalloc_range_partial()
2157 if (!area) in remap_vmalloc_range_partial()
2160 if (!(area->flags & VM_USERMAP)) in remap_vmalloc_range_partial()
2163 if (kaddr + size > area->addr + area->size) in remap_vmalloc_range_partial()
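Note: these are exactly the -EINVAL checks a driver hits from remap_vmalloc_range(): the buffer must carry VM_USERMAP (i.e. come from vmalloc_user() or vmalloc_32_user()) and the requested window must fit inside the area. A sketch of the usual mmap handler, with mybuf a hypothetical driver global:

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *mybuf;     /* set up once: mybuf = vmalloc_user(BUF_SIZE) */

    static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
    {
            /* wraps remap_vmalloc_range_partial() over the whole vma */
            return remap_vmalloc_range(vma, mybuf, vma->vm_pgoff);
    }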
2244 struct vm_struct *area; in alloc_vm_area() local
2246 area = get_vm_area_caller(size, VM_IOREMAP, in alloc_vm_area()
2248 if (area == NULL) in alloc_vm_area()
2255 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in alloc_vm_area()
2257 free_vm_area(area); in alloc_vm_area()
2261 return area; in alloc_vm_area()
2265 void free_vm_area(struct vm_struct *area) in free_vm_area() argument
2268 ret = remove_vm_area(area->addr); in free_vm_area()
2269 BUG_ON(ret != area); in free_vm_area()
2270 kfree(area); in free_vm_area()
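Note: alloc_vm_area() reserves kernel virtual space and pre-instantiates its page tables without any backing pages (the apply_to_page_range() call above runs a pte-collecting callback); Xen-style callers then point the returned ptes at foreign machine pages. free_vm_area() is its strict inverse and, as quoted, BUG()s if handed an address that is not a registered area. Sketch with illustrative names:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void vm_area_demo(void)
    {
            pte_t *ptes[1];
            struct vm_struct *area;

            area = alloc_vm_area(PAGE_SIZE, ptes); /* ptes[0] -> the area's pte */
            if (!area)
                    return;
            /* ... hand ptes[0] / area->addr to a hypervisor backend ... */
            free_vm_area(area);
    }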
2390 int area, area2, last_area, term_area; in pcpu_get_vm_areas() local
2396 for (last_area = 0, area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
2397 start = offsets[area]; in pcpu_get_vm_areas()
2398 end = start + sizes[area]; in pcpu_get_vm_areas()
2401 BUG_ON(!IS_ALIGNED(offsets[area], align)); in pcpu_get_vm_areas()
2402 BUG_ON(!IS_ALIGNED(sizes[area], align)); in pcpu_get_vm_areas()
2406 last_area = area; in pcpu_get_vm_areas()
2412 if (area2 == area) in pcpu_get_vm_areas()
2431 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
2432 vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL); in pcpu_get_vm_areas()
2433 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); in pcpu_get_vm_areas()
2434 if (!vas[area] || !vms[area]) in pcpu_get_vm_areas()
2441 area = term_area = last_area; in pcpu_get_vm_areas()
2442 start = offsets[area]; in pcpu_get_vm_areas()
2443 end = start + sizes[area]; in pcpu_get_vm_areas()
2475 term_area = area; in pcpu_get_vm_areas()
2488 term_area = area; in pcpu_get_vm_areas()
2496 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
2497 if (area == term_area) in pcpu_get_vm_areas()
2499 start = offsets[area]; in pcpu_get_vm_areas()
2500 end = start + sizes[area]; in pcpu_get_vm_areas()
2505 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
2506 struct vmap_area *va = vas[area]; in pcpu_get_vm_areas()
2508 va->va_start = base + offsets[area]; in pcpu_get_vm_areas()
2509 va->va_end = va->va_start + sizes[area]; in pcpu_get_vm_areas()
2518 for (area = 0; area < nr_vms; area++) in pcpu_get_vm_areas()
2519 setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC, in pcpu_get_vm_areas()
2526 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
2527 kfree(vas[area]); in pcpu_get_vm_areas()
2528 kfree(vms[area]); in pcpu_get_vm_areas()
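Note: pcpu_get_vm_areas() carves nr_vms areas out of vmalloc space so that every area sits at its given offset from one common, congruent base; the loop above scans candidate bases top-down from the end of vmalloc space, restarting (the term_area bookkeeping) whenever one of the areas collides with an existing vmap_area. A hedged sketch of the percpu allocator's call with illustrative geometry (SMP-only, not exported):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void pcpu_areas_demo(void)
    {
            static const unsigned long offsets[] = { 0, 1UL << 20 };
            static const unsigned long sizes[]   = { 4 * PAGE_SIZE,
                                                     4 * PAGE_SIZE };
            struct vm_struct **vms;

            vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
            if (!vms)
                    return;
            /* vms[i]->addr - offsets[i] is the same base for every i */
            pcpu_free_vm_areas(vms, 2);
    }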