Lines Matching refs:vma
61 struct vm_area_struct *vma, struct vm_area_struct *prev,
98 void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
100 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
102 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
103 if (vma_wants_writenotify(vma)) { in vma_set_page_prot()
105 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, in vma_set_page_prot()
237 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument
240 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct()
242 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct()
246 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
254 void unlink_file_vma(struct vm_area_struct *vma) in unlink_file_vma() argument
256 struct file *file = vma->vm_file; in unlink_file_vma()
261 __remove_shared_vm_struct(vma, file, mapping); in unlink_file_vma()
269 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) in remove_vma() argument
271 struct vm_area_struct *next = vma->vm_next; in remove_vma()
274 if (vma->vm_ops && vma->vm_ops->close) in remove_vma()
275 vma->vm_ops->close(vma); in remove_vma()
276 if (vma->vm_file) in remove_vma()
277 fput(vma->vm_file); in remove_vma()
278 mpol_put(vma_policy(vma)); in remove_vma()
279 kmem_cache_free(vm_area_cachep, vma); in remove_vma()
355 static long vma_compute_subtree_gap(struct vm_area_struct *vma) in vma_compute_subtree_gap() argument
358 max = vma->vm_start; in vma_compute_subtree_gap()
359 if (vma->vm_prev) in vma_compute_subtree_gap()
360 max -= vma->vm_prev->vm_end; in vma_compute_subtree_gap()
361 if (vma->vm_rb.rb_left) { in vma_compute_subtree_gap()
362 subtree_gap = rb_entry(vma->vm_rb.rb_left, in vma_compute_subtree_gap()
367 if (vma->vm_rb.rb_right) { in vma_compute_subtree_gap()
368 subtree_gap = rb_entry(vma->vm_rb.rb_right, in vma_compute_subtree_gap()
384 struct vm_area_struct *vma; in browse_rb() local
385 vma = rb_entry(nd, struct vm_area_struct, vm_rb); in browse_rb()
386 if (vma->vm_start < prev) { in browse_rb()
388 vma->vm_start, prev); in browse_rb()
391 if (vma->vm_start < pend) { in browse_rb()
393 vma->vm_start, pend); in browse_rb()
396 if (vma->vm_start > vma->vm_end) { in browse_rb()
398 vma->vm_start, vma->vm_end); in browse_rb()
401 if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { in browse_rb()
403 vma->rb_subtree_gap, in browse_rb()
404 vma_compute_subtree_gap(vma)); in browse_rb()
409 prev = vma->vm_start; in browse_rb()
410 pend = vma->vm_end; in browse_rb()
427 struct vm_area_struct *vma; in validate_mm_rb() local
428 vma = rb_entry(nd, struct vm_area_struct, vm_rb); in validate_mm_rb()
429 VM_BUG_ON_VMA(vma != ignore && in validate_mm_rb()
430 vma->rb_subtree_gap != vma_compute_subtree_gap(vma), in validate_mm_rb()
431 vma); in validate_mm_rb()
440 struct vm_area_struct *vma = mm->mmap; in validate_mm() local
442 while (vma) { in validate_mm()
443 struct anon_vma *anon_vma = vma->anon_vma; in validate_mm()
448 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in validate_mm()
453 highest_address = vma->vm_end; in validate_mm()
454 vma = vma->vm_next; in validate_mm()
487 static void vma_gap_update(struct vm_area_struct *vma) in RB_DECLARE_CALLBACKS()
493 vma_gap_callbacks_propagate(&vma->vm_rb, NULL); in RB_DECLARE_CALLBACKS()
496 static inline void vma_rb_insert(struct vm_area_struct *vma, in vma_rb_insert() argument
502 rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); in vma_rb_insert()
505 static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) in vma_rb_erase() argument
511 validate_mm_rb(root, vma); in vma_rb_erase()
518 rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); in vma_rb_erase()
536 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_pre_update_vma() argument
540 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_pre_update_vma()
545 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_post_update_vma() argument
549 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_post_update_vma()
591 struct vm_area_struct *vma; in count_vma_pages_range() local
594 vma = find_vma_intersection(mm, addr, end); in count_vma_pages_range()
595 if (!vma) in count_vma_pages_range()
598 nr_pages = (min(end, vma->vm_end) - in count_vma_pages_range()
599 max(addr, vma->vm_start)) >> PAGE_SHIFT; in count_vma_pages_range()
602 for (vma = vma->vm_next; vma; vma = vma->vm_next) { in count_vma_pages_range()
605 if (vma->vm_start > end) in count_vma_pages_range()
608 overlap_len = min(end, vma->vm_end) - vma->vm_start; in count_vma_pages_range()
615 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link_rb() argument
619 if (vma->vm_next) in __vma_link_rb()
620 vma_gap_update(vma->vm_next); in __vma_link_rb()
622 mm->highest_vm_end = vma->vm_end; in __vma_link_rb()
633 rb_link_node(&vma->vm_rb, rb_parent, rb_link); in __vma_link_rb()
634 vma->rb_subtree_gap = 0; in __vma_link_rb()
635 vma_gap_update(vma); in __vma_link_rb()
636 vma_rb_insert(vma, &mm->mm_rb); in __vma_link_rb()
639 static void __vma_link_file(struct vm_area_struct *vma) in __vma_link_file() argument
643 file = vma->vm_file; in __vma_link_file()
647 if (vma->vm_flags & VM_DENYWRITE) in __vma_link_file()
649 if (vma->vm_flags & VM_SHARED) in __vma_link_file()
653 vma_interval_tree_insert(vma, &mapping->i_mmap); in __vma_link_file()
659 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link() argument
663 __vma_link_list(mm, vma, prev, rb_parent); in __vma_link()
664 __vma_link_rb(mm, vma, rb_link, rb_parent); in __vma_link()
667 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in vma_link() argument
673 if (vma->vm_file) { in vma_link()
674 mapping = vma->vm_file->f_mapping; in vma_link()
678 __vma_link(mm, vma, prev, rb_link, rb_parent); in vma_link()
679 __vma_link_file(vma); in vma_link()
692 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in __insert_vm_struct() argument
697 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in __insert_vm_struct()
700 __vma_link(mm, vma, prev, rb_link, rb_parent); in __insert_vm_struct()
705 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_unlink() argument
710 vma_rb_erase(vma, &mm->mm_rb); in __vma_unlink()
711 prev->vm_next = next = vma->vm_next; in __vma_unlink()
726 int vma_adjust(struct vm_area_struct *vma, unsigned long start, in vma_adjust() argument
729 struct mm_struct *mm = vma->vm_mm; in vma_adjust()
730 struct vm_area_struct *next = vma->vm_next; in vma_adjust()
735 struct file *file = vma->vm_file; in vma_adjust()
751 importer = vma; in vma_adjust()
759 importer = vma; in vma_adjust()
760 } else if (end < vma->vm_end) { in vma_adjust()
766 adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT); in vma_adjust()
767 exporter = vma; in vma_adjust()
789 uprobe_munmap(vma, vma->vm_start, vma->vm_end); in vma_adjust()
806 vma_adjust_trans_huge(vma, start, end, adjust_next); in vma_adjust()
808 anon_vma = vma->anon_vma; in vma_adjust()
815 anon_vma_interval_tree_pre_update_vma(vma); in vma_adjust()
822 vma_interval_tree_remove(vma, root); in vma_adjust()
827 if (start != vma->vm_start) { in vma_adjust()
828 vma->vm_start = start; in vma_adjust()
831 if (end != vma->vm_end) { in vma_adjust()
832 vma->vm_end = end; in vma_adjust()
835 vma->vm_pgoff = pgoff; in vma_adjust()
844 vma_interval_tree_insert(vma, root); in vma_adjust()
853 __vma_unlink(mm, next, vma); in vma_adjust()
865 vma_gap_update(vma); in vma_adjust()
875 anon_vma_interval_tree_post_update_vma(vma); in vma_adjust()
884 uprobe_mmap(vma); in vma_adjust()
896 anon_vma_merge(vma, next); in vma_adjust()
905 next = vma->vm_next; in vma_adjust()
925 static inline int is_mergeable_vma(struct vm_area_struct *vma, in is_mergeable_vma() argument
936 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) in is_mergeable_vma()
938 if (vma->vm_file != file) in is_mergeable_vma()
940 if (vma->vm_ops && vma->vm_ops->close) in is_mergeable_vma()
947 struct vm_area_struct *vma) in is_mergeable_anon_vma() argument
953 if ((!anon_vma1 || !anon_vma2) && (!vma || in is_mergeable_anon_vma()
954 list_is_singular(&vma->anon_vma_chain))) in is_mergeable_anon_vma()
971 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_before() argument
974 if (is_mergeable_vma(vma, file, vm_flags) && in can_vma_merge_before()
975 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_before()
976 if (vma->vm_pgoff == vm_pgoff) in can_vma_merge_before()
990 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_after() argument
993 if (is_mergeable_vma(vma, file, vm_flags) && in can_vma_merge_after()
994 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_after()
996 vm_pglen = vma_pages(vma); in can_vma_merge_after()
997 if (vma->vm_pgoff + vm_pglen == vm_pgoff) in can_vma_merge_after()
1170 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) in find_mergeable_anon_vma() argument
1175 near = vma->vm_next; in find_mergeable_anon_vma()
1179 anon_vma = reusable_anon_vma(near, vma, near); in find_mergeable_anon_vma()
1183 near = vma->vm_prev; in find_mergeable_anon_vma()
1187 anon_vma = reusable_anon_vma(near, near, vma); in find_mergeable_anon_vma()
1483 int vma_wants_writenotify(struct vm_area_struct *vma) in vma_wants_writenotify() argument
1485 vm_flags_t vm_flags = vma->vm_flags; in vma_wants_writenotify()
1492 if (vma->vm_ops && vma->vm_ops->page_mkwrite) in vma_wants_writenotify()
1497 if (pgprot_val(vma->vm_page_prot) != in vma_wants_writenotify()
1498 pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags))) in vma_wants_writenotify()
1510 return vma->vm_file && vma->vm_file->f_mapping && in vma_wants_writenotify()
1511 mapping_cap_account_dirty(vma->vm_file->f_mapping); in vma_wants_writenotify()
1534 struct vm_area_struct *vma, *prev; in mmap_region() local
1577 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, in mmap_region()
1579 if (vma) in mmap_region()
1587 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); in mmap_region()
1588 if (!vma) { in mmap_region()
1593 vma->vm_mm = mm; in mmap_region()
1594 vma->vm_start = addr; in mmap_region()
1595 vma->vm_end = addr + len; in mmap_region()
1596 vma->vm_flags = vm_flags; in mmap_region()
1597 vma->vm_page_prot = vm_get_page_prot(vm_flags); in mmap_region()
1598 vma->vm_pgoff = pgoff; in mmap_region()
1599 INIT_LIST_HEAD(&vma->anon_vma_chain); in mmap_region()
1618 vma->vm_file = get_file(file); in mmap_region()
1619 error = file->f_op->mmap(file, vma); in mmap_region()
1630 WARN_ON_ONCE(addr != vma->vm_start); in mmap_region()
1632 addr = vma->vm_start; in mmap_region()
1633 vm_flags = vma->vm_flags; in mmap_region()
1635 error = shmem_zero_setup(vma); in mmap_region()
1640 vma_link(mm, vma, prev, rb_link, rb_parent); in mmap_region()
1648 file = vma->vm_file; in mmap_region()
1650 perf_event_mmap(vma); in mmap_region()
1654 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) || in mmap_region()
1655 vma == get_gate_vma(current->mm))) in mmap_region()
1658 vma->vm_flags &= ~VM_LOCKED; in mmap_region()
1662 uprobe_mmap(vma); in mmap_region()
1671 vma->vm_flags |= VM_SOFTDIRTY; in mmap_region()
1673 vma_set_page_prot(vma); in mmap_region()
1678 vma->vm_file = NULL; in mmap_region()
1682 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); in mmap_region()
1690 kmem_cache_free(vm_area_cachep, vma); in mmap_region()
1708 struct vm_area_struct *vma; in unmapped_area() local
1728 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area()
1729 if (vma->rb_subtree_gap < length) in unmapped_area()
1734 gap_end = vma->vm_start; in unmapped_area()
1735 if (gap_end >= low_limit && vma->vm_rb.rb_left) { in unmapped_area()
1737 rb_entry(vma->vm_rb.rb_left, in unmapped_area()
1740 vma = left; in unmapped_area()
1745 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; in unmapped_area()
1754 if (vma->vm_rb.rb_right) { in unmapped_area()
1756 rb_entry(vma->vm_rb.rb_right, in unmapped_area()
1759 vma = right; in unmapped_area()
1766 struct rb_node *prev = &vma->vm_rb; in unmapped_area()
1769 vma = rb_entry(rb_parent(prev), in unmapped_area()
1771 if (prev == vma->vm_rb.rb_left) { in unmapped_area()
1772 gap_start = vma->vm_prev->vm_end; in unmapped_area()
1773 gap_end = vma->vm_start; in unmapped_area()
1802 struct vm_area_struct *vma; in unmapped_area_topdown() local
1831 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area_topdown()
1832 if (vma->rb_subtree_gap < length) in unmapped_area_topdown()
1837 gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; in unmapped_area_topdown()
1838 if (gap_start <= high_limit && vma->vm_rb.rb_right) { in unmapped_area_topdown()
1840 rb_entry(vma->vm_rb.rb_right, in unmapped_area_topdown()
1843 vma = right; in unmapped_area_topdown()
1850 gap_end = vma->vm_start; in unmapped_area_topdown()
1857 if (vma->vm_rb.rb_left) { in unmapped_area_topdown()
1859 rb_entry(vma->vm_rb.rb_left, in unmapped_area_topdown()
1862 vma = left; in unmapped_area_topdown()
1869 struct rb_node *prev = &vma->vm_rb; in unmapped_area_topdown()
1872 vma = rb_entry(rb_parent(prev), in unmapped_area_topdown()
1874 if (prev == vma->vm_rb.rb_right) { in unmapped_area_topdown()
1875 gap_start = vma->vm_prev ? in unmapped_area_topdown()
1876 vma->vm_prev->vm_end : 0; in unmapped_area_topdown()
1914 struct vm_area_struct *vma; in arch_get_unmapped_area() local
1925 vma = find_vma(mm, addr); in arch_get_unmapped_area()
1927 (!vma || addr + len <= vma->vm_start)) in arch_get_unmapped_area()
1950 struct vm_area_struct *vma; in arch_get_unmapped_area_topdown() local
1965 vma = find_vma(mm, addr); in arch_get_unmapped_area_topdown()
1967 (!vma || addr + len <= vma->vm_start)) in arch_get_unmapped_area_topdown()
2034 struct vm_area_struct *vma; in find_vma() local
2037 vma = vmacache_find(mm, addr); in find_vma()
2038 if (likely(vma)) in find_vma()
2039 return vma; in find_vma()
2042 vma = NULL; in find_vma()
2050 vma = tmp; in find_vma()
2058 if (vma) in find_vma()
2059 vmacache_update(addr, vma); in find_vma()
2060 return vma; in find_vma()
2072 struct vm_area_struct *vma; in find_vma_prev() local
2074 vma = find_vma(mm, addr); in find_vma_prev()
2075 if (vma) { in find_vma_prev()
2076 *pprev = vma->vm_prev; in find_vma_prev()
2085 return vma; in find_vma_prev()
2093 static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) in acct_stack_growth() argument
2095 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
2105 if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) in acct_stack_growth()
2111 if (vma->vm_flags & VM_LOCKED) { in acct_stack_growth()
2122 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : in acct_stack_growth()
2123 vma->vm_end - size; in acct_stack_growth()
2124 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
2135 if (vma->vm_flags & VM_LOCKED) in acct_stack_growth()
2137 vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); in acct_stack_growth()
2146 int expand_upwards(struct vm_area_struct *vma, unsigned long address) in expand_upwards() argument
2150 if (!(vma->vm_flags & VM_GROWSUP)) in expand_upwards()
2160 if (unlikely(anon_vma_prepare(vma))) in expand_upwards()
2168 anon_vma_lock_write(vma->anon_vma); in expand_upwards()
2171 if (address > vma->vm_end) { in expand_upwards()
2174 size = address - vma->vm_start; in expand_upwards()
2175 grow = (address - vma->vm_end) >> PAGE_SHIFT; in expand_upwards()
2178 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { in expand_upwards()
2179 error = acct_stack_growth(vma, size, grow); in expand_upwards()
2192 spin_lock(&vma->vm_mm->page_table_lock); in expand_upwards()
2193 anon_vma_interval_tree_pre_update_vma(vma); in expand_upwards()
2194 vma->vm_end = address; in expand_upwards()
2195 anon_vma_interval_tree_post_update_vma(vma); in expand_upwards()
2196 if (vma->vm_next) in expand_upwards()
2197 vma_gap_update(vma->vm_next); in expand_upwards()
2199 vma->vm_mm->highest_vm_end = address; in expand_upwards()
2200 spin_unlock(&vma->vm_mm->page_table_lock); in expand_upwards()
2202 perf_event_mmap(vma); in expand_upwards()
2206 anon_vma_unlock_write(vma->anon_vma); in expand_upwards()
2207 khugepaged_enter_vma_merge(vma, vma->vm_flags); in expand_upwards()
2208 validate_mm(vma->vm_mm); in expand_upwards()
2216 int expand_downwards(struct vm_area_struct *vma, in expand_downwards() argument
2227 if (unlikely(anon_vma_prepare(vma))) in expand_downwards()
2235 anon_vma_lock_write(vma->anon_vma); in expand_downwards()
2238 if (address < vma->vm_start) { in expand_downwards()
2241 size = vma->vm_end - address; in expand_downwards()
2242 grow = (vma->vm_start - address) >> PAGE_SHIFT; in expand_downwards()
2245 if (grow <= vma->vm_pgoff) { in expand_downwards()
2246 error = acct_stack_growth(vma, size, grow); in expand_downwards()
2259 spin_lock(&vma->vm_mm->page_table_lock); in expand_downwards()
2260 anon_vma_interval_tree_pre_update_vma(vma); in expand_downwards()
2261 vma->vm_start = address; in expand_downwards()
2262 vma->vm_pgoff -= grow; in expand_downwards()
2263 anon_vma_interval_tree_post_update_vma(vma); in expand_downwards()
2264 vma_gap_update(vma); in expand_downwards()
2265 spin_unlock(&vma->vm_mm->page_table_lock); in expand_downwards()
2267 perf_event_mmap(vma); in expand_downwards()
2271 anon_vma_unlock_write(vma->anon_vma); in expand_downwards()
2272 khugepaged_enter_vma_merge(vma, vma->vm_flags); in expand_downwards()
2273 validate_mm(vma->vm_mm); in expand_downwards()
2289 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
2294 next = vma->vm_next; in expand_stack()
2299 return expand_upwards(vma, address); in expand_stack()
2305 struct vm_area_struct *vma, *prev; in find_extend_vma() local
2308 vma = find_vma_prev(mm, addr, &prev); in find_extend_vma()
2309 if (vma && (vma->vm_start <= addr)) in find_extend_vma()
2310 return vma; in find_extend_vma()
2318 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
2323 prev = vma->vm_prev; in expand_stack()
2328 return expand_downwards(vma, address); in expand_stack()
2334 struct vm_area_struct *vma; in find_extend_vma() local
2338 vma = find_vma(mm, addr); in find_extend_vma()
2339 if (!vma) in find_extend_vma()
2341 if (vma->vm_start <= addr) in find_extend_vma()
2342 return vma; in find_extend_vma()
2343 if (!(vma->vm_flags & VM_GROWSDOWN)) in find_extend_vma()
2345 start = vma->vm_start; in find_extend_vma()
2346 if (expand_stack(vma, addr)) in find_extend_vma()
2348 if (vma->vm_flags & VM_LOCKED) in find_extend_vma()
2349 populate_vma_page_range(vma, addr, start, NULL); in find_extend_vma()
2350 return vma; in find_extend_vma()
2362 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) in remove_vma_list() argument
2369 long nrpages = vma_pages(vma); in remove_vma_list()
2371 if (vma->vm_flags & VM_ACCOUNT) in remove_vma_list()
2373 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); in remove_vma_list()
2374 vma = remove_vma(vma); in remove_vma_list()
2375 } while (vma); in remove_vma_list()
2386 struct vm_area_struct *vma, struct vm_area_struct *prev, in unmap_region() argument
2395 unmap_vmas(&tlb, vma, start, end); in unmap_region()
2396 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, in unmap_region()
2406 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, in detach_vmas_to_be_unmapped() argument
2413 vma->vm_prev = NULL; in detach_vmas_to_be_unmapped()
2415 vma_rb_erase(vma, &mm->mm_rb); in detach_vmas_to_be_unmapped()
2417 tail_vma = vma; in detach_vmas_to_be_unmapped()
2418 vma = vma->vm_next; in detach_vmas_to_be_unmapped()
2419 } while (vma && vma->vm_start < end); in detach_vmas_to_be_unmapped()
2420 *insertion_point = vma; in detach_vmas_to_be_unmapped()
2421 if (vma) { in detach_vmas_to_be_unmapped()
2422 vma->vm_prev = prev; in detach_vmas_to_be_unmapped()
2423 vma_gap_update(vma); in detach_vmas_to_be_unmapped()
2436 static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in __split_vma() argument
2442 if (is_vm_hugetlb_page(vma) && (addr & in __split_vma()
2443 ~(huge_page_mask(hstate_vma(vma))))) in __split_vma()
2451 *new = *vma; in __split_vma()
2459 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); in __split_vma()
2462 err = vma_dup_policy(vma, new); in __split_vma()
2466 err = anon_vma_clone(new, vma); in __split_vma()
2477 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + in __split_vma()
2480 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); in __split_vma()
2504 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
2510 return __split_vma(mm, vma, addr, new_below); in split_vma()
2521 struct vm_area_struct *vma, *prev, *last; in do_munmap() local
2531 vma = find_vma(mm, start); in do_munmap()
2532 if (!vma) in do_munmap()
2534 prev = vma->vm_prev; in do_munmap()
2539 if (vma->vm_start >= end) in do_munmap()
2549 if (start > vma->vm_start) { in do_munmap()
2557 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) in do_munmap()
2560 error = __split_vma(mm, vma, start, 0); in do_munmap()
2563 prev = vma; in do_munmap()
2573 vma = prev ? prev->vm_next : mm->mmap; in do_munmap()
2579 struct vm_area_struct *tmp = vma; in do_munmap()
2592 detach_vmas_to_be_unmapped(mm, vma, prev, end); in do_munmap()
2593 unmap_region(mm, vma, prev, start, end); in do_munmap()
2595 arch_unmap(mm, vma, start, end); in do_munmap()
2598 remove_vma_list(mm, vma); in do_munmap()
2630 struct vm_area_struct *vma; in SYSCALL_DEFINE5() local
2652 vma = find_vma(mm, start); in SYSCALL_DEFINE5()
2654 if (!vma || !(vma->vm_flags & VM_SHARED)) in SYSCALL_DEFINE5()
2657 if (start < vma->vm_start) in SYSCALL_DEFINE5()
2660 if (start + size > vma->vm_end) { in SYSCALL_DEFINE5()
2663 for (next = vma->vm_next; next; next = next->vm_next) { in SYSCALL_DEFINE5()
2668 if (next->vm_file != vma->vm_file) in SYSCALL_DEFINE5()
2671 if (next->vm_flags != vma->vm_flags) in SYSCALL_DEFINE5()
2682 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; in SYSCALL_DEFINE5()
2683 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; in SYSCALL_DEFINE5()
2684 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; in SYSCALL_DEFINE5()
2688 if (vma->vm_flags & VM_LOCKED) { in SYSCALL_DEFINE5()
2693 for (tmp = vma; tmp->vm_start >= start + size; in SYSCALL_DEFINE5()
2701 file = get_file(vma->vm_file); in SYSCALL_DEFINE5()
2702 ret = do_mmap_pgoff(vma->vm_file, start, size, in SYSCALL_DEFINE5()
2732 struct vm_area_struct *vma, *prev; in do_brk() local
2778 vma = vma_merge(mm, prev, addr, addr + len, flags, in do_brk()
2780 if (vma) in do_brk()
2786 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); in do_brk()
2787 if (!vma) { in do_brk()
2792 INIT_LIST_HEAD(&vma->anon_vma_chain); in do_brk()
2793 vma->vm_mm = mm; in do_brk()
2794 vma->vm_start = addr; in do_brk()
2795 vma->vm_end = addr + len; in do_brk()
2796 vma->vm_pgoff = pgoff; in do_brk()
2797 vma->vm_flags = flags; in do_brk()
2798 vma->vm_page_prot = vm_get_page_prot(flags); in do_brk()
2799 vma_link(mm, vma, prev, rb_link, rb_parent); in do_brk()
2801 perf_event_mmap(vma); in do_brk()
2805 vma->vm_flags |= VM_SOFTDIRTY; in do_brk()
2829 struct vm_area_struct *vma; in exit_mmap() local
2836 vma = mm->mmap; in exit_mmap()
2837 while (vma) { in exit_mmap()
2838 if (vma->vm_flags & VM_LOCKED) in exit_mmap()
2839 munlock_vma_pages_all(vma); in exit_mmap()
2840 vma = vma->vm_next; in exit_mmap()
2846 vma = mm->mmap; in exit_mmap()
2847 if (!vma) /* Can happen if dup_mmap() received an OOM */ in exit_mmap()
2855 unmap_vmas(&tlb, vma, 0, -1); in exit_mmap()
2857 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); in exit_mmap()
2864 while (vma) { in exit_mmap()
2865 if (vma->vm_flags & VM_ACCOUNT) in exit_mmap()
2866 nr_accounted += vma_pages(vma); in exit_mmap()
2867 vma = remove_vma(vma); in exit_mmap()
2876 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in insert_vm_struct() argument
2893 if (!vma->vm_file) { in insert_vm_struct()
2894 BUG_ON(vma->anon_vma); in insert_vm_struct()
2895 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; in insert_vm_struct()
2897 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in insert_vm_struct()
2900 if ((vma->vm_flags & VM_ACCOUNT) && in insert_vm_struct()
2901 security_vm_enough_memory_mm(mm, vma_pages(vma))) in insert_vm_struct()
2904 vma_link(mm, vma, prev, rb_link, rb_parent); in insert_vm_struct()
2916 struct vm_area_struct *vma = *vmap; in copy_vma() local
2917 unsigned long vma_start = vma->vm_start; in copy_vma()
2918 struct mm_struct *mm = vma->vm_mm; in copy_vma()
2927 if (unlikely(!vma->vm_file && !vma->anon_vma)) { in copy_vma()
2934 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
2935 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma)); in copy_vma()
2955 *vmap = vma = new_vma; in copy_vma()
2957 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); in copy_vma()
2961 *new_vma = *vma; in copy_vma()
2965 if (vma_dup_policy(vma, new_vma)) in copy_vma()
2968 if (anon_vma_clone(new_vma, vma)) in copy_vma()
3003 static int special_mapping_fault(struct vm_area_struct *vma,
3009 static void special_mapping_close(struct vm_area_struct *vma) in special_mapping_close() argument
3013 static const char *special_mapping_name(struct vm_area_struct *vma) in special_mapping_name() argument
3015 return ((struct vm_special_mapping *)vma->vm_private_data)->name; in special_mapping_name()
3029 static int special_mapping_fault(struct vm_area_struct *vma, in special_mapping_fault() argument
3041 pgoff = vmf->pgoff - vma->vm_pgoff; in special_mapping_fault()
3043 if (vma->vm_ops == &legacy_special_mapping_vmops) in special_mapping_fault()
3044 pages = vma->vm_private_data; in special_mapping_fault()
3046 pages = ((struct vm_special_mapping *)vma->vm_private_data)-> in special_mapping_fault()
3069 struct vm_area_struct *vma; in __install_special_mapping() local
3071 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); in __install_special_mapping()
3072 if (unlikely(vma == NULL)) in __install_special_mapping()
3075 INIT_LIST_HEAD(&vma->anon_vma_chain); in __install_special_mapping()
3076 vma->vm_mm = mm; in __install_special_mapping()
3077 vma->vm_start = addr; in __install_special_mapping()
3078 vma->vm_end = addr + len; in __install_special_mapping()
3080 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; in __install_special_mapping()
3081 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in __install_special_mapping()
3083 vma->vm_ops = ops; in __install_special_mapping()
3084 vma->vm_private_data = priv; in __install_special_mapping()
3086 ret = insert_vm_struct(mm, vma); in __install_special_mapping()
3092 perf_event_mmap(vma); in __install_special_mapping()
3094 return vma; in __install_special_mapping()
3097 kmem_cache_free(vm_area_cachep, vma); in __install_special_mapping()
3123 struct vm_area_struct *vma = __install_special_mapping( in install_special_mapping() local
3127 return PTR_ERR_OR_ZERO(vma); in install_special_mapping()
3206 struct vm_area_struct *vma; in mm_take_all_locks() local
3213 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3216 if (vma->vm_file && vma->vm_file->f_mapping) in mm_take_all_locks()
3217 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3220 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3223 if (vma->anon_vma) in mm_take_all_locks()
3224 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_take_all_locks()
3277 struct vm_area_struct *vma; in mm_drop_all_locks() local
3283 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_drop_all_locks()
3284 if (vma->anon_vma) in mm_drop_all_locks()
3285 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_drop_all_locks()
3287 if (vma->vm_file && vma->vm_file->f_mapping) in mm_drop_all_locks()
3288 vm_unlock_mapping(vma->vm_file->f_mapping); in mm_drop_all_locks()
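
The hits above all appear to come from a single file; the function names and line numbers are consistent with mm/mmap.c in kernels of the VM_DENYWRITE era. One recurring theme in the listing is the augmented rbtree bookkeeping: vma_compute_subtree_gap() (hits around line 355) caches, for each node, the largest free gap anywhere in its subtree, where a VMA's own gap is its vm_start minus the previous VMA's vm_end. The sketch below is a minimal user-space illustration of that invariant only; struct demo_vma, own_gap() and compute_subtree_gap() are hypothetical stand-ins, not the kernel's struct vm_area_struct or its rbtree code.

```c
/*
 * User-space sketch of the rb_subtree_gap invariant mirrored by
 * vma_compute_subtree_gap() in the listing above.  Illustration only:
 * demo_vma is a simplified stand-in and no real rbtree is involved.
 */
#include <stdio.h>

struct demo_vma {
	unsigned long vm_start, vm_end;   /* mapping covers [vm_start, vm_end) */
	struct demo_vma *vm_prev;         /* previous VMA in address order */
	struct demo_vma *left, *right;    /* rbtree children (simplified) */
	unsigned long subtree_gap;        /* cached max gap in this subtree */
};

/* Gap immediately below this VMA: vm_start minus the previous vm_end. */
static unsigned long own_gap(const struct demo_vma *v)
{
	return v->vm_start - (v->vm_prev ? v->vm_prev->vm_end : 0);
}

/* Recompute the cached value bottom-up, as vma_compute_subtree_gap() does. */
static unsigned long compute_subtree_gap(struct demo_vma *v)
{
	unsigned long max = own_gap(v);

	if (v->left && v->left->subtree_gap > max)
		max = v->left->subtree_gap;
	if (v->right && v->right->subtree_gap > max)
		max = v->right->subtree_gap;
	return v->subtree_gap = max;
}

int main(void)
{
	/* Two VMAs: [0x1000,0x2000) and [0x5000,0x6000), leaving a 0x3000 gap. */
	struct demo_vma a = { 0x1000, 0x2000, NULL, NULL, NULL, 0 };
	struct demo_vma b = { 0x5000, 0x6000, &a,   &a,   NULL, 0 };

	compute_subtree_gap(&a);
	printf("max gap in b's subtree: %#lx\n", compute_subtree_gap(&b));
	return 0;
}
```

With that cached value in place, the gap searches in unmapped_area() and unmapped_area_topdown() (hits around lines 1708 and 1802) can skip any subtree whose cached gap is too small, which is exactly the `vma->rb_subtree_gap < length` check visible in the listing.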