Lines Matching refs:mm
60 static void unmap_region(struct mm_struct *mm,
153 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) in __vm_enough_memory() argument
221 if (mm) { in __vm_enough_memory()
223 allowed -= min_t(long, mm->total_vm / 32, reserve); in __vm_enough_memory()
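The hits at 153-223 are the overcommit check: for a caller other than root, up to 1/32 of the task's own total_vm is withheld from the allowance so the admin reserve stays usable. A minimal userspace sketch of that arithmetic; the struct field and the reserve size are modeled here, not taken from the kernel:

#include <stdio.h>

struct mm_model {
        long total_vm;                          /* pages currently mapped */
};

static long admin_reserve_pages = 2048;         /* stand-in for the kernel's admin reserve */

/* Mirrors "allowed -= min_t(long, mm->total_vm / 32, reserve)" at line 223. */
static long apply_admin_reserve(const struct mm_model *mm, long allowed)
{
        if (mm) {
                long cap = mm->total_vm / 32;
                allowed -= (cap < admin_reserve_pages) ? cap : admin_reserve_pages;
        }
        return allowed;
}

int main(void)
{
        struct mm_model mm = { .total_vm = 100000 };
        printf("%ld\n", apply_admin_reserve(&mm, 50000));       /* 50000 - 2048 */
        return 0;
}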
289 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1() local
293 down_write(&mm->mmap_sem); in SYSCALL_DEFINE1()
302 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
304 min_brk = mm->end_data; in SYSCALL_DEFINE1()
306 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
317 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, in SYSCALL_DEFINE1()
318 mm->end_data, mm->start_data)) in SYSCALL_DEFINE1()
322 oldbrk = PAGE_ALIGN(mm->brk); in SYSCALL_DEFINE1()
327 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
328 if (!do_munmap(mm, newbrk, oldbrk-newbrk)) in SYSCALL_DEFINE1()
334 if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) in SYSCALL_DEFINE1()
342 mm->brk = brk; in SYSCALL_DEFINE1()
343 populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; in SYSCALL_DEFINE1()
344 up_write(&mm->mmap_sem); in SYSCALL_DEFINE1()
350 retval = mm->brk; in SYSCALL_DEFINE1()
351 up_write(&mm->mmap_sem); in SYSCALL_DEFINE1()
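Lines 289-351 are the brk(2) handler: under mmap_sem it page-aligns the old and new break, shrinks the heap by unmapping, or grows it only if nothing else is mapped in the way. A simplified, single-threaded sketch of that decision flow; the unmap, intersection and expand helpers are hypothetical stand-ins for do_munmap(), find_vma_intersection() and the heap-growing path, and the RLIMIT_DATA and end_data checks are omitted:

#include <stdbool.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

struct mm_model {
        unsigned long start_brk;                /* lowest address the break may fall to */
        unsigned long brk;                      /* current program break */
};

/* Hypothetical stubs standing in for the kernel helpers named above. */
static bool heap_unmap(struct mm_model *mm, unsigned long start, unsigned long len)
{ (void)mm; (void)start; (void)len; return true; }
static bool range_is_mapped(struct mm_model *mm, unsigned long start, unsigned long end)
{ (void)mm; (void)start; (void)end; return false; }
static bool heap_expand(struct mm_model *mm, unsigned long start, unsigned long len)
{ (void)mm; (void)start; (void)len; return true; }

/* Mirrors the grow/shrink decision visible at lines 317-350. */
unsigned long sketch_sys_brk(struct mm_model *mm, unsigned long brk)
{
        unsigned long newbrk, oldbrk;

        if (brk < mm->start_brk)
                return mm->brk;                 /* below the heap start: refuse */

        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk) {
                mm->brk = brk;                  /* same page: just record the new break */
                return brk;
        }

        if (brk <= mm->brk) {                   /* shrinking: give pages back */
                if (heap_unmap(mm, newbrk, oldbrk - newbrk))
                        mm->brk = brk;
                return mm->brk;
        }

        /* growing: the new pages must not collide with another mapping */
        if (range_is_mapped(mm, oldbrk, newbrk + PAGE_SIZE))
                return mm->brk;

        if (heap_expand(mm, oldbrk, newbrk - oldbrk))
                mm->brk = brk;
        return mm->brk;
}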
435 static void validate_mm(struct mm_struct *mm) in validate_mm() argument
440 struct vm_area_struct *vma = mm->mmap; in validate_mm()
457 if (i != mm->map_count) { in validate_mm()
458 pr_emerg("map_count %d vm_next %d\n", mm->map_count, i); in validate_mm()
461 if (highest_address != mm->highest_vm_end) { in validate_mm()
463 mm->highest_vm_end, highest_address); in validate_mm()
466 i = browse_rb(&mm->mm_rb); in validate_mm()
467 if (i != mm->map_count) { in validate_mm()
469 pr_emerg("map_count %d rb %d\n", mm->map_count, i); in validate_mm()
472 VM_BUG_ON_MM(bug, mm); in validate_mm()
476 #define validate_mm(mm) do { } while (0) argument
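validate_mm() (lines 435-472) is a debug-only cross-check: it walks the VMA list and the rbtree, counting entries and tracking the highest vm_end, and complains if either disagrees with map_count or highest_vm_end; with debugging off it compiles away to nothing (line 476). A cut-down sketch of the list half of that check, over locally modeled structures rather than the kernel's:

#include <stdio.h>

struct vma_model {
        unsigned long vm_start, vm_end;
        struct vma_model *vm_next;
};

struct mm_model {
        struct vma_model *mmap;                 /* head of the sorted VMA list */
        int map_count;
        unsigned long highest_vm_end;
};

/* Returns 0 if the list agrees with the cached counters, 1 otherwise. */
int sketch_validate_mm(const struct mm_model *mm)
{
        const struct vma_model *vma;
        unsigned long highest = 0;
        int bug = 0, i = 0;

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                highest = vma->vm_end;
                i++;
        }
        if (i != mm->map_count) {
                fprintf(stderr, "map_count %d vm_next %d\n", mm->map_count, i);
                bug = 1;
        }
        if (highest != mm->highest_vm_end) {
                fprintf(stderr, "highest_vm_end mismatch\n");
                bug = 1;
        }
        return bug;
}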
553 static int find_vma_links(struct mm_struct *mm, unsigned long addr, in find_vma_links() argument
559 __rb_link = &mm->mm_rb.rb_node; in find_vma_links()
587 static unsigned long count_vma_pages_range(struct mm_struct *mm, in count_vma_pages_range() argument
594 vma = find_vma_intersection(mm, addr, end); in count_vma_pages_range()
615 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link_rb() argument
622 mm->highest_vm_end = vma->vm_end; in __vma_link_rb()
636 vma_rb_insert(vma, &mm->mm_rb); in __vma_link_rb()
659 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link() argument
663 __vma_link_list(mm, vma, prev, rb_parent); in __vma_link()
664 __vma_link_rb(mm, vma, rb_link, rb_parent); in __vma_link()
667 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in vma_link() argument
678 __vma_link(mm, vma, prev, rb_link, rb_parent); in vma_link()
684 mm->map_count++; in vma_link()
685 validate_mm(mm); in vma_link()
692 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in __insert_vm_struct() argument
697 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in __insert_vm_struct()
700 __vma_link(mm, vma, prev, rb_link, rb_parent); in __insert_vm_struct()
701 mm->map_count++; in __insert_vm_struct()
705 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_unlink() argument
710 vma_rb_erase(vma, &mm->mm_rb); in __vma_unlink()
716 vmacache_invalidate(mm); in __vma_unlink()
729 struct mm_struct *mm = vma->vm_mm; in vma_adjust() local
853 __vma_unlink(mm, next, vma); in vma_adjust()
862 __insert_vm_struct(mm, insert); in vma_adjust()
868 mm->highest_vm_end = end; in vma_adjust()
897 mm->map_count--; in vma_adjust()
911 mm->highest_vm_end = end; in vma_adjust()
916 validate_mm(mm); in vma_adjust()
1032 struct vm_area_struct *vma_merge(struct mm_struct *mm, in vma_merge() argument
1052 next = mm->mmap; in vma_merge()
1203 void vm_stat_account(struct mm_struct *mm, unsigned long flags, in vm_stat_account() argument
1209 mm->total_vm += pages; in vm_stat_account()
1212 mm->shared_vm += pages; in vm_stat_account()
1214 mm->exec_vm += pages; in vm_stat_account()
1216 mm->stack_vm += pages; in vm_stat_account()
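vm_stat_account() (lines 1203-1216) keeps the per-mm counters in step with every mapping change: total_vm always moves, and shared_vm, exec_vm or stack_vm move too depending on the mapping's type. A simplified sketch of that idea with locally defined flag bits; the exact conditions the kernel uses (file-backed vs. anonymous, exec-only, stack flags) are not reproduced here:

struct mm_counters {
        unsigned long total_vm;
        unsigned long shared_vm;
        unsigned long exec_vm;
        unsigned long stack_vm;
};

/* Local stand-ins for the VM_* flag bits, not the kernel's values. */
#define SK_VM_SHARED    0x1UL
#define SK_VM_EXEC      0x2UL
#define SK_VM_STACK     0x4UL

/* "pages" may be negative when a mapping is removed. */
void sketch_vm_stat_account(struct mm_counters *mm, unsigned long flags, long pages)
{
        mm->total_vm += pages;
        if (flags & SK_VM_SHARED)
                mm->shared_vm += pages;
        if (flags & SK_VM_EXEC)
                mm->exec_vm += pages;
        if (flags & SK_VM_STACK)
                mm->stack_vm += pages;
}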
1233 static inline int mlock_future_check(struct mm_struct *mm, in mlock_future_check() argument
1242 locked += mm->locked_vm; in mlock_future_check()
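mlock_future_check() (lines 1233-1242) refuses a new locked mapping when the pages already locked plus the new range would exceed RLIMIT_MEMLOCK (the kernel additionally lets CAP_IPC_LOCK bypass the limit). A small userspace sketch of the same arithmetic using getrlimit(2); names and the page-shift constant are local to the sketch:

#include <errno.h>
#include <sys/resource.h>

#define SK_PAGE_SHIFT   12

/* Would "len" more bytes of locked memory fit under RLIMIT_MEMLOCK, given
 * "locked_vm" pages already locked?  Returns 0 if yes, -EAGAIN if not. */
int sketch_mlock_future_check(unsigned long locked_vm, unsigned long len)
{
        struct rlimit rl;
        unsigned long locked, lock_limit;

        if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
                return -errno;

        locked = len >> SK_PAGE_SHIFT;
        locked += locked_vm;
        lock_limit = rl.rlim_cur >> SK_PAGE_SHIFT;

        if (rl.rlim_cur != RLIM_INFINITY && locked > lock_limit)
                return -EAGAIN;
        return 0;
}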
1260 struct mm_struct *mm = current->mm; in do_mmap_pgoff() local
1291 if (mm->map_count > sysctl_max_map_count) in do_mmap_pgoff()
1306 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in do_mmap_pgoff()
1312 if (mlock_future_check(mm, vm_flags, len)) in do_mmap_pgoff()
1533 struct mm_struct *mm = current->mm; in mmap_region() local
1540 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) { in mmap_region()
1550 nr_pages = count_vma_pages_range(mm, addr, addr + len); in mmap_region()
1552 if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages)) in mmap_region()
1558 while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, in mmap_region()
1560 if (do_munmap(mm, addr, len)) in mmap_region()
1569 if (security_vm_enough_memory_mm(mm, charged)) in mmap_region()
1577 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, in mmap_region()
1593 vma->vm_mm = mm; in mmap_region()
1640 vma_link(mm, vma, prev, rb_link, rb_parent); in mmap_region()
1652 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); in mmap_region()
1655 vma == get_gate_vma(current->mm))) in mmap_region()
1656 mm->locked_vm += (len >> PAGE_SHIFT); in mmap_region()
1682 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); in mmap_region()
1707 struct mm_struct *mm = current->mm; in unmapped_area() local
1726 if (RB_EMPTY_ROOT(&mm->mm_rb)) in unmapped_area()
1728 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area()
1781 gap_start = mm->highest_vm_end; in unmapped_area()
1801 struct mm_struct *mm = current->mm; in unmapped_area_topdown() local
1824 gap_start = mm->highest_vm_end; in unmapped_area_topdown()
1829 if (RB_EMPTY_ROOT(&mm->mm_rb)) in unmapped_area_topdown()
1831 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area_topdown()
1913 struct mm_struct *mm = current->mm; in arch_get_unmapped_area() local
1925 vma = find_vma(mm, addr); in arch_get_unmapped_area()
1933 info.low_limit = mm->mmap_base; in arch_get_unmapped_area()
1951 struct mm_struct *mm = current->mm; in arch_get_unmapped_area_topdown() local
1965 vma = find_vma(mm, addr); in arch_get_unmapped_area_topdown()
1974 info.high_limit = mm->mmap_base; in arch_get_unmapped_area_topdown()
2011 get_area = current->mm->get_unmapped_area; in get_unmapped_area()
2031 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) in find_vma() argument
2037 vma = vmacache_find(mm, addr); in find_vma()
2041 rb_node = mm->mm_rb.rb_node; in find_vma()
2069 find_vma_prev(struct mm_struct *mm, unsigned long addr, in find_vma_prev() argument
2074 vma = find_vma(mm, addr); in find_vma_prev()
2078 struct rb_node *rb_node = mm->mm_rb.rb_node; in find_vma_prev()
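find_vma() (lines 2031-2041) returns the first VMA whose vm_end lies above addr, consulting the per-task vmacache first and falling back to an rbtree descent; find_vma_prev() (lines 2069-2078) additionally hands back the preceding VMA. A list-based sketch of the same lookup contract; the kernel walks the rbtree, but on the sorted VMA list the result is identical:

#include <stddef.h>

struct vma_model {
        unsigned long vm_start, vm_end;
        struct vma_model *vm_next;
};

/* First VMA with vm_end > addr, or NULL; addr itself may sit in a hole
 * below the returned VMA, so callers still compare against vm_start. */
struct vma_model *sketch_find_vma(struct vma_model *mmap, unsigned long addr)
{
        struct vma_model *vma;

        for (vma = mmap; vma; vma = vma->vm_next)
                if (addr < vma->vm_end)
                        return vma;
        return NULL;
}

/* Same lookup, but also report the VMA just before the returned one. */
struct vma_model *sketch_find_vma_prev(struct vma_model *mmap, unsigned long addr,
                                       struct vma_model **pprev)
{
        struct vma_model *vma, *prev = NULL;

        for (vma = mmap; vma; prev = vma, vma = vma->vm_next)
                if (addr < vma->vm_end)
                        break;
        *pprev = prev;
        return vma;
}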
2095 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth() local
2100 if (!may_expand_vm(mm, grow)) in acct_stack_growth()
2114 locked = mm->locked_vm + grow; in acct_stack_growth()
2131 if (security_vm_enough_memory_mm(mm, grow)) in acct_stack_growth()
2136 mm->locked_vm += grow; in acct_stack_growth()
2137 vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); in acct_stack_growth()
2303 find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
2308 vma = find_vma_prev(mm, addr, &prev); in find_extend_vma()
2332 find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
2338 vma = find_vma(mm, addr); in find_extend_vma()
2362 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) in remove_vma_list() argument
2367 update_hiwater_vm(mm); in remove_vma_list()
2373 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); in remove_vma_list()
2377 validate_mm(mm); in remove_vma_list()
2385 static void unmap_region(struct mm_struct *mm, in unmap_region() argument
2389 struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap; in unmap_region()
2393 tlb_gather_mmu(&tlb, mm, start, end); in unmap_region()
2394 update_hiwater_rss(mm); in unmap_region()
2406 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, in detach_vmas_to_be_unmapped() argument
2412 insertion_point = (prev ? &prev->vm_next : &mm->mmap); in detach_vmas_to_be_unmapped()
2415 vma_rb_erase(vma, &mm->mm_rb); in detach_vmas_to_be_unmapped()
2416 mm->map_count--; in detach_vmas_to_be_unmapped()
2425 mm->highest_vm_end = prev ? prev->vm_end : 0; in detach_vmas_to_be_unmapped()
2429 vmacache_invalidate(mm); in detach_vmas_to_be_unmapped()
2436 static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in __split_vma() argument
2504 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
2507 if (mm->map_count >= sysctl_max_map_count) in split_vma()
2510 return __split_vma(mm, vma, addr, new_below); in split_vma()
2518 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) in do_munmap() argument
2531 vma = find_vma(mm, start); in do_munmap()
2557 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) in do_munmap()
2560 error = __split_vma(mm, vma, start, 0); in do_munmap()
2567 last = find_vma(mm, end); in do_munmap()
2569 int error = __split_vma(mm, last, end, 1); in do_munmap()
2573 vma = prev ? prev->vm_next : mm->mmap; in do_munmap()
2578 if (mm->locked_vm) { in do_munmap()
2582 mm->locked_vm -= vma_pages(tmp); in do_munmap()
2592 detach_vmas_to_be_unmapped(mm, vma, prev, end); in do_munmap()
2593 unmap_region(mm, vma, prev, start, end); in do_munmap()
2595 arch_unmap(mm, vma, start, end); in do_munmap()
2598 remove_vma_list(mm, vma); in do_munmap()
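do_munmap() (lines 2518-2598) works in phases: find the first VMA overlapping [start, end), split a VMA that straddles either boundary (subject to sysctl_max_map_count), subtract any locked pages, detach the affected VMAs from the list and rbtree, tear down their page tables, and finally free the VMA structures. A compressed sketch of that control flow; the split, lookup, detach and unmap steps are declaration-only hypothetical helpers standing in for __split_vma(), find_vma(), detach_vmas_to_be_unmapped() and unmap_region():

#include <errno.h>

struct vma_model {
        unsigned long vm_start, vm_end;
        struct vma_model *vm_next;
};

struct mm_model {
        struct vma_model *mmap;
        int map_count;
        int max_map_count;                      /* stand-in for sysctl_max_map_count */
};

int  split_at(struct mm_model *mm, struct vma_model *vma, unsigned long addr);
struct vma_model *first_vma_above(struct mm_model *mm, unsigned long addr);
void detach_range(struct mm_model *mm, unsigned long start, unsigned long end);
void unmap_range(struct mm_model *mm, unsigned long start, unsigned long end);

int sketch_do_munmap(struct mm_model *mm, unsigned long start, unsigned long end)
{
        struct vma_model *vma, *last;

        vma = first_vma_above(mm, start);
        if (!vma || vma->vm_start >= end)
                return 0;                       /* nothing mapped in the range */

        /* Split the VMA containing "start" if the range begins inside it. */
        if (start > vma->vm_start) {
                if (end < vma->vm_end && mm->map_count >= mm->max_map_count)
                        return -ENOMEM;         /* split would exceed the map count */
                if (split_at(mm, vma, start))
                        return -ENOMEM;
        }

        /* Split the VMA containing "end" if the range ends inside it. */
        last = first_vma_above(mm, end);
        if (last && end > last->vm_start) {
                if (split_at(mm, last, end))
                        return -ENOMEM;
        }

        detach_range(mm, start, end);           /* unlink the VMAs in between */
        unmap_range(mm, start, end);            /* tear down their page tables */
        return 0;
}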
2606 struct mm_struct *mm = current->mm; in vm_munmap() local
2608 down_write(&mm->mmap_sem); in vm_munmap()
2609 ret = do_munmap(mm, start, len); in vm_munmap()
2610 up_write(&mm->mmap_sem); in vm_munmap()
2629 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE5() local
2651 down_write(&mm->mmap_sem); in SYSCALL_DEFINE5()
2652 vma = find_vma(mm, start); in SYSCALL_DEFINE5()
2706 up_write(&mm->mmap_sem); in SYSCALL_DEFINE5()
2714 static inline void verify_mm_writelocked(struct mm_struct *mm) in verify_mm_writelocked() argument
2717 if (unlikely(down_read_trylock(&mm->mmap_sem))) { in verify_mm_writelocked()
2719 up_read(&mm->mmap_sem); in verify_mm_writelocked()
2731 struct mm_struct *mm = current->mm; in do_brk() local
2742 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; in do_brk()
2748 error = mlock_future_check(mm, mm->def_flags, len); in do_brk()
2756 verify_mm_writelocked(mm); in do_brk()
2761 while (find_vma_links(mm, addr, addr + len, &prev, &rb_link, in do_brk()
2763 if (do_munmap(mm, addr, len)) in do_brk()
2768 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) in do_brk()
2771 if (mm->map_count > sysctl_max_map_count) in do_brk()
2774 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) in do_brk()
2778 vma = vma_merge(mm, prev, addr, addr + len, flags, in do_brk()
2793 vma->vm_mm = mm; in do_brk()
2799 vma_link(mm, vma, prev, rb_link, rb_parent); in do_brk()
2802 mm->total_vm += len >> PAGE_SHIFT; in do_brk()
2804 mm->locked_vm += (len >> PAGE_SHIFT); in do_brk()
2811 struct mm_struct *mm = current->mm; in vm_brk() local
2815 down_write(&mm->mmap_sem); in vm_brk()
2817 populate = ((mm->def_flags & VM_LOCKED) != 0); in vm_brk()
2818 up_write(&mm->mmap_sem); in vm_brk()
2826 void exit_mmap(struct mm_struct *mm) in exit_mmap() argument
2833 mmu_notifier_release(mm); in exit_mmap()
2835 if (mm->locked_vm) { in exit_mmap()
2836 vma = mm->mmap; in exit_mmap()
2844 arch_exit_mmap(mm); in exit_mmap()
2846 vma = mm->mmap; in exit_mmap()
2851 flush_cache_mm(mm); in exit_mmap()
2852 tlb_gather_mmu(&tlb, mm, 0, -1); in exit_mmap()
2876 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in insert_vm_struct() argument
2897 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in insert_vm_struct()
2901 security_vm_enough_memory_mm(mm, vma_pages(vma))) in insert_vm_struct()
2904 vma_link(mm, vma, prev, rb_link, rb_parent); in insert_vm_struct()
2918 struct mm_struct *mm = vma->vm_mm; in copy_vma() local
2932 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) in copy_vma()
2934 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
2974 vma_link(mm, new_vma, prev, rb_link, rb_parent); in copy_vma()
2991 int may_expand_vm(struct mm_struct *mm, unsigned long npages) in may_expand_vm() argument
2993 unsigned long cur = mm->total_vm; /* pages */ in may_expand_vm()
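may_expand_vm() (lines 2991-2993) asks whether adding npages to the current total_vm would exceed the address-space limit. A tiny userspace equivalent against RLIMIT_AS; names and the page-shift constant are local to the sketch:

#include <sys/resource.h>

#define SK_PAGE_SHIFT   12

/* Non-zero if "npages" more pages would still fit under RLIMIT_AS. */
int sketch_may_expand_vm(unsigned long total_vm, unsigned long npages)
{
        struct rlimit rl;
        unsigned long limit_pages;

        if (getrlimit(RLIMIT_AS, &rl) != 0)
                return 0;
        if (rl.rlim_cur == RLIM_INFINITY)
                return 1;

        limit_pages = rl.rlim_cur >> SK_PAGE_SHIFT;
        return total_vm + npages <= limit_pages;
}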
3063 struct mm_struct *mm, in __install_special_mapping() argument
3076 vma->vm_mm = mm; in __install_special_mapping()
3080 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; in __install_special_mapping()
3086 ret = insert_vm_struct(mm, vma); in __install_special_mapping()
3090 mm->total_vm += len >> PAGE_SHIFT; in __install_special_mapping()
3111 struct mm_struct *mm, in _install_special_mapping() argument
3115 return __install_special_mapping(mm, addr, len, vm_flags, in _install_special_mapping()
3119 int install_special_mapping(struct mm_struct *mm, in install_special_mapping() argument
3124 mm, addr, len, vm_flags, &legacy_special_mapping_vmops, in install_special_mapping()
3132 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma) in vm_lock_anon_vma() argument
3139 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem); in vm_lock_anon_vma()
3155 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) in vm_lock_mapping() argument
3169 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem); in vm_lock_mapping()
3204 int mm_take_all_locks(struct mm_struct *mm) in mm_take_all_locks() argument
3209 BUG_ON(down_read_trylock(&mm->mmap_sem)); in mm_take_all_locks()
3213 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3217 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3220 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3225 vm_lock_anon_vma(mm, avc->anon_vma); in mm_take_all_locks()
3231 mm_drop_all_locks(mm); in mm_take_all_locks()
3275 void mm_drop_all_locks(struct mm_struct *mm) in mm_drop_all_locks() argument
3280 BUG_ON(down_read_trylock(&mm->mmap_sem)); in mm_drop_all_locks()
3283 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_drop_all_locks()
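mm_take_all_locks() (lines 3204-3225) walks every VMA under a held mmap_sem and nests each file mapping's i_mmap_rwsem and each anon_vma root lock under it, backing out via mm_drop_all_locks() if it has to give up part-way (line 3231); mm_drop_all_locks() (lines 3275-3283) walks the same list to release everything. A generic sketch of that take-all-or-roll-back pattern over an array of pthread mutexes, not the kernel's lock classes or nesting annotations:

#include <pthread.h>

/* Try to take every lock in order; on failure release the ones already
 * held, in the same spirit as mm_take_all_locks()/mm_drop_all_locks(). */
int take_all_locks(pthread_mutex_t *locks, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (pthread_mutex_trylock(&locks[i]) != 0) {
                        while (--i >= 0)
                                pthread_mutex_unlock(&locks[i]);
                        return -1;              /* caller may retry later */
                }
        }
        return 0;
}

void drop_all_locks(pthread_mutex_t *locks, int n)
{
        int i;

        for (i = 0; i < n; i++)
                pthread_mutex_unlock(&locks[i]);
}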