Lines Matching refs:mm

139 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
151 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in __get_user_pages() argument
169 vma = find_vma(mm, start); in __get_user_pages()
201 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages() argument
213 return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, in get_user_pages()
218 long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages_locked() argument
223 return get_user_pages(tsk, mm, start, nr_pages, write, force, in get_user_pages_locked()
228 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, in __get_user_pages_unlocked() argument
234 down_read(&mm->mmap_sem); in __get_user_pages_unlocked()
235 ret = get_user_pages(tsk, mm, start, nr_pages, write, force, in __get_user_pages_unlocked()
237 up_read(&mm->mmap_sem); in __get_user_pages_unlocked()
242 long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages_unlocked() argument
246 return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write, in get_user_pages_unlocked()
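
The __get_user_pages_unlocked() fragments above show the usual no-MMU wrapper pattern: the "unlocked" variant simply takes mm->mmap_sem for read around the locked get_user_pages() call. A minimal sketch of that pattern, reconstructed from the listed lines; the exact parameter list (gup flags, the vmas argument) is abbreviated here:

    long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                                   unsigned long start, unsigned long nr_pages,
                                   int write, int force, struct page **pages)
    {
            long ret;

            /* Serialise against address-space changes for the duration of
             * the lookup, then drop the lock again. */
            down_read(&mm->mmap_sem);
            ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
                                 pages, NULL);
            up_read(&mm->mmap_sem);
            return ret;
    }
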
299 down_write(&current->mm->mmap_sem); in vmalloc_user()
300 vma = find_vma(current->mm, (unsigned long)ret); in vmalloc_user()
303 up_write(&current->mm->mmap_sem); in vmalloc_user()
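
The vmalloc_user() fragments show mmap_sem taken for write while the kernel VMA backing the new buffer is looked up with find_vma(). A sketch of that step, assuming the buffer comes from __vmalloc() and that the lookup exists to tag the region as user-mappable; the __vmalloc() call and the VM_USERMAP flag are assumptions, not part of the mm-matching lines:

    void *ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
    if (ret) {
            struct vm_area_struct *vma;

            down_write(&current->mm->mmap_sem);
            vma = find_vma(current->mm, (unsigned long)ret);
            if (vma)
                    vma->vm_flags |= VM_USERMAP;  /* assumption: not in the listing */
            up_write(&current->mm->mmap_sem);
    }
    return ret;
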
547 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1() local
549 if (brk < mm->start_brk || brk > mm->context.end_brk) in SYSCALL_DEFINE1()
550 return mm->brk; in SYSCALL_DEFINE1()
552 if (mm->brk == brk) in SYSCALL_DEFINE1()
553 return mm->brk; in SYSCALL_DEFINE1()
558 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
559 mm->brk = brk; in SYSCALL_DEFINE1()
566 flush_icache_range(mm->brk, brk); in SYSCALL_DEFINE1()
567 return mm->brk = brk; in SYSCALL_DEFINE1()
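
The brk handler fragments reassemble into the following control flow: the new break must stay inside [mm->start_brk, mm->context.end_brk], shrinking is always allowed, and a grow flushes the icache over the newly exposed range before mm->brk is updated. A sketch with the non-mm-matching lines between the fragments elided:

    struct mm_struct *mm = current->mm;

    if (brk < mm->start_brk || brk > mm->context.end_brk)
            return mm->brk;

    if (mm->brk == brk)
            return mm->brk;

    /* Always allow shrinking the break. */
    if (brk <= mm->brk) {
            mm->brk = brk;
            return brk;
    }

    /* Growing within the preallocated region: flush the icache over
     * the newly usable range, then publish the new break. */
    flush_icache_range(mm->brk, brk);
    return mm->brk = brk;
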
725 struct mm_struct *mm = vma->vm_mm; in protect_vma() local
728 protect_page(mm, start, flags); in protect_vma()
731 update_protections(mm); in protect_vma()
741 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) in add_vma_to_mm() argument
751 mm->map_count++; in add_vma_to_mm()
752 vma->vm_mm = mm; in add_vma_to_mm()
769 p = &mm->mm_rb.rb_node; in add_vma_to_mm()
796 rb_insert_color(&vma->vm_rb, &mm->mm_rb); in add_vma_to_mm()
803 __vma_link_list(mm, vma, prev, parent); in add_vma_to_mm()
813 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm() local
820 mm->map_count--; in delete_vma_from_mm()
824 vmacache_invalidate(mm); in delete_vma_from_mm()
841 rb_erase(&vma->vm_rb, &mm->mm_rb); in delete_vma_from_mm()
846 mm->mmap = vma->vm_next; in delete_vma_from_mm()
855 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) in delete_vma() argument
870 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) in find_vma() argument
875 vma = vmacache_find(mm, addr); in find_vma()
881 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma()
898 struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) in find_extend_vma() argument
900 return find_vma(mm, addr); in find_extend_vma()
916 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm, in find_vma_exact() argument
924 vma = vmacache_find_exact(mm, addr, end); in find_vma_exact()
930 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma_exact()
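
find_vma() and find_vma_exact() share one lookup pattern in the fragments above: consult the per-task VMA cache via vmacache_find()/vmacache_find_exact() first, and only fall back to a linear walk of the mm->mmap list on a miss. A sketch of find_vma() built from those fragments; the range test inside the loop and the vmacache_update() call are my reconstruction:

    struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;

            /* Check the per-task VMA cache first. */
            vma = vmacache_find(mm, addr);
            if (vma)
                    return vma;

            /* Miss: trawl the linear list of mappings. */
            for (vma = mm->mmap; vma; vma = vma->vm_next) {
                    if (vma->vm_start > addr)
                            return NULL;
                    if (vma->vm_end > addr) {
                            vmacache_update(addr, vma);
                            return vma;
                    }
            }

            return NULL;
    }
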
1456 current->mm->total_vm += len >> PAGE_SHIFT; in do_mmap_pgoff()
1459 add_vma_to_mm(current->mm, vma); in do_mmap_pgoff()
1559 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
1573 if (mm->map_count >= sysctl_max_map_count) in split_vma()
1616 add_vma_to_mm(mm, vma); in split_vma()
1617 add_vma_to_mm(mm, new); in split_vma()
1625 static int shrink_vma(struct mm_struct *mm, in shrink_vma() argument
1640 add_vma_to_mm(mm, vma); in shrink_vma()
1666 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) in do_munmap() argument
1681 vma = find_vma(mm, start); in do_munmap()
1725 ret = split_vma(mm, vma, start, 1); in do_munmap()
1731 return shrink_vma(mm, vma, start, end); in do_munmap()
1736 delete_vma(mm, vma); in do_munmap()
1744 struct mm_struct *mm = current->mm; in vm_munmap() local
1747 down_write(&mm->mmap_sem); in vm_munmap()
1748 ret = do_munmap(mm, addr, len); in vm_munmap()
1749 up_write(&mm->mmap_sem); in vm_munmap()
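
vm_munmap() is a thin locking wrapper in the fragments above: take the current mm's mmap_sem for write, let do_munmap() do the work, drop the lock and return its result. Reconstructed from those fragments:

    int vm_munmap(unsigned long addr, size_t len)
    {
            struct mm_struct *mm = current->mm;
            int ret;

            down_write(&mm->mmap_sem);
            ret = do_munmap(mm, addr, len);
            up_write(&mm->mmap_sem);

            return ret;
    }
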
1762 void exit_mmap(struct mm_struct *mm) in exit_mmap() argument
1766 if (!mm) in exit_mmap()
1771 mm->total_vm = 0; in exit_mmap()
1773 while ((vma = mm->mmap)) { in exit_mmap()
1774 mm->mmap = vma->vm_next; in exit_mmap()
1776 delete_vma(mm, vma); in exit_mmap()
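
exit_mmap() tears the whole address space down by repeatedly unhooking the head of the mm->mmap list and passing each VMA to delete_vma(). A sketch assembled from the fragments above; the per-VMA unlinking between the two steps is elided because those lines do not reference mm directly:

    void exit_mmap(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;

            if (!mm)
                    return;

            mm->total_vm = 0;

            while ((vma = mm->mmap)) {
                    mm->mmap = vma->vm_next;
                    /* ... per-VMA unlinking, not in the mm-matching lines ... */
                    delete_vma(mm, vma);
            }
    }
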
1816 vma = find_vma_exact(current->mm, addr, old_len); in do_mremap()
1840 down_write(&current->mm->mmap_sem); in SYSCALL_DEFINE5()
1842 up_write(&current->mm->mmap_sem); in SYSCALL_DEFINE5()
1919 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) in __vm_enough_memory() argument
1983 if (mm) { in __vm_enough_memory()
1985 allowed -= min_t(long, mm->total_vm / 32, reserve); in __vm_enough_memory()
2010 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, in __access_remote_vm() argument
2015 down_read(&mm->mmap_sem); in __access_remote_vm()
2018 vma = find_vma(mm, addr); in __access_remote_vm()
2037 up_read(&mm->mmap_sem); in __access_remote_vm()
2052 int access_remote_vm(struct mm_struct *mm, unsigned long addr, in access_remote_vm() argument
2055 return __access_remote_vm(NULL, mm, addr, buf, len, write); in access_remote_vm()
2064 struct mm_struct *mm; in access_process_vm() local
2069 mm = get_task_mm(tsk); in access_process_vm()
2070 if (!mm) in access_process_vm()
2073 len = __access_remote_vm(tsk, mm, addr, buf, len, write); in access_process_vm()
2075 mmput(mm); in access_process_vm()
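
The remote-access group shows the two halves of the path: __access_remote_vm() walks the target mm with mmap_sem held for read, and access_process_vm() brackets it with get_task_mm()/mmput() so the mm cannot be torn down mid-copy. A sketch of the outer helper reconstructed from those fragments, with error handling abbreviated:

    int access_process_vm(struct task_struct *tsk, unsigned long addr,
                          void *buf, int len, int write)
    {
            struct mm_struct *mm;

            /* Pin the target address space before touching it. */
            mm = get_task_mm(tsk);
            if (!mm)
                    return 0;

            len = __access_remote_vm(tsk, mm, addr, buf, len, write);

            mmput(mm);
            return len;
    }
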