Lines matching refs:mm (references to the local/argument `mm` in the kernel's get_user_pages code, mm/gup.c)

38 struct mm_struct *mm = vma->vm_mm; in follow_page_pte() local
47 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); in follow_page_pte()
64 migration_entry_wait(mm, pmd, address); in follow_page_pte()
152 struct mm_struct *mm = vma->vm_mm; in follow_page_mask() local
156 page = follow_huge_addr(mm, address, flags & FOLL_WRITE); in follow_page_mask()
162 pgd = pgd_offset(mm, address); in follow_page_mask()
170 page = follow_huge_pud(mm, address, pud, flags); in follow_page_mask()
182 page = follow_huge_pmd(mm, address, pmd, flags); in follow_page_mask()
194 ptl = pmd_lock(mm, pmd); in follow_page_mask()
212 static int get_gate_page(struct mm_struct *mm, unsigned long address, in get_gate_page() argument
228 pgd = pgd_offset_gate(mm, address); in get_gate_page()
239 *vma = get_gate_vma(mm); in get_gate_page()
264 struct mm_struct *mm = vma->vm_mm; in faultin_page() local
284 ret = handle_mm_fault(mm, vma, address, fault_flags); in faultin_page()
416 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in __get_user_pages() argument
445 vma = find_extend_vma(mm, start); in __get_user_pages()
446 if (!vma && in_gate_area(mm, start)) { in __get_user_pages()
448 ret = get_gate_page(mm, start & PAGE_MASK, in __get_user_pages()
460 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
544 int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, in fixup_user_fault() argument
551 vma = find_extend_vma(mm, address); in fixup_user_fault()
559 ret = handle_mm_fault(mm, vma, address, fault_flags); in fixup_user_fault()
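The fixup_user_fault() entries above (lines 544-559) show it looking up the VMA and calling handle_mm_fault() on behalf of a caller. A minimal sketch of how a caller might use it, modeled on the futex fault_in_user_writeable() pattern and assuming the four-argument signature listed above; the helper name is hypothetical, and the caller is assumed to hold mm->mmap_sem for read:

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical helper: fault in a user address for writing after an
 * access inside a pagefault_disable() section returned -EFAULT, then
 * let the caller retry the access.
 */
static int example_fault_in_writeable(unsigned long uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);	/* fixup_user_fault() expects mmap_sem held */
	ret = fixup_user_fault(current, mm, uaddr, FAULT_FLAG_WRITE);
	up_read(&mm->mmap_sem);

	return ret;
}
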
579 struct mm_struct *mm, in __get_user_pages_locked() argument
608 ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
647 down_read(&mm->mmap_sem); in __get_user_pages_locked()
648 ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, in __get_user_pages_locked()
668 up_read(&mm->mmap_sem); in __get_user_pages_locked()
695 long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages_locked() argument
700 return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, in get_user_pages_locked()
715 __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, in __get_user_pages_unlocked() argument
722 down_read(&mm->mmap_sem); in __get_user_pages_unlocked()
723 ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, in __get_user_pages_unlocked()
726 up_read(&mm->mmap_sem); in __get_user_pages_unlocked()
748 long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages_unlocked() argument
752 return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write, in get_user_pages_unlocked()
812 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm, in get_user_pages() argument
816 return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force, in get_user_pages()
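The entries for get_user_pages() (lines 812-816) show the pre-4.6 calling convention: an explicit task and mm, write/force as plain ints, and a forward to __get_user_pages_locked(). A minimal, hedged sketch of a driver-style caller pinning a user buffer with that signature; the helper names are hypothetical, and the caller is assumed to hold mm->mmap_sem for read around the call and to release pages with put_page():

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper: pin the pages backing a user buffer for writing. */
static long example_pin_user_buffer(unsigned long start, unsigned long nr_pages,
				    struct page **pages)
{
	struct mm_struct *mm = current->mm;
	long pinned;

	down_read(&mm->mmap_sem);
	pinned = get_user_pages(current, mm, start, nr_pages,
				1 /* write */, 0 /* force */, pages, NULL);
	up_read(&mm->mmap_sem);

	return pinned;	/* number of pages pinned, or -errno */
}

/* Hypothetical helper: drop the references taken above. */
static void example_unpin_user_buffer(struct page **pages, long nr)
{
	long i;

	for (i = 0; i < nr; i++)
		put_page(pages[i]);
}
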
843 struct mm_struct *mm = vma->vm_mm; in populate_vma_page_range() local
851 VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); in populate_vma_page_range()
873 return __get_user_pages(current, mm, start, nr_pages, gup_flags, in populate_vma_page_range()
886 struct mm_struct *mm = current->mm; in __mm_populate() local
903 down_read(&mm->mmap_sem); in __mm_populate()
904 vma = find_vma(mm, nstart); in __mm_populate()
935 up_read(&mm->mmap_sem); in __mm_populate()
959 if (__get_user_pages(current, current->mm, addr, 1, in get_dump_page()
1282 struct mm_struct *mm = current->mm; in __get_user_pages_fast() local
1310 pgdp = pgd_offset(mm, addr); in __get_user_pages_fast()
1352 struct mm_struct *mm = current->mm; in get_user_pages_fast() local
1364 ret = get_user_pages_unlocked(current, mm, start, in get_user_pages_fast()
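The final entries show the fast path: __get_user_pages_fast() walks the current mm's page tables locklessly (lines 1282-1310), and get_user_pages_fast() falls back to get_user_pages_unlocked() (line 1364) for anything the lockless walk cannot resolve. A minimal sketch of a caller, assuming the int get_user_pages_fast(start, nr_pages, write, pages) signature of this era; the helper name is hypothetical, and note that the caller takes no mmap_sem, since the fallback path acquires it itself (lines 722-726):

#include <linux/mm.h>
#include <linux/errno.h>

/* Hypothetical helper: pin the single page backing a user address. */
static int example_pin_one_page_fast(unsigned long uaddr, struct page **page)
{
	int ret;

	ret = get_user_pages_fast(uaddr & PAGE_MASK, 1, 1 /* write */, page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... access the pinned page ... */

	put_page(*page);
	return 0;
}
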