Lines Matching refs:mm (mm/memory.c)
136 void sync_mm_rss(struct mm_struct *mm) in sync_mm_rss() argument
142 add_mm_counter(mm, i, current->rss_stat.count[i]); in sync_mm_rss()
149 static void add_mm_counter_fast(struct mm_struct *mm, int member, int val) in add_mm_counter_fast() argument
153 if (likely(task->mm == mm)) in add_mm_counter_fast()
156 add_mm_counter(mm, member, val); in add_mm_counter_fast()
158 #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1) argument
159 #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1) argument
168 sync_mm_rss(task->mm); in check_sync_rss_stat()
172 #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member) argument
173 #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member) argument
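These lines are the SPLIT_RSS_COUNTING fast path: RSS updates are cached in current->rss_stat while a task runs on its own mm and folded back by sync_mm_rss(); when the option is off, the *_fast macros fall through to the plain atomic counters. A minimal sketch of the pattern, following the mm/memory.c code of this generation:

    /* Fast path: batch RSS deltas per task instead of hitting mm->rss_stat. */
    static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
    {
            struct task_struct *task = current;

            if (likely(task->mm == mm))
                    task->rss_stat.count[member] += val;    /* no atomics */
            else
                    add_mm_counter(mm, member, val);        /* atomic slow path */
    }

    /* Fold the cached deltas back into the mm every so often. */
    static void check_sync_rss_stat(struct task_struct *task)
    {
            if (unlikely(task != current))
                    return;
            if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
                    sync_mm_rss(task->mm);
    }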
216 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) in tlb_gather_mmu() argument
218 tlb->mm = mm; in tlb_gather_mmu()
242 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); in tlb_flush_mmu_tlbonly()
367 if (atomic_read(&tlb->mm->mm_users) < 2) { in tlb_remove_table()
397 atomic_long_dec(&tlb->mm->nr_ptes); in free_pte_range()
431 mm_dec_nr_pmds(tlb->mm); in free_pmd_range()
519 pgd = pgd_offset(tlb->mm, addr); in free_pgd_range()
563 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, in __pte_alloc() argument
567 pgtable_t new = pte_alloc_one(mm, address); in __pte_alloc()
587 ptl = pmd_lock(mm, pmd); in __pte_alloc()
590 atomic_long_inc(&mm->nr_ptes); in __pte_alloc()
591 pmd_populate(mm, pmd, new); in __pte_alloc()
597 pte_free(mm, new); in __pte_alloc()
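__pte_alloc() shows the standard populate-under-lock idiom used at every page-table level: allocate the new table without any lock held, take the pmd lock, and only wire the table in if no other thread raced in first; __pud_alloc() and __pmd_alloc() further down repeat the same shape under mm->page_table_lock. Condensed:

    pgtable_t new = pte_alloc_one(mm, address);
    if (!new)
            return -ENOMEM;

    smp_wmb();                      /* make pte page contents visible first */

    ptl = pmd_lock(mm, pmd);
    if (likely(pmd_none(*pmd))) {   /* we won the race */
            atomic_long_inc(&mm->nr_ptes);
            pmd_populate(mm, pmd, new);
            new = NULL;
    }
    spin_unlock(ptl);
    if (new)
            pte_free(mm, new);      /* we lost: discard our copy */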
628 static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss) in add_mm_rss_vec() argument
632 if (current->mm == mm) in add_mm_rss_vec()
633 sync_mm_rss(mm); in add_mm_rss_vec()
636 add_mm_counter(mm, i, rss[i]); in add_mm_rss_vec()
1077 struct mm_struct *mm = tlb->mm; in zap_pte_range() local
1087 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); in zap_pte_range()
1110 ptent = ptep_get_and_clear_full(mm, addr, pte, in zap_pte_range()
1156 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1159 add_mm_rss_vec(mm, rss); in zap_pte_range()
1198 if (!rwsem_is_locked(&tlb->mm->mmap_sem)) { in zap_pmd_range()
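zap_pte_range() is the innermost teardown loop: it locks the PTE page, clears each present entry with ptep_get_and_clear_full(), queues the page and its TLB entry on the gather, and batches the RSS deltas in a local vector that add_mm_rss_vec() flushes once before unlock. Roughly (swap and migration entries, and the force-flush handling, elided):

    start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
    pte = start_pte;
    do {
            pte_t ptent = *pte;

            if (pte_none(ptent))
                    continue;
            if (pte_present(ptent)) {
                    struct page *page = vm_normal_page(vma, addr, ptent);

                    ptent = ptep_get_and_clear_full(mm, addr, pte,
                                                    tlb->fullmm);
                    tlb_remove_tlb_entry(tlb, pte, addr);
                    if (page) {
                            if (PageAnon(page))
                                    rss[MM_ANONPAGES]--;
                            else
                                    rss[MM_FILEPAGES]--;
                            page_remove_rmap(page);
                            tlb_remove_page(tlb, page);
                    }
            }
    } while (pte++, addr += PAGE_SIZE, addr != end);

    add_mm_rss_vec(mm, rss);        /* one batched counter update */
    pte_unmap_unlock(start_pte, ptl);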
1336 struct mm_struct *mm = vma->vm_mm; in unmap_vmas() local
1338 mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); in unmap_vmas()
1341 mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); in unmap_vmas()
1356 struct mm_struct *mm = vma->vm_mm; in zap_page_range() local
1361 tlb_gather_mmu(&tlb, mm, start, end); in zap_page_range()
1362 update_hiwater_rss(mm); in zap_page_range()
1363 mmu_notifier_invalidate_range_start(mm, start, end); in zap_page_range()
1366 mmu_notifier_invalidate_range_end(mm, start, end); in zap_page_range()
1382 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single() local
1387 tlb_gather_mmu(&tlb, mm, address, end); in zap_page_range_single()
1388 update_hiwater_rss(mm); in zap_page_range_single()
1389 mmu_notifier_invalidate_range_start(mm, address, end); in zap_page_range_single()
1391 mmu_notifier_invalidate_range_end(mm, address, end); in zap_page_range_single()
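zap_page_range() and zap_page_range_single() wrap the unmap in the same bracket: gather a mmu_gather for the range, snapshot the RSS high-water mark before it shrinks, and bound the work with the mmu-notifier start/end pair so secondary MMUs (KVM, IOMMU drivers) can drop their copies of the mappings. The canonical sequence, condensed from zap_page_range():

    struct mmu_gather tlb;

    lru_add_drain();
    tlb_gather_mmu(&tlb, mm, start, end);
    update_hiwater_rss(mm);
    mmu_notifier_invalidate_range_start(mm, start, end);
    for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
            unmap_single_vma(&tlb, vma, start, end, NULL);
    mmu_notifier_invalidate_range_end(mm, start, end);
    tlb_finish_mmu(&tlb, start, end);       /* flush TLB, free the pages */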
1418 pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, in __get_locked_pte() argument
1421 pgd_t * pgd = pgd_offset(mm, addr); in __get_locked_pte()
1422 pud_t * pud = pud_alloc(mm, pgd, addr); in __get_locked_pte()
1424 pmd_t * pmd = pmd_alloc(mm, pud, addr); in __get_locked_pte()
1427 return pte_alloc_map_lock(mm, pmd, addr, ptl); in __get_locked_pte()
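__get_locked_pte() walks pgd → pud → pmd, allocating any missing level, and hands back the PTE mapped and locked; every caller must balance it with pte_unmap_unlock(), as insert_page() below does. Typical caller shape:

    spinlock_t *ptl;
    pte_t *pte;

    pte = get_locked_pte(mm, addr, &ptl);
    if (!pte)
            return -ENOMEM;
    if (!pte_none(*pte)) {          /* slot already in use */
            pte_unmap_unlock(pte, ptl);
            return -EBUSY;
    }
    set_pte_at(mm, addr, pte, mk_pte(page, prot));
    pte_unmap_unlock(pte, ptl);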
1443 struct mm_struct *mm = vma->vm_mm; in insert_page() local
1453 pte = get_locked_pte(mm, addr, &ptl); in insert_page()
1462 inc_mm_counter_fast(mm, MM_FILEPAGES); in insert_page()
1464 set_pte_at(mm, addr, pte, mk_pte(page, prot)); in insert_page()
1521 struct mm_struct *mm = vma->vm_mm; in insert_pfn() local
1527 pte = get_locked_pte(mm, addr, &ptl); in insert_pfn()
1536 set_pte_at(mm, addr, pte, entry); in insert_pfn()
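insert_page() backs the exported vm_insert_page() helper, which is how a driver maps kernel-allocated pages into userspace without touching PTEs itself; note how it charges MM_FILEPAGES for the new mapping. A hedged sketch of a caller (mydrv and its single page are hypothetical):

    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct mydrv *drv = file->private_data;     /* hypothetical */

            if (vma->vm_end - vma->vm_start < PAGE_SIZE)
                    return -EINVAL;
            /* maps drv->page at the start of the VMA */
            return vm_insert_page(vma, vma->vm_start, drv->page);
    }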
1621 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, in remap_pte_range() argument
1628 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
1634 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); in remap_pte_range()
1642 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, in remap_pmd_range() argument
1650 pmd = pmd_alloc(mm, pud, addr); in remap_pmd_range()
1656 if (remap_pte_range(mm, pmd, addr, next, in remap_pmd_range()
1663 static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, in remap_pud_range() argument
1671 pud = pud_alloc(mm, pgd, addr); in remap_pud_range()
1676 if (remap_pmd_range(mm, pud, addr, next, in remap_pud_range()
1699 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range() local
1734 pgd = pgd_offset(mm, addr); in remap_pfn_range()
1738 err = remap_pud_range(mm, pgd, addr, next, in remap_pfn_range()
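remap_pfn_range() is the standard way to map raw page frames (typically device MMIO) into a VMA: remap_pud/pmd/pte_range above allocate the intermediate tables and install special PTEs, so no struct page is involved. Classic use from a character driver's mmap method (phys_base is a hypothetical MMIO base address):

    static int mydrv_mmap_mmio(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long size = vma->vm_end - vma->vm_start;

            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            return remap_pfn_range(vma, vma->vm_start,
                                   phys_base >> PAGE_SHIFT,  /* hypothetical */
                                   size, vma->vm_page_prot);
    }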
1798 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, in apply_to_pte_range() argument
1807 pte = (mm == &init_mm) ? in apply_to_pte_range()
1809 pte_alloc_map_lock(mm, pmd, addr, &ptl); in apply_to_pte_range()
1827 if (mm != &init_mm) in apply_to_pte_range()
1832 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, in apply_to_pmd_range() argument
1842 pmd = pmd_alloc(mm, pud, addr); in apply_to_pmd_range()
1847 err = apply_to_pte_range(mm, pmd, addr, next, fn, data); in apply_to_pmd_range()
1854 static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, in apply_to_pud_range() argument
1862 pud = pud_alloc(mm, pgd, addr); in apply_to_pud_range()
1867 err = apply_to_pmd_range(mm, pud, addr, next, fn, data); in apply_to_pud_range()
1878 int apply_to_page_range(struct mm_struct *mm, unsigned long addr, in apply_to_page_range() argument
1887 pgd = pgd_offset(mm, addr); in apply_to_page_range()
1890 err = apply_to_pud_range(mm, pgd, addr, next, fn, data); in apply_to_page_range()
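apply_to_page_range() performs the same level-by-level walk as the remap_*_range helpers but hands every PTE, with its lock held, to a pte_fn_t callback; note the init_mm special-casing in apply_to_pte_range(), which skips the user page-table lock for kernel mappings. A minimal user, assuming the pte_fn_t signature of this kernel generation (it still receives the pgtable token):

    /* Count populated PTEs in [addr, addr + size); data points at the tally. */
    static int count_present_pte(pte_t *pte, pgtable_t token,
                                 unsigned long addr, void *data)
    {
            unsigned long *count = data;

            if (!pte_none(*pte))
                    (*count)++;
            return 0;               /* a nonzero return aborts the walk */
    }

    unsigned long present = 0;
    int err = apply_to_page_range(mm, addr, size, count_present_pte, &present);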
1907 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, in pte_unmap_same() argument
1913 spinlock_t *ptl = pte_lockptr(mm, pmd); in pte_unmap_same()
1992 static inline int wp_page_reuse(struct mm_struct *mm, in wp_page_reuse() argument
2059 static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, in wp_page_copy() argument
2086 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) in wp_page_copy()
2089 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); in wp_page_copy()
2094 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in wp_page_copy()
2098 dec_mm_counter_fast(mm, MM_FILEPAGES); in wp_page_copy()
2099 inc_mm_counter_fast(mm, MM_ANONPAGES); in wp_page_copy()
2102 inc_mm_counter_fast(mm, MM_ANONPAGES); in wp_page_copy()
2122 set_pte_at_notify(mm, address, page_table, entry); in wp_page_copy()
2161 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); in wp_page_copy()
2187 static int wp_pfn_shared(struct mm_struct *mm, in wp_pfn_shared() argument
2205 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in wp_pfn_shared()
2215 return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, in wp_pfn_shared()
2219 static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, in wp_page_shared() argument
2250 page_table = pte_offset_map_lock(mm, pmd, address, in wp_page_shared()
2261 return wp_page_reuse(mm, vma, address, page_table, ptl, in wp_page_shared()
2283 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_wp_page() argument
2301 return wp_pfn_shared(mm, vma, address, page_table, ptl, in do_wp_page()
2305 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
2318 page_table = pte_offset_map_lock(mm, pmd, address, in do_wp_page()
2336 return wp_page_reuse(mm, vma, address, page_table, ptl, in do_wp_page()
2342 return wp_page_shared(mm, vma, address, page_table, pmd, in do_wp_page()
2352 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
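do_wp_page() is the write-protect fault dispatcher behind all of the wp_* helpers above: depending on what sits behind the read-only PTE it reuses the page in place, notifies a shared mapping, or does the actual copy-on-write in wp_page_copy() (where the MM_FILEPAGES/MM_ANONPAGES counters move). Its decision tree, condensed with argument lists elided:

    old_page = vm_normal_page(vma, address, orig_pte);
    if (!old_page) {
            /* pfn mapping (VM_PFNMAP/VM_MIXEDMAP): no struct page */
            if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                 (VM_WRITE|VM_SHARED))
                    return wp_pfn_shared(...);
            return wp_page_copy(...);
    }
    if (PageAnon(old_page) && reuse_swap_page(old_page))
            return wp_page_reuse(...);     /* sole owner: make it writable */
    if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))
            return wp_page_shared(...);    /* tell the filesystem, then reuse */
    return wp_page_copy(...);              /* genuine COW */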
2444 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_swap_page() argument
2457 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) in do_swap_page()
2463 migration_entry_wait(mm, pmd, address); in do_swap_page()
2482 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2492 mem_cgroup_count_vm_event(mm, PGMAJFAULT); in do_swap_page()
2505 locked = lock_page_or_retry(page, mm, flags); in do_swap_page()
2529 if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) { in do_swap_page()
2537 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2556 inc_mm_counter_fast(mm, MM_ANONPAGES); in do_swap_page()
2557 dec_mm_counter_fast(mm, MM_SWAPENTS); in do_swap_page()
2568 set_pte_at(mm, address, page_table, pte); in do_swap_page()
2596 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); in do_swap_page()
2661 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_anonymous_page() argument
2681 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) { in do_anonymous_page()
2684 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2703 if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg)) in do_anonymous_page()
2710 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2714 inc_mm_counter_fast(mm, MM_ANONPAGES); in do_anonymous_page()
2719 set_pte_at(mm, address, page_table, entry); in do_anonymous_page()
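do_anonymous_page() has the two outcomes visible in the counter calls above: a read fault installs the shared zero page without charging anything, while a write fault charges the memcg, bumps MM_ANONPAGES, and maps a freshly zeroed page writable. Condensed:

    if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
            /* read fault: shared zero page, COW happens on first write */
            entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
                                          vma->vm_page_prot));
    } else {
            page = alloc_zeroed_user_highpage_movable(vma, address);
            entry = mk_pte(page, vma->vm_page_prot);
            entry = pte_mkwrite(pte_mkdirty(entry));
            inc_mm_counter_fast(mm, MM_ANONPAGES);
            page_add_new_anon_rmap(page, vma, address);
    }
    set_pte_at(mm, address, page_table, entry);
    update_mmu_cache(vma, address, page_table);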
2921 static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_read_fault() argument
2936 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
2947 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
2961 static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_cow_fault() argument
2978 if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) { in do_cow_fault()
2991 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_cow_fault()
3027 static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_shared_fault() argument
3056 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_shared_fault()
3096 static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_fault() argument
3108 return do_read_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3111 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3113 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); in do_fault()
3131 static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_numa_page() argument
3155 ptl = pte_lockptr(mm, pmd); in do_numa_page()
3167 set_pte_at(mm, addr, ptep, pte); in do_numa_page()
3233 static int handle_pte_fault(struct mm_struct *mm, in handle_pte_fault() argument
3253 return do_fault(mm, vma, address, pte, pmd, in handle_pte_fault()
3256 return do_anonymous_page(mm, vma, address, pte, pmd, in handle_pte_fault()
3259 return do_swap_page(mm, vma, address, in handle_pte_fault()
3264 return do_numa_page(mm, vma, address, entry, pte, pmd); in handle_pte_fault()
3266 ptl = pte_lockptr(mm, pmd); in handle_pte_fault()
3272 return do_wp_page(mm, vma, address, in handle_pte_fault()
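handle_pte_fault() is the per-PTE dispatcher these calls belong to: not-present entries go to the fault or swap paths, PROT_NONE entries to NUMA hinting, and a write through a read-only PTE to do_wp_page(). The core dispatch, condensed:

    entry = *pte;
    if (!pte_present(entry)) {
            if (pte_none(entry))
                    return vma->vm_ops ?
                            do_fault(mm, vma, address, pte, pmd, flags, entry) :
                            do_anonymous_page(mm, vma, address, pte, pmd, flags);
            return do_swap_page(mm, vma, address, pte, pmd, flags, entry);
    }
    if (pte_protnone(entry))
            return do_numa_page(mm, vma, address, entry, pte, pmd);

    ptl = pte_lockptr(mm, pmd);
    spin_lock(ptl);
    if (flags & FAULT_FLAG_WRITE) {
            if (!pte_write(entry))
                    return do_wp_page(mm, vma, address, pte, pmd, ptl, entry);
            entry = pte_mkdirty(entry);
    }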
3300 static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, in __handle_mm_fault() argument
3309 return hugetlb_fault(mm, vma, address, flags); in __handle_mm_fault()
3311 pgd = pgd_offset(mm, address); in __handle_mm_fault()
3312 pud = pud_alloc(mm, pgd, address); in __handle_mm_fault()
3315 pmd = pmd_alloc(mm, pud, address); in __handle_mm_fault()
3321 ret = do_huge_pmd_anonymous_page(mm, vma, address, in __handle_mm_fault()
3342 return do_huge_pmd_numa_page(mm, vma, address, in __handle_mm_fault()
3346 ret = do_huge_pmd_wp_page(mm, vma, address, pmd, in __handle_mm_fault()
3351 huge_pmd_set_accessed(mm, vma, address, pmd, in __handle_mm_fault()
3364 unlikely(__pte_alloc(mm, vma, pmd, address))) in __handle_mm_fault()
3387 return handle_pte_fault(mm, vma, address, pte, pmd, flags); in __handle_mm_fault()
3396 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, in handle_mm_fault() argument
3404 mem_cgroup_count_vm_event(mm, PGFAULT); in handle_mm_fault()
3416 ret = __handle_mm_fault(mm, vma, address, flags); in handle_mm_fault()
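handle_mm_fault() is the entry point the architecture page-fault handlers call with mmap_sem held for read; __handle_mm_fault() then walks pgd → pud → pmd, diverting to the huge-PMD paths where appropriate, before reaching handle_pte_fault(). A typical arch caller of this era, condensed:

    down_read(&mm->mmap_sem);
    vma = find_vma(mm, address);
    if (!vma || vma->vm_start > address)
            goto bad_area;                  /* SIGSEGV */
    fault = handle_mm_fault(mm, vma, address, flags);
    if (fault & VM_FAULT_ERROR)
            goto handle_fault_error;        /* OOM, SIGBUS, ... */
    up_read(&mm->mmap_sem);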
3439 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) in __pud_alloc() argument
3441 pud_t *new = pud_alloc_one(mm, address); in __pud_alloc()
3447 spin_lock(&mm->page_table_lock); in __pud_alloc()
3449 pud_free(mm, new); in __pud_alloc()
3451 pgd_populate(mm, pgd, new); in __pud_alloc()
3452 spin_unlock(&mm->page_table_lock); in __pud_alloc()
3462 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) in __pmd_alloc() argument
3464 pmd_t *new = pmd_alloc_one(mm, address); in __pmd_alloc()
3470 spin_lock(&mm->page_table_lock); in __pmd_alloc()
3473 mm_inc_nr_pmds(mm); in __pmd_alloc()
3474 pud_populate(mm, pud, new); in __pmd_alloc()
3476 pmd_free(mm, new); in __pmd_alloc()
3479 mm_inc_nr_pmds(mm); in __pmd_alloc()
3480 pgd_populate(mm, pud, new); in __pmd_alloc()
3482 pmd_free(mm, new); in __pmd_alloc()
3484 spin_unlock(&mm->page_table_lock); in __pmd_alloc()
3489 static int __follow_pte(struct mm_struct *mm, unsigned long address, in __follow_pte() argument
3497 pgd = pgd_offset(mm, address); in __follow_pte()
3514 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); in __follow_pte()
3527 static inline int follow_pte(struct mm_struct *mm, unsigned long address, in follow_pte() argument
3534 !(res = __follow_pte(mm, address, ptepp, ptlp))); in follow_pte()
3623 static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, in __access_remote_vm() argument
3629 down_read(&mm->mmap_sem); in __access_remote_vm()
3636 ret = get_user_pages(tsk, mm, addr, 1, in __access_remote_vm()
3646 vma = find_vma(mm, addr); in __access_remote_vm()
3678 up_read(&mm->mmap_sem); in __access_remote_vm()
3693 int access_remote_vm(struct mm_struct *mm, unsigned long addr, in access_remote_vm() argument
3696 return __access_remote_vm(NULL, mm, addr, buf, len, write); in access_remote_vm()
3707 struct mm_struct *mm; in access_process_vm() local
3710 mm = get_task_mm(tsk); in access_process_vm()
3711 if (!mm) in access_process_vm()
3714 ret = __access_remote_vm(tsk, mm, addr, buf, len, write); in access_process_vm()
3715 mmput(mm); in access_process_vm()
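__access_remote_vm() is the engine behind ptrace and /proc/<pid>/mem: under mmap_sem it pins each page with get_user_pages(), copies through a temporary kernel mapping, and falls back to find_vma() plus vma->vm_ops->access() for VM_IO ranges that gup cannot pin. Typical use through the access_process_vm() wrapper (pre-4.9 signature, with write as a plain int; child and addr are as in a ptrace handler):

    unsigned long word;
    int copied;

    /* ptrace-style read of one word from another task's address space */
    copied = access_process_vm(child, addr, &word, sizeof(word), 0);
    if (copied != sizeof(word))
            return -EIO;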
3725 struct mm_struct *mm = current->mm; in print_vma_addr() local
3735 down_read(&mm->mmap_sem); in print_vma_addr()
3736 vma = find_vma(mm, ip); in print_vma_addr()
3752 up_read(&mm->mmap_sem); in print_vma_addr()
3777 if (current->mm) in might_fault()
3778 might_lock_read(&current->mm->mmap_sem); in might_fault()