Lines matching refs:vma (mm/memory.c)

528 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,  in free_pgtables()  argument
531 while (vma) { in free_pgtables()
532 struct vm_area_struct *next = vma->vm_next; in free_pgtables()
533 unsigned long addr = vma->vm_start; in free_pgtables()
539 unlink_anon_vmas(vma); in free_pgtables()
540 unlink_file_vma(vma); in free_pgtables()
542 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
543 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
549 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
551 vma = next; in free_pgtables()
552 next = vma->vm_next; in free_pgtables()
553 unlink_anon_vmas(vma); in free_pgtables()
554 unlink_file_vma(vma); in free_pgtables()
556 free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
559 vma = next; in free_pgtables()
563 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, in __pte_alloc() argument
599 wait_split_huge_page(vma->anon_vma, pmd); in __pte_alloc()
646 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, in print_bad_pte() argument
649 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
678 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; in print_bad_pte()
679 index = linear_page_index(vma, addr); in print_bad_pte()
689 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); in print_bad_pte()
694 vma->vm_file, in print_bad_pte()
695 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_pte()
696 vma->vm_file ? vma->vm_file->f_op->mmap : NULL, in print_bad_pte()
749 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, in vm_normal_page() argument
757 if (vma->vm_ops && vma->vm_ops->find_special_page) in vm_normal_page()
758 return vma->vm_ops->find_special_page(vma, addr); in vm_normal_page()
759 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) in vm_normal_page()
762 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
768 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) { in vm_normal_page()
769 if (vma->vm_flags & VM_MIXEDMAP) { in vm_normal_page()
775 off = (addr - vma->vm_start) >> PAGE_SHIFT; in vm_normal_page()
776 if (pfn == vma->vm_pgoff + off) in vm_normal_page()
778 if (!is_cow_mapping(vma->vm_flags)) in vm_normal_page()
787 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
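
vm_normal_page() is the helper the rest of this file leans on to turn a pte into the struct page it maps; a NULL return means there is no struct page the caller should touch (special VM_PFNMAP/VM_MIXEDMAP ptes, the zero page). A minimal, hedged sketch of how a pte walker would use it; the walker itself is hypothetical, not from this file:

#include <linux/mm.h>

/* hypothetical helper: does the pte at 'addr' map an ordinary page? */
static bool pte_maps_normal_page(struct vm_area_struct *vma, pmd_t *pmd,
                                 unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        struct page *page = NULL;

        if (pte_present(*ptep))
                page = vm_normal_page(vma, addr, *ptep);
        pte_unmap_unlock(ptep, ptl);

        /* NULL from vm_normal_page(): raw pfn or zero page, no struct page to use */
        return page != NULL;
}
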
807 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, in copy_one_pte() argument
810 unsigned long vm_flags = vma->vm_flags; in copy_one_pte()
872 page = vm_normal_page(vma, addr, pte); in copy_one_pte()
888 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, in copy_pte_range() argument
927 vma, addr, rss); in copy_pte_range()
951 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, in copy_pmd_range() argument
967 dst_pmd, src_pmd, addr, vma); in copy_pmd_range()
977 vma, addr, next)) in copy_pmd_range()
984 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, in copy_pud_range() argument
999 vma, addr, next)) in copy_pud_range()
1006 struct vm_area_struct *vma) in copy_page_range() argument
1010 unsigned long addr = vma->vm_start; in copy_page_range()
1011 unsigned long end = vma->vm_end; in copy_page_range()
1023 if (!(vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) && in copy_page_range()
1024 !vma->anon_vma) in copy_page_range()
1027 if (is_vm_hugetlb_page(vma)) in copy_page_range()
1028 return copy_hugetlb_page_range(dst_mm, src_mm, vma); in copy_page_range()
1030 if (unlikely(vma->vm_flags & VM_PFNMAP)) { in copy_page_range()
1035 ret = track_pfn_copy(vma); in copy_page_range()
1046 is_cow = is_cow_mapping(vma->vm_flags); in copy_page_range()
1061 vma, addr, next))) { in copy_page_range()
1073 struct vm_area_struct *vma, pmd_t *pmd, in zap_pte_range() argument
1099 page = vm_normal_page(vma, addr, ptent); in zap_pte_range()
1123 likely(!(vma->vm_flags & VM_SEQ_READ))) in zap_pte_range()
1129 print_bad_pte(vma, addr, ptent, page); in zap_pte_range()
1155 print_bad_pte(vma, addr, ptent, NULL); in zap_pte_range()
1185 struct vm_area_struct *vma, pud_t *pud, in zap_pmd_range() argument
1201 vma->vm_start, in zap_pmd_range()
1202 vma->vm_end); in zap_pmd_range()
1206 split_huge_page_pmd(vma, addr, pmd); in zap_pmd_range()
1207 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) in zap_pmd_range()
1220 next = zap_pte_range(tlb, vma, pmd, addr, next, details); in zap_pmd_range()
1229 struct vm_area_struct *vma, pgd_t *pgd, in zap_pud_range() argument
1241 next = zap_pmd_range(tlb, vma, pud, addr, next, details); in zap_pud_range()
1248 struct vm_area_struct *vma, in unmap_page_range() argument
1259 tlb_start_vma(tlb, vma); in unmap_page_range()
1260 pgd = pgd_offset(vma->vm_mm, addr); in unmap_page_range()
1265 next = zap_pud_range(tlb, vma, pgd, addr, next, details); in unmap_page_range()
1267 tlb_end_vma(tlb, vma); in unmap_page_range()
1272 struct vm_area_struct *vma, unsigned long start_addr, in unmap_single_vma() argument
1276 unsigned long start = max(vma->vm_start, start_addr); in unmap_single_vma()
1279 if (start >= vma->vm_end) in unmap_single_vma()
1281 end = min(vma->vm_end, end_addr); in unmap_single_vma()
1282 if (end <= vma->vm_start) in unmap_single_vma()
1285 if (vma->vm_file) in unmap_single_vma()
1286 uprobe_munmap(vma, start, end); in unmap_single_vma()
1288 if (unlikely(vma->vm_flags & VM_PFNMAP)) in unmap_single_vma()
1289 untrack_pfn(vma, 0, 0); in unmap_single_vma()
1292 if (unlikely(is_vm_hugetlb_page(vma))) { in unmap_single_vma()
1304 if (vma->vm_file) { in unmap_single_vma()
1305 i_mmap_lock_write(vma->vm_file->f_mapping); in unmap_single_vma()
1306 __unmap_hugepage_range_final(tlb, vma, start, end, NULL); in unmap_single_vma()
1307 i_mmap_unlock_write(vma->vm_file->f_mapping); in unmap_single_vma()
1310 unmap_page_range(tlb, vma, start, end, details); in unmap_single_vma()
1333 struct vm_area_struct *vma, unsigned long start_addr, in unmap_vmas() argument
1336 struct mm_struct *mm = vma->vm_mm; in unmap_vmas()
1339 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) in unmap_vmas()
1340 unmap_single_vma(tlb, vma, start_addr, end_addr, NULL); in unmap_vmas()
1353 void zap_page_range(struct vm_area_struct *vma, unsigned long start, in zap_page_range() argument
1356 struct mm_struct *mm = vma->vm_mm; in zap_page_range()
1364 for ( ; vma && vma->vm_start < end; vma = vma->vm_next) in zap_page_range()
1365 unmap_single_vma(&tlb, vma, start, end, details); in zap_page_range()
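
zap_page_range() is the exported "remove every pte in this range" entry point; madvise(MADV_DONTNEED) is the textbook caller in this kernel. A hedged sketch of that usage; discard_range() is a hypothetical wrapper:

#include <linux/mm.h>

/* hypothetical wrapper: drop every pte in [start, end) of this vma */
static void discard_range(struct vm_area_struct *vma,
                          unsigned long start, unsigned long end)
{
        /* details == NULL means no filtering: zap everything in the range */
        zap_page_range(vma, start, end - start, NULL);
}
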
1379 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1382 struct mm_struct *mm = vma->vm_mm; in zap_page_range_single()
1390 unmap_single_vma(&tlb, vma, address, end, details); in zap_page_range_single()
1407 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1410 if (address < vma->vm_start || address + size > vma->vm_end || in zap_vma_ptes()
1411 !(vma->vm_flags & VM_PFNMAP)) in zap_vma_ptes()
1413 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
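
zap_vma_ptes() is the driver-facing wrapper: as the lines above show, it only accepts a range fully inside a VM_PFNMAP vma and then delegates to zap_page_range_single(). A hedged usage sketch; my_revoke_mapping() is hypothetical:

#include <linux/mm.h>

/* hypothetical: tear down every pte the driver inserted into this vma */
static int my_revoke_mapping(struct vm_area_struct *vma)
{
        /* fails (-1) unless the whole range lies inside a VM_PFNMAP vma */
        return zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
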
1440 static int insert_page(struct vm_area_struct *vma, unsigned long addr, in insert_page() argument
1443 struct mm_struct *mm = vma->vm_mm; in insert_page()
1502 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
1505 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_page()
1509 if (!(vma->vm_flags & VM_MIXEDMAP)) { in vm_insert_page()
1510 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem)); in vm_insert_page()
1511 BUG_ON(vma->vm_flags & VM_PFNMAP); in vm_insert_page()
1512 vma->vm_flags |= VM_MIXEDMAP; in vm_insert_page()
1514 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_page()
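
vm_insert_page() is how a driver maps one of its own kernel pages into userspace; per the lines above it flips the vma to VM_MIXEDMAP on first use and refuses VM_PFNMAP vmas. A hedged ->mmap sketch; my_dev_page() is a hypothetical accessor for a page the driver already holds:

#include <linux/fs.h>
#include <linux/mm.h>

/* hypothetical: returns an order-0 page the driver holds a reference on */
struct page *my_dev_page(void *priv);

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct page *page = my_dev_page(file->private_data);

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        /* inserts at vm_start using the vma's own vm_page_prot */
        return vm_insert_page(vma, vma->vm_start, page);
}
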
1518 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, in insert_pfn() argument
1521 struct mm_struct *mm = vma->vm_mm; in insert_pfn()
1537 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
1563 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pfn() argument
1567 pgprot_t pgprot = vma->vm_page_prot; in vm_insert_pfn()
1574 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vm_insert_pfn()
1575 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vm_insert_pfn()
1577 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vm_insert_pfn()
1578 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn)); in vm_insert_pfn()
1580 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_pfn()
1582 if (track_pfn_insert(vma, &pgprot, pfn)) in vm_insert_pfn()
1585 ret = insert_pfn(vma, addr, pfn, pgprot); in vm_insert_pfn()
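
vm_insert_pfn() is the usual way a VM_PFNMAP driver populates ptes lazily from its fault handler. A hedged sketch against the vm_ops->fault signature of this kernel; my_base_pfn and my_fault() are hypothetical:

#include <linux/mm.h>

static unsigned long my_base_pfn;       /* hypothetical: first pfn of the device region */

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long addr = (unsigned long)vmf->virtual_address;
        int ret = vm_insert_pfn(vma, addr, my_base_pfn + vmf->pgoff);

        /* -EBUSY: another thread raced us and the pte is already populated */
        if (ret == -ENOMEM)
                return VM_FAULT_OOM;
        if (ret < 0 && ret != -EBUSY)
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;
}
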
1591 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, in vm_insert_mixed() argument
1594 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP)); in vm_insert_mixed()
1596 if (addr < vma->vm_start || addr >= vma->vm_end) in vm_insert_mixed()
1610 return insert_page(vma, addr, page, vma->vm_page_prot); in vm_insert_mixed()
1612 return insert_pfn(vma, addr, pfn, vma->vm_page_prot); in vm_insert_mixed()
1693 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
1699 struct mm_struct *mm = vma->vm_mm; in remap_pfn_range()
1720 if (is_cow_mapping(vma->vm_flags)) { in remap_pfn_range()
1721 if (addr != vma->vm_start || end != vma->vm_end) in remap_pfn_range()
1723 vma->vm_pgoff = pfn; in remap_pfn_range()
1726 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size)); in remap_pfn_range()
1730 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range()
1735 flush_cache_range(vma, addr, end); in remap_pfn_range()
1745 untrack_pfn(vma, pfn, PAGE_ALIGN(size)); in remap_pfn_range()
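
remap_pfn_range() is the classic one-shot way to map a physical region from a driver's ->mmap; the lines above show it marking the vma VM_IO|VM_PFNMAP|VM_DONTEXPAND|VM_DONTDUMP and doing PAT tracking. A hedged sketch; MY_DEVICE_PHYS is a hypothetical base address:

#include <linux/fs.h>
#include <linux/mm.h>

#define MY_DEVICE_PHYS  0x10000000UL    /* hypothetical physical base address */

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long pfn = (MY_DEVICE_PHYS >> PAGE_SHIFT) + vma->vm_pgoff;

        /* device memory is usually mapped uncached; this part is device specific */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
}
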
1764 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
1783 if (vma->vm_pgoff > pages) in vm_iomap_memory()
1785 pfn += vma->vm_pgoff; in vm_iomap_memory()
1786 pages -= vma->vm_pgoff; in vm_iomap_memory()
1789 vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
1794 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
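
vm_iomap_memory() is the simpler wrapper for the common case: it validates vm_pgoff and the requested length against the physical region and then calls io_remap_pfn_range() itself, as shown above. A hedged sketch; MY_DEVICE_PHYS and MY_DEVICE_LEN are hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

#define MY_DEVICE_PHYS  0x10000000UL    /* hypothetical physical base address */
#define MY_DEVICE_LEN   0x2000UL        /* hypothetical region length */

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* vm_pgoff/length sanity checking against the region happens inside */
        return vm_iomap_memory(vma, MY_DEVICE_PHYS, MY_DEVICE_LEN);
}
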
1923 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) in cow_user_page() argument
1948 copy_user_highpage(dst, src, va, vma); in cow_user_page()
1957 static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, in do_page_mkwrite() argument
1969 ret = vma->vm_ops->page_mkwrite(vma, &vmf); in do_page_mkwrite()
1993 struct vm_area_struct *vma, unsigned long address, in wp_page_reuse() argument
2008 flush_cache_page(vma, address, pte_pfn(orig_pte)); in wp_page_reuse()
2010 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_reuse()
2011 if (ptep_set_access_flags(vma, address, page_table, entry, 1)) in wp_page_reuse()
2012 update_mmu_cache(vma, address, page_table); in wp_page_reuse()
2037 file_update_time(vma->vm_file); in wp_page_reuse()
2059 static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, in wp_page_copy() argument
2071 if (unlikely(anon_vma_prepare(vma))) in wp_page_copy()
2075 new_page = alloc_zeroed_user_highpage_movable(vma, address); in wp_page_copy()
2079 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in wp_page_copy()
2082 cow_user_page(new_page, old_page, address, vma); in wp_page_copy()
2104 flush_cache_page(vma, address, pte_pfn(orig_pte)); in wp_page_copy()
2105 entry = mk_pte(new_page, vma->vm_page_prot); in wp_page_copy()
2106 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in wp_page_copy()
2113 ptep_clear_flush_notify(vma, address, page_table); in wp_page_copy()
2114 page_add_new_anon_rmap(new_page, vma, address); in wp_page_copy()
2116 lru_cache_add_active_or_unevictable(new_page, vma); in wp_page_copy()
2123 update_mmu_cache(vma, address, page_table); in wp_page_copy()
2167 if (page_copied && (vma->vm_flags & VM_LOCKED)) { in wp_page_copy()
2188 struct vm_area_struct *vma, unsigned long address, in wp_pfn_shared() argument
2192 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { in wp_pfn_shared()
2195 .pgoff = linear_page_index(vma, address), in wp_pfn_shared()
2202 ret = vma->vm_ops->pfn_mkwrite(vma, &vmf); in wp_pfn_shared()
2215 return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, in wp_pfn_shared()
2219 static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, in wp_page_shared() argument
2234 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in wp_page_shared()
2238 tmp = do_page_mkwrite(vma, old_page, address); in wp_page_shared()
2261 return wp_page_reuse(mm, vma, address, page_table, ptl, in wp_page_shared()
2283 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_wp_page() argument
2290 old_page = vm_normal_page(vma, address, orig_pte); in do_wp_page()
2299 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
2301 return wp_pfn_shared(mm, vma, address, page_table, ptl, in do_wp_page()
2305 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
2334 page_move_anon_rmap(old_page, vma, address); in do_wp_page()
2336 return wp_page_reuse(mm, vma, address, page_table, ptl, in do_wp_page()
2340 } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == in do_wp_page()
2342 return wp_page_shared(mm, vma, address, page_table, pmd, in do_wp_page()
2352 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
2356 static void unmap_mapping_range_vma(struct vm_area_struct *vma, in unmap_mapping_range_vma() argument
2360 zap_page_range_single(vma, start_addr, end_addr - start_addr, details); in unmap_mapping_range_vma()
2366 struct vm_area_struct *vma; in unmap_mapping_range_tree() local
2369 vma_interval_tree_foreach(vma, root, in unmap_mapping_range_tree()
2372 vba = vma->vm_pgoff; in unmap_mapping_range_tree()
2373 vea = vba + vma_pages(vma) - 1; in unmap_mapping_range_tree()
2382 unmap_mapping_range_vma(vma, in unmap_mapping_range_tree()
2383 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
2384 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, in unmap_mapping_range_tree()
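
These two helpers are driven by unmap_mapping_range(), which filesystems call on truncate or hole-punch to shoot down every user mapping of a file range via the i_mmap interval tree walked above. A hedged sketch of that call; drop_file_mappings() is a hypothetical wrapper:

#include <linux/mm.h>

/* hypothetical wrapper around the exported entry point */
static void drop_file_mappings(struct address_space *mapping,
                               loff_t holebegin, loff_t holelen)
{
        /* even_cows == 1: also zap private COW copies of the file pages */
        unmap_mapping_range(mapping, holebegin, holelen, 1);
}
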
2444 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_swap_page() argument
2467 print_bad_pte(vma, address, orig_pte, NULL); in do_swap_page()
2476 GFP_HIGHUSER_MOVABLE, vma, address); in do_swap_page()
2522 page = ksm_might_need_to_copy(page, vma, address); in do_swap_page()
2558 pte = mk_pte(page, vma->vm_page_prot); in do_swap_page()
2560 pte = maybe_mkwrite(pte_mkdirty(pte), vma); in do_swap_page()
2565 flush_icache_page(vma, page); in do_swap_page()
2570 do_page_add_anon_rmap(page, vma, address, exclusive); in do_swap_page()
2573 page_add_new_anon_rmap(page, vma, address); in do_swap_page()
2575 lru_cache_add_active_or_unevictable(page, vma); in do_swap_page()
2579 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) in do_swap_page()
2596 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); in do_swap_page()
2603 update_mmu_cache(vma, address, page_table); in do_swap_page()
2627 static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) in check_stack_guard_page() argument
2630 if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { in check_stack_guard_page()
2631 struct vm_area_struct *prev = vma->vm_prev; in check_stack_guard_page()
2642 return expand_downwards(vma, address - PAGE_SIZE); in check_stack_guard_page()
2644 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { in check_stack_guard_page()
2645 struct vm_area_struct *next = vma->vm_next; in check_stack_guard_page()
2651 return expand_upwards(vma, address + PAGE_SIZE); in check_stack_guard_page()
2661 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_anonymous_page() argument
2673 if (vma->vm_flags & VM_SHARED) in do_anonymous_page()
2677 if (check_stack_guard_page(vma, address) < 0) in do_anonymous_page()
2683 vma->vm_page_prot)); in do_anonymous_page()
2691 if (unlikely(anon_vma_prepare(vma))) in do_anonymous_page()
2693 page = alloc_zeroed_user_highpage_movable(vma, address); in do_anonymous_page()
2706 entry = mk_pte(page, vma->vm_page_prot); in do_anonymous_page()
2707 if (vma->vm_flags & VM_WRITE) in do_anonymous_page()
2715 page_add_new_anon_rmap(page, vma, address); in do_anonymous_page()
2717 lru_cache_add_active_or_unevictable(page, vma); in do_anonymous_page()
2722 update_mmu_cache(vma, address, page_table); in do_anonymous_page()
2741 static int __do_fault(struct vm_area_struct *vma, unsigned long address, in __do_fault() argument
2754 ret = vma->vm_ops->fault(vma, &vmf); in __do_fault()
2792 void do_set_pte(struct vm_area_struct *vma, unsigned long address, in do_set_pte() argument
2797 flush_icache_page(vma, page); in do_set_pte()
2798 entry = mk_pte(page, vma->vm_page_prot); in do_set_pte()
2800 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in do_set_pte()
2802 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); in do_set_pte()
2803 page_add_new_anon_rmap(page, vma, address); in do_set_pte()
2805 inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES); in do_set_pte()
2808 set_pte_at(vma->vm_mm, address, pte, entry); in do_set_pte()
2811 update_mmu_cache(vma, address, pte); in do_set_pte()
2878 static void do_fault_around(struct vm_area_struct *vma, unsigned long address, in do_fault_around() argument
2889 start_addr = max(address & mask, vma->vm_start); in do_fault_around()
2900 max_pgoff = min3(max_pgoff, vma_pages(vma) + vma->vm_pgoff - 1, in do_fault_around()
2908 if (start_addr >= vma->vm_end) in do_fault_around()
2918 vma->vm_ops->map_pages(vma, &vmf); in do_fault_around()
2921 static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_read_fault() argument
2935 if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { in do_read_fault()
2937 do_fault_around(vma, address, pte, pgoff, flags); in do_read_fault()
2943 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); in do_read_fault()
2954 do_set_pte(vma, address, fault_page, pte, false, false); in do_read_fault()
2961 static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_cow_fault() argument
2971 if (unlikely(anon_vma_prepare(vma))) in do_cow_fault()
2974 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in do_cow_fault()
2983 ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page); in do_cow_fault()
2988 copy_user_highpage(new_page, fault_page, address, vma); in do_cow_fault()
3002 i_mmap_unlock_read(vma->vm_file->f_mapping); in do_cow_fault()
3006 do_set_pte(vma, address, new_page, pte, true, true); in do_cow_fault()
3008 lru_cache_add_active_or_unevictable(new_page, vma); in do_cow_fault()
3018 i_mmap_unlock_read(vma->vm_file->f_mapping); in do_cow_fault()
3027 static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_shared_fault() argument
3038 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); in do_shared_fault()
3046 if (vma->vm_ops->page_mkwrite) { in do_shared_fault()
3048 tmp = do_page_mkwrite(vma, fault_page, address); in do_shared_fault()
3063 do_set_pte(vma, address, fault_page, pte, true, false); in do_shared_fault()
3076 if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) { in do_shared_fault()
3084 if (!vma->vm_ops->page_mkwrite) in do_shared_fault()
3085 file_update_time(vma->vm_file); in do_shared_fault()
3096 static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma, in do_fault() argument
3101 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; in do_fault()
3105 if (!vma->vm_ops->fault) in do_fault()
3108 return do_read_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3110 if (!(vma->vm_flags & VM_SHARED)) in do_fault()
3111 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3113 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); in do_fault()
3116 static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma, in numa_migrate_prep() argument
3128 return mpol_misplaced(page, vma, addr); in numa_migrate_prep()
3131 static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_numa_page() argument
3144 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); in do_numa_page()
3163 pte = pte_modify(pte, vma->vm_page_prot); in do_numa_page()
3168 update_mmu_cache(vma, addr, ptep); in do_numa_page()
3170 page = vm_normal_page(vma, addr, pte); in do_numa_page()
3184 if (!(vma->vm_flags & VM_WRITE)) in do_numa_page()
3191 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED)) in do_numa_page()
3196 target_nid = numa_migrate_prep(page, vma, addr, page_nid, &flags); in do_numa_page()
3204 migrated = migrate_misplaced_page(page, vma, target_nid); in do_numa_page()
3234 struct vm_area_struct *vma, unsigned long address, in handle_pte_fault() argument
3252 if (vma->vm_ops) in handle_pte_fault()
3253 return do_fault(mm, vma, address, pte, pmd, in handle_pte_fault()
3256 return do_anonymous_page(mm, vma, address, pte, pmd, in handle_pte_fault()
3259 return do_swap_page(mm, vma, address, in handle_pte_fault()
3264 return do_numa_page(mm, vma, address, entry, pte, pmd); in handle_pte_fault()
3272 return do_wp_page(mm, vma, address, in handle_pte_fault()
3277 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
3278 update_mmu_cache(vma, address, pte); in handle_pte_fault()
3287 flush_tlb_fix_spurious_fault(vma, address); in handle_pte_fault()
3300 static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, in __handle_mm_fault() argument
3308 if (unlikely(is_vm_hugetlb_page(vma))) in __handle_mm_fault()
3309 return hugetlb_fault(mm, vma, address, flags); in __handle_mm_fault()
3318 if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) { in __handle_mm_fault()
3320 if (!vma->vm_ops) in __handle_mm_fault()
3321 ret = do_huge_pmd_anonymous_page(mm, vma, address, in __handle_mm_fault()
3342 return do_huge_pmd_numa_page(mm, vma, address, in __handle_mm_fault()
3346 ret = do_huge_pmd_wp_page(mm, vma, address, pmd, in __handle_mm_fault()
3351 huge_pmd_set_accessed(mm, vma, address, pmd, in __handle_mm_fault()
3364 unlikely(__pte_alloc(mm, vma, pmd, address))) in __handle_mm_fault()
3387 return handle_pte_fault(mm, vma, address, pte, pmd, flags); in __handle_mm_fault()
3396 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, in handle_mm_fault() argument
3416 ret = __handle_mm_fault(mm, vma, address, flags); in handle_mm_fault()
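
handle_mm_fault() is the entry point every architecture's page-fault handler (and get_user_pages()) funnels into. A deliberately stripped-down, hedged sketch of that calling pattern; real handlers add retry handling, permission checks and signal delivery:

#include <linux/mm.h>
#include <linux/sched.h>

/* hypothetical, heavily simplified caller */
static int fault_in_user_addr(struct mm_struct *mm, unsigned long address,
                              bool write)
{
        struct vm_area_struct *vma;
        unsigned int flags = write ? FAULT_FLAG_WRITE : 0;
        int fault;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma || vma->vm_start > address) {  /* stack expansion omitted */
                up_read(&mm->mmap_sem);
                return -EFAULT;
        }
        fault = handle_mm_fault(mm, vma, address, flags);
        up_read(&mm->mmap_sem);

        /* VM_FAULT_ERROR covers OOM, SIGBUS, SIGSEGV and hwpoison */
        return (fault & VM_FAULT_ERROR) ? -EFAULT : 0;
}
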
3548 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
3555 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
3558 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
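
follow_pfn() resolves the raw frame number behind an address in a VM_IO/VM_PFNMAP vma, for callers that need a pfn rather than a struct page. A hedged sketch; addr_to_pfn() is a hypothetical caller that supplies the mm:

#include <linux/mm.h>

/* hypothetical: translate an address in someone's mm to a raw pfn */
static int addr_to_pfn(struct mm_struct *mm, unsigned long addr,
                       unsigned long *pfn)
{
        struct vm_area_struct *vma;
        int ret = -EFAULT;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, addr);
        if (vma && vma->vm_start <= addr)
                ret = follow_pfn(vma, addr, pfn);  /* rejects non-IO/PFNMAP vmas */
        up_read(&mm->mmap_sem);
        return ret;
}
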
3568 int follow_phys(struct vm_area_struct *vma, in follow_phys() argument
3576 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_phys()
3579 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
3596 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, in generic_access_phys() argument
3604 if (follow_phys(vma, addr, write, &prot, &phys_addr)) in generic_access_phys()
3626 struct vm_area_struct *vma; in __access_remote_vm() local
3637 write, 1, &page, &vma); in __access_remote_vm()
3646 vma = find_vma(mm, addr); in __access_remote_vm()
3647 if (!vma || vma->vm_start > addr) in __access_remote_vm()
3649 if (vma->vm_ops && vma->vm_ops->access) in __access_remote_vm()
3650 ret = vma->vm_ops->access(vma, addr, buf, in __access_remote_vm()
3664 copy_to_user_page(vma, page, addr, in __access_remote_vm()
3668 copy_from_user_page(vma, page, addr, in __access_remote_vm()
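
__access_remote_vm() is the engine behind access_process_vm(), the ptrace-style "read or write another task's memory" primitive. A hedged sketch of a read through that public wrapper; peek_remote() is hypothetical:

#include <linux/mm.h>
#include <linux/sched.h>

/* hypothetical: ptrace-style read of another task's memory */
static int peek_remote(struct task_struct *tsk, unsigned long addr,
                       void *buf, int len)
{
        /* last argument 0 == read; returns the number of bytes copied */
        return access_process_vm(tsk, addr, buf, len, 0);
}
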
3726 struct vm_area_struct *vma; in print_vma_addr() local
3736 vma = find_vma(mm, ip); in print_vma_addr()
3737 if (vma && vma->vm_file) { in print_vma_addr()
3738 struct file *f = vma->vm_file; in print_vma_addr()
3747 vma->vm_start, in print_vma_addr()
3748 vma->vm_end - vma->vm_start); in print_vma_addr()
3817 struct vm_area_struct *vma, in copy_user_gigantic_page() argument
3826 copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma); in copy_user_gigantic_page()
3835 unsigned long addr, struct vm_area_struct *vma, in copy_user_huge_page() argument
3841 copy_user_gigantic_page(dst, src, addr, vma, in copy_user_huge_page()
3849 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma); in copy_user_huge_page()