Lines matching refs:address — uses of the identifier address in mm/memory.c (kernel line number on the left, enclosing function on the right; "argument" marks lines where address is a function parameter)

564 		pmd_t *pmd, unsigned long address) in __pte_alloc() argument
567 pgtable_t new = pte_alloc_one(mm, address); in __pte_alloc()
603 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) in __pte_alloc_kernel() argument
605 pte_t *new = pte_alloc_one_kernel(&init_mm, address); in __pte_alloc_kernel()
1379 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, in zap_page_range_single() argument
1384 unsigned long end = address + size; in zap_page_range_single()
1387 tlb_gather_mmu(&tlb, mm, address, end); in zap_page_range_single()
1389 mmu_notifier_invalidate_range_start(mm, address, end); in zap_page_range_single()
1390 unmap_single_vma(&tlb, vma, address, end, details); in zap_page_range_single()
1391 mmu_notifier_invalidate_range_end(mm, address, end); in zap_page_range_single()
1392 tlb_finish_mmu(&tlb, address, end); in zap_page_range_single()
1407 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, in zap_vma_ptes() argument
1410 if (address < vma->vm_start || address + size > vma->vm_end || in zap_vma_ptes()
1413 zap_page_range_single(vma, address, size, NULL); in zap_vma_ptes()
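The zap path above (1379-1413) brackets unmap_single_vma() with tlb_gather_mmu()/tlb_finish_mmu() and the mmu_notifier invalidate start/end pair. A minimal sketch of a driver dropping one page through the exported wrapper, assuming the ~4.x signatures shown in this listing (the helper name is hypothetical):

	#include <linux/mm.h>

	/*
	 * Hypothetical helper: tear down one page of a mapping a driver
	 * previously populated in @vma. zap_vma_ptes() range-checks the
	 * request against the VMA and then calls zap_page_range_single(),
	 * which wraps unmap_single_vma() in the tlb_gather_mmu()/
	 * tlb_finish_mmu() and mmu_notifier calls seen above.
	 */
	static int example_zap_one_page(struct vm_area_struct *vma,
					unsigned long uaddr)
	{
		return zap_vma_ptes(vma, uaddr & PAGE_MASK, PAGE_SIZE);
	}
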
1958 unsigned long address) in do_page_mkwrite() argument
1963 vmf.virtual_address = (void __user *)(address & PAGE_MASK); in do_page_mkwrite()
1993 struct vm_area_struct *vma, unsigned long address, in wp_page_reuse() argument
2008 flush_cache_page(vma, address, pte_pfn(orig_pte)); in wp_page_reuse()
2011 if (ptep_set_access_flags(vma, address, page_table, entry, 1)) in wp_page_reuse()
2012 update_mmu_cache(vma, address, page_table); in wp_page_reuse()
2060 unsigned long address, pte_t *page_table, pmd_t *pmd, in wp_page_copy() argument
2067 const unsigned long mmun_start = address & PAGE_MASK; /* For mmu_notifiers */ in wp_page_copy()
2075 new_page = alloc_zeroed_user_highpage_movable(vma, address); in wp_page_copy()
2079 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in wp_page_copy()
2082 cow_user_page(new_page, old_page, address, vma); in wp_page_copy()
2094 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in wp_page_copy()
2104 flush_cache_page(vma, address, pte_pfn(orig_pte)); in wp_page_copy()
2113 ptep_clear_flush_notify(vma, address, page_table); in wp_page_copy()
2114 page_add_new_anon_rmap(new_page, vma, address); in wp_page_copy()
2122 set_pte_at_notify(mm, address, page_table, entry); in wp_page_copy()
2123 update_mmu_cache(vma, address, page_table); in wp_page_copy()
2188 struct vm_area_struct *vma, unsigned long address, in wp_pfn_shared() argument
2195 .pgoff = linear_page_index(vma, address), in wp_pfn_shared()
2196 .virtual_address = (void __user *)(address & PAGE_MASK), in wp_pfn_shared()
2205 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in wp_pfn_shared()
2215 return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, in wp_pfn_shared()
2220 unsigned long address, pte_t *page_table, in wp_page_shared() argument
2238 tmp = do_page_mkwrite(vma, old_page, address); in wp_page_shared()
2250 page_table = pte_offset_map_lock(mm, pmd, address, in wp_page_shared()
2261 return wp_page_reuse(mm, vma, address, page_table, ptl, in wp_page_shared()
2284 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_wp_page() argument
2290 old_page = vm_normal_page(vma, address, orig_pte); in do_wp_page()
2301 return wp_pfn_shared(mm, vma, address, page_table, ptl, in do_wp_page()
2305 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
2318 page_table = pte_offset_map_lock(mm, pmd, address, in do_wp_page()
2334 page_move_anon_rmap(old_page, vma, address); in do_wp_page()
2336 return wp_page_reuse(mm, vma, address, page_table, ptl, in do_wp_page()
2342 return wp_page_shared(mm, vma, address, page_table, pmd, in do_wp_page()
2352 return wp_page_copy(mm, vma, address, page_table, pmd, in do_wp_page()
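do_wp_page() (2284 onward) resolves a write fault on a write-protected PTE three ways: wp_page_reuse() for an exclusively owned anonymous page, wp_pfn_shared()/wp_page_shared() for shared writable mappings, and wp_page_copy() for the copy-on-write break. From userspace, the copy branch is what a forked child hits on its first write to inherited anonymous memory; a small, hedged demo:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		char *buf = malloc(4096);

		if (!buf)
			return 1;
		strcpy(buf, "parent data");

		/* fork() write-protects the anonymous PTE in both tasks */
		pid_t pid = fork();

		if (pid == 0) {
			buf[0] = 'C';	/* wp fault -> do_wp_page() -> wp_page_copy() */
			printf("child : %s\n", buf);	/* "Carent data" */
			_exit(0);
		}
		waitpid(pid, NULL, 0);
		printf("parent: %s\n", buf);	/* still "parent data" */
		free(buf);
		return 0;
	}
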
2445 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_swap_page() argument
2463 migration_entry_wait(mm, pmd, address); in do_swap_page()
2467 print_bad_pte(vma, address, orig_pte, NULL); in do_swap_page()
2476 GFP_HIGHUSER_MOVABLE, vma, address); in do_swap_page()
2482 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2522 page = ksm_might_need_to_copy(page, vma, address); in do_swap_page()
2537 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_swap_page()
2568 set_pte_at(mm, address, page_table, pte); in do_swap_page()
2570 do_page_add_anon_rmap(page, vma, address, exclusive); in do_swap_page()
2573 page_add_new_anon_rmap(page, vma, address); in do_swap_page()
2596 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); in do_swap_page()
2603 update_mmu_cache(vma, address, page_table); in do_swap_page()
2627 static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) in check_stack_guard_page() argument
2629 address &= PAGE_MASK; in check_stack_guard_page()
2630 if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { in check_stack_guard_page()
2639 if (prev && prev->vm_end == address) in check_stack_guard_page()
2642 return expand_downwards(vma, address - PAGE_SIZE); in check_stack_guard_page()
2644 if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { in check_stack_guard_page()
2648 if (next && next->vm_start == address + PAGE_SIZE) in check_stack_guard_page()
2651 return expand_upwards(vma, address + PAGE_SIZE); in check_stack_guard_page()
2662 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_anonymous_page() argument
2677 if (check_stack_guard_page(vma, address) < 0) in do_anonymous_page()
2682 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), in do_anonymous_page()
2684 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2693 page = alloc_zeroed_user_highpage_movable(vma, address); in do_anonymous_page()
2710 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); in do_anonymous_page()
2715 page_add_new_anon_rmap(page, vma, address); in do_anonymous_page()
2719 set_pte_at(mm, address, page_table, entry); in do_anonymous_page()
2722 update_mmu_cache(vma, address, page_table); in do_anonymous_page()
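do_anonymous_page() (2662 onward) installs the shared zero page for a read fault (the pte_mkspecial(pfn_pte(my_zero_pfn(...))) line at 2682) and only allocates on a write. A later write to such a zero-page mapping arrives as a wp fault and lands in wp_page_copy()'s alloc_zeroed_user_highpage_movable() branch (2075). A hedged userspace demo of the two faults:

	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;

		/* Read fault: do_anonymous_page() maps the shared zero page. */
		char c = p[0];

		/* Write fault on the read-only zero page: do_wp_page() ->
		 * wp_page_copy() swaps in a freshly zeroed private page. */
		p[0] = 'x';

		printf("%d -> %c\n", c, p[0]);
		munmap(p, 4096);
		return 0;
	}
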
2741 static int __do_fault(struct vm_area_struct *vma, unsigned long address, in __do_fault() argument
2748 vmf.virtual_address = (void __user *)(address & PAGE_MASK); in __do_fault()
2792 void do_set_pte(struct vm_area_struct *vma, unsigned long address, in do_set_pte() argument
2803 page_add_new_anon_rmap(page, vma, address); in do_set_pte()
2808 set_pte_at(vma->vm_mm, address, pte, entry); in do_set_pte()
2811 update_mmu_cache(vma, address, pte); in do_set_pte()
2878 static void do_fault_around(struct vm_area_struct *vma, unsigned long address, in do_fault_around() argument
2889 start_addr = max(address & mask, vma->vm_start); in do_fault_around()
2890 off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); in do_fault_around()
2922 unsigned long address, pmd_t *pmd, in do_read_fault() argument
2936 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
2937 do_fault_around(vma, address, pte, pgoff, flags); in do_read_fault()
2943 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); in do_read_fault()
2947 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
2954 do_set_pte(vma, address, fault_page, pte, false, false); in do_read_fault()
2962 unsigned long address, pmd_t *pmd, in do_cow_fault() argument
2974 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in do_cow_fault()
2983 ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page); in do_cow_fault()
2988 copy_user_highpage(new_page, fault_page, address, vma); in do_cow_fault()
2991 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_cow_fault()
3006 do_set_pte(vma, address, new_page, pte, true, true); in do_cow_fault()
3028 unsigned long address, pmd_t *pmd, in do_shared_fault() argument
3038 ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page); in do_shared_fault()
3048 tmp = do_page_mkwrite(vma, fault_page, address); in do_shared_fault()
3056 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_shared_fault()
3063 do_set_pte(vma, address, fault_page, pte, true, false); in do_shared_fault()
3097 unsigned long address, pte_t *page_table, pmd_t *pmd, in do_fault() argument
3100 pgoff_t pgoff = (((address & PAGE_MASK) in do_fault()
3108 return do_read_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3111 return do_cow_fault(mm, vma, address, pmd, pgoff, flags, in do_fault()
3113 return do_shared_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); in do_fault()
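do_fault() (3097) converts the faulting address into a file page offset, then dispatches: reads go to do_read_fault(), private writes to do_cow_fault(), shared writes to do_shared_fault(). The pgoff arithmetic truncated at 3100 expands, assuming the usual linear_page_index() form, to:

	#include <linux/mm.h>

	/* Sketch of the pgoff computation cut off at line 3100 above:
	 * the page offset of @address within the file backing @vma. */
	static pgoff_t fault_pgoff(struct vm_area_struct *vma,
				   unsigned long address)
	{
		return (((address & PAGE_MASK) - vma->vm_start) >> PAGE_SHIFT)
			+ vma->vm_pgoff;
	}
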
3234 struct vm_area_struct *vma, unsigned long address, in handle_pte_fault() argument
3253 return do_fault(mm, vma, address, pte, pmd, in handle_pte_fault()
3256 return do_anonymous_page(mm, vma, address, pte, pmd, in handle_pte_fault()
3259 return do_swap_page(mm, vma, address, in handle_pte_fault()
3264 return do_numa_page(mm, vma, address, entry, pte, pmd); in handle_pte_fault()
3272 return do_wp_page(mm, vma, address, in handle_pte_fault()
3277 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
3278 update_mmu_cache(vma, address, pte); in handle_pte_fault()
3287 flush_tlb_fix_spurious_fault(vma, address); in handle_pte_fault()
3301 unsigned long address, unsigned int flags) in __handle_mm_fault() argument
3309 return hugetlb_fault(mm, vma, address, flags); in __handle_mm_fault()
3311 pgd = pgd_offset(mm, address); in __handle_mm_fault()
3312 pud = pud_alloc(mm, pgd, address); in __handle_mm_fault()
3315 pmd = pmd_alloc(mm, pud, address); in __handle_mm_fault()
3321 ret = do_huge_pmd_anonymous_page(mm, vma, address, in __handle_mm_fault()
3342 return do_huge_pmd_numa_page(mm, vma, address, in __handle_mm_fault()
3346 ret = do_huge_pmd_wp_page(mm, vma, address, pmd, in __handle_mm_fault()
3351 huge_pmd_set_accessed(mm, vma, address, pmd, in __handle_mm_fault()
3364 unlikely(__pte_alloc(mm, vma, pmd, address))) in __handle_mm_fault()
3385 pte = pte_offset_map(pmd, address); in __handle_mm_fault()
3387 return handle_pte_fault(mm, vma, address, pte, pmd, flags); in __handle_mm_fault()
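__handle_mm_fault() (3301) first gives the hugetlb and transparent-huge-pmd handlers their chance, then performs the allocating walk down to the PTE before handle_pte_fault() dispatches on the entry's state (none, swapped out, NUMA-protected, write-protected). A condensed sketch of that walk, assuming the ~4.x signatures listed above (huge-page shortcuts and the pmd recheck under lock elided):

	#include <linux/mm.h>

	/*
	 * Sketch of the allocating page-table walk in __handle_mm_fault()
	 * (pre-p4d layout). Returns the PTE slot for @address, or NULL
	 * when a table allocation fails.
	 */
	static pte_t *walk_alloc_pte(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long address)
	{
		pgd_t *pgd = pgd_offset(mm, address);
		pud_t *pud = pud_alloc(mm, pgd, address);
		pmd_t *pmd;

		if (!pud)
			return NULL;
		pmd = pmd_alloc(mm, pud, address);
		if (!pmd)
			return NULL;
		/* Populate the PTE page if needed, as at line 3364 above. */
		if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
			return NULL;
		return pte_offset_map(pmd, address);
	}
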
3397 unsigned long address, unsigned int flags) in handle_mm_fault() argument
3416 ret = __handle_mm_fault(mm, vma, address, flags); in handle_mm_fault()
3439 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) in __pud_alloc() argument
3441 pud_t *new = pud_alloc_one(mm, address); in __pud_alloc()
3462 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) in __pmd_alloc() argument
3464 pmd_t *new = pmd_alloc_one(mm, address); in __pmd_alloc()
3489 static int __follow_pte(struct mm_struct *mm, unsigned long address, in __follow_pte() argument
3497 pgd = pgd_offset(mm, address); in __follow_pte()
3501 pud = pud_offset(pgd, address); in __follow_pte()
3505 pmd = pmd_offset(pud, address); in __follow_pte()
3514 ptep = pte_offset_map_lock(mm, pmd, address, ptlp); in __follow_pte()
3527 static inline int follow_pte(struct mm_struct *mm, unsigned long address, in follow_pte() argument
3534 !(res = __follow_pte(mm, address, ptepp, ptlp))); in follow_pte()
3548 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
3558 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); in follow_pfn()
3569 unsigned long address, unsigned int flags, in follow_phys() argument
3579 if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) in follow_phys()
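__follow_pte() (3489) repeats the same pgd/pud/pmd descent read-only and returns the mapped PTE with its lock held; follow_pfn() and follow_phys() build on it for VM_IO/VM_PFNMAP mappings. A hedged example of the exported lookup (helper name hypothetical):

	#include <linux/mm.h>

	/*
	 * Hypothetical helper: resolve a user address in a VM_IO/VM_PFNMAP
	 * mapping to its page frame number. follow_pfn() rejects normal
	 * struct-page-backed mappings with -EINVAL and takes/drops the
	 * PTE lock internally via follow_pte().
	 */
	static int example_addr_to_pfn(struct vm_area_struct *vma,
				       unsigned long uaddr,
				       unsigned long *pfn)
	{
		return follow_pfn(vma, uaddr, pfn);
	}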