Lines matching refs: pte (identifier cross-reference listing). Each entry gives the source line number in mm/memory.c, the matching code fragment, the enclosing function, and, where known, whether pte is an argument or a local there.

647 			  pte_t pte, struct page *page)  in print_bad_pte()  argument
684 (long long)pte_val(pte), (long long)pmd_val(*pmd)); in print_bad_pte()
750 pte_t pte) in vm_normal_page() argument
752 unsigned long pfn = pte_pfn(pte); in vm_normal_page()
755 if (likely(!pte_special(pte))) in vm_normal_page()
762 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
787 print_bad_pte(vma, addr, pte, NULL); in vm_normal_page()
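
The vm_normal_page() references above boil down to: take the pfn out of the pte, treat "special" ptes as having no struct page behind them, and report anything inconsistent through print_bad_pte(). A simplified sketch of that logic, assuming an architecture with pte_special(); print_bad_pte() and highest_memmap_pfn are internals of the same file, and the VM_PFNMAP/VM_MIXEDMAP and zero-page handling is heavily condensed, so this is not the real function body:

/* Sketch only: classify a pte as "normal page" vs "special / no page". */
struct page *vm_normal_page_sketch(struct vm_area_struct *vma,
				   unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (likely(!pte_special(pte))) {
		/* ordinary mapping: the pfn should have a struct page */
		if (unlikely(pfn > highest_memmap_pfn)) {
			print_bad_pte(vma, addr, pte, NULL);
			return NULL;
		}
		return pfn_to_page(pfn);
	}

	/* special pte: no struct page; unexpected ones are reported */
	if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) && !is_zero_pfn(pfn))
		print_bad_pte(vma, addr, pte, NULL);
	return NULL;
}
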
811 pte_t pte = *src_pte; in copy_one_pte() local
815 if (unlikely(!pte_present(pte))) { in copy_one_pte()
816 swp_entry_t entry = pte_to_swp_entry(pte); in copy_one_pte()
846 pte = swp_entry_to_pte(entry); in copy_one_pte()
848 pte = pte_swp_mksoft_dirty(pte); in copy_one_pte()
849 set_pte_at(src_mm, addr, src_pte, pte); in copy_one_pte()
861 pte = pte_wrprotect(pte); in copy_one_pte()
869 pte = pte_mkclean(pte); in copy_one_pte()
870 pte = pte_mkold(pte); in copy_one_pte()
872 page = vm_normal_page(vma, addr, pte); in copy_one_pte()
883 set_pte_at(dst_mm, addr, dst_pte, pte); in copy_one_pte()
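
The copy_one_pte() hits show the fork-time copy: swap entries may be rewritten in the source (lines 846-849), while present ptes are write-protected for COW, cleaned and aged, and then installed in the child (lines 861-883). A sketch of just the present-pte path, assuming the usual helpers; is_cow_mapping() and ptep_set_wrprotect() are not in the listing above, and rmap plus rss accounting are omitted:

/*
 * Sketch of the present-pte path in copy_one_pte(): a COW mapping is
 * write-protected in both parent and child, and the child's copy
 * starts out clean and old.
 */
static inline void copy_present_pte_sketch(struct mm_struct *dst_mm,
					   struct mm_struct *src_mm,
					   pte_t *dst_pte, pte_t *src_pte,
					   struct vm_area_struct *vma,
					   unsigned long addr)
{
	pte_t pte = *src_pte;

	if (is_cow_mapping(vma->vm_flags) && pte_write(pte)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}
	if (vma->vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	set_pte_at(dst_mm, addr, dst_pte, pte);
}
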
1082 pte_t *pte; in zap_pte_range() local
1088 pte = start_pte; in zap_pte_range()
1091 pte_t ptent = *pte; in zap_pte_range()
1110 ptent = ptep_get_and_clear_full(mm, addr, pte, in zap_pte_range()
1112 tlb_remove_tlb_entry(tlb, pte, addr); in zap_pte_range()
1156 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); in zap_pte_range()
1157 } while (pte++, addr += PAGE_SIZE, addr != end); in zap_pte_range()
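
The zap_pte_range() references form the classic unmap loop: walk one pte per page under the page-table lock, clear present entries with ptep_get_and_clear_full() and queue them for TLB flushing, and clear non-present (swap/migration) entries without a flush. A condensed sketch of that loop; page freeing, rss accounting, and the swap-entry bookkeeping are elided, and mm, addr, end, start_pte and tlb are locals of the surrounding function:

	pte = start_pte;
	do {
		pte_t ptent = *pte;

		if (pte_none(ptent))
			continue;
		if (pte_present(ptent)) {
			/* clear the entry and remember it for the TLB flush */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			/* ... accounting and page freeing elided ... */
			continue;
		}
		/* non-present (swap/migration) entries are just cleared */
		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, addr != end);
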
1445 pte_t *pte; in insert_page() local
1453 pte = get_locked_pte(mm, addr, &ptl); in insert_page()
1454 if (!pte) in insert_page()
1457 if (!pte_none(*pte)) in insert_page()
1464 set_pte_at(mm, addr, pte, mk_pte(page, prot)); in insert_page()
1467 pte_unmap_unlock(pte, ptl); in insert_page()
1470 pte_unmap_unlock(pte, ptl); in insert_page()
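
insert_page() follows a small, reusable pattern: get_locked_pte() finds (or allocates) the pte and takes its lock, an already-populated slot is refused with -EBUSY, and otherwise mk_pte() builds the entry that set_pte_at() installs. A sketch of that pattern; the page-validity checks at the top of the real function, the rmap call and the mm counter updates are omitted:

static int insert_page_sketch(struct vm_area_struct *vma, unsigned long addr,
			      struct page *page, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *pte;
	int retval = -ENOMEM;

	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		return retval;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;	/* slot already mapped */

	/* accounting and page_add_file_rmap() elided */
	set_pte_at(mm, addr, pte, mk_pte(page, prot));
	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);
	return retval;
}
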
1523 pte_t *pte, entry; in insert_pfn() local
1527 pte = get_locked_pte(mm, addr, &ptl); in insert_pfn()
1528 if (!pte) in insert_pfn()
1531 if (!pte_none(*pte)) in insert_pfn()
1536 set_pte_at(mm, addr, pte, entry); in insert_pfn()
1537 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */ in insert_pfn()
1541 pte_unmap_unlock(pte, ptl); in insert_pfn()
1625 pte_t *pte; in remap_pte_range() local
1628 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); in remap_pte_range()
1629 if (!pte) in remap_pte_range()
1633 BUG_ON(!pte_none(*pte)); in remap_pte_range()
1634 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot))); in remap_pte_range()
1636 } while (pte++, addr += PAGE_SIZE, addr != end); in remap_pte_range()
1638 pte_unmap_unlock(pte - 1, ptl); in remap_pte_range()
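
remap_pte_range() is the leaf of remap_pfn_range(): it installs one special pfn mapping per page, bumping the pfn as it walks. The sketch below stays close to the referenced lines, with the lazy-MMU mode bracketing left out:

static int remap_pte_range_sketch(struct mm_struct *mm, pmd_t *pmd,
				  unsigned long addr, unsigned long end,
				  unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));	/* range must be unmapped */
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}
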
1802 pte_t *pte; in apply_to_pte_range() local
1807 pte = (mm == &init_mm) ? in apply_to_pte_range()
1810 if (!pte) in apply_to_pte_range()
1820 err = fn(pte++, token, addr, data); in apply_to_pte_range()
1828 pte_unmap_unlock(pte-1, ptl); in apply_to_pte_range()
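
apply_to_pte_range() calls a caller-supplied function on every pte slot in the range; kernel mappings (init_mm) use pte_alloc_kernel() and need no pte lock, while user mappings take the locked map variant. A sketch of the loop; deriving the token page with pmd_pgtable() is an assumption about the surrounding code, not something visible in the lines above:

	struct page *token;
	int err = 0;

	pte = (mm == &init_mm) ?
		pte_alloc_kernel(pmd, addr) :
		pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;

	token = pmd_pgtable(*pmd);	/* page-table page handed to fn() */

	do {
		err = fn(pte++, token, addr, data);	/* caller's callback */
		if (err)
			break;
	} while (addr += PAGE_SIZE, addr != end);

	if (mm != &init_mm)
		pte_unmap_unlock(pte - 1, ptl);
	return err;
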
2452 pte_t pte; in do_swap_page() local
2558 pte = mk_pte(page, vma->vm_page_prot); in do_swap_page()
2560 pte = maybe_mkwrite(pte_mkdirty(pte), vma); in do_swap_page()
2567 pte = pte_mksoft_dirty(pte); in do_swap_page()
2568 set_pte_at(mm, address, page_table, pte); in do_swap_page()
2596 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); in do_swap_page()
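
In do_swap_page(), once the page has been read back from swap the pte is rebuilt: mk_pte() makes it present, a write fault additionally marks it dirty and (via maybe_mkwrite()) writable, and soft-dirty state recorded in the swap pte is carried over before set_pte_at() installs it. A sketch of that sequence; reuse_swap_page(), FAULT_FLAG_WRITE and orig_pte come from the surrounding function rather than the lines above, and the locking, rmap and memcg work is omitted:

	pte = mk_pte(page, vma->vm_page_prot);
	if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
		/* page is exclusive to us: map it writable and dirty */
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		flags &= ~FAULT_FLAG_WRITE;
	}
	if (pte_swp_soft_dirty(orig_pte))
		pte = pte_mksoft_dirty(pte);
	set_pte_at(mm, address, page_table, pte);
	/* no invalidate needed: the entry was non-present before */
	update_mmu_cache(vma, address, page_table);
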
2793 struct page *page, pte_t *pte, bool write, bool anon) in do_set_pte() argument
2808 set_pte_at(vma->vm_mm, address, pte, entry); in do_set_pte()
2811 update_mmu_cache(vma, address, pte); in do_set_pte()
2879 pte_t *pte, pgoff_t pgoff, unsigned int flags) in do_fault_around() argument
2891 pte -= off; in do_fault_around()
2904 while (!pte_none(*pte)) { in do_fault_around()
2910 pte++; in do_fault_around()
2914 vmf.pte = pte; in do_fault_around()
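
do_fault_around() speculatively maps a window of file pages around the faulting address: the pte pointer is rewound to the start of the window, slots that are already populated are skipped, and whatever remains is handed to the filesystem's ->map_pages() through a struct vm_fault. A sketch of that scan with the window and pgoff clamping simplified; off, start_addr, max_pgoff, vmf and flags are locals or parameters of the surrounding function:

	pte -= off;			/* back up to the window start */

	while (!pte_none(*pte)) {	/* skip slots that are already mapped */
		if (++pgoff > max_pgoff)
			return;
		start_addr += PAGE_SIZE;
		if (start_addr >= vma->vm_end)
			return;
		pte++;
	}

	vmf.virtual_address = (void __user *)start_addr;
	vmf.pte = pte;
	vmf.pgoff = pgoff;
	vmf.max_pgoff = max_pgoff;
	vmf.flags = flags;
	vma->vm_ops->map_pages(vma, &vmf);
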
2927 pte_t *pte; in do_read_fault() local
2936 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
2937 do_fault_around(vma, address, pte, pgoff, flags); in do_read_fault()
2938 if (!pte_same(*pte, orig_pte)) in do_read_fault()
2940 pte_unmap_unlock(pte, ptl); in do_read_fault()
2947 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_read_fault()
2948 if (unlikely(!pte_same(*pte, orig_pte))) { in do_read_fault()
2949 pte_unmap_unlock(pte, ptl); in do_read_fault()
2954 do_set_pte(vma, address, fault_page, pte, false, false); in do_read_fault()
2957 pte_unmap_unlock(pte, ptl); in do_read_fault()
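
do_read_fault() shows the pattern that do_cow_fault() and do_shared_fault() below repeat: after the page has been brought in (the pte lock was dropped in the meantime), re-take the lock with pte_offset_map_lock(), compare against orig_pte with pte_same() to detect a racing change, and only then install the page with do_set_pte() (write=false here, write=true in the cow/shared variants). A sketch of that re-check; error handling and the fault-around call are omitted, and unlock_page()/page_cache_release() are the page release helpers of this kernel vintage:

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (unlikely(!pte_same(*pte, orig_pte))) {
		/* someone else changed the pte while we slept: back off */
		pte_unmap_unlock(pte, ptl);
		unlock_page(fault_page);
		page_cache_release(fault_page);
		return ret;
	}
	do_set_pte(vma, address, fault_page, pte, false, false);
	unlock_page(fault_page);
	pte_unmap_unlock(pte, ptl);
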
2968 pte_t *pte; in do_cow_fault() local
2991 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_cow_fault()
2992 if (unlikely(!pte_same(*pte, orig_pte))) { in do_cow_fault()
2993 pte_unmap_unlock(pte, ptl); in do_cow_fault()
3006 do_set_pte(vma, address, new_page, pte, true, true); in do_cow_fault()
3009 pte_unmap_unlock(pte, ptl); in do_cow_fault()
3034 pte_t *pte; in do_shared_fault() local
3056 pte = pte_offset_map_lock(mm, pmd, address, &ptl); in do_shared_fault()
3057 if (unlikely(!pte_same(*pte, orig_pte))) { in do_shared_fault()
3058 pte_unmap_unlock(pte, ptl); in do_shared_fault()
3063 do_set_pte(vma, address, fault_page, pte, true, false); in do_shared_fault()
3064 pte_unmap_unlock(pte, ptl); in do_shared_fault()
3132 unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) in do_numa_page() argument
3140 bool was_writable = pte_write(pte); in do_numa_page()
3157 if (unlikely(!pte_same(*ptep, pte))) { in do_numa_page()
3163 pte = pte_modify(pte, vma->vm_page_prot); in do_numa_page()
3164 pte = pte_mkyoung(pte); in do_numa_page()
3166 pte = pte_mkwrite(pte); in do_numa_page()
3167 set_pte_at(mm, addr, ptep, pte); in do_numa_page()
3170 page = vm_normal_page(vma, addr, pte); in do_numa_page()
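
do_numa_page() repairs a NUMA-hinting pte: under the pte lock it confirms the entry is unchanged, rebuilds it from the vma's protections with pte_modify(), marks it young, restores write permission if the entry was writable before the hint was installed, and then looks up the page for the migration decision. A sketch of that fix-up; the migration itself and the node statistics are omitted, pte_lockptr() is not in the lines above, and the return handling is simplified:

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (unlikely(!pte_same(*ptep, pte))) {
		/* raced with another fault: nothing to fix up */
		pte_unmap_unlock(ptep, ptl);
		return 0;
	}

	/* make the pte accessible again, preserving prior writability */
	pte = pte_modify(pte, vma->vm_page_prot);
	pte = pte_mkyoung(pte);
	if (was_writable)
		pte = pte_mkwrite(pte);
	set_pte_at(mm, addr, ptep, pte);
	update_mmu_cache(vma, addr, ptep);

	page = vm_normal_page(vma, addr, pte);	/* candidate for migration */
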
3235 pte_t *pte, pmd_t *pmd, unsigned int flags) in handle_pte_fault() argument
3248 entry = *pte; in handle_pte_fault()
3253 return do_fault(mm, vma, address, pte, pmd, in handle_pte_fault()
3256 return do_anonymous_page(mm, vma, address, pte, pmd, in handle_pte_fault()
3260 pte, pmd, flags, entry); in handle_pte_fault()
3264 return do_numa_page(mm, vma, address, entry, pte, pmd); in handle_pte_fault()
3268 if (unlikely(!pte_same(*pte, entry))) in handle_pte_fault()
3273 pte, pmd, ptl, entry); in handle_pte_fault()
3277 if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { in handle_pte_fault()
3278 update_mmu_cache(vma, address, pte); in handle_pte_fault()
3290 pte_unmap_unlock(pte, ptl); in handle_pte_fault()
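
handle_pte_fault() is the dispatcher the surrounding references belong to: a non-present entry goes to do_fault()/do_anonymous_page() (none) or do_swap_page() (swap), a NUMA-hinting entry goes to do_numa_page(), and a present entry is handled inline, with write faults on read-only ptes diverted to do_wp_page(). A condensed sketch of that flow; pte_protnone() and the exact helper names are assumptions tied to kernels of this vintage, and the spurious-fault TLB flush path is omitted:

	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry)) {
			if (vma->vm_ops && vma->vm_ops->fault)
				return do_fault(mm, vma, address, pte, pmd,
						flags, entry);
			return do_anonymous_page(mm, vma, address, pte, pmd,
						 flags);
		}
		return do_swap_page(mm, vma, address, pte, pmd, flags, entry);
	}

	if (pte_protnone(entry))
		return do_numa_page(mm, vma, address, entry, pte, pmd);

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (unlikely(!pte_same(*pte, entry)))
		goto unlock;		/* entry changed under us */
	if (flags & FAULT_FLAG_WRITE) {
		if (!pte_write(entry))
			return do_wp_page(mm, vma, address, pte, pmd, ptl,
					  entry);
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vma, address, pte, entry,
				  flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, pte);
unlock:
	pte_unmap_unlock(pte, ptl);
	return 0;
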
3306 pte_t *pte; in __handle_mm_fault() local
3385 pte = pte_offset_map(pmd, address); in __handle_mm_fault()
3387 return handle_pte_fault(mm, vma, address, pte, pmd, flags); in __handle_mm_fault()
3573 pte_t *ptep, pte; in follow_phys() local
3581 pte = *ptep; in follow_phys()
3583 if ((flags & FOLL_WRITE) && !pte_write(pte)) in follow_phys()
3586 *prot = pgprot_val(pte_pgprot(pte)); in follow_phys()
3587 *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; in follow_phys()
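
follow_phys() translates a user virtual address in a VM_IO/VM_PFNMAP vma into protection bits and a physical address by reading the pte directly. A sketch of that translation, close to the referenced lines; follow_pte() is assumed to be the file-internal helper that locates and locks the pte:

static int follow_phys_sketch(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned long *prot, resource_size_t *phys)
{
	int ret = -EINVAL;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return ret;		/* only raw pfn mappings qualify */
	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
		return ret;
	pte = *ptep;

	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;		/* caller wants write, pte is read-only */

	*prot = pgprot_val(pte_pgprot(pte));
	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
	ret = 0;
unlock:
	pte_unmap_unlock(ptep, ptl);
	return ret;
}
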