Lines matching refs: vma

702 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)  in maybe_pmd_mkwrite()  argument
704 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
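The two matches above (lines 702 and 704) cover essentially the whole helper in what appears to be mm/huge_memory.c of a ~4.0-era kernel. Reassembled for context (the pmd_mkwrite() call and the return are inferred, not part of the search output), it mirrors the PTE-level maybe_mkwrite(): the huge PMD is only made writable when the VMA itself permits writes.

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
        /* Grant write permission on the PMD only if the VMA allows it. */
        if (likely(vma->vm_flags & VM_WRITE))
                pmd = pmd_mkwrite(pmd);
        return pmd;
}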
718 struct vm_area_struct *vma, in __do_huge_pmd_anonymous_page() argument
753 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
754 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in __do_huge_pmd_anonymous_page()
755 page_add_new_anon_rmap(page, vma, haddr); in __do_huge_pmd_anonymous_page()
757 lru_cache_add_active_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page()
775 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
781 entry = mk_pmd(zero_page, vma->vm_page_prot); in set_huge_zero_page()
789 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_anonymous_page() argument
797 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) in do_huge_pmd_anonymous_page()
799 if (unlikely(anon_vma_prepare(vma))) in do_huge_pmd_anonymous_page()
801 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) in do_huge_pmd_anonymous_page()
819 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd, in do_huge_pmd_anonymous_page()
828 gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); in do_huge_pmd_anonymous_page()
829 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_anonymous_page()
834 if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page, gfp))) { in do_huge_pmd_anonymous_page()
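The do_huge_pmd_anonymous_page() matches (lines 789-834) hinge on the PMD-aligned fault address haddr that the check on line 797 validates. A minimal sketch of that step, assuming the usual haddr = address & HPAGE_PMD_MASK rounding and the VM_FAULT_FALLBACK return this path uses:

        unsigned long haddr = address & HPAGE_PMD_MASK;

        /* The whole HPAGE_PMD_SIZE range must lie inside the VMA;
         * otherwise fall back to ordinary base pages. */
        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
                return VM_FAULT_FALLBACK;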
846 struct vm_area_struct *vma) in copy_huge_pmd() argument
883 set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, in copy_huge_pmd()
896 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */ in copy_huge_pmd()
920 struct vm_area_struct *vma, in huge_pmd_set_accessed() argument
935 if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty)) in huge_pmd_set_accessed()
936 update_mmu_cache_pmd(vma, address, pmd); in huge_pmd_set_accessed()
974 struct vm_area_struct *vma, in do_huge_pmd_wp_page_fallback() argument
999 vma, address, page_to_nid(page)); in do_huge_pmd_wp_page_fallback()
1020 haddr + PAGE_SIZE * i, vma); in do_huge_pmd_wp_page_fallback()
1034 pmdp_clear_flush_notify(vma, haddr, pmd); in do_huge_pmd_wp_page_fallback()
1042 entry = mk_pte(pages[i], vma->vm_page_prot); in do_huge_pmd_wp_page_fallback()
1043 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in do_huge_pmd_wp_page_fallback()
1046 page_add_new_anon_rmap(pages[i], vma, haddr); in do_huge_pmd_wp_page_fallback()
1048 lru_cache_add_active_or_unevictable(pages[i], vma); in do_huge_pmd_wp_page_fallback()
1082 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_wp_page() argument
1095 VM_BUG_ON_VMA(!vma->anon_vma, vma); in do_huge_pmd_wp_page()
1108 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_huge_pmd_wp_page()
1109 if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) in do_huge_pmd_wp_page()
1110 update_mmu_cache_pmd(vma, address, pmd); in do_huge_pmd_wp_page()
1117 if (transparent_hugepage_enabled(vma) && in do_huge_pmd_wp_page()
1119 huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); in do_huge_pmd_wp_page()
1120 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_wp_page()
1126 split_huge_page_pmd(vma, address, pmd); in do_huge_pmd_wp_page()
1129 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, in do_huge_pmd_wp_page()
1147 split_huge_page_pmd(vma, address, pmd); in do_huge_pmd_wp_page()
1158 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1175 entry = mk_huge_pmd(new_page, vma->vm_page_prot); in do_huge_pmd_wp_page()
1176 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_huge_pmd_wp_page()
1177 pmdp_clear_flush_notify(vma, haddr, pmd); in do_huge_pmd_wp_page()
1178 page_add_new_anon_rmap(new_page, vma, haddr); in do_huge_pmd_wp_page()
1180 lru_cache_add_active_or_unevictable(new_page, vma); in do_huge_pmd_wp_page()
1182 update_mmu_cache_pmd(vma, address, pmd); in do_huge_pmd_wp_page()
1203 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, in follow_trans_huge_pmd() argument
1208 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1237 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, in follow_trans_huge_pmd()
1239 update_mmu_cache_pmd(vma, addr, pmd); in follow_trans_huge_pmd()
1241 if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) { in follow_trans_huge_pmd()
1259 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, in do_huge_pmd_numa_page() argument
1274 BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); in do_huge_pmd_numa_page()
1303 if (!(vma->vm_flags & VM_WRITE)) in do_huge_pmd_numa_page()
1311 target_nid = mpol_misplaced(page, vma, haddr); in do_huge_pmd_numa_page()
1355 migrated = migrate_misplaced_transhuge_page(mm, vma, in do_huge_pmd_numa_page()
1367 pmd = pmd_modify(pmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1372 update_mmu_cache_pmd(vma, addr, pmdp); in do_huge_pmd_numa_page()
1387 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pmd() argument
1393 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in zap_huge_pmd()
1427 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, in move_huge_pmd() argument
1436 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1457 ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl); in move_huge_pmd()
1485 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in change_huge_pmd() argument
1488 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1492 if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { in change_huge_pmd()
1529 int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, in __pmd_trans_huge_lock() argument
1532 *ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1536 wait_split_huge_page(vma->anon_vma, pmd); in __pmd_trans_huge_lock()
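zap_huge_pmd(), move_huge_pmd() and change_huge_pmd() above all gate on __pmd_trans_huge_lock(). Its matched lines (1529, 1532, 1536) correspond to the pre-4.5 tri-state helper; a sketch of how it reads in this era (returns 1 with *ptl held for a stable huge PMD, 0 if the PMD is not huge, -1 if a split is in progress):

int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
                          spinlock_t **ptl)
{
        *ptl = pmd_lock(vma->vm_mm, pmd);
        if (likely(pmd_trans_huge(*pmd))) {
                if (unlikely(pmd_trans_splitting(*pmd))) {
                        /* A split is under way: drop the lock and wait. */
                        spin_unlock(*ptl);
                        wait_split_huge_page(vma->anon_vma, pmd);
                        return -1;
                }
                /* Stable huge PMD; caller proceeds with *ptl held. */
                return 1;
        }
        spin_unlock(*ptl);
        return 0;
}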
1603 struct vm_area_struct *vma, in __split_huge_page_splitting() argument
1606 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_splitting()
1625 pmdp_splitting_flush(vma, address, pmd); in __split_huge_page_splitting()
1751 struct vm_area_struct *vma, in __split_huge_page_map() argument
1754 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_map()
1778 entry = mk_pte(page + i, vma->vm_page_prot); in __split_huge_page_map()
1779 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in __split_huge_page_map()
1817 pmdp_invalidate(vma, address, pmd); in __split_huge_page_map()
1840 struct vm_area_struct *vma = avc->vma; in __split_huge_page() local
1841 unsigned long addr = vma_address(page, vma); in __split_huge_page()
1842 BUG_ON(is_vma_temporary_stack(vma)); in __split_huge_page()
1843 mapcount += __split_huge_page_splitting(page, vma, addr); in __split_huge_page()
1865 struct vm_area_struct *vma = avc->vma; in __split_huge_page() local
1866 unsigned long addr = vma_address(page, vma); in __split_huge_page()
1867 BUG_ON(is_vma_temporary_stack(vma)); in __split_huge_page()
1868 mapcount2 += __split_huge_page_map(page, vma, addr); in __split_huge_page()
1922 int hugepage_madvise(struct vm_area_struct *vma, in hugepage_madvise() argument
1933 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
1948 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags))) in hugepage_madvise()
2054 int khugepaged_enter_vma_merge(struct vm_area_struct *vma, in khugepaged_enter_vma_merge() argument
2058 if (!vma->anon_vma) in khugepaged_enter_vma_merge()
2064 if (vma->vm_ops || (vm_flags & VM_NO_THP)) in khugepaged_enter_vma_merge()
2067 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
2068 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
2070 return khugepaged_enter(vma, vm_flags); in khugepaged_enter_vma_merge()
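Lines 2054-2070 trace khugepaged_enter_vma_merge() almost completely; a sketch assuming the intervening lines are the usual early returns. The rounding idiom here also recurs at lines 2471 and 2705: (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK rounds the VMA start up to the next huge-page boundary, vm_end & HPAGE_PMD_MASK rounds the end down, so hstart < hend only if at least one fully aligned huge page fits in the VMA.

int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                               unsigned long vm_flags)
{
        unsigned long hstart, hend;

        if (!vma->anon_vma)
                /* Not yet faulted in; khugepaged is entered on fault. */
                return 0;
        if (vma->vm_ops || (vm_flags & VM_NO_THP))
                /* khugepaged does not handle file or special mappings. */
                return 0;

        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (hstart < hend)
                return khugepaged_enter(vma, vm_flags);
        return 0;
}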
2123 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, in __collapse_huge_page_isolate() argument
2143 page = vm_normal_page(vma, address, pteval); in __collapse_huge_page_isolate()
2197 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
2208 struct vm_area_struct *vma, in __collapse_huge_page_copy() argument
2219 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
2229 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
2234 copy_user_highpage(page, src_page, address, vma); in __collapse_huge_page_copy()
2247 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
2335 struct vm_area_struct *vma, unsigned long address, in khugepaged_alloc_page() argument
2403 struct vm_area_struct *vma, unsigned long address, in khugepaged_alloc_page() argument
2413 static bool hugepage_vma_check(struct vm_area_struct *vma) in hugepage_vma_check() argument
2415 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || in hugepage_vma_check()
2416 (vma->vm_flags & VM_NOHUGEPAGE)) in hugepage_vma_check()
2419 if (!vma->anon_vma || vma->vm_ops) in hugepage_vma_check()
2421 if (is_vma_temporary_stack(vma)) in hugepage_vma_check()
2423 return !(vma->vm_flags & VM_NO_THP); in hugepage_vma_check()
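Lines 2413-2423 show every condition in hugepage_vma_check(); reassembled (only the interleaved return false statements are inferred), it amounts to:

static bool hugepage_vma_check(struct vm_area_struct *vma)
{
        if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
            (vma->vm_flags & VM_NOHUGEPAGE))
                return false;
        /* Only anonymous VMAs without special vm_ops are eligible. */
        if (!vma->anon_vma || vma->vm_ops)
                return false;
        if (is_vma_temporary_stack(vma))
                return false;
        return !(vma->vm_flags & VM_NO_THP);
}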
2429 struct vm_area_struct *vma, in collapse_huge_page() argument
2451 new_page = khugepaged_alloc_page(hpage, gfp, mm, vma, address, node); in collapse_huge_page()
2468 vma = find_vma(mm, address); in collapse_huge_page()
2469 if (!vma) in collapse_huge_page()
2471 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in collapse_huge_page()
2472 hend = vma->vm_end & HPAGE_PMD_MASK; in collapse_huge_page()
2475 if (!hugepage_vma_check(vma)) in collapse_huge_page()
2481 anon_vma_lock_write(vma->anon_vma); in collapse_huge_page()
2496 _pmd = pmdp_clear_flush(vma, address, pmd); in collapse_huge_page()
2501 isolated = __collapse_huge_page_isolate(vma, address, pte); in collapse_huge_page()
2515 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
2523 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
2525 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); in collapse_huge_page()
2530 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); in collapse_huge_page()
2531 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); in collapse_huge_page()
2542 page_add_new_anon_rmap(new_page, vma, address); in collapse_huge_page()
2544 lru_cache_add_active_or_unevictable(new_page, vma); in collapse_huge_page()
2547 update_mmu_cache_pmd(vma, address, pmd); in collapse_huge_page()
2563 struct vm_area_struct *vma, in khugepaged_scan_pmd() argument
2598 page = vm_normal_page(vma, _address, pteval); in khugepaged_scan_pmd()
2622 mmu_notifier_test_young(vma->vm_mm, address)) in khugepaged_scan_pmd()
2632 collapse_huge_page(mm, address, hpage, vma, node); in khugepaged_scan_pmd()
2668 struct vm_area_struct *vma; in khugepaged_scan_mm_slot() local
2687 vma = NULL; in khugepaged_scan_mm_slot()
2689 vma = find_vma(mm, khugepaged_scan.address); in khugepaged_scan_mm_slot()
2692 for (; vma; vma = vma->vm_next) { in khugepaged_scan_mm_slot()
2700 if (!hugepage_vma_check(vma)) { in khugepaged_scan_mm_slot()
2705 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
2706 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
2724 ret = khugepaged_scan_pmd(mm, vma, in khugepaged_scan_mm_slot()
2747 if (khugepaged_test_exit(mm) || !vma) { in khugepaged_scan_mm_slot()
2854 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, in __split_huge_zero_page_pmd() argument
2857 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd()
2862 pmdp_clear_flush_notify(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2870 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); in __split_huge_zero_page_pmd()
2882 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, in __split_huge_page_pmd() argument
2887 struct mm_struct *mm = vma->vm_mm; in __split_huge_page_pmd()
2892 BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE); in __split_huge_page_pmd()
2905 __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_page_pmd()
2932 struct vm_area_struct *vma; in split_huge_page_pmd_mm() local
2934 vma = find_vma(mm, address); in split_huge_page_pmd_mm()
2935 BUG_ON(vma == NULL); in split_huge_page_pmd_mm()
2936 split_huge_page_pmd(vma, address, pmd); in split_huge_page_pmd_mm()
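Lines 2932-2936 are effectively the whole body of split_huge_page_pmd_mm(): it resolves the VMA covering the address and forwards to the VMA-based split_huge_page_pmd(). Reconstructed:

void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
                            pmd_t *pmd)
{
        struct vm_area_struct *vma;

        vma = find_vma(mm, address);
        BUG_ON(vma == NULL);
        split_huge_page_pmd(vma, address, pmd);
}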
2966 void __vma_adjust_trans_huge(struct vm_area_struct *vma, in __vma_adjust_trans_huge() argument
2977 (start & HPAGE_PMD_MASK) >= vma->vm_start && in __vma_adjust_trans_huge()
2978 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) in __vma_adjust_trans_huge()
2979 split_huge_page_address(vma->vm_mm, start); in __vma_adjust_trans_huge()
2987 (end & HPAGE_PMD_MASK) >= vma->vm_start && in __vma_adjust_trans_huge()
2988 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) in __vma_adjust_trans_huge()
2989 split_huge_page_address(vma->vm_mm, end); in __vma_adjust_trans_huge()
2997 struct vm_area_struct *next = vma->vm_next; in __vma_adjust_trans_huge()
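The closing matches (2966-2997) belong to __vma_adjust_trans_huge(), which splits any transparent huge page straddling a VMA boundary that is about to move; the end-boundary case (lines 2987-2989) and the vma->vm_next handling are symmetric. The start-boundary case, reconstructed from lines 2977-2979 (the leading start & ~HPAGE_PMD_MASK test is assumed), looks like:

        /* If 'start' is not huge-page aligned but the huge page containing
         * it still fits inside the VMA, split it so the adjustment cannot
         * cut through a transparent huge page. */
        if (start & ~HPAGE_PMD_MASK &&
            (start & HPAGE_PMD_MASK) >= vma->vm_start &&
            (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
                split_huge_page_address(vma->vm_mm, start);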