Lines matching refs:vma in mm/hugetlb.c — each entry gives the kernel source line, the matching code, and the enclosing function, noting whether vma is an argument or a local variable there.

211 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)  in subpool_vma()  argument
213 return subpool_inode(file_inode(vma->vm_file)); in subpool_vma()
401 struct vm_area_struct *vma, unsigned long address) in vma_hugecache_offset() argument
403 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
404 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
407 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, in linear_hugepage_index() argument
410 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index()
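The arithmetic at lines 403-404 turns a faulting address into a page-cache index measured in huge pages: the offset within the VMA drops the full huge_page_shift(), while vm_pgoff, which is kept in PAGE_SIZE units, only drops huge_page_order() bits. A minimal standalone sketch of that calculation, assuming 4 KiB base pages and 2 MiB huge pages (both constants are illustrative, not taken from the listing):

/*
 * Standalone model of the index math in vma_hugecache_offset()
 * (lines 403-404).  Page sizes are illustrative assumptions:
 * 4 KiB base pages, 2 MiB huge pages.
 */
#include <stdio.h>

#define PAGE_SHIFT   12UL                     /* 4 KiB base page  */
#define HPAGE_SHIFT  21UL                     /* 2 MiB huge page  */
#define HPAGE_ORDER  (HPAGE_SHIFT - PAGE_SHIFT)

/*
 * vm_pgoff is stored in PAGE_SIZE units, so it is scaled down by the
 * huge-page order, while the in-VMA offset drops the full shift.
 */
static unsigned long hugecache_offset(unsigned long vm_start,
                                      unsigned long vm_pgoff,
                                      unsigned long address)
{
        return ((address - vm_start) >> HPAGE_SHIFT) +
               (vm_pgoff >> HPAGE_ORDER);
}

int main(void)
{
        unsigned long start = 0x40000000UL;   /* mapping base      */
        unsigned long pgoff = 1024;           /* file offset 4 MiB */

        /* Third huge page of the mapping: 2 (in-VMA) + 2 (pgoff) = 4. */
        printf("%lu\n", hugecache_offset(start, pgoff,
                                         start + 2 * (1UL << HPAGE_SHIFT)));
        return 0;
}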
417 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) in vma_kernel_pagesize() argument
421 if (!is_vm_hugetlb_page(vma)) in vma_kernel_pagesize()
424 hstate = hstate_vma(vma); in vma_kernel_pagesize()
437 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) in vma_mmu_pagesize() argument
439 return vma_kernel_pagesize(vma); in vma_mmu_pagesize()
471 static unsigned long get_vma_private_data(struct vm_area_struct *vma) in get_vma_private_data() argument
473 return (unsigned long)vma->vm_private_data; in get_vma_private_data()
476 static void set_vma_private_data(struct vm_area_struct *vma, in set_vma_private_data() argument
479 vma->vm_private_data = (void *)value; in set_vma_private_data()
509 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) in vma_resv_map() argument
511 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in vma_resv_map()
512 if (vma->vm_flags & VM_MAYSHARE) { in vma_resv_map()
513 struct address_space *mapping = vma->vm_file->f_mapping; in vma_resv_map()
519 return (struct resv_map *)(get_vma_private_data(vma) & in vma_resv_map()
524 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) in set_vma_resv_map() argument
526 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in set_vma_resv_map()
527 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_map()
529 set_vma_private_data(vma, (get_vma_private_data(vma) & in set_vma_resv_map()
533 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) in set_vma_resv_flags() argument
535 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in set_vma_resv_flags()
536 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_flags()
538 set_vma_private_data(vma, get_vma_private_data(vma) | flags); in set_vma_resv_flags()
541 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) in is_vma_resv_set() argument
543 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in is_vma_resv_set()
545 return (get_vma_private_data(vma) & flag) != 0; in is_vma_resv_set()
549 void reset_vma_resv_huge_pages(struct vm_area_struct *vma) in reset_vma_resv_huge_pages() argument
551 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in reset_vma_resv_huge_pages()
552 if (!(vma->vm_flags & VM_MAYSHARE)) in reset_vma_resv_huge_pages()
553 vma->vm_private_data = (void *)0; in reset_vma_resv_huge_pages()
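Lines 471-553 show that, for private hugetlb mappings, vma->vm_private_data carries both a pointer to the reservation map and a couple of low-order flag bits (HPAGE_RESV_OWNER, HPAGE_RESV_UNMAPPED). The sketch below models that packing in userspace; the flag values and the HPAGE_RESV_MASK name are assumptions inferred from the masking visible at lines 519 and 529, not copied from the kernel headers:

/*
 * Sketch of the pointer+flag packing behind set_vma_resv_map() /
 * set_vma_resv_flags() / is_vma_resv_set().  Flag values and the mask
 * are assumptions modelled on the listed fragments.
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define HPAGE_RESV_OWNER     (1UL << 0)   /* assumed: low bit  */
#define HPAGE_RESV_UNMAPPED  (1UL << 1)   /* assumed: next bit */
#define HPAGE_RESV_MASK      (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

struct resv_map { long dummy; };          /* stand-in for the real struct */

int main(void)
{
        /* The pointer is at least word aligned, so the low bits are free. */
        struct resv_map *map = malloc(sizeof(*map));
        uintptr_t private = 0;

        /* set_vma_resv_map(): keep the flags, install the pointer. */
        private = (private & HPAGE_RESV_MASK) | (uintptr_t)map;

        /* set_vma_resv_flags(): OR in a flag without touching the pointer. */
        private |= HPAGE_RESV_OWNER;

        /* vma_resv_map() / is_vma_resv_set(): split the two halves again. */
        assert((struct resv_map *)(private & ~HPAGE_RESV_MASK) == map);
        assert(private & HPAGE_RESV_OWNER);
        assert(!(private & HPAGE_RESV_UNMAPPED));

        free(map);
        return 0;
}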
557 static int vma_has_reserves(struct vm_area_struct *vma, long chg) in vma_has_reserves() argument
559 if (vma->vm_flags & VM_NORESERVE) { in vma_has_reserves()
569 if (vma->vm_flags & VM_MAYSHARE && chg == 0) in vma_has_reserves()
576 if (vma->vm_flags & VM_MAYSHARE) in vma_has_reserves()
583 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in vma_has_reserves()
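The fragments at lines 559-583 are the branch points of vma_has_reserves(). A condensed model of the decision they imply, assuming the elided lines merely return the corresponding verdict (the flag values below are illustrative stand-ins, and the real function carries more commentary):

/*
 * Condensed model of the branches visible at lines 559-583.
 */
#include <stdio.h>

#define VM_MAYSHARE_F   (1UL << 0)   /* stand-in for VM_MAYSHARE  */
#define VM_NORESERVE_F  (1UL << 1)   /* stand-in for VM_NORESERVE */

static int vma_has_reserves_model(unsigned long vm_flags, int resv_owner,
                                  long chg)
{
        if (vm_flags & VM_NORESERVE_F) {
                /* MAP_NORESERVE: a shared mapping may still consume an
                 * existing reservation when the range is already
                 * reserved (chg == 0); otherwise there are no reserves. */
                return (vm_flags & VM_MAYSHARE_F) && chg == 0;
        }

        /* Shared mappings always draw on the reserves in the inode. */
        if (vm_flags & VM_MAYSHARE_F)
                return 1;

        /* Private mappings: only the task that did the mmap() owns
         * reserves, i.e. is_vma_resv_set(vma, HPAGE_RESV_OWNER). */
        return resv_owner;
}

int main(void)
{
        printf("%d\n", vma_has_reserves_model(VM_MAYSHARE_F, 0, 0));   /* 1 */
        printf("%d\n", vma_has_reserves_model(VM_NORESERVE_F, 1, 3));  /* 0 */
        return 0;
}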
627 struct vm_area_struct *vma, in dequeue_huge_page_vma() argument
644 if (!vma_has_reserves(vma, chg) && in dequeue_huge_page_vma()
654 zonelist = huge_zonelist(vma, address, in dequeue_huge_page_vma()
664 if (!vma_has_reserves(vma, chg)) in dequeue_huge_page_vma()
1435 struct vm_area_struct *vma, unsigned long addr) in vma_needs_reservation() argument
1441 resv = vma_resv_map(vma); in vma_needs_reservation()
1445 idx = vma_hugecache_offset(h, vma, addr); in vma_needs_reservation()
1448 if (vma->vm_flags & VM_MAYSHARE) in vma_needs_reservation()
1454 struct vm_area_struct *vma, unsigned long addr) in vma_commit_reservation() argument
1459 resv = vma_resv_map(vma); in vma_commit_reservation()
1463 idx = vma_hugecache_offset(h, vma, addr); in vma_commit_reservation()
1467 static struct page *alloc_huge_page(struct vm_area_struct *vma, in alloc_huge_page() argument
1470 struct hugepage_subpool *spool = subpool_vma(vma); in alloc_huge_page()
1471 struct hstate *h = hstate_vma(vma); in alloc_huge_page()
1486 chg = vma_needs_reservation(h, vma, addr); in alloc_huge_page()
1498 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg); in alloc_huge_page()
1514 vma_commit_reservation(h, vma, addr); in alloc_huge_page()
1530 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, in alloc_huge_page_noerr() argument
1533 struct page *page = alloc_huge_page(vma, addr, avoid_reserve); in alloc_huge_page_noerr()
2541 static void hugetlb_vm_op_open(struct vm_area_struct *vma) in hugetlb_vm_op_open() argument
2543 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_open()
2553 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_vm_op_open()
2557 static void hugetlb_vm_op_close(struct vm_area_struct *vma) in hugetlb_vm_op_close() argument
2559 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close()
2560 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_close()
2561 struct hugepage_subpool *spool = subpool_vma(vma); in hugetlb_vm_op_close()
2565 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_vm_op_close()
2568 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
2569 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
2591 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) in hugetlb_vm_op_fault() argument
2603 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, in make_huge_pte() argument
2610 vma->vm_page_prot))); in make_huge_pte()
2613 vma->vm_page_prot)); in make_huge_pte()
2617 entry = arch_make_huge_pte(entry, vma, page, writable); in make_huge_pte()
2622 static void set_huge_ptep_writable(struct vm_area_struct *vma, in set_huge_ptep_writable() argument
2628 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) in set_huge_ptep_writable()
2629 update_mmu_cache(vma, address, ptep); in set_huge_ptep_writable()
2659 struct vm_area_struct *vma) in copy_hugetlb_page_range() argument
2665 struct hstate *h = hstate_vma(vma); in copy_hugetlb_page_range()
2671 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; in copy_hugetlb_page_range()
2673 mmun_start = vma->vm_start; in copy_hugetlb_page_range()
2674 mmun_end = vma->vm_end; in copy_hugetlb_page_range()
2678 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
2735 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, in __unmap_hugepage_range() argument
2740 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
2746 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range()
2751 WARN_ON(!is_vm_hugetlb_page(vma)); in __unmap_hugepage_range()
2755 tlb_start_vma(tlb, vma); in __unmap_hugepage_range()
2796 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); in __unmap_hugepage_range()
2831 tlb_end_vma(tlb, vma); in __unmap_hugepage_range()
2835 struct vm_area_struct *vma, unsigned long start, in __unmap_hugepage_range_final() argument
2838 __unmap_hugepage_range(tlb, vma, start, end, ref_page); in __unmap_hugepage_range_final()
2850 vma->vm_flags &= ~VM_MAYSHARE; in __unmap_hugepage_range_final()
2853 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, in unmap_hugepage_range() argument
2859 mm = vma->vm_mm; in unmap_hugepage_range()
2862 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); in unmap_hugepage_range()
2872 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, in unmap_ref_private() argument
2875 struct hstate *h = hstate_vma(vma); in unmap_ref_private()
2885 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + in unmap_ref_private()
2886 vma->vm_pgoff; in unmap_ref_private()
2887 mapping = file_inode(vma->vm_file)->i_mapping; in unmap_ref_private()
2897 if (iter_vma == vma) in unmap_ref_private()
2928 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_cow() argument
2932 struct hstate *h = hstate_vma(vma); in hugetlb_cow()
2944 page_move_anon_rmap(old_page, vma, address); in hugetlb_cow()
2945 set_huge_ptep_writable(vma, address, ptep); in hugetlb_cow()
2958 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && in hugetlb_cow()
2969 new_page = alloc_huge_page(vma, address, outside_reserve); in hugetlb_cow()
2982 unmap_ref_private(mm, vma, old_page, address); in hugetlb_cow()
3005 if (unlikely(anon_vma_prepare(vma))) { in hugetlb_cow()
3010 copy_user_huge_page(new_page, old_page, address, vma, in hugetlb_cow()
3029 huge_ptep_clear_flush(vma, address, ptep); in hugetlb_cow()
3032 make_huge_pte(vma, new_page, 1)); in hugetlb_cow()
3034 hugepage_add_new_anon_rmap(new_page, vma, address); in hugetlb_cow()
3051 struct vm_area_struct *vma, unsigned long address) in hugetlbfs_pagecache_page() argument
3056 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_page()
3057 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_page()
3067 struct vm_area_struct *vma, unsigned long address) in hugetlbfs_pagecache_present() argument
3073 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_present()
3074 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
3082 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_no_page() argument
3086 struct hstate *h = hstate_vma(vma); in hugetlb_no_page()
3099 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { in hugetlb_no_page()
3115 page = alloc_huge_page(vma, address, 0); in hugetlb_no_page()
3128 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_no_page()
3146 if (unlikely(anon_vma_prepare(vma))) { in hugetlb_no_page()
3171 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) in hugetlb_no_page()
3172 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_no_page()
3189 hugepage_add_new_anon_rmap(page, vma, address); in hugetlb_no_page()
3192 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) in hugetlb_no_page()
3193 && (vma->vm_flags & VM_SHARED))); in hugetlb_no_page()
3196 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
3198 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl); in hugetlb_no_page()
3216 struct vm_area_struct *vma, in fault_mutex_hash() argument
3223 if (vma->vm_flags & VM_SHARED) { in fault_mutex_hash()
3241 struct vm_area_struct *vma, in fault_mutex_hash() argument
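Lines 3216-3241 list two definitions of fault_mutex_hash() (presumably the SMP and UP variants), and line 3288 shows hugetlb_fault() using the result to pick a mutex so that concurrent faults on the same huge page are serialized. The sketch below models that pattern with POSIX threads; the hash function, table size, and key values are stand-ins, and only the key choice mirrors the VM_SHARED test at line 3223:

/*
 * Userspace sketch of hashed fault serialization in the spirit of
 * fault_mutex_hash(): shared mappings key on (mapping, index),
 * private mappings would key on (mm, address).
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 256U                /* assumed power of two */

static pthread_mutex_t fault_mutexes[NUM_FAULT_MUTEXES];

/* Mix two words into a table index; stand-in for the kernel's hash. */
static uint32_t fault_mutex_hash(uintptr_t key0, uintptr_t key1)
{
        uint64_t h = (uint64_t)key0 * 0x9e3779b97f4a7c15ULL ^ (uint64_t)key1;

        return (uint32_t)(h >> 32) & (NUM_FAULT_MUTEXES - 1);
}

int main(void)
{
        int mapping;                          /* stands in for the mapping */
        uint32_t hash;

        for (unsigned int i = 0; i < NUM_FAULT_MUTEXES; i++)
                pthread_mutex_init(&fault_mutexes[i], NULL);

        /* Shared mapping: key is (mapping, page-cache index). */
        hash = fault_mutex_hash((uintptr_t)&mapping, 42);

        pthread_mutex_lock(&fault_mutexes[hash]);
        /* ... handle the fault for this huge page ... */
        pthread_mutex_unlock(&fault_mutexes[hash]);

        printf("fault serialized on mutex %u\n", hash);
        return 0;
}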
3249 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_fault() argument
3259 struct hstate *h = hstate_vma(vma); in hugetlb_fault()
3269 migration_entry_wait_huge(vma, mm, ptep); in hugetlb_fault()
3280 mapping = vma->vm_file->f_mapping; in hugetlb_fault()
3281 idx = vma_hugecache_offset(h, vma, address); in hugetlb_fault()
3288 hash = fault_mutex_hash(h, mm, vma, mapping, idx, address); in hugetlb_fault()
3293 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); in hugetlb_fault()
3318 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_fault()
3323 if (!(vma->vm_flags & VM_MAYSHARE)) in hugetlb_fault()
3325 vma, address); in hugetlb_fault()
3350 ret = hugetlb_cow(mm, vma, address, ptep, entry, in hugetlb_fault()
3357 if (huge_ptep_set_access_flags(vma, address, ptep, entry, in hugetlb_fault()
3359 update_mmu_cache(vma, address, ptep); in hugetlb_fault()
3385 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, in follow_hugetlb_page() argument
3393 struct hstate *h = hstate_vma(vma); in follow_hugetlb_page()
3395 while (vaddr < vma->vm_end && remainder) { in follow_hugetlb_page()
3430 !hugetlbfs_pagecache_present(h, vma, vaddr)) { in follow_hugetlb_page()
3454 ret = hugetlb_fault(mm, vma, vaddr, in follow_hugetlb_page()
3472 vmas[i] = vma; in follow_hugetlb_page()
3478 if (vaddr < vma->vm_end && remainder && in follow_hugetlb_page()
3494 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, in hugetlb_change_protection() argument
3497 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
3501 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection()
3505 flush_cache_range(vma, address, end); in hugetlb_change_protection()
3508 i_mmap_lock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
3542 pte = arch_make_huge_pte(pte, vma, NULL, 0); in hugetlb_change_protection()
3554 flush_tlb_range(vma, start, end); in hugetlb_change_protection()
3556 i_mmap_unlock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
3564 struct vm_area_struct *vma, in hugetlb_reserve_pages() argument
3587 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
3599 set_vma_resv_map(vma, resv_map); in hugetlb_reserve_pages()
3600 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); in hugetlb_reserve_pages()
3641 if (!vma || vma->vm_flags & VM_MAYSHARE) in hugetlb_reserve_pages()
3645 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_reserve_pages()
3674 struct vm_area_struct *vma, in page_table_shareable() argument
3683 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED; in page_table_shareable()
3698 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) in vma_shareable() argument
3706 if (vma->vm_flags & VM_MAYSHARE && in vma_shareable()
3707 vma->vm_start <= base && end <= vma->vm_end) in vma_shareable()
3723 struct vm_area_struct *vma = find_vma(mm, addr); in huge_pmd_share() local
3724 struct address_space *mapping = vma->vm_file->f_mapping; in huge_pmd_share()
3725 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + in huge_pmd_share()
3726 vma->vm_pgoff; in huge_pmd_share()
3733 if (!vma_shareable(vma, addr)) in huge_pmd_share()
3738 if (svma == vma) in huge_pmd_share()
3741 saddr = page_table_shareable(svma, vma, addr, idx); in huge_pmd_share()
3755 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte); in huge_pmd_share()
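page_table_shareable(), vma_shareable() and huge_pmd_share() above implement hugetlb's shared page table optimization: tasks mapping the same file range can point their PUD entries at a single PMD page, but only when the PUD_SIZE-aligned window around the faulting address lies entirely inside a VM_MAYSHARE mapping (the test at lines 3706-3707). A minimal model of that alignment check, with PUD_SIZE assumed to be 1 GiB as on x86-64:

/*
 * Model of the vma_shareable() test: a PMD page can only be shared if
 * the whole PUD_SIZE-aligned window around the address is covered by
 * the shared mapping.  PUD_SIZE here is an x86-64 assumption.
 */
#include <stdbool.h>
#include <stdio.h>

#define PUD_SHIFT 30UL                        /* 1 GiB per PUD entry */
#define PUD_SIZE  (1UL << PUD_SHIFT)
#define PUD_MASK  (~(PUD_SIZE - 1))

static bool pmd_shareable(unsigned long vm_start, unsigned long vm_end,
                          bool may_share, unsigned long addr)
{
        unsigned long base = addr & PUD_MASK;
        unsigned long end  = base + PUD_SIZE;

        /* Only shared mappings that fully cover the aligned window. */
        return may_share && vm_start <= base && end <= vm_end;
}

int main(void)
{
        /* 1 GiB-aligned 2 GiB shared mapping: shareable (prints 1). */
        printf("%d\n", pmd_shareable(1UL << 30, 3UL << 30, true,
                                     (1UL << 30) + 4096));
        /* Same range starting 2 MiB into the window: not shareable (0). */
        printf("%d\n", pmd_shareable((1UL << 30) + (2UL << 20), 3UL << 30,
                                     true, (1UL << 30) + (4UL << 20)));
        return 0;
}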