Lines matching refs: vma (identifier cross-reference; the numbers below are source line numbers in the kernel's NUMA memory-policy code, mm/mempolicy.c)
445 struct vm_area_struct *vma; in mpol_rebind_mm() local
448 for (vma = mm->mmap; vma; vma = vma->vm_next) in mpol_rebind_mm()
449 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); in mpol_rebind_mm()
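mpol_rebind_mm() is the simplest vma use here: walk the per-mm VMA linked list (mm->mmap / vma->vm_next, as kernels of this era did before the maple-tree conversion) and rebind each VMA's policy to the new nodemask. A reconstructed sketch of the surrounding function, with the mmap_sem write lock assumed around the loop:

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
    struct vm_area_struct *vma;

    /* assumed: writer lock, so the list is stable and nobody swaps
     * vma->vm_policy underneath us */
    down_write(&mm->mmap_sem);
    for (vma = mm->mmap; vma; vma = vma->vm_next)
        mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
    up_write(&mm->mmap_sem);
}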
488 struct vm_area_struct *vma = walk->vma; in queue_pages_pte_range() local
496 split_huge_page_pmd(vma, addr, pmd); in queue_pages_pte_range()
504 page = vm_normal_page(vma, addr, *pte); in queue_pages_pte_range()
537 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte); in queue_pages_hugetlb()
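The queue_pages_* hits are walk_page_range() callbacks, so walk->vma is the VMA covering the range being scanned: the pte handler first splits any transparent huge page so plain ptes can be walked, then resolves each pte to its struct page with vm_normal_page(); the hugetlb handler instead takes the huge-pte lock sized by the VMA's hstate (line 537). A simplified skeleton of the pte handler, with the node check and migration queueing elided as comments (qp as the walk's private struct queue_pages, and the elided details, are assumptions):

static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                                 unsigned long end, struct mm_walk *walk)
{
    struct vm_area_struct *vma = walk->vma;
    struct page *page;
    spinlock_t *ptl;
    pte_t *pte;

    split_huge_page_pmd(vma, addr, pmd);    /* walk ptes, not a huge pmd */
    if (pmd_trans_unstable(pmd))
        return 0;

    pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
    for (; addr != end; pte++, addr += PAGE_SIZE) {
        if (!pte_present(*pte))
            continue;
        page = vm_normal_page(vma, addr, *pte);
        if (!page)
            continue;    /* zero page or special mapping */
        /* ... compare page_to_nid(page) against qp->nmask and queue
         * the page for migration if it is misplaced ... */
    }
    pte_unmap_unlock(pte - 1, ptl);
    return 0;
}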
567 unsigned long change_prot_numa(struct vm_area_struct *vma, in change_prot_numa() argument
572 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1); in change_prot_numa()
579 static unsigned long change_prot_numa(struct vm_area_struct *vma, in change_prot_numa() argument
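The two change_prot_numa() hits are the CONFIG_NUMA_BALANCING version (line 567) and its stub (line 579). The real one remaps the range to PROT_NONE with the prot_numa flag set, so the next touch takes a NUMA hinting fault, the path that ends up in mpol_misplaced() further down this listing. Its likely shape, with the vmstat accounting assumed:

#ifdef CONFIG_NUMA_BALANCING
/* Returns the number of ptes updated; later accesses fault for hinting. */
unsigned long change_prot_numa(struct vm_area_struct *vma,
                               unsigned long addr, unsigned long end)
{
    int nr_updated;

    nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
    if (nr_updated)
        count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);

    return nr_updated;
}
#else
static unsigned long change_prot_numa(struct vm_area_struct *vma,
                                      unsigned long addr, unsigned long end)
{
    return 0;
}
#endif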
589 struct vm_area_struct *vma = walk->vma; in queue_pages_test_walk() local
591 unsigned long endvma = vma->vm_end; in queue_pages_test_walk()
594 if (vma->vm_flags & VM_PFNMAP) in queue_pages_test_walk()
599 if (vma->vm_start > start) in queue_pages_test_walk()
600 start = vma->vm_start; in queue_pages_test_walk()
603 if (!vma->vm_next && vma->vm_end < end) in queue_pages_test_walk()
605 if (qp->prev && qp->prev->vm_end < vma->vm_start) in queue_pages_test_walk()
609 qp->prev = vma; in queue_pages_test_walk()
611 if (vma->vm_flags & VM_PFNMAP) in queue_pages_test_walk()
616 if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) in queue_pages_test_walk()
617 change_prot_numa(vma, start, endvma); in queue_pages_test_walk()
623 vma_migratable(vma))) in queue_pages_test_walk()
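queue_pages_test_walk() is the walk's per-VMA filter, and these hits cover its three jobs: clamp [start, end) to the VMA, enforce that the range is contiguous across VMAs via qp->prev unless MPOL_MF_DISCONTIG_OK, and then either arm lazy migration (MPOL_MF_LAZY calls change_prot_numa()) or let the walk descend into the page tables. A reconstructed sketch; it keeps both VM_PFNMAP checks the listing shows (lines 594 and 611), and the exact flag combination in the final test is an assumption:

static int queue_pages_test_walk(unsigned long start, unsigned long end,
                                 struct mm_walk *walk)
{
    struct vm_area_struct *vma = walk->vma;
    struct queue_pages *qp = walk->private;
    unsigned long endvma = vma->vm_end;
    unsigned long flags = qp->flags;

    if (vma->vm_flags & VM_PFNMAP)
        return 1;                /* no struct pages to queue */

    if (endvma > end)
        endvma = end;
    if (vma->vm_start > start)
        start = vma->vm_start;

    if (!(flags & MPOL_MF_DISCONTIG_OK)) {
        if (!vma->vm_next && vma->vm_end < end)
            return -EFAULT;      /* range runs past the last VMA */
        if (qp->prev && qp->prev->vm_end < vma->vm_start)
            return -EFAULT;      /* hole between successive VMAs */
    }
    qp->prev = vma;

    if (vma->vm_flags & VM_PFNMAP)
        return 1;                /* duplicate check in this version */

    if (flags & MPOL_MF_LAZY) {
        /* skip inaccessible VMAs, as task_numa_work() does */
        if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
            change_prot_numa(vma, start, endvma);
        return 1;
    }

    if ((flags & MPOL_MF_STRICT) ||
        ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
         vma_migratable(vma)))
        return 0;                /* descend into the page tables */
    return 1;
}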
662 static int vma_replace_policy(struct vm_area_struct *vma, in vma_replace_policy() argument
670 vma->vm_start, vma->vm_end, vma->vm_pgoff, in vma_replace_policy()
671 vma->vm_ops, vma->vm_file, in vma_replace_policy()
672 vma->vm_ops ? vma->vm_ops->set_policy : NULL); in vma_replace_policy()
678 if (vma->vm_ops && vma->vm_ops->set_policy) { in vma_replace_policy()
679 err = vma->vm_ops->set_policy(vma, new); in vma_replace_policy()
684 old = vma->vm_policy; in vma_replace_policy()
685 vma->vm_policy = new; /* protected by mmap_sem */ in vma_replace_policy()
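vma_replace_policy() installs a policy on a single VMA: mappings whose vm_ops implement set_policy (shmem, for one) get it through the callback, everything else by swapping vma->vm_policy under the mmap_sem write lock and dropping the old reference. Likely shape, minus the pr_debug visible at lines 670-672:

static int vma_replace_policy(struct vm_area_struct *vma,
                              struct mempolicy *pol)
{
    int err;
    struct mempolicy *old;
    struct mempolicy *new;

    new = mpol_dup(pol);
    if (IS_ERR(new))
        return PTR_ERR(new);

    if (vma->vm_ops && vma->vm_ops->set_policy) {
        err = vma->vm_ops->set_policy(vma, new);   /* e.g. shmem */
        if (err)
            goto err_out;
    }

    old = vma->vm_policy;
    vma->vm_policy = new;    /* protected by mmap_sem */
    mpol_put(old);
    return 0;

err_out:
    mpol_put(new);
    return err;
}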
700 struct vm_area_struct *vma; in mbind_range() local
706 vma = find_vma(mm, start); in mbind_range()
707 if (!vma || vma->vm_start > start) in mbind_range()
710 prev = vma->vm_prev; in mbind_range()
711 if (start > vma->vm_start) in mbind_range()
712 prev = vma; in mbind_range()
714 for (; vma && vma->vm_start < end; prev = vma, vma = next) { in mbind_range()
715 next = vma->vm_next; in mbind_range()
716 vmstart = max(start, vma->vm_start); in mbind_range()
717 vmend = min(end, vma->vm_end); in mbind_range()
719 if (mpol_equal(vma_policy(vma), new_pol)) in mbind_range()
722 pgoff = vma->vm_pgoff + in mbind_range()
723 ((vmstart - vma->vm_start) >> PAGE_SHIFT); in mbind_range()
724 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags, in mbind_range()
725 vma->anon_vma, vma->vm_file, pgoff, in mbind_range()
728 vma = prev; in mbind_range()
729 next = vma->vm_next; in mbind_range()
730 if (mpol_equal(vma_policy(vma), new_pol)) in mbind_range()
735 if (vma->vm_start != vmstart) { in mbind_range()
736 err = split_vma(vma->vm_mm, vma, vmstart, 1); in mbind_range()
740 if (vma->vm_end != vmend) { in mbind_range()
741 err = split_vma(vma->vm_mm, vma, vmend, 0); in mbind_range()
746 err = vma_replace_policy(vma, new_pol); in mbind_range()
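The mbind_range() hits trace the standard VMA-editing pattern: for each VMA overlapping [start, end), first try vma_merge() with the new policy, otherwise split_vma() at vmstart and/or vmend so the policy boundary coincides with a VMA boundary, then vma_replace_policy(). From userspace this is exactly what mbind(2) on part of a mapping exercises; a minimal demo (hypothetical file name, link with -lnuma, assumes node 0 is online):

/* mbind_demo.c - build: cc mbind_demo.c -lnuma
 * Binding the middle of a mapping forces mbind_range() to split the
 * VMA at both policy boundaries before installing the new policy;
 * compare /proc/<pid>/numa_maps before and after. */
#define _GNU_SOURCE
#include <numaif.h>
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    size_t len = 16 * page;
    unsigned long nodes = 1UL << 0;    /* nodemask: node 0 only */
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    /* bind only the middle 8 pages: one VMA becomes three */
    if (mbind(p + 4 * page, 8 * page, MPOL_BIND, &nodes,
              sizeof(nodes) * 8, 0)) {
        perror("mbind");
        return 1;
    }
    pause();    /* inspect /proc/<pid>/numa_maps while we sleep */
    return 0;
}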
838 struct vm_area_struct *vma = NULL; in do_get_mempolicy() local
862 vma = find_vma_intersection(mm, addr, addr+1); in do_get_mempolicy()
863 if (!vma) { in do_get_mempolicy()
867 if (vma->vm_ops && vma->vm_ops->get_policy) in do_get_mempolicy()
868 pol = vma->vm_ops->get_policy(vma, addr); in do_get_mempolicy()
870 pol = vma->vm_policy; in do_get_mempolicy()
900 if (vma) { in do_get_mempolicy()
902 vma = NULL; in do_get_mempolicy()
918 if (vma) in do_get_mempolicy()
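In do_get_mempolicy() the vma path is only taken for MPOL_F_ADDR queries: find_vma_intersection(mm, addr, addr + 1) locates the VMA containing addr, and vm_ops->get_policy is preferred over vma->vm_policy; the hits around lines 900 and 918 are the paths dropping mmap_sem once a VMA lookup was done. The userspace-visible counterpart, as a sketch (hypothetical file name, link with -lnuma):

/* policy_at.c - build: cc policy_at.c -lnuma
 * MPOL_F_ADDR makes do_get_mempolicy() take the
 * find_vma_intersection() path shown above. */
#include <numaif.h>
#include <stdio.h>

int main(void)
{
    int mode;
    unsigned long nodes = 0;
    int probe;    /* any mapped address works; use a stack variable */

    if (get_mempolicy(&mode, &nodes, sizeof(nodes) * 8, &probe,
                      MPOL_F_ADDR) != 0) {
        perror("get_mempolicy");
        return 1;
    }
    printf("mode=%d nodemask=%#lx\n", mode, nodes);
    return 0;
}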
1099 struct vm_area_struct *vma; in new_page() local
1102 vma = find_vma(current->mm, start); in new_page()
1103 while (vma) { in new_page()
1104 address = page_address_in_vma(page, vma); in new_page()
1107 vma = vma->vm_next; in new_page()
1111 BUG_ON(!vma); in new_page()
1112 return alloc_huge_page_noerr(vma, address, 1); in new_page()
1117 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); in new_page()
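new_page() is the allocation callback do_mbind() hands to migrate_pages(): for a page queued on the wrong node it re-finds a VMA mapping that page (page_address_in_vma() returns -EFAULT on a miss) so the replacement is allocated under that VMA's policy, and hugetlb pages go through alloc_huge_page_noerr(). A reconstruction; the third parameter follows the old new_page_t callback signature and is an assumption:

static struct page *new_page(struct page *page, unsigned long start, int **x)
{
    struct vm_area_struct *vma;
    unsigned long address = -EFAULT;    /* stays -EFAULT if nothing maps the page */

    vma = find_vma(current->mm, start);
    while (vma) {
        address = page_address_in_vma(page, vma);
        if (address != -EFAULT)
            break;
        vma = vma->vm_next;
    }

    if (PageHuge(page)) {
        BUG_ON(!vma);
        return alloc_huge_page_noerr(vma, address, 1);
    }
    /* with vma == NULL, alloc_page_vma() falls back to the task policy */
    return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}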
1544 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, in __get_vma_policy() argument
1549 if (vma) { in __get_vma_policy()
1550 if (vma->vm_ops && vma->vm_ops->get_policy) { in __get_vma_policy()
1551 pol = vma->vm_ops->get_policy(vma, addr); in __get_vma_policy()
1552 } else if (vma->vm_policy) { in __get_vma_policy()
1553 pol = vma->vm_policy; in __get_vma_policy()
1581 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, in get_vma_policy() argument
1584 struct mempolicy *pol = __get_vma_policy(vma, addr); in get_vma_policy()
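__get_vma_policy() and get_vma_policy() define the lookup order used all over this file: vm_ops->get_policy for shared/file mappings, else the VMA's own vm_policy, else (in get_vma_policy()) fall back to the task policy. Likely shape:

struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                                   unsigned long addr)
{
    struct mempolicy *pol = NULL;

    if (vma) {
        if (vma->vm_ops && vma->vm_ops->get_policy)
            pol = vma->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
            pol = vma->vm_policy;
    }
    return pol;
}

static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
    struct mempolicy *pol = __get_vma_policy(vma, addr);

    if (!pol)
        pol = get_task_policy(current);    /* task or system default */
    return pol;
}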
1592 bool vma_policy_mof(struct vm_area_struct *vma) in vma_policy_mof() argument
1596 if (vma->vm_ops && vma->vm_ops->get_policy) { in vma_policy_mof()
1599 pol = vma->vm_ops->get_policy(vma, vma->vm_start); in vma_policy_mof()
1607 pol = vma->vm_policy; in vma_policy_mof()
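vma_policy_mof() ("migrate on fault") tells the NUMA balancer whether the effective policy at this VMA carries MPOL_F_MOF; note the vm_ops->get_policy branch queries at vm_start and must drop the reference the callback took. Likely shape:

bool vma_policy_mof(struct vm_area_struct *vma)
{
    struct mempolicy *pol;

    if (vma->vm_ops && vma->vm_ops->get_policy) {
        bool ret = false;

        pol = vma->vm_ops->get_policy(vma, vma->vm_start);
        if (pol && (pol->flags & MPOL_F_MOF))
            ret = true;
        mpol_cond_put(pol);    /* get_policy took a reference */
        return ret;
    }

    pol = vma->vm_policy;
    if (!pol)
        pol = get_task_policy(current);
    return pol->flags & MPOL_F_MOF;
}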
1738 struct vm_area_struct *vma, unsigned long off) in offset_il_node() argument
1758 struct vm_area_struct *vma, unsigned long addr, int shift) in interleave_nid() argument
1760 if (vma) { in interleave_nid()
1771 off = vma->vm_pgoff >> (shift - PAGE_SHIFT); in interleave_nid()
1772 off += (addr - vma->vm_start) >> shift; in interleave_nid()
1773 return offset_il_node(pol, vma, off); in interleave_nid()
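offset_il_node()/interleave_nid() implement MPOL_INTERLEAVE placement: the position is converted to an offset in units of 1 << shift bytes, built from vm_pgoff (kept in small-page units, hence the shift - PAGE_SHIFT correction) plus the distance into the VMA, then reduced modulo the number of allowed nodes. A userspace model of just the arithmetic (not kernel code; 4 KiB pages and a dense nodemask assumed):

#include <stdio.h>

#define PAGE_SHIFT 12    /* assumed 4 KiB base pages */

/* Model of interleave_nid() + offset_il_node(): the node depends only
 * on the page's offset within the mapping, not its virtual address. */
static int model_interleave_nid(unsigned long vm_pgoff, unsigned long vm_start,
                                unsigned long addr, int shift, int nr_nodes)
{
    unsigned long off;

    off  = vm_pgoff >> (shift - PAGE_SHIFT);    /* units of 1 << shift */
    off += (addr - vm_start) >> shift;
    return (int)(off % nr_nodes);    /* index into the allowed nodes */
}

int main(void)
{
    unsigned long start = 0x700000000000UL;

    for (int i = 0; i < 4; i++)    /* 4 consecutive pages, 2 nodes */
        printf("page %d -> node %d\n", i,
               model_interleave_nid(0, start,
                                    start + ((unsigned long)i << PAGE_SHIFT),
                                    PAGE_SHIFT, 2));
    return 0;
}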
1809 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, in huge_zonelist() argument
1815 *mpol = get_vma_policy(vma, addr); in huge_zonelist()
1819 zl = node_zonelist(interleave_nid(*mpol, vma, addr, in huge_zonelist()
1820 huge_page_shift(hstate_vma(vma))), gfp_flags); in huge_zonelist()
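huge_zonelist() (since removed from the kernel) resolves the policy for a hugetlb fault: under MPOL_INTERLEAVE it interleaves in units of the VMA's huge page size by passing huge_page_shift(hstate_vma(vma)) as the shift; otherwise it returns the policy's zonelist and, for MPOL_BIND, its nodemask. A sketch with this era's field names assumed:

struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
                               gfp_t gfp_flags, struct mempolicy **mpol,
                               nodemask_t **nodemask)
{
    struct zonelist *zl;

    *mpol = get_vma_policy(vma, addr);
    *nodemask = NULL;    /* assume !MPOL_BIND */

    if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
        /* interleave in units of the vma's huge page size */
        zl = node_zonelist(interleave_nid(*mpol, vma, addr,
                           huge_page_shift(hstate_vma(vma))), gfp_flags);
    } else {
        zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
        if ((*mpol)->mode == MPOL_BIND)
            *nodemask = &(*mpol)->v.nodes;    /* field name per this era */
    }
    return zl;
}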
1962 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, in alloc_pages_vma() argument
1972 pol = get_vma_policy(vma, addr); in alloc_pages_vma()
1978 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); in alloc_pages_vma()
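alloc_pages_vma() is the fault-path allocator: it resolves the VMA policy and, for interleave, derives the node from the faulting address at PAGE_SHIFT + order granularity, so higher-order allocations stripe in correspondingly larger chunks. A sketch of that branch inside the function (alloc_page_interleave() as the file-local helper is an assumption; declarations and cpuset retry logic elided):

    pol = get_vma_policy(vma, addr);

    if (pol->mode == MPOL_INTERLEAVE) {
        unsigned nid;

        /* stripe granularity scales with the allocation order */
        nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
        mpol_cond_put(pol);    /* drop ref taken by get_policy, if any */
        page = alloc_page_interleave(gfp, order, nid);
        goto out;
    }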
2250 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) in mpol_misplaced() argument
2261 BUG_ON(!vma); in mpol_misplaced()
2263 pol = get_vma_policy(vma, addr); in mpol_misplaced()
2269 BUG_ON(addr >= vma->vm_end); in mpol_misplaced()
2270 BUG_ON(addr < vma->vm_start); in mpol_misplaced()
2272 pgoff = vma->vm_pgoff; in mpol_misplaced()
2273 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT; in mpol_misplaced()
2274 polnid = offset_il_node(pol, vma, pgoff); in mpol_misplaced()
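mpol_misplaced() runs in the NUMA hinting fault path and decides whether the faulting page should move: it recomputes the policy's target node for this address and compares it against the page's current node, returning either "leave it" or a destination node id. For interleave it rebuilds the stripe exactly as placement did, from vm_pgoff plus the page offset into the VMA; a sketch of that arm (surrounding declarations and the other policy cases elided):

    pol = get_vma_policy(vma, addr);
    if (!(pol->flags & MPOL_F_MOF))
        goto out;    /* policy did not opt into migrate-on-fault */

    switch (pol->mode) {
    case MPOL_INTERLEAVE:
        BUG_ON(addr >= vma->vm_end);
        BUG_ON(addr < vma->vm_start);

        pgoff = vma->vm_pgoff;
        pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
        polnid = offset_il_node(pol, vma, pgoff);
        break;
    /* ... MPOL_PREFERRED / MPOL_BIND cases elided ... */
    }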
2472 struct vm_area_struct *vma, struct mempolicy *npol) in mpol_set_shared_policy() argument
2476 unsigned long sz = vma_pages(vma); in mpol_set_shared_policy()
2479 vma->vm_pgoff, in mpol_set_shared_policy()
2485 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol); in mpol_set_shared_policy()
2489 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new); in mpol_set_shared_policy()
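mpol_set_shared_policy() is the shmem side: shared mappings keep their policies in a per-inode shared_policy tree keyed by page offset rather than on any one VMA, so the VMA only contributes its window [vm_pgoff, vm_pgoff + vma_pages(vma)). Likely shape, minus the pr_debug visible at line 2479:

int mpol_set_shared_policy(struct shared_policy *info,
                           struct vm_area_struct *vma, struct mempolicy *npol)
{
    int err;
    struct sp_node *new = NULL;
    unsigned long sz = vma_pages(vma);

    if (npol) {
        new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
        if (!new)
            return -ENOMEM;
    }
    err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff + sz, new);
    if (err && new)
        sp_free(new);
    return err;
}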