Lines Matching refs:vma (cross-references to the vma identifier in the kernel's mremap implementation, mm/mremap.c; each entry gives the source line number, the matching line, and the enclosing function)
52 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
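alloc_new_pmd() walks the destination address down the page-table hierarchy (pgd, then pud) and allocates a pmd on demand, returning NULL when allocation fails. A minimal userspace analogue of that allocate-on-walk pattern, using a hypothetical two-level table (none of these names are kernel API):

#include <stdio.h>
#include <stdlib.h>

#define TOP_ENTRIES	64
#define LEAF_ENTRIES	64

struct top_table {
	long *leaf[TOP_ENTRIES];	/* second level, allocated lazily */
};

/* Walk to the slot for idx, allocating the leaf table if it is absent;
 * mirrors alloc_new_pmd() returning NULL on allocation failure. */
static long *walk_alloc(struct top_table *t, unsigned int idx)
{
	unsigned int hi = idx / LEAF_ENTRIES, lo = idx % LEAF_ENTRIES;

	if (hi >= TOP_ENTRIES)
		return NULL;
	if (!t->leaf[hi]) {
		t->leaf[hi] = calloc(LEAF_ENTRIES, sizeof(long));
		if (!t->leaf[hi])
			return NULL;
	}
	return &t->leaf[hi][lo];
}

int main(void)
{
	struct top_table t = { { 0 } };
	long *slot = walk_alloc(&t, 130);

	if (slot) {
		*slot = 42;
		printf("slot 130 holds %ld\n", *slot);
	}
	return 0;
}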
88 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, in move_ptes() argument
95 struct mm_struct *mm = vma->vm_mm; in move_ptes()
118 if (vma->vm_file) { in move_ptes()
119 mapping = vma->vm_file->f_mapping; in move_ptes()
122 if (vma->anon_vma) { in move_ptes()
123 anon_vma = vma->anon_vma; in move_ptes()
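The move_ptes() references above show it taking the rmap locks first (the i_mmap lock via vma->vm_file->f_mapping for file mappings, the anon_vma lock for anonymous memory) before remapping each PTE from the old range into the new one; the physical pages themselves never move. From userspace the effect is that data survives an mremap() without being copied. A minimal sketch, assuming Linux and _GNU_SOURCE for mremap():

#define _GNU_SOURCE
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 4096;
	char *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(old != MAP_FAILED);
	strcpy(old, "same pages, new address");

	/* Growing with MREMAP_MAYMOVE may relocate the mapping; the
	 * kernel then moves PTEs rather than copying the data. */
	char *new = mremap(old, len, 2 * len, MREMAP_MAYMOVE);
	assert(new != MAP_FAILED);
	printf("%p -> %p: \"%s\"\n", (void *)old, (void *)new, new);
	return 0;
}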
162 unsigned long move_page_tables(struct vm_area_struct *vma, in move_page_tables() argument
174 flush_cache_range(vma, old_addr, old_end); in move_page_tables()
178 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
187 old_pmd = get_old_pmd(vma->vm_mm, old_addr); in move_page_tables()
190 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); in move_page_tables()
196 VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma, in move_page_tables()
197 vma); in move_page_tables()
200 anon_vma_lock_write(vma->anon_vma); in move_page_tables()
201 err = move_huge_pmd(vma, new_vma, old_addr, in move_page_tables()
205 anon_vma_unlock_write(vma->anon_vma); in move_page_tables()
211 split_huge_page_pmd(vma, old_addr, old_pmd); in move_page_tables()
223 move_ptes(vma, old_pmd, old_addr, old_addr + extent, in move_page_tables()
228 flush_tlb_range(vma, old_end-len, old_addr); in move_page_tables()
230 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); in move_page_tables()
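move_page_tables() is the loop that drives move_ptes(): it walks the old range one pmd-sized extent at a time, bracketed by the cache flush, mmu-notifier, and TLB flush calls visible above (lines 174, 178, 228, 230), and special-cases transparent huge pages via move_huge_pmd() or split_huge_page_pmd(). Each extent is clamped so it never crosses a pmd boundary on either the source or the destination side. A standalone sketch of that clamped-extent walk (the walk matches the kernel's arithmetic; the names and addresses are illustrative):

#include <stdio.h>

#define PMD_SIZE	(1UL << 21)	/* 2 MiB, typical for x86-64 */
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Walk [old_addr, old_addr + len) in extents that stay within one pmd
 * on both sides, as move_page_tables() does before handing each extent
 * to move_ptes(). */
static void walk_extents(unsigned long old_addr, unsigned long new_addr,
			 unsigned long len)
{
	unsigned long old_end = old_addr + len, next, extent;

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		printf("move %#lx -> %#lx (%lu KiB)\n",
		       old_addr, new_addr, extent >> 10);
	}
}

int main(void)
{
	/* Deliberately misaligned source and destination. */
	walk_extents(0x700000100000UL + 0x3000,
		     0x700000900000UL + 0x7000, 5 * PMD_SIZE);
	return 0;
}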
235 static unsigned long move_vma(struct vm_area_struct *vma, in move_vma() argument
239 struct mm_struct *mm = vma->vm_mm; in move_vma()
241 unsigned long vm_flags = vma->vm_flags; in move_vma()
264 err = ksm_madvise(vma, old_addr, old_addr + old_len, in move_vma()
269 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); in move_vma()
270 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, in move_vma()
275 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, in move_vma()
283 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len, in move_vma()
285 vma = new_vma; in move_vma()
289 } else if (vma->vm_file && vma->vm_file->f_op->mremap) { in move_vma()
290 err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma); in move_vma()
292 move_page_tables(new_vma, new_addr, vma, old_addr, in move_vma()
300 vma->vm_flags &= ~VM_ACCOUNT; in move_vma()
301 excess = vma->vm_end - vma->vm_start - old_len; in move_vma()
302 if (old_addr > vma->vm_start && in move_vma()
303 old_addr + old_len < vma->vm_end) in move_vma()
317 vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); in move_vma()
328 vma->vm_flags |= VM_ACCOUNT; in move_vma()
330 vma->vm_next->vm_flags |= VM_ACCOUNT; in move_vma()
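move_vma() orchestrates a whole-vma move: ksm_madvise() first breaks any KSM sharing, copy_vma() clones the descriptor at the new file offset, and move_page_tables() migrates the entries; if that only partially succeeds, lines 283-285 move the entries back before the call fails. On success the old range is unmapped and the VM_ACCOUNT and statistics bookkeeping above (lines 300-330) is fixed up, with a file-backed vma also getting its f_op->mremap() callback (lines 289-290). The unmap of the old range is observable from userspace; a minimal sketch, assuming Linux:

#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(old != MAP_FAILED);

	char *new = mremap(old, len, 2 * 4096, MREMAP_MAYMOVE);
	assert(new != MAP_FAILED);

	if (new != old) {
		/* move_vma() ran: the old range was unmapped for us,
		 * so probing it with msync() now reports ENOMEM. */
		int ret = msync(old, len, MS_ASYNC);
		printf("msync(old) = %d (%s)\n", ret,
		       ret ? strerror(errno) : "still mapped?");
	} else {
		printf("expanded in place; move_vma() was not needed\n");
	}
	return 0;
}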
345 struct vm_area_struct *vma = find_vma(mm, addr); in vma_to_resize() local
347 if (!vma || vma->vm_start > addr) in vma_to_resize()
350 if (is_vm_hugetlb_page(vma)) in vma_to_resize()
354 if (old_len > vma->vm_end - addr) in vma_to_resize()
361 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) in vma_to_resize()
363 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; in vma_to_resize()
364 pgoff += vma->vm_pgoff; in vma_to_resize()
369 if (vma->vm_flags & VM_LOCKED) { in vma_to_resize()
381 if (vma->vm_flags & VM_ACCOUNT) { in vma_to_resize()
388 return vma; in vma_to_resize()
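vma_to_resize() is the validation gate both callers share: the address must fall inside a vma (line 347), hugetlb mappings are rejected, old_len must not run past vm_end, growth of VM_DONTEXPAND/VM_PFNMAP mappings is refused, and the VM_LOCKED and VM_ACCOUNT branches check the locked-memory and commit limits. Failures come back as ERR_PTR values, which mremap_to() and the syscall unwrap with IS_ERR()/PTR_ERR() (lines 427-428 and 527-528). The first check is easy to trigger from userspace; a minimal sketch:

#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(p != MAP_FAILED);
	munmap(p, len);

	/* No vma covers p any more, so vma_to_resize() returns
	 * ERR_PTR(-EFAULT) and the syscall fails. */
	void *q = mremap(p, len, 2 * len, MREMAP_MAYMOVE);
	assert(q == MAP_FAILED);
	printf("mremap on unmapped range: %s\n", strerror(errno));
	return 0;
}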
395 struct vm_area_struct *vma; in mremap_to() local
426 vma = vma_to_resize(addr, old_len, new_len, &charged); in mremap_to()
427 if (IS_ERR(vma)) { in mremap_to()
428 ret = PTR_ERR(vma); in mremap_to()
433 if (vma->vm_flags & VM_MAYSHARE) in mremap_to()
436 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + in mremap_to()
437 ((addr - vma->vm_start) >> PAGE_SHIFT), in mremap_to()
442 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked); in mremap_to()
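mremap_to() backs MREMAP_FIXED: after vma_to_resize() it reserves the caller-chosen destination with get_unmapped_area() (propagating MAP_SHARED when the vma is VM_MAYSHARE, line 433) and then hands off to move_vma() (line 442). A userspace demo that picks the destination itself; note MREMAP_FIXED must be paired with MREMAP_MAYMOVE, and the PROT_NONE reservation is just a portable way to obtain a free address:

#define _GNU_SOURCE
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(src != MAP_FAILED);
	strcpy(src, "fixed destination");

	/* Reserve a destination; MREMAP_FIXED unmaps whatever is
	 * there, like MAP_FIXED does. */
	char *dst = mmap(NULL, len, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(dst != MAP_FAILED);

	char *new = mremap(src, len, len,
			   MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	assert(new == dst);
	printf("moved to %p: \"%s\"\n", (void *)new, new);
	return 0;
}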
452 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) in vma_expandable() argument
454 unsigned long end = vma->vm_end + delta; in vma_expandable()
455 if (end < vma->vm_end) /* overflow */ in vma_expandable()
457 if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */ in vma_expandable()
459 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, in vma_expandable()
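vma_expandable() answers whether the vma can simply grow in place: no address-space overflow (line 455), no intersection with the next vma (line 457), and get_unmapped_area() still agreeing to the larger span (line 459). When it says yes, the syscall takes the fast path below (lines 536-546) and just stretches the vma with vma_adjust(). Both outcomes are observable from userspace; a sketch that plants a blocking neighbour directly behind the first mapping:

#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *a = mmap(NULL, 2 * len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(a != MAP_FAILED);
	/* Split the vma: the PROT_NONE second page becomes a
	 * neighbour that blocks growth (vm_next->vm_start < end). */
	char *b = a + len;
	assert(mprotect(b, len, PROT_NONE) == 0);

	/* Without MREMAP_MAYMOVE the kernel may only grow in place,
	 * so vma_expandable() failing surfaces as ENOMEM. */
	void *r = mremap(a, len, 2 * len, 0);
	printf("grow with neighbour: %s\n",
	       r == MAP_FAILED ? strerror(errno) : "expanded in place");

	/* Remove the neighbour and retry: now it can expand. */
	assert(munmap(b, len) == 0);
	r = mremap(a, len, 2 * len, 0);
	printf("grow after munmap:   %s\n",
	       r == MAP_FAILED ? strerror(errno) : "expanded in place");
	return 0;
}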
477 struct vm_area_struct *vma; in SYSCALL_DEFINE5() local
526 vma = vma_to_resize(addr, old_len, new_len, &charged); in SYSCALL_DEFINE5()
527 if (IS_ERR(vma)) { in SYSCALL_DEFINE5()
528 ret = PTR_ERR(vma); in SYSCALL_DEFINE5()
534 if (old_len == vma->vm_end - addr) { in SYSCALL_DEFINE5()
536 if (vma_expandable(vma, new_len - old_len)) { in SYSCALL_DEFINE5()
539 if (vma_adjust(vma, vma->vm_start, addr + new_len, in SYSCALL_DEFINE5()
540 vma->vm_pgoff, NULL)) { in SYSCALL_DEFINE5()
545 vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages); in SYSCALL_DEFINE5()
546 if (vma->vm_flags & VM_LOCKED) { in SYSCALL_DEFINE5()
563 if (vma->vm_flags & VM_MAYSHARE) in SYSCALL_DEFINE5()
566 new_addr = get_unmapped_area(vma->vm_file, 0, new_len, in SYSCALL_DEFINE5()
567 vma->vm_pgoff + in SYSCALL_DEFINE5()
568 ((addr - vma->vm_start) >> PAGE_SHIFT), in SYSCALL_DEFINE5()
575 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked); in SYSCALL_DEFINE5()
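The SYSCALL_DEFINE5 references show the overall order of battle for sys_mremap: shrinking is handled early with do_munmap() (which takes no vma, hence its absence from this listing), an exact-length expandable vma grows in place via vma_adjust() with mlock and statistics fixups (lines 534-546), and everything else goes through get_unmapped_area() plus move_vma() (lines 563-575). Shrinking is the one case not demonstrated above; a final sketch:

#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(p != MAP_FAILED);

	/* Shrinking never moves: the kernel just unmaps the tail and
	 * returns the same address. */
	char *q = mremap(p, len, 4096, 0);
	assert(q == p);

	/* The freed tail is no longer mapped. */
	int ret = msync(p + 4096, len - 4096, MS_ASYNC);
	printf("tail after shrink: %s\n",
	       ret ? strerror(errno) : "still mapped?");
	return 0;
}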