new_vma           403 arch/arm/kernel/process.c 		struct vm_area_struct *new_vma)
new_vma           405 arch/arm/kernel/process.c 	current->mm->context.sigpage = new_vma->vm_start;
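
The two hits above are the body of the ARM sigpage ->mremap hook: when userspace moves the signal-return page, the callback only needs to record the new base address. A minimal reconstruction, assuming the surrounding function is the sigpage's vm_special_mapping callback (the function name and return value are not shown in the hits):

	static int sigpage_mremap(const struct vm_special_mapping *sm,
				  struct vm_area_struct *new_vma)
	{
		/* Remember where the sigpage now lives so signal delivery
		 * keeps pointing the return trampoline at the right place. */
		current->mm->context.sigpage = new_vma->vm_start;
		return 0;
	}
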
new_vma            49 arch/arm/kernel/vdso.c 		struct vm_area_struct *new_vma)
new_vma            51 arch/arm/kernel/vdso.c 	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
new_vma            60 arch/arm/kernel/vdso.c 	current->mm->context.vdso = new_vma->vm_start;
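
Lines 49-60 of arch/arm/kernel/vdso.c belong to the vDSO ->mremap hook: it compares the size of the relocated VMA with the expected vDSO code size and, only if they match, records the new base. A hedged sketch of the whole function; the exact size computation (total vDSO pages minus the VVAR page) is an assumption taken from the surrounding kernel code, not from the hits themselves:

	static int vdso_mremap(const struct vm_special_mapping *sm,
			       struct vm_area_struct *new_vma)
	{
		unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
		unsigned long vdso_size;

		/* Expected size of the code mapping, without the VVAR page. */
		vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;

		if (vdso_size != new_size)
			return -EINVAL;		/* refuse a partial or resized move */

		current->mm->context.vdso = new_vma->vm_start;
		return 0;
	}
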
new_vma            86 arch/arm64/kernel/vdso.c 			struct vm_area_struct *new_vma)
new_vma            88 arch/arm64/kernel/vdso.c 	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
new_vma            95 arch/arm64/kernel/vdso.c 	current->mm->context.vdso = (void *)new_vma->vm_start;
new_vma           185 arch/arm64/kernel/vdso.c 		struct vm_area_struct *new_vma)
new_vma           187 arch/arm64/kernel/vdso.c 	return __vdso_remap(ARM64_VDSO32, sm, new_vma);
new_vma           372 arch/arm64/kernel/vdso.c 		struct vm_area_struct *new_vma)
new_vma           374 arch/arm64/kernel/vdso.c 	return __vdso_remap(ARM64_VDSO, sm, new_vma);
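
arm64 funnels both the native and the compat (AArch32) vDSO through one helper: __vdso_remap() validates the new size against the image selected by its first argument and then updates mm->context.vdso, while the two small wrappers at lines 185 and 372 just pass ARM64_VDSO32 or ARM64_VDSO. A sketch assuming a per-image lookup table; the vdso_lookup name and its fields are assumptions based on the kernel source of this era:

	static int __vdso_remap(enum arch_vdso_type arch_index,
				const struct vm_special_mapping *sm,
				struct vm_area_struct *new_vma)
	{
		unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
		unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
					  vdso_lookup[arch_index].vdso_code_start;

		if (vdso_size != new_size)
			return -EINVAL;

		current->mm->context.vdso = (void *)new_vma->vm_start;
		return 0;
	}

	static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
				       struct vm_area_struct *new_vma)
	{
		return __vdso_remap(ARM64_VDSO32, sm, new_vma);	/* compat vDSO */
	}

	static int vdso_mremap(const struct vm_special_mapping *sm,
			       struct vm_area_struct *new_vma)
	{
		return __vdso_remap(ARM64_VDSO, sm, new_vma);	/* native vDSO */
	}
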
new_vma            56 arch/x86/entry/vdso/vma.c 		struct vm_area_struct *new_vma)
new_vma            67 arch/x86/entry/vdso/vma.c 			regs->ip = new_vma->vm_start + vdso_land;
new_vma            73 arch/x86/entry/vdso/vma.c 		struct vm_area_struct *new_vma)
new_vma            75 arch/x86/entry/vdso/vma.c 	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
new_vma            81 arch/x86/entry/vdso/vma.c 	vdso_fix_landing(image, new_vma);
new_vma            82 arch/x86/entry/vdso/vma.c 	current->mm->context.vdso = (void __user *)new_vma->vm_start;
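
On x86 the ->mremap hook does one extra job: if the task is currently sitting on the 32-bit int80 landing pad inside the old vDSO (the fast-syscall return path), vdso_fix_landing() rewrites regs->ip so execution resumes in the moved copy, and only then is context.vdso updated. A condensed sketch of the pair; the ia32 guard and the landing-pad symbol name are taken from the surrounding kernel code and should be treated as assumptions here:

	static void vdso_fix_landing(const struct vdso_image *image,
				     struct vm_area_struct *new_vma)
	{
	#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
		if (in_ia32_syscall() && image == &vdso_image_32) {
			struct pt_regs *regs = current_pt_regs();
			unsigned long vdso_land = image->sym_int80_landing_pad;
			unsigned long old_land_addr = vdso_land +
				(unsigned long)current->mm->context.vdso;

			/* The task was about to return through the old vDSO;
			 * redirect it into the relocated copy. */
			if (regs->ip == old_land_addr)
				regs->ip = new_vma->vm_start + vdso_land;
		}
	#endif
	}

	static int vdso_mremap(const struct vm_special_mapping *sm,
			       struct vm_area_struct *new_vma)
	{
		unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
		const struct vdso_image *image = current->mm->context.vdso_image;

		if (image->size != new_size)
			return -EINVAL;

		vdso_fix_landing(image, new_vma);
		current->mm->context.vdso = (void __user *)new_vma->vm_start;
		return 0;
	}
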
new_vma          1625 include/linux/mm.h 		unsigned long old_addr, struct vm_area_struct *new_vma,
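
The include/linux/mm.h hit is the middle of move_page_tables()'s declaration; the full prototype, reconstructed here (parameter names and order are an assumption for this kernel version), makes the direction explicit: PTEs flow from old_addr in vma to new_addr in new_vma:

	extern unsigned long move_page_tables(struct vm_area_struct *vma,
			unsigned long old_addr, struct vm_area_struct *new_vma,
			unsigned long new_addr, unsigned long len,
			bool need_rmap_locks);
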
new_vma           737 include/linux/mm_types.h 		     struct vm_area_struct *new_vma);
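
The include/linux/mm_types.h hit is the tail of the ->mremap member of struct vm_special_mapping, which is the hook all the arch callbacks above implement. A sketch of the struct as it looks in kernels of this vintage (the exact field set is an assumption):

	struct vm_special_mapping {
		const char *name;	/* e.g. "[vdso]", shows up in /proc/pid/maps */
		struct page **pages;	/* backing pages if no .fault handler */

		vm_fault_t (*fault)(const struct vm_special_mapping *sm,
				    struct vm_area_struct *vma,
				    struct vm_fault *vmf);

		/* Called from special_mapping_mremap() after mremap(2) has
		 * moved the mapping, with the already-relocated VMA. */
		int (*mremap)(const struct vm_special_mapping *sm,
			      struct vm_area_struct *new_vma);
	};
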
new_vma          3215 mm/mmap.c      	struct vm_area_struct *new_vma, *prev;
new_vma          3230 mm/mmap.c      	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
new_vma          3233 mm/mmap.c      	if (new_vma) {
new_vma          3237 mm/mmap.c      		if (unlikely(vma_start >= new_vma->vm_start &&
new_vma          3238 mm/mmap.c      			     vma_start < new_vma->vm_end)) {
new_vma          3251 mm/mmap.c      			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
new_vma          3252 mm/mmap.c      			*vmap = vma = new_vma;
new_vma          3254 mm/mmap.c      		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
new_vma          3256 mm/mmap.c      		new_vma = vm_area_dup(vma);
new_vma          3257 mm/mmap.c      		if (!new_vma)
new_vma          3259 mm/mmap.c      		new_vma->vm_start = addr;
new_vma          3260 mm/mmap.c      		new_vma->vm_end = addr + len;
new_vma          3261 mm/mmap.c      		new_vma->vm_pgoff = pgoff;
new_vma          3262 mm/mmap.c      		if (vma_dup_policy(vma, new_vma))
new_vma          3264 mm/mmap.c      		if (anon_vma_clone(new_vma, vma))
new_vma          3266 mm/mmap.c      		if (new_vma->vm_file)
new_vma          3267 mm/mmap.c      			get_file(new_vma->vm_file);
new_vma          3268 mm/mmap.c      		if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma          3269 mm/mmap.c      			new_vma->vm_ops->open(new_vma);
new_vma          3270 mm/mmap.c      		vma_link(mm, new_vma, prev, rb_link, rb_parent);
new_vma          3273 mm/mmap.c      	return new_vma;
new_vma          3276 mm/mmap.c      	mpol_put(vma_policy(new_vma));
new_vma          3278 mm/mmap.c      	vm_area_free(new_vma);
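
Lines 3215-3278 are copy_vma(), which the mremap move path uses to obtain the destination VMA: it first tries vma_merge(); if that succeeds, it may be the pathological "VMA moved on top of itself" case (the vma_start range check and VM_BUG_ON above), otherwise it duplicates the old VMA with vm_area_dup() and links it in. The hits omit the goto lines that connect the allocation steps to the cleanup at lines 3276-3278; a sketch of that slow path with the error labels filled in (label names are assumptions based on the kernel source):

	} else {
		new_vma = vm_area_dup(vma);
		if (!new_vma)
			goto out;
		new_vma->vm_start = addr;
		new_vma->vm_end = addr + len;
		new_vma->vm_pgoff = pgoff;
		if (vma_dup_policy(vma, new_vma))
			goto out_free_vma;		/* undo vm_area_dup() */
		if (anon_vma_clone(new_vma, vma))
			goto out_free_mempol;		/* undo policy copy too */
		if (new_vma->vm_file)
			get_file(new_vma->vm_file);
		if (new_vma->vm_ops && new_vma->vm_ops->open)
			new_vma->vm_ops->open(new_vma);
		vma_link(mm, new_vma, prev, rb_link, rb_parent);
		*need_rmap_locks = false;
	}
	return new_vma;

	out_free_mempol:
		mpol_put(vma_policy(new_vma));
	out_free_vma:
		vm_area_free(new_vma);
	out:
		return NULL;
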
new_vma          3338 mm/mmap.c      static int special_mapping_mremap(struct vm_area_struct *new_vma)
new_vma          3340 mm/mmap.c      	struct vm_special_mapping *sm = new_vma->vm_private_data;
new_vma          3342 mm/mmap.c      	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
new_vma          3346 mm/mmap.c      		return sm->mremap(sm, new_vma);
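
special_mapping_mremap() is the generic vm_operations_struct ->mremap handler for special mappings: it recovers the vm_special_mapping from vm_private_data, sanity-checks that the VMA belongs to current->mm, and forwards to the arch callback if one is set. For context, a hedged sketch of how arch code typically wires this up at setup time; names such as sigpage_spec, sigpage_pages and the flag choice are illustrative assumptions, while _install_special_mapping() is the real helper in mm/mmap.c:

	static const struct vm_special_mapping sigpage_spec = {
		.name	= "[sigpage]",
		.pages	= sigpage_pages,	/* assumed backing page array */
		.mremap	= sigpage_mremap,	/* hook shown earlier */
	};

	/* Caller holds the mm's mmap lock, as in arch_setup_additional_pages(). */
	static int install_sigpage(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma;

		vma = _install_special_mapping(mm, addr, PAGE_SIZE,
					       VM_READ | VM_EXEC |
					       VM_MAYREAD | VM_MAYEXEC,
					       &sigpage_spec);
		return PTR_ERR_OR_ZERO(vma);
	}
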
new_vma           117 mm/mremap.c    		struct vm_area_struct *new_vma, pmd_t *new_pmd,
new_vma           178 mm/mremap.c    		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
new_vma           241 mm/mremap.c    		unsigned long old_addr, struct vm_area_struct *new_vma,
new_vma           304 mm/mremap.c    		if (pte_alloc(new_vma->vm_mm, new_pmd))
new_vma           309 mm/mremap.c    		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
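
In mm/mremap.c, move_ptes() (line 117) copies individual PTEs from the old range into new_vma under the page-table locks, calling move_pte() to fix up swap entries for the new address, while move_page_tables() (line 241) walks the range in PMD-sized extents, allocating page tables on the destination side before each move_ptes() call. A simplified sketch of that outer loop; the real function also handles huge PMDs, rmap locking and TLB flushing, and the helper names (get_old_pmd, alloc_new_pmd) come from the same file:

	static unsigned long move_page_tables(struct vm_area_struct *vma,
			unsigned long old_addr, struct vm_area_struct *new_vma,
			unsigned long new_addr, unsigned long len,
			bool need_rmap_locks)
	{
		unsigned long extent, next, old_end = old_addr + len;
		pmd_t *old_pmd, *new_pmd;

		for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
			next = (old_addr + PMD_SIZE) & PMD_MASK;
			extent = min(next - old_addr, old_end - old_addr);

			old_pmd = get_old_pmd(vma->vm_mm, old_addr);
			if (!old_pmd)
				continue;	/* nothing mapped in this extent */
			new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
			if (!new_pmd)
				break;
			if (pte_alloc(new_vma->vm_mm, new_pmd))	/* hit at line 304 */
				break;

			move_ptes(vma, old_pmd, old_addr, old_addr + extent,
				  new_vma, new_pmd, new_addr, need_rmap_locks);
		}

		return len + old_addr - old_end;	/* bytes actually moved */
	}
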
new_vma           325 mm/mremap.c    	struct vm_area_struct *new_vma;
new_vma           355 mm/mremap.c    	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
new_vma           357 mm/mremap.c    	if (!new_vma)
new_vma           360 mm/mremap.c    	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
new_vma           365 mm/mremap.c    		err = vma->vm_ops->mremap(new_vma);
new_vma           374 mm/mremap.c    		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
new_vma           376 mm/mremap.c    		vma = new_vma;
new_vma           381 mm/mremap.c    		mremap_userfaultfd_prep(new_vma, uf);
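
The final group is move_vma() in mm/mremap.c: it calls copy_vma() to create or merge the destination VMA, moves the page tables into it, lets the mapping's ->mremap hook observe the new location (line 365), and rolls the page tables back if the move failed part-way (line 374) before handing the new VMA to userfaultfd. What drives this whole path is an ordinary moving mremap(2) from userspace; a small demo, assuming a Linux system where MREMAP_MAYMOVE and MREMAP_FIXED are available:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2 * 1024 * 1024;

		/* Source mapping: fault it in so there are PTEs to move. */
		char *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		/* Reserve a destination address to hand to MREMAP_FIXED. */
		void *target = mmap(NULL, len, PROT_NONE,
				    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (old == MAP_FAILED || target == MAP_FAILED)
			return 1;
		memset(old, 0x5a, len);

		/* Forces the kernel down move_vma(): the VMA and its page
		 * tables are relocated to 'target', contents preserved. */
		char *new = mremap(old, len, len,
				   MREMAP_MAYMOVE | MREMAP_FIXED, target);
		if (new == MAP_FAILED)
			return 1;

		printf("moved %p -> %p, first byte 0x%02x\n",
		       (void *)old, (void *)new, (unsigned char)new[0]);
		return munmap(new, len);
	}
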