/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

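/*
 * Walk the page tables of @mm and return the pmd entry mapping @addr,
 * or NULL if no pmd has been instantiated for that address yet.
 */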
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

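/*
 * Return the pmd that will map @addr in the destination area,
 * allocating the intermediate page table levels as needed.  Returns
 * NULL if an allocation fails.
 */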
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set the soft dirty bit so userspace can notice
	 * that the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

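/*
 * Move the ptes covering [old_addr, old_end) from under old_pmd over
 * to new_addr under new_pmd.  The copy runs with both pte table locks
 * held, and with the rmap locks held as well when the caller requires
 * it (see the comment below).
 */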
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct address_space *mapping = NULL;
	struct anon_vma *anon_vma = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks) {
		if (vma->vm_file) {
			mapping = vma->vm_file->f_mapping;
			i_mmap_lock_write(mapping);
		}
		if (vma->anon_vma) {
			anon_vma = vma->anon_vma;
			anon_vma_lock_write(anon_vma);
		}
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (anon_vma)
		anon_vma_unlock_write(anon_vma);
	if (mapping)
		i_mmap_unlock_write(mapping);
}

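/*
 * Cap how much a single move_ptes() call moves, bounding how long the
 * pte locks are held before the loop in move_page_tables() drops them
 * and allows a reschedule.
 */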
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

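/*
 * Move the page table entries for [old_addr, old_addr + len) over to
 * new_addr, one pmd at a time where possible (including huge pmds).
 * Returns the number of bytes actually moved; this is less than @len
 * only if allocating a page table for the new range failed.
 */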
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end   = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			if (extent == HPAGE_PMD_SIZE) {
				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
					      vma);
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					anon_vma_lock_write(vma->anon_vma);
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
				if (need_rmap_locks)
					anon_vma_unlock_write(vma->anon_vma);
			}
			if (err > 0) {
				need_flush = true;
				continue;
			} else if (!err) {
				split_huge_page_pmd(vma, old_addr, old_pmd);
			}
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end-len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;	/* how much done */
}

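/*
 * Do the actual move: set up new_vma, move the page tables across,
 * then unmap the old range.  If moving the page tables fails part
 * way, the already-moved entries are moved back and the new range is
 * unmapped instead.  Returns the new address on success or a negative
 * error code.
 */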
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr, bool *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap(),
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from the new area to the old,
		 * which will succeed since the page tables are still there,
		 * and then proceed to unmap the new area instead of the old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	} else if (vma->vm_file && vma->vm_file->f_op->mremap) {
		err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
		if (err < 0) {
			move_page_tables(new_vma, new_addr, vma, old_addr,
					 moved_len, true);
			return err;
		}
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

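/*
 * Find and validate the vma backing an old_len -> new_len resize of
 * the mapping at @addr: the range must lie within a single vma,
 * hugetlb mappings are not supported, and a grow must pass the mlock,
 * address space and commit limits.  For VM_ACCOUNT vmas, *p is set to
 * the number of newly charged pages so the caller can unaccount them
 * on failure.
 */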
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			return ERR_PTR(-EFAULT);
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			return ERR_PTR(-EINVAL);
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}

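/*
 * Handle MREMAP_FIXED: check that [new_addr, new_addr + new_len) does
 * not overlap the source, unmap whatever currently occupies it, trim
 * a shrinking source down to new_len, and then move the vma into
 * place with move_vma().
 */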
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

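/*
 * Can @vma grow in place by @delta bytes?  Nothing may be mapped in
 * the way, and the architecture must accept a fixed mapping of the
 * expanded size at vma->vm_start.
 */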
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
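/*
 * For reference, a minimal userspace sketch of the grow-or-move
 * pattern this syscall serves, via the glibc wrappers (illustrative
 * only, not kernel code):
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 */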
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	if (addr & ~PAGE_MASK)
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case for the DOS-emu
	 * "duplicate shm area" trick.  But a zero new-len is
	 * nonsensical.
	 */
	if (!new_len)
		return ret;

	down_write(&current->mm->mmap_sem);

	if (flags & MREMAP_FIXED) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages.  do_munmap() does all the
	 * needed commit accounting.
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len reaches exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	return ret;
}