/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

/* Process the list of allocations whose freeing was deferred by vfree() */
static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *addr = llnode;	/* don't shadow the outer 'p' */

		llnode = llist_next(llnode);
		__vunmap(addr, 1);
	}
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_clear_huge(pmd))
			continue;
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_clear_huge(pud))
			continue;
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * I.e. the pte at addr+N*PAGE_SIZE shall point to the pfn corresponding to
 * pages[N].
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}
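
/*
 * Illustrative sketch only (not called anywhere in this file): it spells
 * out the contract documented above vmap_page_range_noflush().  'pages'
 * is a hypothetical caller-supplied array of two struct page pointers.
 *
 *	if (vmap_page_range(addr, addr + 2 * PAGE_SIZE,
 *			    PAGE_KERNEL, pages) == 2) {
 *		// now the following hold:
 *		//   vmalloc_to_page((void *)addr) == pages[0]
 *		//   vmalloc_to_page((void *)(addr + PAGE_SIZE)) == pages[1]
 *	}
 */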

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(!is_power_of_2(align));

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_entry(first->list.next,
				struct vmap_area, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}
	if (printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
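
/*
 * A worked example of the log scale above (illustrative numbers only):
 * with 4 CPUs online, fls(4) == 3, so up to 3 * 32MB = 96MB of lazily
 * freed virtual address space (24576 pages with 4K pages) may accumulate
 * before a purge; 64 CPUs raise that only to 7 * 32MB = 224MB.
 */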

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
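
/*
 * Worked example of the sizing above (illustrative only): on 64-bit with
 * 4K pages and NR_CPUS == 64, VMALLOC_PAGES is 128G / 4K = 32M pages, so
 * VMALLOC_PAGES / 64 / 16 = 32768, which the VMAP_MIN() clamp reduces to
 * VMAP_BBMAP_BITS_MAX = 1024, giving VMAP_BLOCK_SIZE = 4MB per block.
 */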

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	unsigned long dirty_min, dirty_max; /*< dirty range */
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
{
	unsigned long addr;

	addr = va_start + (pages_off << PAGE_SHIFT);
	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
	return (void *)addr;
}

/**
 * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in it
 * @order:    how many 2^order pages should be occupied in the new block
 * @gfp_mask: flags for the page level allocator
 *
 * The number of occupied pages cannot exceed VMAP_BBMAP_BITS.
 *
 * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
 */
static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;
	void *vaddr;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	vaddr = vmap_block_vaddr(va->va_start, 0);
	spin_lock_init(&vb->lock);
	vb->va = va;
	/* At least something should be left free */
	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
	vb->free = VMAP_BBMAP_BITS - (1UL << order);
	vb->dirty = 0;
	vb->dirty_min = VMAP_BBMAP_BITS;
	vb->dirty_max = 0;
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_tail_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vaddr;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			vb->dirty_min = 0;
			vb->dirty_max = VMAP_BBMAP_BITS;
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	void *vaddr = NULL;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what the caller wants, since
		 * get_order(0) returns a nonsensical result. Just warn
		 * and bail out early.
		 */
		return NULL;
	}
	order = get_order(size);

	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		unsigned long pages_off;

		spin_lock(&vb->lock);
		if (vb->free < (1UL << order)) {
			spin_unlock(&vb->lock);
			continue;
		}

		pages_off = VMAP_BBMAP_BITS - vb->free;
		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}

		spin_unlock(&vb->lock);
		break;
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	/* Allocate new block if nothing was found */
	if (!vaddr)
		vaddr = new_vmap_block(order, gfp_mask);

	return vaddr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
	offset >>= PAGE_SHIFT;

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);

	/* Expand dirty range */
	vb->dirty_min = min(vb->dirty_min, offset);
	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			spin_lock(&vb->lock);
			if (vb->dirty) {
				unsigned long va_start = vb->va->va_start;
				unsigned long s, e;

				s = va_start + (vb->dirty_min << PAGE_SHIFT);
				e = va_start + (vb->dirty_max << PAGE_SHIFT);

				start = min(s, start);
				end   = max(e, end);

				flush = 1;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * For up to VMAP_MAX_ALLOC pages this can be faster than vmap().  But if
 * you mix long-lived and short-lived objects with vm_map_ram(), it can
 * consume lots of address space through fragmentation (especially on a
 * 32bit machine) and eventually start failing.  Please use this function
 * for short-lived objects only.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
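
/*
 * A minimal usage sketch for the pair above (illustrative only; 'pages'
 * is a hypothetical array of two caller-allocated struct page pointers):
 *
 *	void *p = vm_map_ram(pages, 2, NUMA_NO_NODE, PAGE_KERNEL);
 *	if (p) {
 *		memset(p, 0, 2 * PAGE_SIZE);
 *		vm_unmap_ram(p, 2);
 *	}
 */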

static struct vm_struct *vmlist __initdata;
/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}
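
/*
 * Boot-time usage sketch (illustrative only; a real caller such as the
 * percpu first-chunk code fills these fields from its own layout):
 *
 *	static struct vm_struct early_vm;
 *
 *	early_vm.flags = VM_ALLOC;
 *	early_vm.size = 2 * PAGE_SIZE;
 *	vm_area_register_early(&early_vm, PAGE_SIZE);
 *	// early_vm.addr now holds the reserved address
 */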

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}
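
/*
 * Usage sketch (illustrative only; 'pages' is a hypothetical array of
 * four struct page pointers owned by the caller, and error handling is
 * elided):
 *
 *	struct vm_struct *area = get_vm_area(4 * PAGE_SIZE, VM_MAP);
 *	unsigned long addr = (unsigned long)area->addr;
 *
 *	flush_cache_vmap(addr, addr + 4 * PAGE_SIZE);
 *	map_kernel_range_noflush(addr, 4 * PAGE_SIZE, PAGE_KERNEL, pages);
 */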

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes the cache before
 * the unmapping and the TLB after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range);

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + get_vm_area_size(area);
	int err;

	err = vmap_page_range(addr, end, prot, pages);

	return err > 0 ? 0 : err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP)
		align = 1ul << clamp_t(int, fls_long(size),
				       PAGE_SHIFT, IOREMAP_MAX_ORDER);

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}
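
/*
 * Typical ioremap-style usage sketch (illustrative only; the actual
 * mapping step is left to the architecture's ioremap_page_range() or
 * equivalent):
 *
 *	struct vm_struct *area = get_vm_area(1024 * 1024, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	// map the bus address at area->addr, then hand out area->addr
 */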

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}

/**
 *	find_vm_area  -  find a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and return it.
 *	It is up to the caller to do all required locking to keep the returned
 *	pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		kasan_free_shadow(vm);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in NMI context (strictly speaking, only if we don't
 *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
 *	conventions for vfree() arch-dependent would be a really bad idea)
 *
 *	NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
 */
void vfree(const void *addr)
{
	BUG_ON(in_nmi());

	kmemleak_free(addr);

	if (!addr)
		return;
	if (unlikely(in_interrupt())) {
		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
		if (llist_add((struct llist_node *)addr, &p->list))
			schedule_work(&p->wq);
	} else
		__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	if (addr)
		__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > totalram_pages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
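
/*
 * A minimal vmap()/vunmap() round trip, for illustration only (error
 * handling for failed alloc_page() calls is omitted for brevity):
 *
 *	struct page *pages[2];
 *	void *p;
 *
 *	pages[0] = alloc_page(GFP_KERNEL);
 *	pages[1] = alloc_page(GFP_KERNEL);
 *	p = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *	if (p) {
 *		memset(p, 0, 2 * PAGE_SIZE);
 *		vunmap(p);
 *	}
 *	__free_page(pages[0]);
 *	__free_page(pages[1]);
 */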

static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node)
{
	const int order = 0;
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;

	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, area->caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node == NUMA_NO_NODE)
			page = alloc_page(alloc_mask);
		else
			page = alloc_pages_node(node, alloc_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
		if (gfp_mask & __GFP_WAIT)
			cond_resched();
	}

	if (map_vm_area(area, prot, pages))
		goto fail;
	return area->addr;

fail:
	warn_alloc_failed(gfp_mask, order,
			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
			  (area->nr_pages*PAGE_SIZE), area->size);
	vfree(area->addr);
	return NULL;
}

/**
 *	__vmalloc_node_range  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@start:		vm area range start
 *	@end:		vm area range end
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@vm_flags:	additional vm area flags (e.g. %VM_NO_GUARD)
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
		goto fail;

	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
				vm_flags, start, end, node, gfp_mask, caller);
	if (!area)
		goto fail;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
	if (!addr)
		return NULL;

	/*
	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
	 * Now, it is fully initialized, so remove this flag here.
	 */
	clear_vm_uninitialized_flag(area);

	/*
	 * A ref_count = 2 is needed because vm_struct allocated in
	 * __get_vm_area_node() contains a reference to the virtual address of
	 * the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 2, gfp_mask);

	return addr;

fail:
	warn_alloc_failed(gfp_mask, 0,
			  "vmalloc: allocation failure: %lu bytes\n",
			  real_size);
	return NULL;
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@align:		desired alignment
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or NUMA_NO_NODE
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, const void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, 0, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				    GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);

/**
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *	@size:	allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, NUMA_NO_NODE,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, NUMA_NO_NODE,
			     __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
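
/*
 * Sketch of the intended use from a driver's ->mmap() handler, for
 * illustration only (buffer lifetime management is omitted; 'buf' would
 * normally be stashed in driver state and vfree()d later):
 *
 *	void *buf = vmalloc_user(vma->vm_end - vma->vm_start);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	return remap_vmalloc_range(vma, buf, 0);
 */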

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      NUMA_NO_NODE, __builtin_return_address(0));
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      NUMA_NO_NODE, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     NUMA_NO_NODE, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

/*
 * Small helper routine to copy @count bytes from @addr to @buf.
 * If a page is not present, the corresponding bytes are zero-filled.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a
		 * lock. But adding a lock here means adding the overhead
		 * of vmalloc()/vfree() calls to this _debug_ interface,
		 * which is rarely used. Instead, we use kmap() and accept
		 * a small overhead in this access function.
		 */
		if (p) {
			/*
			 * We can expect USER0 is not used -- see the
			 * vread()/vwrite() function descriptions.
			 */
			void *map = kmap_atomic(p);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
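
/*
 * Worked example of the chunking arithmetic above, assuming 4 KiB
 * pages: for addr = ...2ff0 and count = 64, the first iteration
 * computes offset = 0xff0 and length = PAGE_SIZE - 0xff0 = 16, so 16
 * bytes are copied from the tail of the first page; the second
 * iteration starts page-aligned and copies the remaining 48 bytes.
 */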

/*
 * Small helper routine to copy @count bytes from @buf to @addr.
 * Pages that are not present are skipped.
 */
static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a
		 * lock. But adding a lock here means adding the overhead
		 * of vmalloc()/vfree() calls to this _debug_ interface,
		 * which is rarely used. Instead, we use kmap() and accept
		 * a small overhead in this access function.
		 */
		if (p) {
			/*
			 * We can expect USER0 is not used -- see the
			 * vread()/vwrite() function descriptions.
			 */
			void *map = kmap_atomic(p);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map);
		}
		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}

/**
 *	vread() - read vmalloc area in a safe way.
 *	@buf:		buffer for reading data
 *	@addr:		vm address.
 *	@count:		number of bytes to be read.
 *
 *	Returns the number of bytes by which @addr and @buf should be
 *	increased (the same number as @count).  Returns 0 if
 *	[addr...addr+count) doesn't intersect any alive vmalloc area.
 *
 *	This function checks that @addr is a valid vmalloc'ed area and
 *	copies data from that area to the given buffer. If the given memory
 *	range of [addr...addr+count) includes some valid address, data is
 *	copied to the proper area of @buf. Memory holes are zero-filled, and
 *	IOREMAP areas are treated as memory holes: no copy is done for them.
 *
 *	If [addr...addr+count) doesn't intersect any alive vm_struct area,
 *	this returns 0. @buf should be a kernel buffer.
 *
 *	Note: in usual operation, vread() is never necessary because the
 *	caller should know that the vmalloc() area is valid and can use
 *	memcpy(). This is for routines which have to access the vmalloc
 *	area without any information, such as /dev/kmem.
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
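
/*
 * A minimal sketch of a /dev/kmem-style read handler built on vread();
 * ubuf, vm_addr and len are hypothetical and error handling is
 * abbreviated:
 *
 *	char *kbuf = kmalloc(len, GFP_KERNEL);
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	if (vread(kbuf, (char *)vm_addr, len) &&
 *	    copy_to_user(ubuf, kbuf, len))
 *		err = -EFAULT;
 *	kfree(kbuf);
 *
 * A return value of 0 from vread() means no alive vmalloc area
 * intersected [vm_addr, vm_addr + len) and kbuf was left untouched.
 */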

/**
 *	vwrite() - write vmalloc area in a safe way.
 *	@buf:		buffer for source data
 *	@addr:		vm address.
 *	@count:		number of bytes to be written.
 *
 *	Returns the number of bytes by which @addr and @buf should be
 *	increased (the same number as @count).  If [addr...addr+count)
 *	doesn't intersect any valid vmalloc area, returns 0.
 *
 *	This function checks that @addr is a valid vmalloc'ed area and
 *	copies data from the buffer to that area. If the specified range of
 *	[addr...addr+count) includes some valid address, data is copied from
 *	the proper area of @buf. Nothing is copied to memory holes, and
 *	IOREMAP areas are treated as memory holes: no copy is done for them.
 *
 *	If [addr...addr+count) doesn't intersect any alive vm_struct area,
 *	this returns 0. @buf should be a kernel buffer.
 *
 *	Note: in usual operation, vwrite() is never necessary because the
 *	caller should know that the vmalloc() area is valid and can use
 *	memcpy(). This is for routines which have to access the vmalloc
 *	area without any information, such as /dev/kmem.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vmap_area *va;
	struct vm_struct *vm;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		if (!count)
			break;

		if (!(va->flags & VM_VM_AREA))
			continue;

		vm = va->vm;
		vaddr = (char *) vm->addr;
		if (addr >= vaddr + get_vm_area_size(vm))
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + get_vm_area_size(vm) - addr;
		if (n > count)
			n = count;
		if (!(vm->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	spin_unlock(&vmap_area_lock);
	if (!copied)
		return 0;
	return buflen;
}
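
/*
 * A minimal sketch of the write direction, mirroring the vread()
 * example above; kbuf, ubuf, vm_addr and len are hypothetical:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 *	if (!vwrite(kbuf, (char *)vm_addr, len))
 *		return 0;
 *	return len;
 *
 * where a 0 return from vwrite() means no alive vmalloc area was
 * touched.
 */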

/**
 *	remap_vmalloc_range_partial  -  map vmalloc pages to userspace
 *	@vma:		vma to cover
 *	@uaddr:		target user address to start at
 *	@kaddr:		virtual address of vmalloc kernel memory
 *	@size:		size of map area
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that @kaddr is a valid vmalloc'ed area,
 *	and that it is big enough to cover the range starting at
 *	@uaddr in @vma. Will return failure if those criteria aren't
 *	met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
				void *kaddr, unsigned long size)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);

	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
		return -EINVAL;

	area = find_vm_area(kaddr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (kaddr + size > area->addr + area->size)
		return -EINVAL;

	do {
		struct page *page = vmalloc_to_page(kaddr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		kaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size > 0);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range_partial);
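
/*
 * A minimal sketch of an mmap handler that exposes only part of a
 * vmalloc'ed, VM_USERMAP-flagged buffer; drv->buf and DATA_OFFSET are
 * hypothetical:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv *drv = file->private_data;
 *
 *		return remap_vmalloc_range_partial(vma, vma->vm_start,
 *					drv->buf + DATA_OFFSET,
 *					vma->vm_end - vma->vm_start);
 *	}
 *
 * DATA_OFFSET must be page aligned, or the call fails with -EINVAL.
 */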

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that @addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	those criteria aren't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   addr + (pgoff << PAGE_SHIFT),
					   vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(remap_vmalloc_range);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

/*
 * Callback for apply_to_page_range(): record the address of each PTE
 * in the caller-supplied array, if one was given.
 */
static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	pte_t ***p = data;

	if (p) {
		*(*p) = pte;
		(*p)++;
	}
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@ptes:		returns the PTEs for the address space
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.
 *
 *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
 *	allocated for the VM area are returned.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				size, f, ptes ? &ptes : NULL)) {
		free_vm_area(area);
		return NULL;
	}

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
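
/*
 * A minimal sketch of the intended use (the classic consumer is the
 * Xen grant-table code, which later installs foreign mappings through
 * the returned PTEs); the array size of 8 pages is arbitrary:
 *
 *	pte_t *ptes[8];
 *	struct vm_struct *area = alloc_vm_area(8 * PAGE_SIZE, ptes);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	... install mappings by writing through ptes[0..7] ...
 *	free_vm_area(area);
 */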

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}

/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 *	    %false if no vmap_area exists
 *
 * Find the vmap_areas whose end addresses enclose @end, i.e. when not
 * NULL, (*pnext)->va_end > @end and (*pprev)->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}

/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned-down
 * address falls between the end addresses of the two vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside the *@pnext vmap_area.  The caller is responsible for checking
 * that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
				       struct vmap_area **pprev,
				       unsigned long align)
{
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (*pnext)
		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
	else
		addr = vmalloc_end;

	while (*pprev && (*pprev)->va_end > addr) {
		*pnext = *pprev;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	}

	return addr;
}

/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 *	    vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas.  This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
 * be scattered pretty far apart; the distance between two areas can
 * easily reach gigabytes.  To avoid interacting with regular vmallocs,
 * these areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple.  It
 * does everything top-down and scans areas from the end looking for a
 * matching slot.  While scanning, if any of the areas overlaps with an
 * existing vmap_area, the base address is pulled down to fit the
 * area.  Scanning is repeated until all the areas fit, then all the
 * necessary data structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *prev, *next;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, end, last_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = 0; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			if (area2 == area)
				continue;

			BUG_ON(start2 >= start && start2 < end);
			BUG_ON(end2 <= end && end2 > start);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
	if (!vas || !vms)
		goto err_free2;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
		base = vmalloc_end - last_end;
		goto found;
	}
	base = pvm_determine_end(&next, &prev, align) - end;

	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start) {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one.  If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				 pcpu_get_vm_areas);

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
		kfree(vas[area]);
		kfree(vms[area]);
	}
err_free2:
	kfree(vas);
	kfree(vms);
	return NULL;
}
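
/*
 * A minimal sketch of the calling convention, with two areas that must
 * keep the same relative offsets (the numbers are illustrative only):
 *
 *	unsigned long offsets[] = { 0, 2 * PAGE_SIZE };
 *	size_t sizes[] = { PAGE_SIZE, PAGE_SIZE };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
 *	if (!vms)
 *		return -ENOMEM;
 *	... vms[0]->addr and vms[1]->addr differ by exactly two pages ...
 *	pcpu_free_vm_areas(vms, 2);
 */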

/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */

#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmap_area_lock)
{
	loff_t n = *pos;
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = list_entry((&vmap_area_list)->next, typeof(*va), list);
	while (n > 0 && &va->list != &vmap_area_list) {
		n--;
		va = list_entry(va->list.next, typeof(*va), list);
	}
	if (!n && &va->list != &vmap_area_list)
		return va;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vmap_area *va = p, *next;

	++*pos;
	next = list_entry(va->list.next, typeof(*va), list);
	if (&next->list != &vmap_area_list)
		return next;

	return NULL;
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmap_area_lock)
{
	spin_unlock(&vmap_area_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (IS_ENABLED(CONFIG_NUMA)) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		if (v->flags & VM_UNINITIALIZED)
			return;
		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
		smp_rmb();

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vmap_area *va = p;
	struct vm_struct *v;

	/*
	 * s_show can race with remove_vm_area(): !VM_VM_AREA means the
	 * vmap area is being torn down or belongs to a vm_map_ram
	 * allocation.
	 */
	if (!(va->flags & VM_VM_AREA))
		return 0;

	v = va->vm;

	seq_printf(m, "0x%pK-0x%pK %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_puts(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_puts(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_puts(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_puts(m, " user");

	if (v->flags & VM_VPAGES)
		seq_puts(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}
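
/*
 * Each s_show() invocation emits one /proc/vmallocinfo line; the line
 * below is an illustrative (made-up) example of the resulting format:
 *
 *	0xffffc90000002000-0xffffc90000005000   12288 alloc_large_system_hash+0x1c2/0x2c0 pages=2 vmalloc N0=2
 */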

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	if (IS_ENABLED(CONFIG_NUMA))
		return seq_open_private(file, &vmalloc_op,
					nr_node_ids * sizeof(unsigned int));
	else
		return seq_open(file, &vmalloc_op);
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);

void get_vmalloc_info(struct vmalloc_info *vmi)
{
	struct vmap_area *va;
	unsigned long free_area_size;
	unsigned long prev_end;

	vmi->used = 0;
	vmi->largest_chunk = 0;

	prev_end = VMALLOC_START;

	rcu_read_lock();

	if (list_empty(&vmap_area_list)) {
		vmi->largest_chunk = VMALLOC_TOTAL;
		goto out;
	}

	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		unsigned long addr = va->va_start;

		/*
		 * Some archs keep another range for modules in vmalloc space
		 */
		if (addr < VMALLOC_START)
			continue;
		if (addr >= VMALLOC_END)
			break;

		if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
			continue;

		vmi->used += (va->va_end - va->va_start);

		free_area_size = addr - prev_end;
		if (vmi->largest_chunk < free_area_size)
			vmi->largest_chunk = free_area_size;

		prev_end = va->va_end;
	}

	if (VMALLOC_END - prev_end > vmi->largest_chunk)
		vmi->largest_chunk = VMALLOC_END - prev_end;

out:
	rcu_read_unlock();
}
#endif