/*
 * Re-map IO memory to kernel address space so that we can access it.
 * Needed for memory-mapped I/O devices mapped outside our normal DRAM
 * window (that is, all memory-mapped I/O devices).
 *
 * Copyright (C) 1995,1996 Linus Torvalds
 *
 * Meta port based on CRIS-port by Axis Communications AB
 */

#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/pgtable.h>

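/*
 * Illustrative note, not taken from this file: the generic ioremap()
 * family of wrappers in this port's asm/io.h is assumed to funnel into
 * __ioremap() below, passing extra page-protection flags to select the
 * desired cacheability, roughly along the lines of:
 *
 *	#define ioremap(offset, size)		__ioremap((offset), (size), 0)
 *	#define ioremap_cached(offset, size)	\
 *		__ioremap((offset), (size), _PAGE_CACHEABLE)
 *
 * The exact wrapper names and flag bits (e.g. _PAGE_CACHEABLE) are
 * assumptions here; see asm/io.h for the definitions actually used.
 */
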
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, size_t size,
			unsigned long flags)
{
	unsigned long addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Custom region addresses are accessible and uncached by default. */
	if (phys_addr >= LINSYSCUSTOM_BASE &&
	    phys_addr < (LINSYSCUSTOM_BASE + LINSYSCUSTOM_LIMIT))
		return (__force void __iomem *) phys_addr;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
	prot = __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY |
			_PAGE_ACCESSED | _PAGE_KERNEL | _PAGE_CACHE_WIN0 |
			flags);

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (unsigned long) area->addr;
	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
		vunmap((void *) addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);
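
/*
 * Usage sketch (illustrative only; the base address, size and register
 * offsets are hypothetical): drivers normally go through the generic
 * wrappers and accessors rather than calling __ioremap()/__iounmap()
 * directly:
 *
 *	void __iomem *regs = ioremap(0xf0002000, 0x100);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	status = readl(regs + 0x14);
 *	iounmap(regs);
 */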

void __iounmap(void __iomem *addr)
{
	struct vm_struct *p;

	if ((__force unsigned long)addr >= LINSYSCUSTOM_BASE &&
	    (__force unsigned long)addr < (LINSYSCUSTOM_BASE +
					   LINSYSCUSTOM_LIMIT))
		return;

	p = remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
	if (unlikely(!p)) {
		pr_err("iounmap: bad address %p\n", addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);