#ifndef _MOTOROLA_PGTABLE_H
#define _MOTOROLA_PGTABLE_H


/*
 * Definitions for MMU descriptors
 */
#define _PAGE_PRESENT	0x001
#define _PAGE_SHORT	0x002
#define _PAGE_RONLY	0x004
#define _PAGE_READWRITE	0x000
#define _PAGE_ACCESSED	0x008
#define _PAGE_DIRTY	0x010
#define _PAGE_SUPER	0x080	/* 68040 supervisor only */
#define _PAGE_GLOBAL040	0x400	/* 68040 global bit, used for kva descs */
#define _PAGE_NOCACHE030 0x040	/* 68030 no-cache mode */
#define _PAGE_NOCACHE	0x060	/* 68040 cache mode, non-serialized */
#define _PAGE_NOCACHE_S	0x040	/* 68040 no-cache mode, serialized */
#define _PAGE_CACHE040	0x020	/* 68040 cache mode, cachable, copyback */
#define _PAGE_CACHE040W	0x000	/* 68040 cache mode, cachable, write-through */
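
/* Note: on the '040/'060 the cache mode occupies bits 5-6 of the page
 * descriptor (mask 0x060, cf. _CACHEMASK040); the '030 has only the single
 * cache-inhibit bit 0x040. */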

#define _DESCTYPE_MASK	0x003

#define _CACHEMASK040	(~0x060)
#define _TABLE_MASK	(0xfffffe00)

#define _PAGE_TABLE	(_PAGE_SHORT)
#define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)

#define _PAGE_PROTNONE	0x004

#ifndef __ASSEMBLY__

/* This is the cache mode to be used for pages containing page descriptors for
 * processors >= '040. It is used in pte_mknocache(), and the variable is
 * defined and initialized in head.S */
extern int m68k_pgtable_cachemode;

/* This is the cache mode for normal pages, for supervisor access on
 * processors >= '040. It is used in pte_mkcache(), and the variable is
 * defined and initialized in head.S */

#if defined(CPU_M68060_ONLY) && defined(CONFIG_060_WRITETHROUGH)
#define m68k_supervisor_cachemode _PAGE_CACHE040W
#elif defined(CPU_M68040_OR_M68060_ONLY)
#define m68k_supervisor_cachemode _PAGE_CACHE040
#elif defined(CPU_M68020_OR_M68030_ONLY)
#define m68k_supervisor_cachemode 0
#else
extern int m68k_supervisor_cachemode;
#endif

#if defined(CPU_M68040_OR_M68060_ONLY)
#define mm_cachebits _PAGE_CACHE040
#elif defined(CPU_M68020_OR_M68030_ONLY)
#define mm_cachebits 0
#else
extern unsigned long mm_cachebits;
#endif
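
/* mm_cachebits supplies the cache bits ORed into the PAGE_* protections
 * below; when the CPU type is not fixed at compile time it is a run-time
 * variable rather than a constant. */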

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED | mm_cachebits)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | mm_cachebits)

/* Alternate definitions that are compile time constants, for
   initializing protection_map.  The cachebits are fixed later.  */
#define PAGE_NONE_C	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C	__pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)

/*
 * The m68k can't do page protection for execute, and considers execute
 * permission the same as read. Also, write permissions imply read
 * permissions. This is the closest we can get..
 */
#define __P000	PAGE_NONE_C
#define __P001	PAGE_READONLY_C
#define __P010	PAGE_COPY_C
#define __P011	PAGE_COPY_C
#define __P100	PAGE_READONLY_C
#define __P101	PAGE_READONLY_C
#define __P110	PAGE_COPY_C
#define __P111	PAGE_COPY_C

#define __S000	PAGE_NONE_C
#define __S001	PAGE_READONLY_C
#define __S010	PAGE_SHARED_C
#define __S011	PAGE_SHARED_C
#define __S100	PAGE_READONLY_C
#define __S101	PAGE_READONLY_C
#define __S110	PAGE_SHARED_C
#define __S111	PAGE_SHARED_C

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

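/* Replace the protection bits of a pte while keeping the page frame and the
 * bits covered by _PAGE_CHG_MASK (accessed, dirty and cache mode). */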
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

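/* Each pmd_t covers 16 consecutive hardware pointer-table descriptors, so
 * point each of them at a successive slice of the pte table
 * (sizeof(pte_t)*PTRS_PER_PTE/16 bytes apart). */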
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
	unsigned long *ptr = pmdp->pmd;
	short i = 16;
	while (--i >= 0) {
		*ptr++ = ptbl;
		ptbl += (sizeof(pte_t)*PTRS_PER_PTE/16);
	}
}

static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{
	pgd_val(*pgdp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp);
}

#define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
#define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
#define __pgd_page(pgd) ((unsigned long)__va(pgd_val(pgd) & _TABLE_MASK))


#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,ptep)		({ pte_val(*(ptep)) = 0; })

#define pte_page(pte)		virt_to_page(__va(pte_val(pte)))
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pmd_present(pmd)	(pmd_val(pmd) & _PAGE_TABLE)
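/* A pmd is backed by 16 hardware descriptors (cf. pmd_set()), so wipe all of
 * them. */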
#define pmd_clear(pmdp) ({			\
	unsigned long *__ptr = pmdp->pmd;	\
	short __i = 16;				\
	while (--__i >= 0)			\
		*__ptr++ = 0;			\
})
#define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))


#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		((pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE)
#define pgd_present(pgd)	(pgd_val(pgd) & _PAGE_TABLE)
#define pgd_clear(pgdp)		({ pgd_val(*pgdp) = 0; })
#define pgd_page(pgd)		(mem_map + ((unsigned long)(__va(pgd_val(pgd)) - PAGE_OFFSET) >> PAGE_SHIFT))

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))


/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_RONLY); }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_RONLY; return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_RONLY; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mknocache(pte_t pte)
{
	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_pgtable_cachemode;
	return pte;
}
static inline pte_t pte_mkcache(pte_t pte)
{
	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode;
	return pte;
}
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))

#define pgd_index(address)     ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
static inline pgd_t *pgd_offset(const struct mm_struct *mm,
				unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

#define swapper_pg_dir kernel_pg_dir
extern pgd_t kernel_pg_dir[128];

static inline pgd_t *pgd_offset_k(unsigned long address)
{
	return kernel_pg_dir + (address >> PGDIR_SHIFT);
}


/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *)__pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
}

/* Find an entry in the third-level page table.. */
static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
{
	return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define pte_unmap(pte)		((void)0)

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pgd_t *dir;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(addr);
		pmdp = pmd_offset(dir, addr);
		ptep = pte_offset_kernel(pmdp, addr);
		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pgd_t *dir;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(addr);
		pmdp = pmd_offset(dir, addr);
		ptep = pte_offset_kernel(pmdp, addr);
		*ptep = pte_mkcache(*ptep);
	}
}

/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
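/* The swap type occupies bits 4-11 and the swap offset bits 12 and up, which
 * keeps the bits tested by pte_present() (_PAGE_PRESENT and _PAGE_PROTNONE)
 * clear. */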
#define __swp_type(x)		(((x).val >> 4) & 0xff)
#define __swp_offset(x)		((x).val >> 12)
#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 4) | ((offset) << 12) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#endif	/* !__ASSEMBLY__ */
#endif /* _MOTOROLA_PGTABLE_H */