/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_VALID		(_AT(pteval_t, 1) << 0)
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_WRITE		(_AT(pteval_t, 1) << 57)
#define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
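
/*
 * Note: bits 55-58 fall in the range of descriptor bits that the MMU
 * ignores, so they are free for software use; PTE_PROT_NONE overloads
 * bit 58 and is only meaningful while the PTE_VALID bit is clear.
 */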

/*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 *
 * VMEMMAP_SIZE: allows the whole linear region to be covered by a struct page array
 *	(rounded up to PUD_SIZE).
 * VMALLOC_START: beginning of the kernel VA space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
 *	fixed mappings and modules
 */
#define VMEMMAP_SIZE		ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
#define VMALLOC_START		(UL(0xffffffffffffffff) << VA_BITS)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define VMEMMAP_START		(VMALLOC_END + SZ_64K)
#define vmemmap			((struct page *)VMEMMAP_START - \
				 SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
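
/*
 * Worked example (illustrative only): with 4KB pages, VA_BITS == 39 and a
 * 64-byte struct page, VMALLOC_START evaluates to 0xffffff8000000000 and
 * VMEMMAP_SIZE to ALIGN((1UL << 27) * 64, PUD_SIZE) == 8GB, so the vmemmap
 * array starts SZ_64K above VMALLOC_END and ends PUD_SIZE below PAGE_OFFSET.
 */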

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#ifdef CONFIG_SMP
#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
#else
#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)
#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
#endif

#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))

#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)

#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
#define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)

#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)

#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
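
/*
 * The __Pxxx/__Sxxx entries above populate the generic protection_map[],
 * indexed by the mmap() PROT_READ/WRITE/EXEC bits: __Pxxx is used for
 * private (copy-on-write) mappings and __Sxxx for shared ones, which is
 * why a writable private mapping gets PAGE_COPY rather than PAGE_SHARED.
 */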

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_dirty(pte)		(!!(pte_val(pte) & PTE_DIRTY))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))

#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_WRITE));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Barriers are only needed if the new pte is valid and maps kernel
	 * memory; otherwise TLB maintenance or update_mmu_cache() provide
	 * the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

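/*
 * set_pte_at() below implements the software dirty-bit scheme used here:
 * a valid user pte only gets hardware write permission (PTE_RDONLY clear)
 * once it is both writable and dirty, so the first write to a clean page
 * faults and the fault handler marks the pte dirty before enabling writes.
 */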
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_valid_user(pte)) {
		if (!pte_special(pte) && pte_exec(pte))
			__sync_icache_dcache(pte, addr);
		if (pte_dirty(pte) && pte_write(pte))
			pte_val(pte) &= ~PTE_RDONLY;
		else
			pte_val(pte) |= PTE_RDONLY;
	}

	set_pte(ptep, pte);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		2
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
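
/* A hugetlb page is a single PMD-level block, e.g. 2MB with 4KB pages. */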

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd)	pte_special(pmd_pte(pmd))
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
struct vm_area_struct;
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp);
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mksplitting(pmd)	pte_pmd(pte_mkspecial(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

static inline int has_transparent_hugepage(void)
{
	return 1;
}

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
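
/*
 * The three helpers above differ only in the memory attribute index they
 * select: Device-nGnRnE (no early write acknowledgement) for
 * pgprot_noncached(), Normal Non-cacheable for pgprot_writecombine() and
 * Device-nGnRE for pgprot_device(); all of them also set PXN/UXN so the
 * resulting mapping is never executable.
 */
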
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#ifdef CONFIG_ARM64_64K_PAGES
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline pud_t *pgd_page_vaddr(pgd_t pgd)
{
	return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
}

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
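
/*
 * Illustrative walk (a sketch, not an API defined here): resolving a kernel
 * virtual address with the accessors above, assuming every level is present
 * and valid:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each step extracts the index bits for its level from addr and adds them
 * to the table pointer recovered from the entry one level up.
 */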

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
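
/*
 * Example (illustrative only): __swp_entry(2, 0x1234) encodes to
 * (2 << 2) | (0x1234 << 8) == 0x123408, from which __swp_type() recovers 2
 * and __swp_offset() recovers 0x1234; bits 0-1 stay zero so the entry is
 * never mistaken for a present pte.
 */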

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */