#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

/* Allocate the top level pgd (page directory)
 *
 * Here (for 64-bit kernels) we implement a hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB), so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all the
 * kernel for machines with under 4GB of memory). */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
					       PGD_ALLOC_ORDER);
	pgd_t *actual_pgd = pgd;

	if (likely(pgd != NULL)) {
		memset(pgd, 0, PAGE_SIZE << PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
		actual_pgd += PTRS_PER_PGD;
		/* Populate first pmd with allocated memory.  We mark it
		 * with PxD_FLAG_ATTACHED as a signal to the system that this
		 * pmd entry may not be cleared. */
		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
					    PxD_FLAG_VALID |
					    PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
		/* The first pmd entry is likewise marked with
		 * PxD_FLAG_ATTACHED as a signal that this pmd may not be
		 * freed. */
		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
	}
	return actual_pgd;
}
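
/*
 * Layout sketch (illustrative only, not compiled): on 64-bit the single
 * __get_free_pages() block above holds both tables, permanent pmd
 * first, real pgd immediately behind it:
 *
 *	pgd                  <- start of allocation, the first pmd
 *	pgd + PTRS_PER_PGD   <- actual_pgd, what pgd_alloc() returns
 *
 * This is why the first pmd is always reachable by subtracting a
 * constant offset, and why pgd_free() below must undo that offset
 * before handing the block back.
 */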

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
	pgd -= PTRS_PER_PGD;
#endif
	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3

/* Three Level Page Table Support for pmds */

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
			(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}
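
/*
 * Entry encoding sketch (illustrative, not compiled; "flag_mask" is a
 * hypothetical stand-in for whatever covers the PxD_FLAG_* bits, which
 * this header does not spell out):
 *
 *	unsigned long ent = pgd_val(*pgd);
 *	pmd_t *pmd = __va((ent & ~flag_mask) << PxD_VALUE_SHIFT);
 *
 * i.e. the pmd's physical address is stored right-shifted by
 * PxD_VALUE_SHIFT so it can share one word with the flag bits.
 */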

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
					       PMD_ORDER);
	if (pmd)
		memset(pmd, 0, PAGE_SIZE << PMD_ORDER);
	return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
		/*
		 * This is the permanent pmd attached to the pgd;
		 * we cannot free it.
		 * Increment the counter to compensate for the decrement
		 * done by generic mm code.
		 */
		mm_inc_nr_pmds(mm);
		return;
	}
	free_pages((unsigned long)pmd, PMD_ORDER);
}
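
/*
 * Accounting sketch (a simplified view of the generic free_pmd_range()
 * path in mm/memory.c, not a verbatim quote):
 *
 *	pmd_free_tlb(tlb, pmd, addr);	// ends up in pmd_free()
 *	mm_dec_nr_pmds(tlb->mm);
 *
 * The decrement is unconditional, so for the attached pmd that is
 * never actually released, the mm_inc_nr_pmds() above pre-balances it.
 */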

#else

/* Two Level Page Table Support for pmds */

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
 */

#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)			do { } while (0)
#define pgd_populate(mm, pgd, pmd)	BUG()
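
/*
 * Note: with two levels the pmd is folded into the pgd by the generic
 * nopmd scheme, so pmd_offset() simply derives a pmd pointer from the
 * pgd entry and generic code never calls the stubs above; reaching
 * them is a bug by construction.
 */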
#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
	/* preserve the attached marker if this is the beginning of
	 * the permanent pmd */
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
				     PxD_FLAG_VALID |
				     PxD_FLAG_ATTACHED)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
	else
#endif
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}
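
/*
 * Usage sketch (a simplified view of the generic __pte_alloc() caller
 * of this era, not a verbatim quote):
 *
 *	pgtable_t new = pte_alloc_one(mm, address);
 *	if (!new)
 *		return -ENOMEM;
 *	...
 *	pmd_populate(mm, pmd, new);
 *
 * pgtable_page_ctor() above initializes the split page-table lock for
 * the page, which is why pte_free() below calls pgtable_page_dtor()
 * before freeing.
 */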

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	pte_free_kernel(mm, page_address(pte));
}

#define check_pgt_cache()	do { } while (0)

#endif