#ifndef _ASM_POWERPC_PGALLOC_64_H
#define _ASM_POWERPC_PGALLOC_64_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
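
/*
 * For illustration: because every pagetable is sufficiently aligned,
 * an index_size can be packed into the low bits of a pagetable
 * pointer and recovered again:
 *
 *	unsigned long pgf = (unsigned long)table | index_size;
 *	void *table = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);
 *	unsigned index_size = pgf & MAX_PGTABLE_INDEX_SIZE;
 *
 * This is exactly what pgtable_free_tlb() and __tlb_remove_table()
 * below do.
 */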

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})
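
/*
 * Note that pgtable_cache[] is indexed by (shift - 1): a shift of zero
 * never reaches PGT_CACHE(), because an index_size of 0 denotes a full
 * PTE page, which is drawn from the page allocator instead (see
 * pgtable_free()).
 */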

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

#ifndef CONFIG_PPC_64K_PAGES

#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}

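/*
 * pte_alloc_one() is pte_alloc_one_kernel() plus pgtable_page_ctor(),
 * which readies the page for split page-table-lock accounting before
 * it is handed out as a user PTE page.
 */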
static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	struct page *page;
	pte_t *pte;

	pte = pte_alloc_one_kernel(mm, address);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}

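/*
 * index_size == 0 means a full PTE page from the page allocator;
 * any other value selects the matching PGT_CACHE() kmem_cache.
 */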
static inline void pgtable_free(void *table, unsigned index_size)
{
	if (!index_size)
		free_page((unsigned long)table);
	else {
		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(index_size), table);
	}
}

#ifdef CONFIG_SMP
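/*
 * Under SMP the free must be deferred until no other CPU can still be
 * walking the unhooked table, so the table is queued through
 * tlb_remove_table() with its index_size stashed in the low bits of
 * the pointer.
 */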
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;
	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

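/* Unpack what pgtable_free_tlb() packed into the low pointer bits. */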
static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	pgtable_free(table, shift);
}
#else /* !CONFIG_SMP */
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	pgtable_free(table, shift);
}
#endif /* CONFIG_SMP */

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_page_dtor(table);
	pgtable_free_tlb(tlb, page_address(table), 0);
}

#else /* if CONFIG_PPC_64K_PAGES */
/*
 * we support 16 fragments per PTE page.
 */
#define PTE_FRAG_NR	16
/*
 * We use a 2K PTE page fragment and another 2K for storing
 * real_pte_t hash index
 */
#define PTE_FRAG_SIZE_SHIFT  12
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
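/*
 * Worked example: each fragment is 1UL << 12 = 4K, i.e. 2K of PTEs
 * plus 2K of real_pte_t hash index, so a 64K page yields
 * 64K / 4K = 16 fragments, matching PTE_FRAG_NR above.
 */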

extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
extern void page_table_free(struct mm_struct *, unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif

#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, (unsigned long)pte_page);
}

static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
}

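/*
 * The final argument to page_table_alloc()/page_table_free()
 * distinguishes kernel (1) from user (0) page-table fragments.
 */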
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)page_table_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
					unsigned long address)
{
	return (pgtable_t)page_table_alloc(mm, address, 0);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	page_table_free(mm, (unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	page_table_free(mm, (unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, 0);
}
#endif /* CONFIG_PPC_64K_PAGES */

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

#define __pmd_free_tlb(tlb, pmd, addr)		      \
	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud, addr)		      \
	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)

#endif /* !CONFIG_PPC_64K_PAGES */

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */