/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

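/*
 * The vdso images are built as separate shared objects and linked into
 * the kernel image; the start/end symbols below delimit the embedded
 * images (they are expected to come from the vdso wrapper objects).
 */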
#ifdef CONFIG_COMPAT
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

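/*
 * Handle the "vdso=" kernel command line parameter: accepts "on",
 * "off", or a numeric value, e.g. vdso=0 to disable the vdso at boot.
 */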
static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page. The union pads the data structure out to
 * exactly one page.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Set up the vdso data page.
 */
static void vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31);
}

/*
 * Allocate/free per-CPU vdso data.
 */
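/* a segment table has 2048 8-byte entries and thus needs four pages */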
#define SEGMENT_ORDER	2

int vdso_alloc_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;
	int i;

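	/*
	 * Default to the offset of the paste area in the lowcore; this is
	 * overwritten below with the per-CPU data page once the tables
	 * have been set up.
	 */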
	lowcore->vdso_per_cpu_data = __LC_PASTE;

	if (!vdso_enabled)
		return 0;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;

	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_INVALID,
		    256*sizeof(unsigned long));

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

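	/*
	 * A page table only occupies the first 2KB (256 entries of 8
	 * bytes) of its page; reuse the rest of the page for the access
	 * list (psal) and the ASN-second-table entry (aste) that make the
	 * data page addressable via an access register.
	 */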
	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;

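	/* mark access-list entries 1-7 as invalid; entry 0 is set up below */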
	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;

	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x02000000;
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
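	/* publish the per-CPU data page in the lowcore */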
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

void vdso_free_per_cpu(struct _lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (!vdso_enabled)
		return;

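	/*
	 * Walk back from the paste entry to the tables set up by
	 * vdso_alloc_per_cpu() to find the pages to free.
	 */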
	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

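/*
 * Point control register 5 at the paste array in the lowcore so that
 * access-register translation can find the per-CPU vdso mapping.
 */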
static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (!vdso_enabled)
		return;
	cr5 = offsetof(struct _lowcore, paste);
	__ctl_load(cr5, 5, 5);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif
	/*
	 * The vDSO had a problem and was disabled; just don't "enable"
	 * it for this process.
	 */
	if (vdso_pages == 0)
		return 0;

	current->mm->context.vdso_base = 0;

	/*
	 * Pick a base address for the vDSO in process space; there is no
	 * fixed "natural" base on s390, so let get_unmapped_area() choose
	 * a suitable spot.
	 */
	down_write(&mm->mmap_sem);
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	current->mm->context.vdso_base = vdso_base;

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages.
	 * gdb can break that via the ptrace interface and thus trigger COW
	 * on those pages, but it is then your responsibility never to do
	 * that to the "data" page of the vDSO, or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * totally dead. It is fine to use ptrace for setting breakpoints
	 * in the vDSO code pages though.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				     vdso_pagelist);
	if (rc)
		current->mm->context.vdso_base = 0;
out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}

static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT
	/* Calculate the size of the 32 bit vDSO; the +1 is for the data page */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Clear the reserved bit and take a reference on each code page */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO; the +1 is for the data page */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Clear the reserved bit and take a reference on each code page */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();

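	/* also take a reference on the shared vdso data page */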
	get_page(virt_to_page(vdso_data));

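	/* make the initialisation above visible before the vdso is used */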
	smp_wmb();

	return 0;
}
early_initcall(vdso_init);