/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/unistd.h>
#include <os.h>
#include <skas.h>
#include <sysdep/tls.h>

extern int modify_ldt(int func, void *ptr, unsigned long bytecount);

/*
 * Copy the descriptor into the stub's data area and have the stub run
 * modify_ldt() on it in the target address space.
 */
static long write_ldt_entry(struct mm_id *mm_idp, int func,
		     struct user_desc *desc, void **addr, int done)
{
	long res;
	void *stub_addr;

	res = syscall_stub_data(mm_idp, (unsigned long *)desc,
				(sizeof(*desc) + sizeof(long) - 1) &
				    ~(sizeof(long) - 1),
				addr, &stub_addr);
	if (!res) {
		unsigned long args[] = { func,
					 (unsigned long)stub_addr,
					 sizeof(*desc),
					 0, 0, 0 };
		res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
				       0, addr, done);
	}

	return res;
}
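
/*
 * Illustrative arithmetic (not used by the code): the size passed to
 * syscall_stub_data() above is sizeof(*desc) rounded up to a multiple
 * of sizeof(long). E.g. for a hypothetical 14-byte descriptor on a
 * 64-bit host (sizeof(long) == 8):
 *
 *	(14 + 8 - 1) & ~(8 - 1)  ==  21 & ~7  ==  16
 *
 * so the stub's word-at-a-time copy always moves whole longs.
 */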

/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * in arch/i386/kernel/ldt.c.
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - sys_modify_ldt_skas
 */
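
/*
 * For orientation, a rough sketch of the uml_ldt_t layout this file
 * relies on (the real definition lives in the UML mm_context headers;
 * the field names below simply mirror the usage in this file):
 *
 *	typedef struct uml_ldt {
 *		int entry_count;
 *		struct mutex lock;
 *		union {
 *			struct ldt_entry *pages[LDT_PAGES_MAX];
 *			struct ldt_entry entries[LDT_DIRECT_ENTRIES];
 *		} u;
 *	} uml_ldt_t;
 *
 * Up to LDT_DIRECT_ENTRIES descriptors are stored inline; beyond that,
 * whole pages of descriptors are allocated and tracked in u.pages[].
 */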

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
	int i, err = 0;
	unsigned long size;
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;

	if (!ldt->entry_count)
		goto out;
	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
	err = bytecount;

	mutex_lock(&ldt->lock);
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
		size = LDT_ENTRY_SIZE * LDT_DIRECT_ENTRIES;
		if (size > bytecount)
			size = bytecount;
		if (copy_to_user(ptr, ldt->u.entries, size))
			err = -EFAULT;
		bytecount -= size;
		ptr += size;
	}
	else {
		for (i = 0; i < ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		     i++) {
			size = PAGE_SIZE;
			if (size > bytecount)
				size = bytecount;
			if (copy_to_user(ptr, ldt->u.pages[i], size)) {
				err = -EFAULT;
				break;
			}
			bytecount -= size;
			ptr += size;
		}
	}
	mutex_unlock(&ldt->lock);

	if (bytecount == 0 || err == -EFAULT)
		goto out;

	/* Pad the rest of the request with empty entries */
	if (clear_user(ptr, bytecount))
		err = -EFAULT;

out:
	return err;
}
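
/*
 * Illustrative arithmetic for the paged path above, assuming typical
 * x86 values (LDT_ENTRY_SIZE == 8, PAGE_SIZE == 4096, so
 * LDT_ENTRIES_PER_PAGE == 512): a caller requesting 6000 bytes gets
 * 4096 bytes from u.pages[0], then the remaining 1904 bytes from
 * u.pages[1]; any part of the request beyond the populated entries is
 * zero-filled by the clear_user() after the loop.
 */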

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
{
	int err;

	if (bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;

	err = bytecount;
	/*
	 * UML doesn't support lcall7 and lcall27, so we don't really
	 * have a default ldt; instead, we emulate an empty ldt of the
	 * common host default ldt size.
	 */
	if (clear_user(ptr, bytecount))
		err = -EFAULT;

	return err;
}
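
/*
 * Seen from userspace (illustrative): modify_ldt(2, buf, n) therefore
 * returns min(n, 5*LDT_ENTRY_SIZE) and fills that many bytes of buf
 * with zeros, so callers observe an all-empty five-entry default LDT.
 */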

static int write_ldt(void __user *ptr, unsigned long bytecount, int func)
{
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	struct mm_id *mm_idp = &current->mm->context.id;
	int i, err;
	struct user_desc ldt_info;
	struct ldt_entry entry0, *ldt_p;
	void *addr = NULL;

	err = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	err = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	err = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (func == 1)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	mutex_lock(&ldt->lock);

	err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
	if (err)
		goto out_unlock;

	if (ldt_info.entry_number >= ldt->entry_count &&
	    ldt_info.entry_number >= LDT_DIRECT_ENTRIES) {
		/* Grow the table page by page until the entry fits */
		for (i = ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		     i*LDT_ENTRIES_PER_PAGE <= ldt_info.entry_number;
		     i++) {
			/*
			 * Storing u.pages[0] below clobbers the inline
			 * u.entries in the union, so save entry 0 first.
			 */
			if (i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!ldt->u.pages[i]) {
				err = -ENOMEM;
				/* Undo the change in the host */
				memset(&ldt_info, 0, sizeof(ldt_info));
				write_ldt_entry(mm_idp, 1, &ldt_info, &addr, 1);
				goto out_unlock;
			}
			if (i == 0) {
				/*
				 * Move the saved entry 0 and the remaining
				 * inline entries onto the first page.
				 */
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if (ldt->entry_count <= ldt_info.entry_number)
		ldt->entry_count = ldt_info.entry_number + 1;

	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		ldt_p = ldt->u.entries + ldt_info.entry_number;
	else
		ldt_p = ldt->u.pages[ldt_info.entry_number/LDT_ENTRIES_PER_PAGE] +
			ldt_info.entry_number%LDT_ENTRIES_PER_PAGE;

	if (ldt_info.base_addr == 0 && ldt_info.limit == 0 &&
	   (func == 1 || LDT_empty(&ldt_info))) {
		/* Clear the entry */
		ldt_p->a = 0;
		ldt_p->b = 0;
	}
	else {
		if (func == 1)
			ldt_info.useable = 0;
		ldt_p->a = LDT_entry_a(&ldt_info);
		ldt_p->b = LDT_entry_b(&ldt_info);
	}
	err = 0;

out_unlock:
	mutex_unlock(&ldt->lock);
out:
	return err;
}
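
/*
 * For reference (a sketch, assuming the standard x86 descriptor
 * layout): LDT_entry_a()/LDT_entry_b() pack the user_desc fields into
 * the two 32-bit words of a hardware segment descriptor, roughly
 *
 *	a = ((base_addr & 0xffff) << 16) | (limit & 0xffff);
 *
 * with the high base and limit bits, plus the contents, present,
 * seg_32bit, limit_in_pages and useable flags, packed into b.
 */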

static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
	case 0x11:
		ret = write_ldt(ptr, bytecount, func);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	}
	return ret;
}
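
/*
 * The func values above follow the host modify_ldt() interface: 0 reads
 * the LDT, 1 writes an entry with the legacy semantics (which clear the
 * useable bit, see write_ldt() above), 0x11 writes with the modern
 * semantics, and 2 reads the default LDT; anything else gets -ENOSYS.
 */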

static DEFINE_SPINLOCK(host_ldt_lock);
static short dummy_list[9] = { 0, -1 };
static short *host_ldt_entries = NULL;

/*
 * Build a -1-terminated list of the LDT entries the host has in use, so
 * that init_new_ldt() can reset exactly those entries in new address
 * spaces. Runs only once; later callers see host_ldt_entries != NULL.
 */
static void ldt_get_host_info(void)
{
	long ret;
	struct ldt_entry *ldt;
	short *tmp;
	int i, size, k, order;

	spin_lock(&host_ldt_lock);

	if (host_ldt_entries != NULL) {
		spin_unlock(&host_ldt_lock);
		return;
	}
	host_ldt_entries = dummy_list + 1;

	spin_unlock(&host_ldt_lock);

	for (i = LDT_PAGES_MAX - 1, order = 0; i; i >>= 1, order++)
		;

	ldt = (struct ldt_entry *)
	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
	if (ldt == NULL) {
		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
		       "for host ldt\n");
		return;
	}

	ret = modify_ldt(0, ldt, (1 << order) * PAGE_SIZE);
	if (ret < 0) {
		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
		goto out_free;
	}
	if (ret == 0) {
		/* default_ldt is active, simply write an empty entry 0 */
		host_ldt_entries = dummy_list;
		goto out_free;
	}

	/* Count the occupied entries */
	for (i = 0, size = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			size++;
	}

	if (size < ARRAY_SIZE(dummy_list))
		host_ldt_entries = dummy_list;
	else {
		size = (size + 1) * sizeof(dummy_list[0]);
		tmp = kmalloc(size, GFP_KERNEL);
		if (tmp == NULL) {
			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
			       "host ldt list\n");
			goto out_free;
		}
		host_ldt_entries = tmp;
	}

	/* Record the index of each occupied entry, -1-terminated */
	for (i = 0, k = 0; i < ret/LDT_ENTRY_SIZE; i++) {
		if (ldt[i].a != 0 || ldt[i].b != 0)
			host_ldt_entries[k++] = i;
	}
	host_ldt_entries[k] = -1;

out_free:
	free_pages((unsigned long)ldt, order);
}
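
/*
 * Illustrative arithmetic for the order computation above: the loop
 * finds the smallest order with (1 << order) >= LDT_PAGES_MAX. E.g.
 * with a typical LDT_PAGES_MAX of 16, i starts at 15 and is shifted
 * right four times, giving order == 4: a 16-page (64kB) buffer, enough
 * for a full 8192-entry, 8-byte-per-entry host LDT.
 */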

long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
{
	struct user_desc desc;
	short *num_p;
	int i;
	long page, err = 0;
	void *addr = NULL;

	mutex_init(&new_mm->arch.ldt.lock);

	if (!from_mm) {
		memset(&desc, 0, sizeof(desc));
		/*
		 * Now we try to retrieve info about the ldt we inherited
		 * from the host. All ldt entries found will be reset in
		 * the following loop.
		 */
		ldt_get_host_info();
		for (num_p = host_ldt_entries; *num_p != -1; num_p++) {
			desc.entry_number = *num_p;
			err = write_ldt_entry(&new_mm->id, 1, &desc,
					      &addr, *(num_p + 1) == -1);
			if (err)
				break;
		}
		new_mm->arch.ldt.entry_count = 0;

		goto out;
	}

	/*
	 * Our local LDT is used to supply the data for
	 * modify_ldt(READLDT) when PTRACE_LDT isn't available, i.e. when
	 * we have to use the stub for modify_ldt, which can't handle the
	 * big read buffer of up to 64kB.
	 */
	mutex_lock(&from_mm->arch.ldt.lock);
	if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
		memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
		       sizeof(new_mm->arch.ldt.u.entries));
	else {
		/* Copy the parent's descriptor pages one by one */
		i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-- > 0) {
			page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!page) {
				err = -ENOMEM;
				break;
			}
			new_mm->arch.ldt.u.pages[i] =
				(struct ldt_entry *) page;
			memcpy(new_mm->arch.ldt.u.pages[i],
			       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
		}
	}
	new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
	mutex_unlock(&from_mm->arch.ldt.lock);

out:
	return err;
}

void free_ldt(struct mm_context *mm)
{
	int i;

	if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
		while (i-- > 0)
			free_page((long) mm->arch.ldt.u.pages[i]);
	}
	mm->arch.ldt.entry_count = 0;
}

int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	return do_modify_ldt_skas(func, ptr, bytecount);
}
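
/*
 * Userspace usage sketch (illustrative only, not part of this file):
 * there is no glibc wrapper for modify_ldt, so callers reach
 * sys_modify_ldt() above via the raw syscall interface. The base
 * address below is a placeholder.
 *
 *	#include <asm/ldt.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct user_desc d = {
 *		.entry_number   = 0,
 *		.base_addr      = 0x1000,	// placeholder base
 *		.limit          = 0xfffff,
 *		.seg_32bit      = 1,
 *		.limit_in_pages = 1,
 *	};
 *	if (syscall(SYS_modify_ldt, 0x11, &d, sizeof(d)) != 0)
 *		perror("modify_ldt");
 */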