xref: /OK3568_Linux_fs/kernel/arch/x86/um/ldt.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/mm.h>
7*4882a593Smuzhiyun #include <linux/sched.h>
8*4882a593Smuzhiyun #include <linux/slab.h>
9*4882a593Smuzhiyun #include <linux/syscalls.h>
10*4882a593Smuzhiyun #include <linux/uaccess.h>
11*4882a593Smuzhiyun #include <asm/unistd.h>
12*4882a593Smuzhiyun #include <os.h>
13*4882a593Smuzhiyun #include <skas.h>
14*4882a593Smuzhiyun #include <sysdep/tls.h>
15*4882a593Smuzhiyun 
modify_ldt(int func,void * ptr,unsigned long bytecount)16*4882a593Smuzhiyun static inline int modify_ldt (int func, void *ptr, unsigned long bytecount)
17*4882a593Smuzhiyun {
18*4882a593Smuzhiyun 	return syscall(__NR_modify_ldt, func, ptr, bytecount);
19*4882a593Smuzhiyun }
20*4882a593Smuzhiyun 
write_ldt_entry(struct mm_id * mm_idp,int func,struct user_desc * desc,void ** addr,int done)21*4882a593Smuzhiyun static long write_ldt_entry(struct mm_id *mm_idp, int func,
22*4882a593Smuzhiyun 		     struct user_desc *desc, void **addr, int done)
23*4882a593Smuzhiyun {
24*4882a593Smuzhiyun 	long res;
25*4882a593Smuzhiyun 	void *stub_addr;
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(*desc) % sizeof(long));
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun 	res = syscall_stub_data(mm_idp, (unsigned long *)desc,
30*4882a593Smuzhiyun 				sizeof(*desc) / sizeof(long),
31*4882a593Smuzhiyun 				addr, &stub_addr);
32*4882a593Smuzhiyun 	if (!res) {
33*4882a593Smuzhiyun 		unsigned long args[] = { func,
34*4882a593Smuzhiyun 					 (unsigned long)stub_addr,
35*4882a593Smuzhiyun 					 sizeof(*desc),
36*4882a593Smuzhiyun 					 0, 0, 0 };
37*4882a593Smuzhiyun 		res = run_syscall_stub(mm_idp, __NR_modify_ldt, args,
38*4882a593Smuzhiyun 				       0, addr, done);
39*4882a593Smuzhiyun 	}
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun 	return res;
42*4882a593Smuzhiyun }
43*4882a593Smuzhiyun 
/*
 * In skas mode, we hold our own ldt data in UML.
 * Thus, the code implementing sys_modify_ldt_skas
 * is very similar to (and mostly stolen from) sys_modify_ldt
 * from arch/i386/kernel/ldt.c
 * The routines copied and modified in part are:
 * - read_ldt
 * - read_default_ldt
 * - write_ldt
 * - sys_modify_ldt_skas
 */
55*4882a593Smuzhiyun 
/*
 * Copy the task's shadow LDT out to user space (modify_ldt func 0).
 *
 * Up to LDT_ENTRY_SIZE*LDT_ENTRIES bytes are returned; any tail of the
 * caller's buffer beyond the stored entries is zero-filled.
 *
 * Returns the (clamped) number of bytes made available, or -EFAULT.
 */
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	unsigned long chunk;
	int i, ret = 0;

	if (!ldt->entry_count)
		return 0;

	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
	/* On success the syscall reports how many bytes were supplied. */
	ret = bytecount;

	mutex_lock(&ldt->lock);
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
		/* Small LDT: entries live inline in the context. */
		chunk = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
		if (chunk > bytecount)
			chunk = bytecount;
		if (copy_to_user(ptr, ldt->u.entries, chunk))
			ret = -EFAULT;
		bytecount -= chunk;
		ptr += chunk;
	} else {
		/* Large LDT: entries are spread over whole pages. */
		for (i = 0;
		     i < ldt->entry_count/LDT_ENTRIES_PER_PAGE && bytecount;
		     i++) {
			chunk = PAGE_SIZE;
			if (chunk > bytecount)
				chunk = bytecount;
			if (copy_to_user(ptr, ldt->u.pages[i], chunk)) {
				ret = -EFAULT;
				break;
			}
			bytecount -= chunk;
			ptr += chunk;
		}
	}
	mutex_unlock(&ldt->lock);

	/* Zero the remainder of the user buffer, unless we already failed. */
	if (bytecount && ret != -EFAULT) {
		if (clear_user(ptr, bytecount))
			ret = -EFAULT;
	}

	return ret;
}
103*4882a593Smuzhiyun 
/*
 * Emulate reading the default LDT (modify_ldt func 2).
 *
 * UML doesn't support lcall7 and lcall27, so there is no real default
 * ldt; we present an all-zero table of the common host default ldt size
 * (5 entries).
 *
 * Returns the (clamped) number of bytes zeroed, or -EFAULT.
 */
static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
	int ret;

	if (bytecount > 5*LDT_ENTRY_SIZE)
		bytecount = 5*LDT_ENTRY_SIZE;

	ret = bytecount;
	if (clear_user(ptr, bytecount))
		ret = -EFAULT;

	return ret;
}
122*4882a593Smuzhiyun 
/*
 * Install or clear one LDT entry (modify_ldt func 1 or 0x11).
 *
 * The entry is first written into the host process via the syscall stub,
 * then mirrored into the shadow LDT kept in the mm context. If the shadow
 * table must grow past LDT_DIRECT_ENTRIES, whole pages are allocated and
 * the inline entries are migrated into page 0.
 *
 * Returns 0 on success or a negative error code.
 */
static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
{
	uml_ldt_t *ldt = &current->mm->context.arch.ldt;
	struct mm_id * mm_idp = &current->mm->context.id;
	struct user_desc info;
	struct ldt_entry entry0, *entry_p;
	void *addr = NULL;
	int i, ret;

	if (bytecount != sizeof(info))
		return -EINVAL;
	if (copy_from_user(&info, ptr, sizeof(info)))
		return -EFAULT;

	if (info.entry_number >= LDT_ENTRIES)
		return -EINVAL;
	if (info.contents == 3) {
		/* contents == 3 is only valid for the new interface ... */
		if (func == 1)
			return -EINVAL;
		/* ... and only for not-present entries. */
		if (info.seg_not_present == 0)
			return -EINVAL;
	}

	mutex_lock(&ldt->lock);

	/* Update the host first; bail out without touching the shadow. */
	ret = write_ldt_entry(mm_idp, func, &info, &addr, 1);
	if (ret)
		goto out_unlock;

	if (info.entry_number >= ldt->entry_count &&
	    info.entry_number >= LDT_DIRECT_ENTRIES) {
		/* Grow the shadow LDT one page at a time. */
		for (i = ldt->entry_count/LDT_ENTRIES_PER_PAGE;
		     i*LDT_ENTRIES_PER_PAGE <= info.entry_number;
		     i++) {
			/*
			 * u.entries and u.pages share a union; save entry 0
			 * before pages[0] overwrites it.
			 */
			if (i == 0)
				memcpy(&entry0, ldt->u.entries,
				       sizeof(entry0));
			ldt->u.pages[i] = (struct ldt_entry *)
				__get_free_page(GFP_KERNEL|__GFP_ZERO);
			if (!ldt->u.pages[i]) {
				ret = -ENOMEM;
				/* Undo the change in host */
				memset(&info, 0, sizeof(info));
				write_ldt_entry(mm_idp, 1, &info, &addr, 1);
				goto out_unlock;
			}
			if (i == 0) {
				/* Migrate the inline entries into page 0. */
				memcpy(ldt->u.pages[0], &entry0,
				       sizeof(entry0));
				memcpy(ldt->u.pages[0]+1, ldt->u.entries+1,
				       sizeof(entry0)*(LDT_DIRECT_ENTRIES-1));
			}
			ldt->entry_count = (i + 1) * LDT_ENTRIES_PER_PAGE;
		}
	}
	if (ldt->entry_count <= info.entry_number)
		ldt->entry_count = info.entry_number + 1;

	/* Locate the shadow slot for this entry. */
	if (ldt->entry_count <= LDT_DIRECT_ENTRIES)
		entry_p = ldt->u.entries + info.entry_number;
	else
		entry_p = ldt->u.pages[info.entry_number/LDT_ENTRIES_PER_PAGE] +
			info.entry_number%LDT_ENTRIES_PER_PAGE;

	if (info.base_addr == 0 && info.limit == 0 &&
	   (func == 1 || LDT_empty(&info))) {
		/* An "empty" request clears the slot outright. */
		entry_p->a = 0;
		entry_p->b = 0;
	} else {
		/* The old interface cannot express "useable". */
		if (func == 1)
			info.useable = 0;
		entry_p->a = LDT_entry_a(&info);
		entry_p->b = LDT_entry_b(&info);
	}
	ret = 0;

out_unlock:
	mutex_unlock(&ldt->lock);
	return ret;
}
208*4882a593Smuzhiyun 
/*
 * Dispatch a modify_ldt(2) request to the skas-mode implementation.
 * Unknown sub-functions yield -ENOSYS, matching the native syscall.
 */
static long do_modify_ldt_skas(int func, void __user *ptr,
			       unsigned long bytecount)
{
	int res = -ENOSYS;

	switch (func) {
	case 0:
		res = read_ldt(ptr, bytecount);
		break;
	case 1:
	case 0x11:
		res = write_ldt(ptr, bytecount, func);
		break;
	case 2:
		res = read_default_ldt(ptr, bytecount);
		break;
	}

	return res;
}
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun static DEFINE_SPINLOCK(host_ldt_lock);
230*4882a593Smuzhiyun static short dummy_list[9] = {0, -1};
231*4882a593Smuzhiyun static short * host_ldt_entries = NULL;
232*4882a593Smuzhiyun 
ldt_get_host_info(void)233*4882a593Smuzhiyun static void ldt_get_host_info(void)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun 	long ret;
236*4882a593Smuzhiyun 	struct ldt_entry * ldt;
237*4882a593Smuzhiyun 	short *tmp;
238*4882a593Smuzhiyun 	int i, size, k, order;
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 	spin_lock(&host_ldt_lock);
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	if (host_ldt_entries != NULL) {
243*4882a593Smuzhiyun 		spin_unlock(&host_ldt_lock);
244*4882a593Smuzhiyun 		return;
245*4882a593Smuzhiyun 	}
246*4882a593Smuzhiyun 	host_ldt_entries = dummy_list+1;
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	spin_unlock(&host_ldt_lock);
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 	for (i = LDT_PAGES_MAX-1, order=0; i; i>>=1, order++)
251*4882a593Smuzhiyun 		;
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	ldt = (struct ldt_entry *)
254*4882a593Smuzhiyun 	      __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
255*4882a593Smuzhiyun 	if (ldt == NULL) {
256*4882a593Smuzhiyun 		printk(KERN_ERR "ldt_get_host_info: couldn't allocate buffer "
257*4882a593Smuzhiyun 		       "for host ldt\n");
258*4882a593Smuzhiyun 		return;
259*4882a593Smuzhiyun 	}
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	ret = modify_ldt(0, ldt, (1<<order)*PAGE_SIZE);
262*4882a593Smuzhiyun 	if (ret < 0) {
263*4882a593Smuzhiyun 		printk(KERN_ERR "ldt_get_host_info: couldn't read host ldt\n");
264*4882a593Smuzhiyun 		goto out_free;
265*4882a593Smuzhiyun 	}
266*4882a593Smuzhiyun 	if (ret == 0) {
267*4882a593Smuzhiyun 		/* default_ldt is active, simply write an empty entry 0 */
268*4882a593Smuzhiyun 		host_ldt_entries = dummy_list;
269*4882a593Smuzhiyun 		goto out_free;
270*4882a593Smuzhiyun 	}
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	for (i=0, size=0; i<ret/LDT_ENTRY_SIZE; i++) {
273*4882a593Smuzhiyun 		if (ldt[i].a != 0 || ldt[i].b != 0)
274*4882a593Smuzhiyun 			size++;
275*4882a593Smuzhiyun 	}
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	if (size < ARRAY_SIZE(dummy_list))
278*4882a593Smuzhiyun 		host_ldt_entries = dummy_list;
279*4882a593Smuzhiyun 	else {
280*4882a593Smuzhiyun 		size = (size + 1) * sizeof(dummy_list[0]);
281*4882a593Smuzhiyun 		tmp = kmalloc(size, GFP_KERNEL);
282*4882a593Smuzhiyun 		if (tmp == NULL) {
283*4882a593Smuzhiyun 			printk(KERN_ERR "ldt_get_host_info: couldn't allocate "
284*4882a593Smuzhiyun 			       "host ldt list\n");
285*4882a593Smuzhiyun 			goto out_free;
286*4882a593Smuzhiyun 		}
287*4882a593Smuzhiyun 		host_ldt_entries = tmp;
288*4882a593Smuzhiyun 	}
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun 	for (i=0, k=0; i<ret/LDT_ENTRY_SIZE; i++) {
291*4882a593Smuzhiyun 		if (ldt[i].a != 0 || ldt[i].b != 0)
292*4882a593Smuzhiyun 			host_ldt_entries[k++] = i;
293*4882a593Smuzhiyun 	}
294*4882a593Smuzhiyun 	host_ldt_entries[k] = -1;
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun out_free:
297*4882a593Smuzhiyun 	free_pages((unsigned long)ldt, order);
298*4882a593Smuzhiyun }
299*4882a593Smuzhiyun 
init_new_ldt(struct mm_context * new_mm,struct mm_context * from_mm)300*4882a593Smuzhiyun long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
301*4882a593Smuzhiyun {
302*4882a593Smuzhiyun 	struct user_desc desc;
303*4882a593Smuzhiyun 	short * num_p;
304*4882a593Smuzhiyun 	int i;
305*4882a593Smuzhiyun 	long page, err=0;
306*4882a593Smuzhiyun 	void *addr = NULL;
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	mutex_init(&new_mm->arch.ldt.lock);
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	if (!from_mm) {
312*4882a593Smuzhiyun 		memset(&desc, 0, sizeof(desc));
313*4882a593Smuzhiyun 		/*
314*4882a593Smuzhiyun 		 * Now we try to retrieve info about the ldt, we
315*4882a593Smuzhiyun 		 * inherited from the host. All ldt-entries found
316*4882a593Smuzhiyun 		 * will be reset in the following loop
317*4882a593Smuzhiyun 		 */
318*4882a593Smuzhiyun 		ldt_get_host_info();
319*4882a593Smuzhiyun 		for (num_p=host_ldt_entries; *num_p != -1; num_p++) {
320*4882a593Smuzhiyun 			desc.entry_number = *num_p;
321*4882a593Smuzhiyun 			err = write_ldt_entry(&new_mm->id, 1, &desc,
322*4882a593Smuzhiyun 					      &addr, *(num_p + 1) == -1);
323*4882a593Smuzhiyun 			if (err)
324*4882a593Smuzhiyun 				break;
325*4882a593Smuzhiyun 		}
326*4882a593Smuzhiyun 		new_mm->arch.ldt.entry_count = 0;
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 		goto out;
329*4882a593Smuzhiyun 	}
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun 	/*
332*4882a593Smuzhiyun 	 * Our local LDT is used to supply the data for
333*4882a593Smuzhiyun 	 * modify_ldt(READLDT), if PTRACE_LDT isn't available,
334*4882a593Smuzhiyun 	 * i.e., we have to use the stub for modify_ldt, which
335*4882a593Smuzhiyun 	 * can't handle the big read buffer of up to 64kB.
336*4882a593Smuzhiyun 	 */
337*4882a593Smuzhiyun 	mutex_lock(&from_mm->arch.ldt.lock);
338*4882a593Smuzhiyun 	if (from_mm->arch.ldt.entry_count <= LDT_DIRECT_ENTRIES)
339*4882a593Smuzhiyun 		memcpy(new_mm->arch.ldt.u.entries, from_mm->arch.ldt.u.entries,
340*4882a593Smuzhiyun 		       sizeof(new_mm->arch.ldt.u.entries));
341*4882a593Smuzhiyun 	else {
342*4882a593Smuzhiyun 		i = from_mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
343*4882a593Smuzhiyun 		while (i-->0) {
344*4882a593Smuzhiyun 			page = __get_free_page(GFP_KERNEL|__GFP_ZERO);
345*4882a593Smuzhiyun 			if (!page) {
346*4882a593Smuzhiyun 				err = -ENOMEM;
347*4882a593Smuzhiyun 				break;
348*4882a593Smuzhiyun 			}
349*4882a593Smuzhiyun 			new_mm->arch.ldt.u.pages[i] =
350*4882a593Smuzhiyun 				(struct ldt_entry *) page;
351*4882a593Smuzhiyun 			memcpy(new_mm->arch.ldt.u.pages[i],
352*4882a593Smuzhiyun 			       from_mm->arch.ldt.u.pages[i], PAGE_SIZE);
353*4882a593Smuzhiyun 		}
354*4882a593Smuzhiyun 	}
355*4882a593Smuzhiyun 	new_mm->arch.ldt.entry_count = from_mm->arch.ldt.entry_count;
356*4882a593Smuzhiyun 	mutex_unlock(&from_mm->arch.ldt.lock);
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun     out:
359*4882a593Smuzhiyun 	return err;
360*4882a593Smuzhiyun }
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 
free_ldt(struct mm_context * mm)363*4882a593Smuzhiyun void free_ldt(struct mm_context *mm)
364*4882a593Smuzhiyun {
365*4882a593Smuzhiyun 	int i;
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	if (mm->arch.ldt.entry_count > LDT_DIRECT_ENTRIES) {
368*4882a593Smuzhiyun 		i = mm->arch.ldt.entry_count / LDT_ENTRIES_PER_PAGE;
369*4882a593Smuzhiyun 		while (i-- > 0)
370*4882a593Smuzhiyun 			free_page((long) mm->arch.ldt.u.pages[i]);
371*4882a593Smuzhiyun 	}
372*4882a593Smuzhiyun 	mm->arch.ldt.entry_count = 0;
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun 
SYSCALL_DEFINE3(modify_ldt, int , func , void __user * , ptr ,
		unsigned long , bytecount)
{
	/*
	 * See non-um modify_ldt() for why we do this cast: the syscall
	 * historically returns an int, so truncate before sign-extending
	 * back to long.
	 */
	return (unsigned int)do_modify_ldt_skas(func, ptr, bytecount);
}
381