/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SEGMENT_H
#define _ASM_X86_SEGMENT_H

#include <linux/const.h>
#include <asm/alternative.h>

/*
 * Constructor for a conventional segment GDT (or LDT) entry.
 * This is a macro so it can be used in initializers.
 */
#define GDT_ENTRY(flags, base, limit)			\
	((((base)  & _AC(0xff000000,ULL)) << (56-24)) |	\
	 (((flags) & _AC(0x0000f0ff,ULL)) << 40) |	\
	 (((limit) & _AC(0x000f0000,ULL)) << (48-16)) |	\
	 (((base)  & _AC(0x00ffffff,ULL)) << 16) |	\
	 (((limit) & _AC(0x0000ffff,ULL))))
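
/*
 * Illustrative example (not a descriptor defined in this file): a flat
 * 4 GiB ring-0 code segment is GDT_ENTRY(0xc09a, 0, 0xfffff), which
 * evaluates to 0x00cf9a000000ffff: access byte 0x9a (present, DPL 0,
 * code, read/execute), flags nibble 0xc (G=1: 4 KiB granularity,
 * D=1: 32-bit), base 0 and a 0xfffff-page limit.
 */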

/* Simple and small GDT entries for booting only: */

#define GDT_ENTRY_BOOT_CS	2
#define GDT_ENTRY_BOOT_DS	3
#define GDT_ENTRY_BOOT_TSS	4
#define __BOOT_CS		(GDT_ENTRY_BOOT_CS*8)
#define __BOOT_DS		(GDT_ENTRY_BOOT_DS*8)
#define __BOOT_TSS		(GDT_ENTRY_BOOT_TSS*8)

/*
 * Bottom two bits of selector give the ring
 * privilege level
 */
#define SEGMENT_RPL_MASK	0x3

/*
 * When running on Xen PV, the actual privilege level of the kernel is 1,
 * not 0. Testing the Requested Privilege Level in a segment selector to
 * determine whether the context is user mode or kernel mode with
 * SEGMENT_RPL_MASK is wrong because the PV kernel's privilege level
 * matches the 0x3 mask.
 *
 * Testing with USER_SEGMENT_RPL_MASK is valid for both native and Xen PV
 * kernels because privilege level 2 is never used.
 */
#define USER_SEGMENT_RPL_MASK	0x2
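
/*
 * A minimal sketch of such a test (illustrative; not necessarily the
 * exact helper the kernel uses): kernel CS has RPL 0 natively and RPL 1
 * on Xen PV, while user CS always has RPL 3, so
 *
 *	(regs->cs & USER_SEGMENT_RPL_MASK) != 0
 *
 * identifies user-mode frames correctly in both environments.
 */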

/* User mode is privilege level 3: */
#define USER_RPL		0x3

/* Bit 2 is Table Indicator (TI): selects between LDT or GDT */
#define SEGMENT_TI_MASK		0x4
/* LDT segment has TI set ... */
#define SEGMENT_LDT		0x4
/* ... GDT has it cleared */
#define SEGMENT_GDT		0x0
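
/*
 * Example decoding (illustrative): the 64-bit selector 0x2b has
 * index 5 (0x2b >> 3), TI = 0 (GDT) and RPL = 3, i.e. __USER_DS below.
 */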

#define GDT_ENTRY_INVALID_SEG	0

#ifdef CONFIG_X86_32
/*
 * The layout of the per-CPU GDT under Linux:
 *
 *   0 - null								<=== cacheline #1
 *   1 - reserved
 *   2 - reserved
 *   3 - reserved
 *
 *   4 - unused								<=== cacheline #2
 *   5 - unused
 *
 *  ------- start of TLS (Thread-Local Storage) segments:
 *
 *   6 - TLS segment #1			[ glibc's TLS segment ]
 *   7 - TLS segment #2			[ Wine's %fs Win32 segment ]
 *   8 - TLS segment #3							<=== cacheline #3
 *   9 - reserved
 *  10 - reserved
 *  11 - reserved
 *
 *  ------- start of kernel segments:
 *
 *  12 - kernel code segment						<=== cacheline #4
 *  13 - kernel data segment
 *  14 - default user CS
 *  15 - default user DS
 *  16 - TSS								<=== cacheline #5
 *  17 - LDT
 *  18 - PNPBIOS support (16->32 gate)
 *  19 - PNPBIOS support
 *  20 - PNPBIOS support						<=== cacheline #6
 *  21 - PNPBIOS support
 *  22 - PNPBIOS support
 *  23 - APM BIOS support
 *  24 - APM BIOS support						<=== cacheline #7
 *  25 - APM BIOS support
 *
 *  26 - ESPFIX small SS
 *  27 - per-cpu			[ offset to per-cpu data area ]
 *  28 - stack_canary-20		[ for stack protector ]		<=== cacheline #8
 *  29 - unused
 *  30 - unused
 *  31 - TSS for double fault handler
 */
#define GDT_ENTRY_TLS_MIN		6
#define GDT_ENTRY_TLS_MAX		(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

#define GDT_ENTRY_KERNEL_CS		12
#define GDT_ENTRY_KERNEL_DS		13
#define GDT_ENTRY_DEFAULT_USER_CS	14
#define GDT_ENTRY_DEFAULT_USER_DS	15
#define GDT_ENTRY_TSS			16
#define GDT_ENTRY_LDT			17
#define GDT_ENTRY_PNPBIOS_CS32		18
#define GDT_ENTRY_PNPBIOS_CS16		19
#define GDT_ENTRY_PNPBIOS_DS		20
#define GDT_ENTRY_PNPBIOS_TS1		21
#define GDT_ENTRY_PNPBIOS_TS2		22
#define GDT_ENTRY_APMBIOS_BASE		23

#define GDT_ENTRY_ESPFIX_SS		26
#define GDT_ENTRY_PERCPU		27
#define GDT_ENTRY_STACK_CANARY		28

#define GDT_ENTRY_DOUBLEFAULT_TSS	31

/*
 * Number of entries in the GDT table:
 */
#define GDT_ENTRIES			32

/*
 * Segment selector values corresponding to the above entries:
 */

#define __KERNEL_CS			(GDT_ENTRY_KERNEL_CS*8)
#define __KERNEL_DS			(GDT_ENTRY_KERNEL_DS*8)
#define __USER_DS			(GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
#define __USER_CS			(GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
#define __ESPFIX_SS			(GDT_ENTRY_ESPFIX_SS*8)

/* segment for calling fn: */
#define PNP_CS32			(GDT_ENTRY_PNPBIOS_CS32*8)
/* code segment for BIOS: */
#define PNP_CS16			(GDT_ENTRY_PNPBIOS_CS16*8)

/* "Is this PNP code selector (PNP_CS32 or PNP_CS16)?" */
#define SEGMENT_IS_PNP_CODE(x)		(((x) & 0xf4) == PNP_CS32)
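
/*
 * Why the 0xf4 mask works (illustrative arithmetic): PNP_CS32 is
 * 18*8 = 0x90 and PNP_CS16 is 19*8 = 0x98; masking with 0xf4 clears
 * bit 3 of the index and the RPL bits, so both selectors (including
 * their RPL-adjusted forms) reduce to 0x90 == PNP_CS32.
 */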

/* data segment for BIOS: */
#define PNP_DS				(GDT_ENTRY_PNPBIOS_DS*8)
/* transfer data segment: */
#define PNP_TS1				(GDT_ENTRY_PNPBIOS_TS1*8)
/* another data segment: */
#define PNP_TS2				(GDT_ENTRY_PNPBIOS_TS2*8)

#ifdef CONFIG_SMP
# define __KERNEL_PERCPU		(GDT_ENTRY_PERCPU*8)
#else
# define __KERNEL_PERCPU		0
#endif

#ifdef CONFIG_STACKPROTECTOR
# define __KERNEL_STACK_CANARY		(GDT_ENTRY_STACK_CANARY*8)
#else
# define __KERNEL_STACK_CANARY		0
#endif

#else /* 64-bit: */

#include <asm/cache.h>

#define GDT_ENTRY_KERNEL32_CS		1
#define GDT_ENTRY_KERNEL_CS		2
#define GDT_ENTRY_KERNEL_DS		3

/*
 * We cannot use the same code segment descriptor for user and kernel mode,
 * not even in long flat mode, because of different DPL.
 *
 * GDT layout to get 64-bit SYSCALL/SYSRET support right. SYSRET hardcodes
 * selectors:
 *
 *   if returning to 32-bit userspace: cs = STAR.SYSRET_CS,
 *   if returning to 64-bit userspace: cs = STAR.SYSRET_CS+16,
 *
 * ss = STAR.SYSRET_CS+8 (in either case)
 *
 * thus USER_DS should be between 32-bit and 64-bit code selectors:
 */
#define GDT_ENTRY_DEFAULT_USER32_CS	4
#define GDT_ENTRY_DEFAULT_USER_DS	5
#define GDT_ENTRY_DEFAULT_USER_CS	6
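
/*
 * Worked example of the constraint above (illustrative values): with
 * STAR.SYSRET_CS = __USER32_CS = 0x23, a 32-bit SYSRET uses cs = 0x23,
 * ss = 0x23+8 = 0x2b = __USER_DS, and a 64-bit SYSRET uses
 * cs = 0x23+16 = 0x33 = __USER_CS, matching the entry order above.
 */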

/* Needs two entries */
#define GDT_ENTRY_TSS			8
/* Needs two entries */
#define GDT_ENTRY_LDT			10

#define GDT_ENTRY_TLS_MIN		12
#define GDT_ENTRY_TLS_MAX		14

#define GDT_ENTRY_CPUNODE		15

/*
 * Number of entries in the GDT table:
 */
#define GDT_ENTRIES			16

/*
 * Segment selector values corresponding to the above entries:
 *
 * Note, selectors also need to have a correct RPL,
 * expressed with the +3 value for user-space selectors:
 */
#define __KERNEL32_CS			(GDT_ENTRY_KERNEL32_CS*8)
#define __KERNEL_CS			(GDT_ENTRY_KERNEL_CS*8)
#define __KERNEL_DS			(GDT_ENTRY_KERNEL_DS*8)
#define __USER32_CS			(GDT_ENTRY_DEFAULT_USER32_CS*8 + 3)
#define __USER_DS			(GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
#define __USER32_DS			__USER_DS
#define __USER_CS			(GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
#define __CPUNODE_SEG			(GDT_ENTRY_CPUNODE*8 + 3)

#endif

#define IDT_ENTRIES			256
#define NUM_EXCEPTION_VECTORS		32

/* Bitmask of exception vectors which push an error code on the stack: */
#define EXCEPTION_ERRCODE_MASK		0x20027d00

#define GDT_SIZE			(GDT_ENTRIES*8)
#define GDT_ENTRY_TLS_ENTRIES		3
#define TLS_SIZE			(GDT_ENTRY_TLS_ENTRIES * 8)

#ifdef CONFIG_X86_64

/* Bit size and mask of CPU number stored in the per CPU data (and TSC_AUX) */
#define VDSO_CPUNODE_BITS		12
#define VDSO_CPUNODE_MASK		0xfff

#ifndef __ASSEMBLY__

/* Helper functions to store/load CPU and node numbers */

static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node)
{
	return (node << VDSO_CPUNODE_BITS) | cpu;
}
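
/*
 * E.g. (illustrative): vdso_encode_cpunode(2, 1) yields
 * (1 << 12) | 2 == 0x1002.
 */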

static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
{
	unsigned int p;

	/*
	 * Load CPU and node number from the GDT.  LSL is faster than RDTSCP
	 * and works on all CPUs.  This is volatile so that it orders
	 * correctly with respect to barrier() and to keep GCC from cleverly
	 * hoisting it out of the calling function.
	 *
	 * If RDPID is available, use it.
	 */
	alternative_io ("lsl %[seg],%[p]",
			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
			X86_FEATURE_RDPID,
			[p] "=a" (p), [seg] "r" (__CPUNODE_SEG));

	if (cpu)
		*cpu = (p & VDSO_CPUNODE_MASK);
	if (node)
		*node = (p >> VDSO_CPUNODE_BITS);
}
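
/*
 * Illustrative use (a sketch, not an API contract):
 *
 *	unsigned int cpu, node;
 *	vdso_read_cpunode(&cpu, &node);
 *
 * Either pointer may be NULL when the caller needs only one value.
 */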

#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_X86_64 */

#ifdef __KERNEL__

/*
 * early_idt_handler_array is an array of entry points referenced in the
 * early IDT.  For simplicity, it's a real array with one entry point
 * every nine bytes.  That leaves room for an optional 'push $0' if the
 * vector has no error code (two bytes), a 'push $vector_number' (two
 * bytes), and a jump to the common entry code (up to five bytes).
 */
#define EARLY_IDT_HANDLER_SIZE 9
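
/*
 * An illustrative nine-byte stub for a vector without an error code
 * (a sketch of the layout described above, not the exact assembly):
 *
 *	push $0			2 bytes: fake error code
 *	push $vector_number	2 bytes
 *	jmp  common_handler	up to 5 bytes
 */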

/*
 * xen_early_idt_handler_array is for Xen pv guests: for each entry in
 * early_idt_handler_array it contains a prequel in the form of
 * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
 * max 8 bytes.
 */
#define XEN_EARLY_IDT_HANDLER_SIZE 8

#ifndef __ASSEMBLY__

extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
extern void early_ignore_irq(void);

#ifdef CONFIG_XEN_PV
extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
#endif

/*
 * Load a segment. Fall back on loading the zero segment if something goes
 * wrong.  This variant assumes that loading zero fully clears the segment.
 * This is always the case on Intel CPUs and, even on 64-bit AMD CPUs, any
 * failure to fully clear the cached descriptor is only observable for
 * FS and GS.
 */
#define __loadsegment_simple(seg, value)				\
do {									\
	unsigned short __val = (value);					\
									\
	asm volatile("						\n"	\
		     "1:	movl %k0,%%" #seg "		\n"	\
									\
		     ".section .fixup,\"ax\"			\n"	\
		     "2:	xorl %k0,%k0			\n"	\
		     "		jmp 1b				\n"	\
		     ".previous					\n"	\
									\
		     _ASM_EXTABLE(1b, 2b)				\
									\
		     : "+r" (__val) : : "memory");			\
} while (0)

#define __loadsegment_ss(value) __loadsegment_simple(ss, (value))
#define __loadsegment_ds(value) __loadsegment_simple(ds, (value))
#define __loadsegment_es(value) __loadsegment_simple(es, (value))

#ifdef CONFIG_X86_32

/*
 * On 32-bit systems, the hidden parts of FS and GS are unobservable if
 * the selector is NULL, so there's no funny business here.
 */
#define __loadsegment_fs(value) __loadsegment_simple(fs, (value))
#define __loadsegment_gs(value) __loadsegment_simple(gs, (value))

#else

static inline void __loadsegment_fs(unsigned short value)
{
	asm volatile("						\n"
		     "1:	movw %0, %%fs			\n"
		     "2:					\n"

		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_clear_fs)

		     : : "rm" (value) : "memory");
}

/* __loadsegment_gs is intentionally undefined.  Use load_gs_index instead. */

#endif

#define loadsegment(seg, value) __loadsegment_ ## seg (value)

/*
 * Save a segment register away:
 */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
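
/*
 * Illustrative pairing of the two helpers (a sketch):
 *
 *	unsigned short sel;
 *
 *	savesegment(es, sel);		stash the current %es selector
 *	loadsegment(es, __USER_DS);	switch %es temporarily
 *	loadsegment(es, sel);		restore it
 */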

/*
 * x86-32 user GS accessors:
 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_32_LAZY_GS
#  define get_user_gs(regs)		(u16)({ unsigned long v; savesegment(gs, v); v; })
#  define set_user_gs(regs, v)		loadsegment(gs, (unsigned long)(v))
#  define task_user_gs(tsk)		((tsk)->thread.gs)
#  define lazy_save_gs(v)		savesegment(gs, (v))
#  define lazy_load_gs(v)		loadsegment(gs, (v))
# else	/* X86_32_LAZY_GS */
#  define get_user_gs(regs)		(u16)((regs)->gs)
#  define set_user_gs(regs, v)		do { (regs)->gs = (v); } while (0)
#  define task_user_gs(tsk)		(task_pt_regs(tsk)->gs)
#  define lazy_save_gs(v)		do { } while (0)
#  define lazy_load_gs(v)		do { } while (0)
# endif	/* X86_32_LAZY_GS */
#endif	/* X86_32 */

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* _ASM_X86_SEGMENT_H */