Lines Matching +full:ecx +full:- +full:2000
1 /* SPDX-License-Identifier: GPL-2.0 */
6 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
7 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
9 * entry.S contains the system-call and fault low-level handling routines.
14 * - iret frame: Architecture defined interrupt frame from SS to RIP
18 * - SYM_FUNC_START/END: Define functions in the symbol table.
19 * - idtentry: Define exception entry points.
25 #include <asm/asm-offsets.h>
40 #include <asm/nospec-branch.h>
58 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
60 * This is the only entry point used for 64-bit system calls. The
70 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
79 * r11 saved rflags (note: r11 is a callee-clobbered register in the C ABI)
86 * (note: r12-r15, rbp, rbx are callee-preserved in the C ABI)
90 * When the user can change pt_regs->foo, always force IRET. That is because
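
A minimal userspace sketch of the convention described above, assuming x86-64 Linux and GCC/Clang inline asm (the syscall number 39 is __NR_getpid): SYSCALL hands the return RIP to RCX and RFLAGS to R11, so both must be declared as clobbers.

    #include <stdio.h>

    int main(void)
    {
        long ret;
        long nr = 39;                     /* __NR_getpid on x86-64 */

        /* SYSCALL/SYSRET clobber RCX (return RIP) and R11 (RFLAGS),
         * exactly as described above, so list them as clobbers. */
        asm volatile ("syscall"
                      : "=a" (ret)
                      : "a" (nr)
                      : "rcx", "r11", "memory");

        printf("getpid() = %ld\n", ret);
        return 0;
    }
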
107 pushq $__USER_DS /* pt_regs->ss */
108 pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
109 pushq %r11 /* pt_regs->flags */
110 pushq $__USER_CS /* pt_regs->cs */
111 pushq %rcx /* pt_regs->ip */
113 pushq %rax /* pt_regs->orig_ax */
115 PUSH_AND_CLEAR_REGS rax=$-ENOSYS
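
For orientation, a rough C view of the frame those pushes construct, from lowest to highest stack address; this mirrors the tail of struct pt_regs, but the struct name and comments here are illustrative only (see arch/x86/include/asm/ptrace.h for the real layout).

    struct syscall_entry_frame_sketch {
        /* ... general-purpose registers saved by PUSH_AND_CLEAR_REGS,
         *     with the on-stack RAX slot preset to -ENOSYS ... */
        unsigned long orig_ax;   /* syscall number, pushed from RAX   */
        unsigned long ip;        /* user RIP, saved by SYSCALL in RCX */
        unsigned long cs;        /* $__USER_CS                        */
        unsigned long flags;     /* user RFLAGS, saved in R11         */
        unsigned long sp;        /* user RSP, from TSS sp2            */
        unsigned long ss;        /* $__USER_DS                        */
    };
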
129 * a completely clean 64-bit userspace context. If we're not,
139 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
150 ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
151 "shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
153 shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
154 sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
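
The SHL/SAR pair is a sign-extension trick: shift the address left so the top implemented bit lands in bit 63, then arithmetic-shift back, which forces the upper bits into canonical form. A hedged C equivalent for the 48-bit case (57-bit with LA57 would use 57 instead of 48):

    #include <stdint.h>

    /* Returns the canonical form of 'addr' for a 48-bit virtual address
     * space; a canonical address comes back unchanged. Illustrative only. */
    static inline uint64_t canonicalize_48(uint64_t addr)
    {
        return (uint64_t)(((int64_t)addr << (64 - 48)) >> (64 - 48));
    }
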
176 * the opportunistic SYSRET conditions. For example, single-stepping
210 pushq RSP-RDI(%rdi) /* RSP */
233 * Save callee-saved registers
261 /* restore callee-saved registers */
323 * idtentry_body - Macro to emit code calling the C function
336 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
345 * idtentry - Macro to generate entry stubs for simple IDT entries
360 pushq $-1 /* ORIG_RAX: no syscall to restart */
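
Storing -1 in ORIG_RAX encodes a simple convention: later code (signal delivery, for instance) treats orig_ax as "the syscall number being executed, or -1 if this entry was not a system call". A hedged sketch, with a hypothetical struct standing in for pt_regs:

    struct regs_sketch {
        unsigned long orig_ax;   /* syscall nr, or -1UL for exceptions */
    };

    static inline int entry_was_a_syscall(const struct regs_sketch *regs)
    {
        return (long)regs->orig_ax >= 0;   /* -1 => not a system call */
    }
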
365 * If coming from kernel space, create a 6-word gap to allow the
368 testb $3, CS-ORIG_RAX(%rsp)
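
The TESTB $3 inspects the low two bits (the privilege level) of the CS value the CPU saved; roughly, in C (offsets aside, this is only a sketch):

    /* Non-zero low bits in the saved CS mean the entry came from CPL 3,
     * i.e. user space; zero means it came from the kernel. */
    static inline int came_from_user_mode(unsigned long saved_cs)
    {
        return (saved_cs & 3) != 0;
    }
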
406 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
426 pushq $-1 /* ORIG_RAX: no syscall to restart */
432 testb $3, CS-ORIG_RAX(%rsp)
456 * idtentry_vc - Macro to generate entry stub for #VC
465 * an IST stack by switching to the task stack if coming from user-space (which
467 * entered from kernel-mode.
469 * If entered from kernel-mode, the return stack is validated first, and if it is
471 * will switch to a fall-back stack (VC2) and call a special handler function.
485 testb $3, CS-ORIG_RAX(%rsp)
490 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
499 * stack if it is safe to do so. If not, it switches to the VC fall-back
511 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
519 * identical to the stack in the IRET frame or the VC fall-back stack,
549 movq $-1, ORIG_RAX(%rsp) /* no syscall to restart */
630 addq $8, %rsp /* skip regs->orig_ax */
641 * 64-bit mode SS:RSP on the exception stack is always valid.
644 testb $4, (SS-RIP)(%rsp)
650 * This may fault. Non-paranoid faults on return to userspace are
652 * Double-faults due to espfix64 are handled in exc_double_fault.
670 * --- top of ESPFIX stack ---
675 * RIP <-- RSP points here when we're done
676 * RAX <-- espfix_waddr points here
677 * --- bottom of ESPFIX stack ---
703 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
726 * is read-only and RSP[31:16] are preloaded with the userspace
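
A hedged sketch of the aliasing arithmetic those comments describe: for whatever 16-bit pattern will end up in RSP[31:16], the matching read-only alias of the espfix page is obtained by OR-ing those bits into the per-CPU espfix stack base (names here are illustrative, not the kernel's):

    static inline unsigned long espfix_alias_sketch(unsigned long espfix_stack,
                                                    unsigned long user_rsp)
    {
        /* espfix_stack has bits 31:16 clear, so this just fills them in
         * from the user RSP, selecting one of 65536 read-only aliases. */
        return espfix_stack | (user_rsp & 0xffff0000UL);
    }
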
795 .long 1b - .
802 .long 2b - .
821 * existing activation in its critical region -- if so, we pop the current
856 movl %ds, %ecx
859 movl %es, %ecx
862 movl %fs, %ecx
865 movl %gs, %ecx
880 pushq $-1 /* orig_ax = -1 => not a system call */
892 * N 0 -> SWAPGS on exit
893 * 1 -> no SWAPGS on exit
897 * R14 - old CR3
898 * R15 - old SPEC_CTRL
946 /* EBX = 1 -> kernel GSBASE active, no restore required */
950 * The kernel-enforced convention is that a negative GSBASE indicates
953 movl $MSR_GS_BASE, %ecx
958 /* EBX = 0 -> SWAPGS required on exit */
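
On the non-FSGSBASE path, the code above reads MSR_GS_BASE (RDMSR takes the MSR index in ECX and returns the value in EDX:EAX) and relies on the sign convention from the comment: a negative value means the kernel GSBASE is already active. A hedged, kernel-context-only sketch:

    #include <stdint.h>

    /* RDMSR is a privileged instruction; this is illustrative, not a
     * drop-in replacement for the assembly above. */
    static inline uint64_t rdmsr_sketch(uint32_t msr)
    {
        uint32_t lo, hi;
        asm volatile ("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr));
        return ((uint64_t)hi << 32) | lo;
    }

    static inline int gsbase_is_kernel(uint64_t gsbase)
    {
        return (int64_t)gsbase < 0;   /* negative => kernel GSBASE active */
    }
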
977 * only on return from non-NMI IST interrupts that came
989 * N 0 -> SWAPGS on exit
990 * 1 -> no SWAPGS on exit
994 * R14 - old CR3
995 * R15 - old SPEC_CTRL
1002 * to the per-CPU x86_spec_ctrl_shadow variable.
1024 /* On non-FSGSBASE systems, conditionally do SWAPGS */
1076 movl %ecx, %eax /* zero extend */
1149 * stack of the previous NMI. NMI handlers are not re-entrant
1186 testb $3, CS-RIP+8(%rsp)
1207 pushq 5*8(%rdx) /* pt_regs->ss */
1208 pushq 4*8(%rdx) /* pt_regs->rsp */
1209 pushq 3*8(%rdx) /* pt_regs->flags */
1210 pushq 2*8(%rdx) /* pt_regs->cs */
1211 pushq 1*8(%rdx) /* pt_regs->rip */
1213 pushq $-1 /* pt_regs->orig_ax */
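
Those five pushes copy the hardware exception frame from the previous stack onto the current one, then add the usual orig_ax = -1 marker; %rdx points one slot below that frame, so the saved RIP sits at 1*8(%rdx). An illustrative C view of the source frame (field names are not the kernel's):

    struct hw_iret_frame_sketch {
        unsigned long rip;      /* at 1*8(%rdx) */
        unsigned long cs;       /* at 2*8(%rdx) */
        unsigned long rflags;   /* at 3*8(%rdx) */
        unsigned long rsp;      /* at 4*8(%rdx) */
        unsigned long ss;       /* at 5*8(%rdx) */
    };
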
1222 * due to nesting -- we're on the normal thread stack and we're
1227 movq $-1, %rsi
1239-1263 * (frame-boundary lines from the NMI stack layout diagram)
1265 * The "original" frame is used by hardware. Before re-enabling
1302 cmpl $1, -8(%rsp)
1341 leaq -10*8(%rsp), %rdx
1416 pushq -6*8(%rsp)
1426 pushq $-1 /* ORIG_RAX: no syscall to restart */
1439 movq $-1, %rsi
1462 /* EBX == 0 -> invoke SWAPGS */
1501 * This handles SYSCALL from 32-bit code. There is no way to program
1502 * MSRs to fully disable 32-bit SYSCALL.
1506 mov $-ENOSYS, %eax
1518 leaq -PTREGS_SIZE(%rax), %rsp