x86/entry: Make entry_64_compat.S objtool clean
author    Peter Zijlstra <peterz@infradead.org>
          Tue, 12 May 2020 16:17:12 +0000 (18:17 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
          Thu, 11 Jun 2020 13:14:40 +0000 (15:14 +0200)
Currently entry_64_compat is exempt from objtool, but with vmlinux
mode there is no hiding it.

Make the following changes to make it pass:

 - change entry_SYSENTER_compat to STT_NOTYPE; it's not a function
   and doesn't have a function-type stack setup.

 - mark all STT_NOTYPE symbols with UNWIND_HINT_EMPTY, so that objtool
   does validate them instead of treating them as unreachable.

 - don't abuse RSP as a temp register; this confuses objtool
   mightily, as it (rightfully) thinks we're doing unspeakable
   things to the stack. A sketch of the replacement pattern
   follows below.

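To make the RSP point concrete, here is a minimal sketch of the
replacement pattern (entry_example is a made-up label; the real
change is in the entry_SYSENTER_compat hunk below): a general
purpose register is preserved and used as the scratch register for
the CR3 switch, so RSP is only ever used as a stack pointer:

SYM_CODE_START(entry_example)		/* hypothetical STT_NOTYPE entry */
	UNWIND_HINT_EMPTY		/* no unwind state to describe yet */

	pushq	%rax			/* free up a scratch register */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax	/* CR3 switch without touching RSP */
	popq	%rax			/* restore user %rax */

	/* ... switch to the kernel stack and build pt_regs ... */
SYM_CODE_END(entry_example)

With this, objtool's stack tracking never sees RSP loaded with a
non-stack value.
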
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lkml.kernel.org/r/20200505134341.272248024@linutronix.de
arch/x86/entry/Makefile
arch/x86/entry/entry_64_compat.S

diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index cdf45ff..b7a5790 100644
@@ -11,8 +11,6 @@ CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-
 CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
 CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE) -fstack-protector -fstack-protector-strong
 
-OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
-
 CFLAGS_syscall_64.o            += $(call cc-option,-Wno-override-init,)
 CFLAGS_syscall_32.o            += $(call cc-option,-Wno-override-init,)
 obj-y                          := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 7c29ed8..0f974ae 100644
  * ebp  user stack
  * 0(%ebp) arg6
  */
-SYM_FUNC_START(entry_SYSENTER_compat)
+SYM_CODE_START(entry_SYSENTER_compat)
+       UNWIND_HINT_EMPTY
        /* Interrupts are off on entry. */
        SWAPGS
 
-       /* We are about to clobber %rsp anyway, clobbering here is OK */
-       SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
+       pushq   %rax
+       SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
+       popq    %rax
 
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
@@ -104,6 +106,9 @@ SYM_FUNC_START(entry_SYSENTER_compat)
        xorl    %r14d, %r14d            /* nospec   r14 */
        pushq   $0                      /* pt_regs->r15 = 0 */
        xorl    %r15d, %r15d            /* nospec   r15 */
+
+       UNWIND_HINT_REGS
+
        cld
 
        /*
@@ -141,7 +146,7 @@ SYM_FUNC_START(entry_SYSENTER_compat)
        popfq
        jmp     .Lsysenter_flags_fixed
 SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
-SYM_FUNC_END(entry_SYSENTER_compat)
+SYM_CODE_END(entry_SYSENTER_compat)
 
 /*
  * 32-bit SYSCALL entry.
@@ -191,6 +196,7 @@ SYM_FUNC_END(entry_SYSENTER_compat)
  * 0(%esp) arg6
  */
 SYM_CODE_START(entry_SYSCALL_compat)
+       UNWIND_HINT_EMPTY
        /* Interrupts are off on entry. */
        swapgs
 
@@ -241,6 +247,8 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL)
        pushq   $0                      /* pt_regs->r15 = 0 */
        xorl    %r15d, %r15d            /* nospec   r15 */
 
+       UNWIND_HINT_REGS
+
        movq    %rsp, %rdi
        call    do_fast_syscall_32
        /* XEN PV guests always use IRET path */
@@ -328,6 +336,7 @@ SYM_CODE_END(entry_SYSCALL_compat)
  * ebp  arg6
  */
 SYM_CODE_START(entry_INT80_compat)
+       UNWIND_HINT_EMPTY
        /*
         * Interrupts are off on entry.
         */
@@ -349,8 +358,11 @@ SYM_CODE_START(entry_INT80_compat)
 
        /* Need to switch before accessing the thread stack. */
        SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
+
        /* In the Xen PV case we already run on the thread stack. */
-       ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+       ALTERNATIVE "", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
+
+       movq    %rsp, %rdi
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        pushq   6*8(%rdi)               /* regs->ss */
@@ -389,6 +401,9 @@ SYM_CODE_START(entry_INT80_compat)
        xorl    %r14d, %r14d            /* nospec   r14 */
        pushq   %r15                    /* pt_regs->r15 */
        xorl    %r15d, %r15d            /* nospec   r15 */
+
+       UNWIND_HINT_REGS
+
        cld
 
        movq    %rsp, %rdi