path: root/arch/x86/include/asm/stackprotector.h
author     Brian Gerst <brgerst@gmail.com>    2025-01-23 14:07:39 -0500
committer  Ingo Molnar <mingo@kernel.org>     2025-02-18 10:15:09 +0100
commit     80d47defddc000271502057ebd7efa4fd6481542 (patch)
tree       8b352a713b9f87567eb3d3a4ac1bcc55bc9311ed /arch/x86/include/asm/stackprotector.h
parent     x86/module: Deal with GOT based stack cookie load on Clang < 17 (diff)
download   linux-80d47defddc000271502057ebd7efa4fd6481542.tar.gz
           linux-80d47defddc000271502057ebd7efa4fd6481542.zip
x86/stackprotector/64: Convert to normal per-CPU variable
Older versions of GCC fixed the location of the stack protector canary at %gs:40. This constraint forced the percpu section to be linked at absolute address 0 so that the canary could be the first data object in the percpu section. Supporting the zero-based percpu section requires additional code to handle relocations for RIP-relative references to percpu data, extra complexity in kallsyms, and workarounds for linker bugs due to the use of absolute symbols.

GCC 8.1 supports redefining where the canary is located, allowing it to become a normal percpu variable instead of being at a fixed location. This removes the constraint that the percpu section must be zero-based.

Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/20250123190747.745588-8-brgerst@gmail.com
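For context, a minimal sketch (not taken from this patch) of what the compiler-inserted canary check amounts to once the guard is an ordinary per-CPU variable: the compiler is pointed at a named %gs-relative symbol instead of the hard-coded %gs:40 slot (GCC 8.1+ exposes -mstack-protector-guard-reg= and -mstack-protector-guard-symbol= for this; the exact flags and symbol used by the 64-bit kernel build are not shown on this page). example_function() below is hypothetical.

/*
 * Sketch only -- not part of this patch.  Roughly what the compiler
 * emits around each protected function once the canary is a normal
 * per-CPU variable.
 */
#include <linux/percpu-defs.h>

DECLARE_PER_CPU(unsigned long, __stack_chk_guard);

/* Standard stack-protector failure hook (defined in kernel/panic.c). */
extern void __stack_chk_fail(void);

void example_function(void)		/* hypothetical */
{
	/* Prologue: copy the per-CPU canary into this stack frame. */
	unsigned long canary = this_cpu_read(__stack_chk_guard);

	/* ... function body that could overrun a local buffer ... */

	/* Epilogue: verify the on-stack copy before returning. */
	if (canary != this_cpu_read(__stack_chk_guard))
		__stack_chk_fail();
}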
Diffstat (limited to 'arch/x86/include/asm/stackprotector.h')
-rw-r--r--  arch/x86/include/asm/stackprotector.h  36
1 file changed, 5 insertions, 31 deletions
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 00473a650f51..d43fb589fcf6 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -2,26 +2,10 @@
/*
* GCC stack protector support.
*
- * Stack protector works by putting predefined pattern at the start of
+ * Stack protector works by putting a predefined pattern at the start of
* the stack frame and verifying that it hasn't been overwritten when
- * returning from the function. The pattern is called stack canary
- * and unfortunately gcc historically required it to be at a fixed offset
- * from the percpu segment base. On x86_64, the offset is 40 bytes.
- *
- * The same segment is shared by percpu area and stack canary. On
- * x86_64, percpu symbols are zero based and %gs (64-bit) points to the
- * base of percpu area. The first occupant of the percpu area is always
- * fixed_percpu_data which contains stack_canary at the appropriate
- * offset. On x86_32, the stack canary is just a regular percpu
- * variable.
- *
- * Putting percpu data in %fs on 32-bit is a minor optimization compared to
- * using %gs. Since 32-bit userspace normally has %fs == 0, we are likely
- * to load 0 into %fs on exit to usermode, whereas with percpu data in
- * %gs, we are likely to load a non-null %gs on return to user mode.
- *
- * Once we are willing to require GCC 8.1 or better for 64-bit stackprotector
- * support, we can remove some of this complexity.
+ * returning from the function. The pattern is called the stack canary
+ * and is a unique value for each task.
*/
#ifndef _ASM_STACKPROTECTOR_H
@@ -36,6 +20,8 @@
#include <linux/sched.h>
+DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
+
/*
* Initialize the stackprotector canary value.
*
@@ -51,25 +37,13 @@ static __always_inline void boot_init_stack_canary(void)
{
unsigned long canary = get_random_canary();
-#ifdef CONFIG_X86_64
- BUILD_BUG_ON(offsetof(struct fixed_percpu_data, stack_canary) != 40);
-#endif
-
current->stack_canary = canary;
-#ifdef CONFIG_X86_64
- this_cpu_write(fixed_percpu_data.stack_canary, canary);
-#else
this_cpu_write(__stack_chk_guard, canary);
-#endif
}
static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
{
-#ifdef CONFIG_X86_64
- per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
-#else
per_cpu(__stack_chk_guard, cpu) = idle->stack_canary;
-#endif
}
#else /* STACKPROTECTOR */
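For reference, the DECLARE_PER_CPU() added in this header needs a matching definition elsewhere in the tree, which this diff (limited to stackprotector.h) does not show. A minimal sketch of that definition, assuming it sits in an arch C file such as arch/x86/kernel/cpu/common.c and is exported for modules:

#include <linux/export.h>
#include <linux/percpu-defs.h>

#ifdef CONFIG_STACKPROTECTOR
/* Backing storage for the canary declared in asm/stackprotector.h. */
DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif

boot_init_stack_canary() and cpu_init_stack_canary() above then seed this variable with the owning task's canary, for the boot CPU and for newly onlined CPUs respectively.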