author     Christian Brauner <brauner@kernel.org>  2024-08-28 13:08:18 +0200
committer  Christian Brauner <brauner@kernel.org>  2024-08-29 15:20:33 +0200
commit     dfdc8d2565e82d726ce7d99c424c213ed17320f5 (patch)
tree       303fc2b39b85f01582497fcb2f8a2c38bb24063b /mm/slub.c
parent     fs: pack struct file (diff)
parent     fs: use kmem_cache_create_rcu() (diff)
Merge patch series "fs,mm: add kmem_cache_create_rcu()"
Christian Brauner <brauner@kernel.org> says:

When a kmem cache is created with SLAB_TYPESAFE_BY_RCU the free pointer
must be located outside of the object because we don't know what part
of the memory can safely be overwritten as it may be needed to prevent
object recycling.

That has the consequence that SLAB_TYPESAFE_BY_RCU may end up adding a
new cacheline. This is the case for e.g., struct file. After having it
shrunk down by 40 bytes and having it fit in three cachelines we still
have SLAB_TYPESAFE_BY_RCU adding a fourth cacheline because it needs to
accommodate the free pointer.

Add a new kmem_cache_create_rcu() function that allows the caller to
specify an offset where the free pointer is supposed to be placed.

Before this series, cat /proc/slabinfo:

filp    1198   1248    256   32    2 : tunables 0 0 0 : slabdata 39 39 0
                       ^^^

After this series, cat /proc/slabinfo:

filp    1323   1323    192   21    1 : tunables 0 0 0 : slabdata 63 63 0
                       ^^^

* patches from https://lore.kernel.org/r/20240828-work-kmem_cache-rcu-v3-0-5460bc1f09f6@kernel.org:
  fs: use kmem_cache_create_rcu()
  mm: add kmem_cache_create_rcu()
  mm: remove unused root_cache argument

Link: https://lore.kernel.org/r/20240828-work-kmem_cache-rcu-v3-0-5460bc1f09f6@kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
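For context, a minimal sketch of how a cache owner might use the new API.
struct my_obj, its fields, and my_cache_init() are illustrative, not from
the patches; kmem_cache_create_rcu() and freeptr_t are the interfaces the
series adds to <linux/slab.h>:

#include <linux/refcount.h>
#include <linux/slab.h>

/*
 * Hypothetical SLAB_TYPESAFE_BY_RCU user. The union lets the freelist
 * pointer reuse bytes that are only touched once the object is dead,
 * so SLUB no longer has to append an extra word for the free pointer.
 */
struct my_obj {
	refcount_t refcnt;		/* may still be read under RCU */
	union {
		struct rcu_head rcu;	/* used only after the last put */
		freeptr_t free_slot;	/* safe for SLUB to overwrite */
	};
};

static struct kmem_cache *my_cache;

static int my_cache_init(void)
{
	my_cache = kmem_cache_create_rcu("my_obj", sizeof(struct my_obj),
					 offsetof(struct my_obj, free_slot),
					 SLAB_TYPESAFE_BY_RCU);
	return my_cache ? 0 : -ENOMEM;
}

The fs patch in this series takes the same shape for struct file, passing
the offset of a freeptr_t member that shares space with fields only used
once the file is dead, which is what drops filp from 256 to 192 bytes above.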
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  20
1 file changed, 13 insertions, 7 deletions
diff --git a/mm/slub.c b/mm/slub.c
index c9d8a2497fd6..9aa5da1e8e27 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -466,12 +466,6 @@ static struct workqueue_struct *flushwq;
*******************************************************************/
/*
- * freeptr_t represents a SLUB freelist pointer, which might be encoded
- * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled.
- */
-typedef struct { unsigned long v; } freeptr_t;
-
-/*
* Returns freelist pointer (ptr). With hardening, this is obfuscated
* with an XOR of the address where the pointer is held and a per-cache
* random number.
@@ -3921,6 +3915,9 @@ static void *__slab_alloc_node(struct kmem_cache *s,
/*
* If the object has been wiped upon free, make sure it's fully initialized by
* zeroing out freelist pointer.
+ *
+ * Note that we also wipe custom freelist pointers specified via
+ * s->rcu_freeptr_offset.
*/
static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
void *obj)
@@ -5144,6 +5141,12 @@ static void set_cpu_partial(struct kmem_cache *s)
#endif
}
+/* Was a valid freeptr offset requested? */
+static inline bool has_freeptr_offset(const struct kmem_cache *s)
+{
+ return s->rcu_freeptr_offset != UINT_MAX;
+}
+
/*
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
@@ -5189,7 +5192,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->inuse = size;
- if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) || s->ctor ||
+ if (((flags & SLAB_TYPESAFE_BY_RCU) && !has_freeptr_offset(s)) ||
+ (flags & SLAB_POISON) || s->ctor ||
((flags & SLAB_RED_ZONE) &&
(s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
/*
@@ -5210,6 +5214,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->offset = size;
size += sizeof(void *);
+ } else if ((flags & SLAB_TYPESAFE_BY_RCU) && has_freeptr_offset(s)) {
+ s->offset = s->rcu_freeptr_offset;
} else {
/*
* Store freelist pointer near middle of object to keep