| author | Ingo Molnar <mingo@kernel.org> | 2024-02-14 10:45:07 +0100 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2024-02-14 10:45:07 +0100 |
| commit | 03c11eb3b16dc0058589751dfd91f254be2be613 (patch) | |
| tree | e5f2889212fec0bb0babdce9abd781ab487e246a /kernel/bpf/queue_stack_maps.c | |
| parent | x86/percpu: Use %RIP-relative address in untagged_addr() (diff) | |
| parent | Linux 6.8-rc4 (diff) | |
| download | linux-03c11eb3b16dc0058589751dfd91f254be2be613.tar.gz linux-03c11eb3b16dc0058589751dfd91f254be2be613.zip | |
Merge tag 'v6.8-rc4' into x86/percpu, to resolve conflicts and refresh the branch
Conflicts:
arch/x86/include/asm/percpu.h
arch/x86/include/asm/text-patching.h
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/bpf/queue_stack_maps.c')
| -rw-r--r-- | kernel/bpf/queue_stack_maps.c | 21 |
1 file changed, 18 insertions(+), 3 deletions(-)
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 8d2ddcb7566b..d869f51ea93a 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -98,7 +98,12 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
 	int err = 0;
 	void *ptr;
 
-	raw_spin_lock_irqsave(&qs->lock, flags);
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+			return -EBUSY;
+	} else {
+		raw_spin_lock_irqsave(&qs->lock, flags);
+	}
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
@@ -128,7 +133,12 @@ static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
 	void *ptr;
 	u32 index;
 
-	raw_spin_lock_irqsave(&qs->lock, flags);
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
+			return -EBUSY;
+	} else {
+		raw_spin_lock_irqsave(&qs->lock, flags);
+	}
 
 	if (queue_stack_map_is_empty(qs)) {
 		memset(value, 0, qs->map.value_size);
@@ -193,7 +203,12 @@ static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
 	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&qs->lock, irq_flags);
+	if (in_nmi()) {
+		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))
+			return -EBUSY;
+	} else {
+		raw_spin_lock_irqsave(&qs->lock, irq_flags);
+	}
 
 	if (queue_stack_map_is_full(qs)) {
 		if (!replace) {
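The hunks above all apply the same NMI-aware locking pattern: when the queue/stack map helpers run in NMI context, the interrupted code on the same CPU may already hold `qs->lock`, so unconditionally spinning on it could deadlock. The change therefore uses `raw_spin_trylock_irqsave()` under `in_nmi()` and returns `-EBUSY` if the lock is contended, while keeping the blocking `raw_spin_lock_irqsave()` for every other context. Below is a minimal, illustrative sketch of that pattern on a stand-in structure; `qs_sketch` and `qs_op_sketch` are hypothetical names for illustration only, not part of the upstream file.

```c
/*
 * Illustrative sketch only (not the upstream code): the NMI-aware
 * locking pattern from the hunks above, applied to a stand-in struct.
 */
#include <linux/errno.h>
#include <linux/preempt.h>	/* in_nmi() */
#include <linux/spinlock.h>

struct qs_sketch {
	raw_spinlock_t lock;
	/* head/tail/buffer fields omitted for brevity */
};

static long qs_op_sketch(struct qs_sketch *qs)
{
	unsigned long flags;
	long err = 0;

	if (in_nmi()) {
		/* Never spin in NMI context: this CPU may already own the lock. */
		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
			return -EBUSY;
	} else {
		raw_spin_lock_irqsave(&qs->lock, flags);
	}

	/* ... critical section: inspect or modify the queue/stack ... */

	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}
```

Callers that can be reached from NMI (e.g. via tracing programs) must be prepared for the `-EBUSY` return instead of assuming the operation always completes.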
