| author | Maxime Ripard <mripard@kernel.org> | 2024-04-23 08:48:56 +0200 |
|---|---|---|
| committer | Maxime Ripard <mripard@kernel.org> | 2024-04-23 08:48:56 +0200 |
| commit | c058e7a8f8af355e4a441c89400a6e95a16320e5 | |
| tree | ed96dcd06e69f508a3f8a112a29fd228289429a6 | /net/unix |
| parent | drm/rockchip: rk3066_hdmi: switch to struct drm_edid | |
| parent | Backmerge tag 'v6.9-rc5' into drm-next | |
Merge drm/drm-next into drm-misc-next
Maíra needs a backmerge to apply v3d patches, and Danilo needs one for some
nouveau patches.
Signed-off-by: Maxime Ripard <mripard@kernel.org>
Diffstat (limited to 'net/unix')
| -rw-r--r-- | net/unix/af_unix.c | 16 |
| -rw-r--r-- | net/unix/garbage.c | 18 |
2 files changed, 26 insertions(+), 8 deletions(-)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5b41e2321209..9a6ad5974dff 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2663,9 +2663,13 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 					WRITE_ONCE(u->oob_skb, NULL);
 					consume_skb(skb);
 				}
-			} else if (!(flags & MSG_PEEK)) {
+			} else if (flags & MSG_PEEK) {
+				skb = NULL;
+			} else {
 				skb_unlink(skb, &sk->sk_receive_queue);
-				consume_skb(skb);
+				WRITE_ONCE(u->oob_skb, NULL);
+				if (!WARN_ON_ONCE(skb_unref(skb)))
+					kfree_skb(skb);
 				skb = skb_peek(&sk->sk_receive_queue);
 			}
 		}
@@ -2739,18 +2743,16 @@ redo:
 		last = skb = skb_peek(&sk->sk_receive_queue);
 		last_len = last ? last->len : 0;
 
+again:
 #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
 		if (skb) {
 			skb = manage_oob(skb, sk, flags, copied);
-			if (!skb) {
+			if (!skb && copied) {
 				unix_state_unlock(sk);
-				if (copied)
-					break;
-				goto redo;
+				break;
 			}
 		}
 #endif
-again:
 		if (skb == NULL) {
 			if (copied >= target)
 				goto unlock;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index fa39b6265238..6433a414acf8 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -274,11 +274,22 @@ static void __unix_gc(struct work_struct *work)
 	 * receive queues. Other, non candidate sockets _can_ be
 	 * added to queue, so we must make sure only to touch
 	 * candidates.
+	 *
+	 * Embryos, though never candidates themselves, affect which
+	 * candidates are reachable by the garbage collector. Before
+	 * being added to a listener's queue, an embryo may already
+	 * receive data carrying SCM_RIGHTS, potentially making the
+	 * passed socket a candidate that is not yet reachable by the
+	 * collector. It becomes reachable once the embryo is
+	 * enqueued. Therefore, we must ensure that no SCM-laden
+	 * embryo appears in a (candidate) listener's queue between
+	 * consecutive scan_children() calls.
 	 */
 	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
+		struct sock *sk = &u->sk;
 		long total_refs;
 
-		total_refs = file_count(u->sk.sk_socket->file);
+		total_refs = file_count(sk->sk_socket->file);
 
 		WARN_ON_ONCE(!u->inflight);
 		WARN_ON_ONCE(total_refs < u->inflight);
@@ -286,6 +297,11 @@ static void __unix_gc(struct work_struct *work)
 			list_move_tail(&u->link, &gc_candidates);
 			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
 			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+
+			if (sk->sk_state == TCP_LISTEN) {
+				unix_state_lock(sk);
+				unix_state_unlock(sk);
+			}
 		}
 	}
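The garbage.c hunk hinges on a userspace ordering that is easy to miss: an AF_UNIX client can connect() and immediately send a message carrying SCM_RIGHTS, and that message lands in the receive queue of the embryo socket sitting in the listener's accept queue before the server ever calls accept(). The sketch below illustrates that sequence; it is not part of the patch, the socket path and the choice of passing STDOUT_FILENO are arbitrary, and error handling is kept minimal.

```c
/* Userspace sketch of the "SCM-laden embryo" scenario described in the
 * garbage.c comment. Socket path and passed fd are arbitrary choices.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/un.h>

#define SOCK_PATH "/tmp/scm_embryo_demo.sock"

static void die(const char *msg)
{
	perror(msg);
	exit(1);
}

/* Send one data byte with an attached SCM_RIGHTS control message. */
static void send_fd(int sock, int fd_to_pass)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	if (sendmsg(sock, &msg, 0) < 0)
		die("sendmsg");
}

int main(void)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	int listener, client, peer;

	strncpy(addr.sun_path, SOCK_PATH, sizeof(addr.sun_path) - 1);
	unlink(SOCK_PATH);

	listener = socket(AF_UNIX, SOCK_STREAM, 0);
	if (listener < 0)
		die("socket");
	if (bind(listener, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		die("bind");
	if (listen(listener, 1) < 0)
		die("listen");

	/* connect() creates an embryo socket on the listener's accept
	 * queue; the SCM_RIGHTS message below is queued on that embryo
	 * before accept() runs.
	 */
	client = socket(AF_UNIX, SOCK_STREAM, 0);
	if (connect(client, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		die("connect");
	send_fd(client, STDOUT_FILENO);

	/* Only here does the embryo become an accepted, reachable socket. */
	peer = accept(listener, NULL, NULL);
	printf("accepted peer fd %d with SCM_RIGHTS data already queued\n", peer);

	close(peer);
	close(client);
	close(listener);
	unlink(SOCK_PATH);
	return 0;
}
```

Under this ordering a descriptor is already in flight while its receiving socket is still an embryo, which is the window the added unix_state_lock()/unix_state_unlock() pair on TCP_LISTEN candidates synchronizes against, per the comment added in the hunk above.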
