author	Paolo Abeni <pabeni@redhat.com>	2021-01-20 15:39:14 +0100
committer	Jakub Kicinski <kuba@kernel.org>	2021-01-22 19:21:02 -0800
commit	b19bc2945b40b9fd38e835700907ffe8534ef0de (patch)
tree	a950b8f23a733ab58c59be32d870a1e30d4bf7b8 /net/mptcp/protocol.c
parent	mptcp: schedule work for better snd subflow selection (diff)
download	linux-b19bc2945b40b9fd38e835700907ffe8534ef0de.tar.gz
	linux-b19bc2945b40b9fd38e835700907ffe8534ef0de.zip
mptcp: implement delegated actions
On MPTCP-level ack reception, the packet scheduler may select a subflow other than the current one.

Prior to this commit we relied on the workqueue to trigger action on such a subflow.

This changeset introduces an infrastructure that allows any MPTCP subflow to schedule actions (MPTCP xmit) on other subflows without resorting to (multiple) process reschedules: a dummy NAPI instance is used instead. When MPTCP needs to trigger action on a different subflow, it enqueues the target subflow on the NAPI backlog and schedules such instance as needed. The dummy NAPI poll method walks the socket backlog and tries to acquire the (BH) socket lock on each of them. If the socket is owned by user space, the action will be completed by the sock release cb; otherwise the push is started.

This change leverages the delegated action infrastructure to avoid invoking the MPTCP worker to spool the pending data when the packet scheduler picks a subflow other than the one currently processing the incoming MPTCP-level ack.

Additionally, we further refine the subflow selection, invoking the packet scheduler for each chunk of data even inside __mptcp_subflow_push_pending().

v1 -> v2:
 - fix possible UaF at shutdown time, resetting sock ops after removing the ulp context

Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
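The enqueue side of the scheme (mptcp_subflow_delegate() and its companions, called from the hunks below) lives in net/mptcp/protocol.h and therefore does not appear in this per-file diff. The following is a minimal sketch, not the verbatim patch: it assumes a per-subflow delegated_status bitmask and delegated_node list entry (names chosen to match the callers below), together with the per-CPU list/NAPI pair declared in protocol.c, and shows the intended enqueue/dequeue/completion flow.

/* Sketch of the protocol.h side of the delegated-action infrastructure.
 * Helper names follow the callers visible in the diff below; field names
 * and exact locking details are assumptions, not the patch content.
 */
struct mptcp_delegated_action {
	struct napi_struct napi;
	struct list_head head;		/* subflows with a pending action */
};

DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);

#define MPTCP_DELEGATE_SEND	0	/* only delegated action so far: xmit */

static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow)
{
	struct mptcp_delegated_action *delegated;
	bool schedule;

	/* if the bit is already set, the subflow is already queued on some
	 * CPU backlog (or currently being processed); nothing to do
	 */
	if (test_and_set_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status))
		return;

	/* hold the subflow socket until the poll loop (or the release cb)
	 * has completed the action and dequeued the subflow
	 */
	sock_hold(mptcp_subflow_tcp_sock(subflow));

	delegated = this_cpu_ptr(&mptcp_delegated_actions);
	schedule = list_empty(&delegated->head);
	list_add_tail(&subflow->delegated_node, &delegated->head);
	if (schedule)
		napi_schedule(&delegated->napi);
}

static inline struct mptcp_subflow_context *
mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
{
	struct mptcp_subflow_context *ret;

	if (list_empty(&delegated->head))
		return NULL;

	ret = list_first_entry(&delegated->head, struct mptcp_subflow_context,
			       delegated_node);
	list_del_init(&ret->delegated_node);
	return ret;
}

static inline bool
mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
{
	return test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
}

static inline void
mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow)
{
	clear_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
}

In this sketch, mptcp_napi_poll() (added in the last hunk below) dequeues subflows one at a time via mptcp_subflow_delegated_next() and drops the reference taken here once the action has been handled.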
Diffstat (limited to 'net/mptcp/protocol.c')
-rw-r--r--	net/mptcp/protocol.c	86
1 file changed, 82 insertions, 4 deletions
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 8cb582eee286..a033bf9c26ee 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -45,6 +45,9 @@ static struct percpu_counter mptcp_sockets_allocated;
static void __mptcp_destroy_sock(struct sock *sk);
static void __mptcp_check_send_data_fin(struct sock *sk);
+DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
+static struct net_device mptcp_napi_dev;
+
/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
* completed yet or has failed, return the subflow socket.
* Otherwise return NULL.
@@ -1506,7 +1509,9 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_sendmsg_info info;
struct mptcp_data_frag *dfrag;
+ struct sock *xmit_ssk;
int len, copied = 0;
+ bool first = true;
info.flags = 0;
while ((dfrag = mptcp_send_head(sk))) {
@@ -1516,6 +1521,18 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
while (len > 0) {
int ret = 0;
+ /* the caller already invoked the packet scheduler,
+ * check for a different subflow usage only after
+ * spooling the first chunk of data
+ */
+ xmit_ssk = first ? ssk : mptcp_subflow_get_send(mptcp_sk(sk));
+ if (!xmit_ssk)
+ goto out;
+ if (xmit_ssk != ssk) {
+ mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
+ goto out;
+ }
+
if (unlikely(mptcp_must_reclaim_memory(sk, ssk))) {
__mptcp_update_wmem(sk);
sk_mem_reclaim_partial(sk);
@@ -1534,6 +1551,7 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
msk->tx_pending_data -= ret;
copied += ret;
len -= ret;
+ first = false;
}
WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
}
@@ -2242,7 +2260,6 @@ static void mptcp_worker(struct work_struct *work)
if (unlikely(state == TCP_CLOSE))
goto unlock;
- mptcp_push_pending(sk, 0);
mptcp_check_data_fin_ack(sk);
__mptcp_flush_join_list(msk);
@@ -2901,10 +2918,12 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
return;
if (!sock_owned_by_user(sk)) {
- if (mptcp_subflow_get_send(mptcp_sk(sk)) == ssk)
+ struct sock *xmit_ssk = mptcp_subflow_get_send(mptcp_sk(sk));
+
+ if (xmit_ssk == ssk)
__mptcp_subflow_push_pending(sk, ssk);
- else
- mptcp_schedule_work(sk);
+ else if (xmit_ssk)
+ mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
} else {
set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
}
@@ -2955,6 +2974,20 @@ static void mptcp_release_cb(struct sock *sk)
}
}
+void mptcp_subflow_process_delegated(struct sock *ssk)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct sock *sk = subflow->conn;
+
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ __mptcp_subflow_push_pending(sk, ssk);
+ else
+ set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+ mptcp_data_unlock(sk);
+ mptcp_subflow_delegated_done(subflow);
+}
+
static int mptcp_hash(struct sock *sk)
{
/* should never be called,
@@ -3365,13 +3398,58 @@ static struct inet_protosw mptcp_protosw = {
.flags = INET_PROTOSW_ICSK,
};
+static int mptcp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct mptcp_delegated_action *delegated;
+ struct mptcp_subflow_context *subflow;
+ int work_done = 0;
+
+ delegated = container_of(napi, struct mptcp_delegated_action, napi);
+ while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ bh_lock_sock_nested(ssk);
+ if (!sock_owned_by_user(ssk) &&
+ mptcp_subflow_has_delegated_action(subflow))
+ mptcp_subflow_process_delegated(ssk);
+ /* ... elsewhere tcp_release_cb_override already processed
+ * the action or will do at next release_sock().
+ * In both cases we must dequeue the subflow here - on the same
+ * CPU that scheduled it.
+ */
+ bh_unlock_sock(ssk);
+ sock_put(ssk);
+
+ if (++work_done == budget)
+ return budget;
+ }
+
+ /* always provide a 0 'work_done' argument, so that napi_complete_done
+ * will not try accessing the NULL napi->dev ptr
+ */
+ napi_complete_done(napi, 0);
+ return work_done;
+}
+
void __init mptcp_proto_init(void)
{
+ struct mptcp_delegated_action *delegated;
+ int cpu;
+
mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
panic("Failed to allocate MPTCP pcpu counter\n");
+ init_dummy_netdev(&mptcp_napi_dev);
+ for_each_possible_cpu(cpu) {
+ delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
+ INIT_LIST_HEAD(&delegated->head);
+ netif_tx_napi_add(&mptcp_napi_dev, &delegated->napi, mptcp_napi_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&delegated->napi);
+ }
+
mptcp_subflow_init();
mptcp_pm_init();
mptcp_token_init();
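The comment in mptcp_napi_poll() references tcp_release_cb_override: when the poll loop finds the subflow socket owned by user space, the delegated action is left pending and completed from the subflow's release callback instead. That override is added to net/mptcp/subflow.c by this patch and is not part of this per-file diff; below is a minimal sketch of its assumed shape, reusing the helpers sketched above.

static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

	/* complete any action that mptcp_napi_poll() could not run because
	 * the subflow socket was owned by user space at that time
	 */
	if (mptcp_subflow_has_delegated_action(subflow))
		mptcp_subflow_process_delegated(ssk);

	tcp_release_cb(ssk);
}

The v1 -> v2 note in the changelog (resetting sock ops after removing the ulp context) presumably refers to uninstalling this override at shutdown, so that a late release_sock() cannot run it against a freed subflow context.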