| field | value | date |
|---|---|---|
| author | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2025-07-01 08:09:36 +0200 |
| committer | Herbert Xu <herbert@gondor.apana.org.au> | 2025-07-18 20:51:59 +1000 |
| commit | 590f8a67ba3cafba48e62d20ee03ab7d9a2c51f9 | |
| tree | e5259280e8ec974c420927109c63d38c3fdd1af0 | |
| parent | crypto: qat - disable ZUC-256 capability for QAT GEN5 | |
crypto: cryptd - Use nested-BH locking for cryptd_cpu_queue
cryptd_queue::cryptd_cpu_queue is a per-CPU variable and relies on
disabled BH for its locking. Because local_bh_disable() does not
provide per-CPU locking on PREEMPT_RT, this data structure requires
explicit locking there.
Add a local_lock_t to struct cryptd_cpu_queue and use
local_lock_nested_bh() for locking. This change adds only lockdep
coverage and does not alter the functional behaviour for !PREEMPT_RT.
(A standalone sketch of this locking pattern follows the diff below.)
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: linux-crypto@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | crypto/cryptd.c | 6 |

1 file changed, 6 insertions, 0 deletions
```diff
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 5bb6f8d88cc2..efff54e707cb 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -34,6 +34,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
 static struct workqueue_struct *cryptd_wq;
 
 struct cryptd_cpu_queue {
+	local_lock_t bh_lock;
 	struct crypto_queue queue;
 	struct work_struct work;
 };
@@ -110,6 +111,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
 		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
 		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
+		local_lock_init(&cpu_queue->bh_lock);
 	}
 	pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
 	return 0;
@@ -135,6 +137,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	refcount_t *refcnt;
 
 	local_bh_disable();
+	local_lock_nested_bh(&queue->cpu_queue->bh_lock);
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
@@ -151,6 +154,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
 	refcount_inc(refcnt);
 
 out:
+	local_unlock_nested_bh(&queue->cpu_queue->bh_lock);
 	local_bh_enable();
 
 	return err;
@@ -169,8 +173,10 @@ static void cryptd_queue_worker(struct work_struct *work)
 	 * Only handle one request at a time to avoid hogging crypto workqueue.
 	 */
 	local_bh_disable();
+	__local_lock_nested_bh(&cpu_queue->bh_lock);
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
+	__local_unlock_nested_bh(&cpu_queue->bh_lock);
 	local_bh_enable();
 
 	if (!req)
```
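For illustration, here is a minimal, self-contained sketch of the same nested-BH locking pattern applied to a statically defined per-CPU structure. The names (demo_cpu_queue, demo_queues, demo_enqueue) are hypothetical and not part of cryptd; the patch above applies the same pattern to a dynamically allocated per-CPU queue, which is why it calls local_lock_init() at init time instead of using the INIT_LOCAL_LOCK() static initializer.

```c
#include <linux/local_lock.h>
#include <linux/percpu.h>
#include <linux/bottom_half.h>

/* Hypothetical per-CPU queue mirroring the cryptd_cpu_queue layout. */
struct demo_cpu_queue {
	local_lock_t bh_lock;	/* guards @pending; a real lock only on PREEMPT_RT */
	unsigned int pending;
};

static DEFINE_PER_CPU(struct demo_cpu_queue, demo_queues) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static void demo_enqueue(void)
{
	struct demo_cpu_queue *q;

	local_bh_disable();
	/*
	 * On !PREEMPT_RT this adds lockdep coverage only; on PREEMPT_RT
	 * it acquires a real per-CPU lock, because disabling BH no
	 * longer implies per-CPU exclusion there.
	 */
	local_lock_nested_bh(&demo_queues.bh_lock);
	q = this_cpu_ptr(&demo_queues);
	q->pending++;
	local_unlock_nested_bh(&demo_queues.bh_lock);
	local_bh_enable();
}
```

On !PREEMPT_RT builds the lock/unlock pair compiles down to lockdep annotations, which matches the commit message's statement that functional behaviour is unchanged there.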
