mm: zswap: move allocations during CPU init outside the lock
In zswap_cpu_comp_prepare(), allocations are made and assigned to various
members of acomp_ctx under acomp_ctx->mutex. However, allocations may
recurse into zswap through reclaim, trying to acquire the same mutex and
deadlocking.
Move the allocations before the mutex critical section. Only the
initialization of acomp_ctx needs to be done with the mutex held.
Link: https://lkml.kernel.org/r/20250113214458.2123410-1-yosryahmed@google.com
Fixes: 12dcb0ef54 ("mm: zswap: properly synchronize freeing resources during CPU hotunplug")
Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
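The fix follows a general pattern worth noting: do all allocations with no locks held, keep the results in local variables, and take the mutex only to publish them into the shared structure. Below is a minimal userspace C sketch of that pattern (ctx_prepare() and struct ctx are hypothetical names invented for illustration, with a pthread mutex standing in for the kernel mutex; this is not the kernel code):

#include <pthread.h>
#include <stdlib.h>

struct ctx {
	pthread_mutex_t lock;
	void *buffer;
	void *req;
};

/* Allocate first with no locks held, then publish under the lock. */
static int ctx_prepare(struct ctx *ctx, size_t bufsize)
{
	void *buffer = NULL;
	void *req = NULL;

	/*
	 * Allocations happen before the critical section. In the kernel
	 * case an allocation can recurse into zswap through reclaim and
	 * try to take the same mutex; done here, no lock is held, so
	 * there is nothing to deadlock on. (Userspace malloc() will not
	 * actually recurse; this only mirrors the kernel situation.)
	 */
	buffer = malloc(bufsize);
	if (!buffer)
		goto fail;
	req = malloc(128);
	if (!req)
		goto fail;

	/* Only the publication of the results needs the lock. */
	pthread_mutex_lock(&ctx->lock);
	ctx->buffer = buffer;
	ctx->req = req;
	pthread_mutex_unlock(&ctx->lock);
	return 0;

fail:
	free(req);
	free(buffer);
	return -1;
}

int main(void)
{
	struct ctx ctx;

	pthread_mutex_init(&ctx.lock, NULL);
	ctx.buffer = NULL;
	ctx.req = NULL;
	return ctx_prepare(&ctx, 2 * 4096) ? 1 : 0;
}

Note how a single fail: label with NULL-initialized locals replaces a cascade of error labels, just as the patch collapses req_fail/acomp_fail/buffer_fail into one fail label: free(NULL), like kfree(NULL), is a no-op, so one unwind path covers every failure point.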
commit 779b9955f6
parent f1897f2f08

1 changed file with 24 additions and 18 deletions
mm/zswap.c | 42 ++++++++++++++++++++++++------------------
diff --git a/mm/zswap.c b/mm/zswap.c
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -820,15 +820,15 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 {
 	struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
 	struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
-	struct crypto_acomp *acomp;
-	struct acomp_req *req;
+	struct crypto_acomp *acomp = NULL;
+	struct acomp_req *req = NULL;
+	u8 *buffer = NULL;
 	int ret;
 
-	mutex_lock(&acomp_ctx->mutex);
-	acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
-	if (!acomp_ctx->buffer) {
+	buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+	if (!buffer) {
 		ret = -ENOMEM;
-		goto buffer_fail;
+		goto fail;
 	}
 
 	acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
@@ -836,21 +836,25 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 		pr_err("could not alloc crypto acomp %s : %ld\n",
 				pool->tfm_name, PTR_ERR(acomp));
 		ret = PTR_ERR(acomp);
-		goto acomp_fail;
+		goto fail;
 	}
-	acomp_ctx->acomp = acomp;
-	acomp_ctx->is_sleepable = acomp_is_async(acomp);
 
-	req = acomp_request_alloc(acomp_ctx->acomp);
+	req = acomp_request_alloc(acomp);
 	if (!req) {
 		pr_err("could not alloc crypto acomp_request %s\n",
 		       pool->tfm_name);
 		ret = -ENOMEM;
-		goto req_fail;
+		goto fail;
 	}
-	acomp_ctx->req = req;
 
+	/*
+	 * Only hold the mutex after completing allocations, otherwise we may
+	 * recurse into zswap through reclaim and attempt to hold the mutex
+	 * again resulting in a deadlock.
+	 */
+	mutex_lock(&acomp_ctx->mutex);
 	crypto_init_wait(&acomp_ctx->wait);
+
 	/*
 	 * if the backend of acomp is async zip, crypto_req_done() will wakeup
 	 * crypto_wait_req(); if the backend of acomp is scomp, the callback
@@ -859,15 +863,17 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				   crypto_req_done, &acomp_ctx->wait);
 
+	acomp_ctx->buffer = buffer;
+	acomp_ctx->acomp = acomp;
+	acomp_ctx->is_sleepable = acomp_is_async(acomp);
+	acomp_ctx->req = req;
 	mutex_unlock(&acomp_ctx->mutex);
 	return 0;
 
-req_fail:
-	crypto_free_acomp(acomp_ctx->acomp);
-acomp_fail:
-	kfree(acomp_ctx->buffer);
-buffer_fail:
-	mutex_unlock(&acomp_ctx->mutex);
+fail:
+	if (acomp)
+		crypto_free_acomp(acomp);
+	kfree(buffer);
 	return ret;
 }