
driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IC2NHC
CVE: NA

----------------------------------------------------------------------

When more than 512 packets are being processed in the hardware queue,
the SEC driver still sends new packets to the hardware, but also adds
them to the backlog list and returns -EBUSY to the caller, which then
stops sending packets. When the callback finds that the number of
packets in the queue has dropped below 512, the packet-sending threads
are woken up.

However, when more than 512 threads are sending packets, a request on
the backlog list may already have completed without being deleted from
the list. Deleting it afterwards accesses released memory, causing a
system panic.

Therefore, delete the backlog list. Instead, decide whether a sending
thread needs to be woken up based on 'fake_busy' in struct sec_req,
and invoke the user's callback before releasing the request memory so
that the thread is woken up first.

The log is as follows:
[  169.430697][ T1354] CPU: 27 PID: 1354 Comm: kworker/u262:1 Kdump: loaded Not tainted 5.10.0+ #1
[  169.439678][ T1354] Hardware name: Huawei TaiShan 200 (Model 2280)/BC82AMDD, BIOS 2280-V2 CS V5.B211.01 11/10/2021
[  169.450421][ T1354] Workqueue: 0000:76:00.0 qm_work_process [hisi_qm]
[  169.457050][ T1354] Call trace:
[  169.460285][ T1354]  dump_backtrace+0x0/0x300
[  169.464780][ T1354]  show_stack+0x20/0x30
[  169.468936][ T1354]  dump_stack+0x104/0x17c
[  169.473240][ T1354]  print_address_description.constprop.0+0x68/0x204
[  169.479889][ T1354]  __kasan_report+0xe0/0x140
[  169.484463][ T1354]  kasan_report+0x44/0xe0
[  169.488771][ T1354]  __asan_load8+0x94/0xd0
[  169.493088][ T1354]  __list_del_entry_valid+0x20/0x180
[  169.498408][ T1354]  sec_back_req_clear+0x184/0x2dc [hisi_sec2]
[  169.504494][ T1354]  sec_skcipher_callback+0x84/0x150 [hisi_sec2]
[  169.510800][ T1354]  sec_req_cb+0x1d4/0x334 [hisi_sec2]
[  169.516227][ T1354]  qm_poll_req_cb+0x170/0x20c [hisi_qm]
[  169.524821][ T1354]  qm_work_process+0xf8/0x124 [hisi_qm]
[  169.533436][ T1354]  process_one_work+0x3a8/0x860
[  169.541063][ T1354]  worker_thread+0x280/0x670
[  169.548349][ T1354]  kthread+0x18c/0x1d0
[  169.555169][ T1354]  ret_from_fork+0x10/0x18

Fixes: c16a70c1f253 ("crypto: hisilicon/sec - add new algorithm mode for AEAD")
Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 49 ++++------------------
 1 file changed, 8 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 11c6e1d9887f..0428c50bea31 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -288,10 +288,10 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
 	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
 	if (ctx->fake_req_limit <=
 	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
-		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
+		req->fake_busy = true;
+		spin_unlock_bh(&qp_ctx->req_lock);
 		atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
 		atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
-		spin_unlock_bh(&qp_ctx->req_lock);
 		return -EBUSY;
 	}
 	spin_unlock_bh(&qp_ctx->req_lock);
@@ -559,7 +559,6 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
 
 	spin_lock_init(&qp_ctx->req_lock);
 	idr_init(&qp_ctx->req_idr);
-	INIT_LIST_HEAD(&qp_ctx->backlog);
 
 	ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
 	if (ret)
@@ -1394,31 +1393,10 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
 	}
 }
 
-static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
-					  struct sec_qp_ctx *qp_ctx)
-{
-	struct sec_req *backlog_req = NULL;
-
-	spin_lock_bh(&qp_ctx->req_lock);
-	if (ctx->fake_req_limit >=
-	    atomic_read(&qp_ctx->qp->qp_status.used) &&
-	    !list_empty(&qp_ctx->backlog)) {
-		backlog_req = list_first_entry(&qp_ctx->backlog,
-				typeof(*backlog_req), backlog_head);
-		list_del(&backlog_req->backlog_head);
-	}
-	spin_unlock_bh(&qp_ctx->req_lock);
-
-	return backlog_req;
-}
-
 static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
 				  int err)
 {
 	struct skcipher_request *sk_req = req->c_req.sk_req;
-	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-	struct skcipher_request *backlog_sk_req;
-	struct sec_req *backlog_req;
 
 	sec_free_req_id(req);
 
@@ -1427,13 +1405,8 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
 	    ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
 		sec_update_iv(req, SEC_SKCIPHER);
 
-	while (1) {
-		backlog_req = sec_back_req_clear(ctx, qp_ctx);
-		if (!backlog_req)
-			break;
-
-		backlog_sk_req = backlog_req->c_req.sk_req;
-		skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
+	if (req->fake_busy) {
+		skcipher_request_complete(sk_req, -EINPROGRESS);
 		atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
 	}
 
@@ -1678,9 +1651,6 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 	size_t authsize = crypto_aead_authsize(tfm);
 	struct sec_aead_req *aead_req = &req->aead_req;
 	struct sec_cipher_req *c_req = &req->c_req;
-	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
-	struct aead_request *backlog_aead_req;
-	struct sec_req *backlog_req;
 	size_t sz;
 
 	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
@@ -1700,13 +1670,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
 
 	sec_free_req_id(req);
 
-	while (1) {
-		backlog_req = sec_back_req_clear(c, qp_ctx);
-		if (!backlog_req)
-			break;
-
-		backlog_aead_req = backlog_req->aead_req.aead_req;
-		aead_request_complete(backlog_aead_req, -EINPROGRESS);
+	if (req->fake_busy) {
+		aead_request_complete(a_req, -EINPROGRESS);
 		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
 	}
 
@@ -2103,6 +2068,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
 	req->c_req.sk_req = sk_req;
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
+	req->fake_busy = false;
 
 	ret = sec_skcipher_param_check(ctx, req, &need_fallback);
 	if (unlikely(ret))
@@ -2323,6 +2289,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
 	req->aead_req.aead_req = a_req;
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
+	req->fake_busy = false;
 	req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
 
 	ret = sec_aead_param_check(ctx, req, &need_fallback);
-- 
2.43.0
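
For review convenience, below is a minimal userspace sketch of the
completion ordering this patch enforces. All names in it (fake_req,
fake_bd_send, fake_req_cb, FAKE_REQ_LIMIT, demo_complete) are
hypothetical stand-ins, not the driver's APIs: the send path marks the
request 'fake_busy' instead of putting it on a shared backlog list, and
the completion path notifies the waiting sender with -EINPROGRESS
before freeing the request, so nothing touches the memory after it is
released.

/*
 * Illustrative sketch only; it does not use the real hisi_qm/sec APIs.
 * Build with: cc -std=c11 sketch.c
 */
#include <errno.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define FAKE_REQ_LIMIT 512		/* mirrors ctx->fake_req_limit */

struct fake_req {
	bool fake_busy;			/* queue was full when this was sent */
	void (*complete)(struct fake_req *req, int err);
};

static atomic_int queue_used;		/* mirrors qp_status.used */

/* Send path: the request still goes to "hardware", but the caller is
 * told -EBUSY and must wait for the -EINPROGRESS notification. */
static int fake_bd_send(struct fake_req *req)
{
	int used = atomic_fetch_add(&queue_used, 1) + 1;

	if (used >= FAKE_REQ_LIMIT) {
		req->fake_busy = true;	/* remember to wake the sender */
		return -EBUSY;
	}
	return 0;
}

/* Completion path: wake the waiting sender *before* freeing the
 * request -- the ordering the patch enforces by dropping the backlog
 * list. The freed request is on no list, so nothing dereferences it
 * afterwards. */
static void fake_req_cb(struct fake_req *req, int err)
{
	atomic_fetch_sub(&queue_used, 1);

	if (req->fake_busy)
		req->complete(req, -EINPROGRESS);	/* wake the sender */

	req->complete(req, err);	/* final completion */
	free(req);
}

static void demo_complete(struct fake_req *req, int err)
{
	if (err == -EINPROGRESS)
		printf("sender woken up\n");
	else
		printf("request done, err=%d\n", err);
}

int main(void)
{
	struct fake_req *req = calloc(1, sizeof(*req));

	req->complete = demo_complete;
	atomic_store(&queue_used, FAKE_REQ_LIMIT);	/* simulate full queue */

	if (fake_bd_send(req) == -EBUSY)
		printf("queue full: caller stops sending\n");

	fake_req_cb(req, 0);		/* hardware completion arrives */
	return 0;
}

Run as written, the sketch prints the -EBUSY stop, the wake-up, and the
final completion in that order, matching the sequence described in the
commit message.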