[PATCH OLK-6.6 00/12] crypto: hisilicon - add reference counting to queues for tfm kernel reuse
From: JiangShui Yang <yangjiangshui@h-partners.com> Chenghai Huang (8): crypto: hisilicon/zip - adjust the way to obtain the req in the callback function crypto: hisilicon/sec - move backlog management to qp and store sqe in qp for callback crypto: hisilicon/qm - enhance the configuration of req_type in queue attributes crypto: hisilicon/qm - centralize the sending locks of each module into qm crypto: hisilicon - consolidate qp creation and start in hisi_qm_alloc_qps_node crypto: hisilicon/qm - add reference counting to queues for tfm kernel reuse crypto: hisilicon/qm - optimize device selection priority based on queue ref count and NUMA distance crypto: hisilicon/zip - support fallback for zip JiangShui Yang (1): Revert "crypto: hisilicon/hpre - support the hpre algorithm fallback" Qi Tao (1): crypto: hisilicon/sec2 - support skcipher/aead fallback for hardware queue unavailable Weili Qian (1): crypto: hisilicon/hpre - support the hpre algorithm fallback lizhi (1): crypto: hisilicon/hpre: extend tag field to 64 bits for better performance drivers/crypto/hisilicon/Kconfig | 1 + drivers/crypto/hisilicon/hpre/hpre.h | 5 +- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 261 +++----------------- drivers/crypto/hisilicon/hpre/hpre_main.c | 2 +- drivers/crypto/hisilicon/qm.c | 206 +++++++++++---- drivers/crypto/hisilicon/sec2/sec.h | 7 - drivers/crypto/hisilicon/sec2/sec_crypto.c | 159 +++++++----- drivers/crypto/hisilicon/sec2/sec_main.c | 21 +- drivers/crypto/hisilicon/zip/zip.h | 2 +- drivers/crypto/hisilicon/zip/zip_crypto.c | 164 +++++++----- drivers/crypto/hisilicon/zip/zip_main.c | 4 +- include/linux/hisi_acc_qm.h | 14 +- 12 files changed, 417 insertions(+), 429 deletions(-) -- 2.43.0
maillist inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- In the shared queue design, multiple tfms use the same qp, and one qp needs to correspond to multiple qp_ctx. So use the tag to obtain the req virtual address, build a one-to-one relationship between tfm and qp_ctx, and finally remove the old get_tag operation. Fixes: 2bcf36348ce5 ("crypto: hisilicon/zip - initialize operations about 'sqe' in 'acomp_alg.init'") Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/zip/zip_crypto.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 9df9a97633c6..b4a656e0177d 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -39,6 +39,7 @@ enum { HZIP_CTX_Q_NUM }; +#define GET_REQ_FROM_SQE(sqe) ((u64)(sqe)->dw26 | (u64)(sqe)->dw27 << 32) #define COMP_NAME_TO_TYPE(alg_name) \ (!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE : 0) @@ -48,6 +49,7 @@ struct hisi_zip_req { struct hisi_acc_hw_sgl *hw_dst; dma_addr_t dma_src; dma_addr_t dma_dst; + struct hisi_zip_qp_ctx *qp_ctx; u16 req_id; }; @@ -74,7 +76,6 @@ struct hisi_zip_sqe_ops { void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type); void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req); void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type); - u32 (*get_tag)(struct hisi_zip_sqe *sqe); u32 (*get_status)(struct hisi_zip_sqe *sqe); u32 (*get_dstlen)(struct hisi_zip_sqe *sqe); }; @@ -131,6 +132,7 @@ static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx, req_cache = q + req_id; req_cache->req_id = req_id; req_cache->req = req; + req_cache->qp_ctx = qp_ctx; return req_cache; } @@ -181,7 +183,8 @@ static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type) static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) { - sqe->dw26 = req->req_id; + sqe->dw26 = lower_32_bits((u64)req); + sqe->dw27 = upper_32_bits((u64)req); } static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type) @@ -265,11 +268,6 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx, return ret; } -static u32 hisi_zip_get_tag(struct hisi_zip_sqe *sqe) -{ - return sqe->dw26; -} - static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe) { return sqe->dw3 & HZIP_BD_STATUS_M; @@ -282,14 +280,12 @@ static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe) static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) { - struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx; + struct hisi_zip_sqe *sqe = data; + struct hisi_zip_req *req = (struct hisi_zip_req *)GET_REQ_FROM_SQE(sqe); + struct hisi_zip_qp_ctx *qp_ctx = req->qp_ctx; const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops; struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; - struct hisi_zip_req_q *req_q = &qp_ctx->req_q; struct device *dev = &qp->qm->pdev->dev; - struct hisi_zip_sqe *sqe = data; - u32 tag = ops->get_tag(sqe); - struct hisi_zip_req *req = req_q->q + tag; struct acomp_req *acomp_req = req->req; int err = 0; u32 status; @@ -393,7 +389,6 @@ static const struct hisi_zip_sqe_ops hisi_zip_ops = { .fill_req_type = hisi_zip_fill_req_type, .fill_tag = hisi_zip_fill_tag, .fill_sqe_type =
hisi_zip_fill_sqe_type, - .get_tag = hisi_zip_get_tag, .get_status = hisi_zip_get_status, .get_dstlen = hisi_zip_get_dstlen, }; @@ -581,7 +576,6 @@ static void hisi_zip_acomp_exit(struct crypto_acomp *tfm) { struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); - hisi_zip_set_acomp_cb(ctx, NULL); hisi_zip_release_sgl_pool(ctx); hisi_zip_release_req_q(ctx); hisi_zip_ctx_exit(ctx); -- 2.43.0
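The zip patch above now carries the request's virtual address in the two 32-bit tag words of the hardware SQE (dw26/dw27) instead of a per-queue request index, so the completion callback can recover the request without consulting any qp_ctx lookup table. Below is a minimal, self-contained sketch of that round trip in plain C; the fake_sqe/fake_req types and function names are simplified stand-ins, not the driver's real structures. fill_tag() plays the role of hisi_zip_fill_tag() and req_from_sqe() the role of the new GET_REQ_FROM_SQE() macro.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the two 32-bit tag words of the hardware SQE. */
struct fake_sqe {
    uint32_t dw26;    /* low 32 bits of the request pointer */
    uint32_t dw27;    /* high 32 bits of the request pointer */
};

struct fake_req {
    int id;
};

/* Submit path: split the request pointer across the two tag words. */
static void fill_tag(struct fake_sqe *sqe, struct fake_req *req)
{
    uint64_t addr = (uint64_t)(uintptr_t)req;

    sqe->dw26 = (uint32_t)addr;
    sqe->dw27 = (uint32_t)(addr >> 32);
}

/* Completion path: rebuild the pointer from the finished SQE. */
static struct fake_req *req_from_sqe(const struct fake_sqe *sqe)
{
    uint64_t addr = (uint64_t)sqe->dw26 | ((uint64_t)sqe->dw27 << 32);

    return (struct fake_req *)(uintptr_t)addr;
}

int main(void)
{
    struct fake_req req = { .id = 42 };
    struct fake_sqe sqe;

    fill_tag(&sqe, &req);
    assert(req_from_sqe(&sqe) == &req);
    printf("recovered request id %d\n", req_from_sqe(&sqe)->id);
    return 0;
}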
maillist inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- When multiple tfms use the same qp, the backlog data should be managed centrally by the qp, rather than in the qp_ctx of each req. Additionally, since SEC_BD_TYPE1 and SEC_BD_TYPE2 cannot use the tag of the sqe to carry the virtual address of the req, the sent sqe is stored in the qp. This allows the callback function to get the req address. To handle the differences between hardware types, the callback functions are split into two separate implementations. Fixes: f0ae287c5045 ("crypto: hisilicon/sec2 - implement full backlog mode for sec") Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/qm.c | 20 ++++- drivers/crypto/hisilicon/sec2/sec.h | 7 -- drivers/crypto/hisilicon/sec2/sec_crypto.c | 88 +++++++++++----------- include/linux/hisi_acc_qm.h | 8 ++ 4 files changed, 69 insertions(+), 54 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 4c9c2913f1e4..c9d4745cb789 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -2323,6 +2323,7 @@ static void qp_stop_fail_cb(struct hisi_qp *qp) for (i = 0; i < qp_used; i++) { pos = (i + cur_head) % sq_depth; qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); + qm_cq_head_update(qp); atomic_dec(&qp->qp_status.used); } } @@ -2487,6 +2488,7 @@ int hisi_qp_send(struct hisi_qp *qp, const void *msg) return -EBUSY; memcpy(sqe, msg, qp->qm->sqe_size); + qp->msg[sq_tail] = msg; qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); atomic_inc(&qp->qp_status.used); @@ -3067,12 +3069,13 @@ EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) { struct device *dev = &qm->pdev->dev; - struct qm_dma *qdma; + struct hisi_qp *qp; int i; for (i = num - 1; i >= 0; i--) { - qdma = &qm->qp_array[i].qdma; - dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); + qp = &qm->qp_array[i]; + dma_free_coherent(dev, qp->qdma.size, qp->qdma.va, qp->qdma.dma); + kfree(qp->msg); } kfree(qm->qp_array); @@ -3086,10 +3089,16 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, struct hisi_qp *qp; qp = &qm->qp_array[id]; + qp->msg = kmalloc_array(sq_depth, sizeof(void *), GFP_KERNEL); + if (!qp->msg) + return -ENOMEM; + qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, GFP_KERNEL); - if (!qp->qdma.va) + if (!qp->qdma.va) { + kfree(qp->msg); return -ENOMEM; + } qp->sqe = qp->qdma.va; qp->sqe_dma = qp->qdma.dma; @@ -3101,6 +3110,9 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, qp->qm = qm; qp->qp_id = id; + spin_lock_init(&qp->backlog.lock); + INIT_LIST_HEAD(&qp->backlog.list); + return 0; } diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 81d0beda93b2..0710977861f3 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -82,11 +82,6 @@ struct sec_aead_req { __u8 out_mac_buf[SEC_MAX_MAC_LEN]; }; -struct sec_instance_backlog { - struct list_head list; - spinlock_t lock; -}; - /* SEC request of Crypto */ struct sec_req { union { @@ -112,7 +107,6 @@ struct sec_req { bool use_pbuf; struct list_head list; - struct sec_instance_backlog *backlog; struct sec_request_buf buf; }; @@ -172,7 +166,6 @@
struct sec_qp_ctx { spinlock_t id_lock; struct hisi_acc_sgl_pool *c_in_pool; struct hisi_acc_sgl_pool *c_out_pool; - struct sec_instance_backlog backlog; u16 send_head; }; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 006350b8a1fd..f472477d38a0 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -54,7 +54,6 @@ #define SEC_AUTH_CIPHER_V3 0x40 #define SEC_FLAG_OFFSET 7 #define SEC_FLAG_MASK 0x0780 -#define SEC_TYPE_MASK 0x0F #define SEC_DONE_MASK 0x0001 #define SEC_ICV_MASK 0x000E @@ -148,7 +147,7 @@ static void sec_free_req_id(struct sec_req *req) spin_unlock_bh(&qp_ctx->id_lock); } -static u8 pre_parse_finished_bd(struct bd_status *status, void *resp) +static void pre_parse_finished_bd(struct bd_status *status, void *resp) { struct sec_sqe *bd = resp; @@ -158,11 +157,9 @@ static u8 pre_parse_finished_bd(struct bd_status *status, void *resp) SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; status->tag = le16_to_cpu(bd->type2.tag); status->err_type = bd->type2.error_type; - - return bd->type_cipher_auth & SEC_TYPE_MASK; } -static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp) +static void pre_parse_finished_bd3(struct bd_status *status, void *resp) { struct sec_sqe3 *bd3 = resp; @@ -172,8 +169,6 @@ static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp) SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; status->tag = le64_to_cpu(bd3->tag); status->err_type = bd3->error_type; - - return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK; } static int sec_cb_status_check(struct sec_req *req, @@ -244,7 +239,7 @@ static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp struct sec_req *req, *tmp; int ret; - list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) { + list_for_each_entry_safe(req, tmp, &qp_ctx->qp->backlog.list, list) { list_del(&req->list); ctx->req_op->buf_unmap(ctx, req); if (req->req_id >= 0) @@ -265,11 +260,12 @@ static void sec_alg_send_backlog_soft(struct sec_ctx *ctx, struct sec_qp_ctx *qp static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { + struct hisi_qp *qp = qp_ctx->qp; struct sec_req *req, *tmp; int ret; - spin_lock_bh(&qp_ctx->backlog.lock); - list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) { + spin_lock_bh(&qp->backlog.lock); + list_for_each_entry_safe(req, tmp, &qp->backlog.list, list) { ret = qp_send_message(req); switch (ret) { case -EINPROGRESS: @@ -287,42 +283,46 @@ static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) } unlock: - spin_unlock_bh(&qp_ctx->backlog.lock); + spin_unlock_bh(&qp->backlog.lock); } static void sec_req_cb(struct hisi_qp *qp, void *resp) { - struct sec_qp_ctx *qp_ctx = qp->qp_ctx; - struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx; - u8 type_supported = qp_ctx->ctx->type_supported; + const struct sec_sqe *sqe = qp->msg[qp->qp_status.cq_head]; + struct sec_req *req = container_of(sqe, struct sec_req, sec_sqe); + struct sec_ctx *ctx = req->ctx; + struct sec_dfx *dfx = &ctx->sec->debug.dfx; struct bd_status status; - struct sec_ctx *ctx; - struct sec_req *req; int err; - u8 type; - if (type_supported == SEC_BD_TYPE2) { - type = pre_parse_finished_bd(&status, resp); - req = qp_ctx->req_list[status.tag]; - } else { - type = pre_parse_finished_bd3(&status, resp); - req = (void *)(uintptr_t)status.tag; - } + pre_parse_finished_bd(&status, resp); - if (unlikely(type != type_supported)) { - atomic64_inc(&dfx->err_bd_cnt); - 
pr_err("err bd type [%u]\n", type); - return; - } + req->err_type = status.err_type; + err = sec_cb_status_check(req, &status); + if (err) + atomic64_inc(&dfx->done_flag_cnt); - if (unlikely(!req)) { - atomic64_inc(&dfx->invalid_req_cnt); - atomic_inc(&qp->qp_status.used); - return; - } + atomic64_inc(&dfx->recv_cnt); + ctx->req_op->buf_unmap(ctx, req); + ctx->req_op->callback(ctx, req, err); +} + +static void sec_req_cb3(struct hisi_qp *qp, void *resp) +{ + struct bd_status status; + struct sec_ctx *ctx; + struct sec_dfx *dfx; + struct sec_req *req; + int err; + + pre_parse_finished_bd3(&status, resp); + + req = (void *)(uintptr_t)status.tag; req->err_type = status.err_type; ctx = req->ctx; + dfx = &ctx->sec->debug.dfx; + err = sec_cb_status_check(req, &status); if (err) atomic64_inc(&dfx->done_flag_cnt); @@ -330,7 +330,6 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) atomic64_inc(&dfx->recv_cnt); ctx->req_op->buf_unmap(ctx, req); - ctx->req_op->callback(ctx, req, err); } @@ -348,8 +347,10 @@ static int sec_alg_send_message_retry(struct sec_req *req) static int sec_alg_try_enqueue(struct sec_req *req) { + struct hisi_qp *qp = req->qp_ctx->qp; + /* Check if any request is already backlogged */ - if (!list_empty(&req->backlog->list)) + if (!list_empty(&qp->backlog.list)) return -EBUSY; /* Try to enqueue to HW ring */ @@ -359,17 +360,18 @@ static int sec_alg_try_enqueue(struct sec_req *req) static int sec_alg_send_message_maybacklog(struct sec_req *req) { + struct hisi_qp *qp = req->qp_ctx->qp; int ret; ret = sec_alg_try_enqueue(req); if (ret != -EBUSY) return ret; - spin_lock_bh(&req->backlog->lock); + spin_lock_bh(&qp->backlog.lock); ret = sec_alg_try_enqueue(req); if (ret == -EBUSY) - list_add_tail(&req->list, &req->backlog->list); - spin_unlock_bh(&req->backlog->lock); + list_add_tail(&req->list, &qp->backlog.list); + spin_unlock_bh(&qp->backlog.lock); return ret; } @@ -629,13 +631,14 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id) qp_ctx->qp = qp; qp_ctx->ctx = ctx; - qp->req_cb = sec_req_cb; + if (ctx->type_supported == SEC_BD_TYPE3) + qp->req_cb = sec_req_cb3; + else + qp->req_cb = sec_req_cb; spin_lock_init(&qp_ctx->req_lock); idr_init(&qp_ctx->req_idr); - spin_lock_init(&qp_ctx->backlog.lock); spin_lock_init(&qp_ctx->id_lock); - INIT_LIST_HEAD(&qp_ctx->backlog.list); qp_ctx->send_head = 0; ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx); @@ -1952,7 +1955,6 @@ static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) } while (req->req_id < 0 && ++i < ctx->sec->ctx_q_num); req->qp_ctx = qp_ctx; - req->backlog = &qp_ctx->backlog; return 0; } diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 9e5b360cc447..82f1ab92fad4 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -448,6 +448,11 @@ struct hisi_qp_ops { int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm); }; +struct instance_backlog { + struct list_head list; + spinlock_t lock; +}; + struct hisi_qp { u32 qp_id; u16 sq_depth; @@ -472,6 +477,9 @@ struct hisi_qp { bool is_in_kernel; u16 pasid; struct uacce_queue *uacce_q; + + struct instance_backlog backlog; + const void **msg; }; static inline int vfs_num_set(const char *val, const struct kernel_param *kp) -- 2.43.0
From: lizhi <lizhi206@huawei.com> maillist inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- This commit expands the tag field in hpre_sqe structure from 16-bit to 64-bit. The change enables storing request addresses directly in the tag field, allowing callback functions to access request messages without the previous indirection mechanism. By eliminating the need for lookup tables, this modification reduces lock contention and associated overhead, leading to improved efficiency and simplified code. Fixes: c8b4b477079d ("crypto: hisilicon - add HiSilicon HPRE accelerator") Signed-off-by: lizhi <lizhi206@huawei.com> Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/hpre/hpre.h | 5 +- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 151 ++++---------------- 2 files changed, 27 insertions(+), 129 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h index 0f3ddbadbcf9..021dbd9a1d48 100644 --- a/drivers/crypto/hisilicon/hpre/hpre.h +++ b/drivers/crypto/hisilicon/hpre/hpre.h @@ -94,9 +94,8 @@ struct hpre_sqe { __le64 key; __le64 in; __le64 out; - __le16 tag; - __le16 resv2; -#define _HPRE_SQE_ALIGN_EXT 7 + __le64 tag; +#define _HPRE_SQE_ALIGN_EXT 6 __le32 rsvd1[_HPRE_SQE_ALIGN_EXT]; }; diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index fbd2b3510752..84c8d3e2cc93 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -122,12 +122,10 @@ struct hpre_curve25519_ctx { struct hpre_ctx { struct hisi_qp *qp; struct device *dev; - struct hpre_asym_request **req_list; struct hpre *hpre; spinlock_t req_lock; unsigned int key_sz; bool crt_g2_mode; - struct idr req_idr; union { struct hpre_rsa_ctx rsa; struct hpre_dh_ctx dh; @@ -153,7 +151,6 @@ struct hpre_asym_request { struct kpp_request *curve25519; } areq; int err; - int req_id; hpre_cb cb; struct timespec64 req_time; }; @@ -168,58 +165,13 @@ static inline unsigned int hpre_align_pd(void) return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1); } -static int hpre_alloc_req_id(struct hpre_ctx *ctx) +static void hpre_dfx_add_req_time(struct hpre_asym_request *hpre_req) { - unsigned long flags; - int id; - - spin_lock_irqsave(&ctx->req_lock, flags); - id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC); - spin_unlock_irqrestore(&ctx->req_lock, flags); - - return id; -} - -static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id) -{ - unsigned long flags; - - spin_lock_irqsave(&ctx->req_lock, flags); - idr_remove(&ctx->req_idr, req_id); - spin_unlock_irqrestore(&ctx->req_lock, flags); -} - -static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req) -{ - struct hpre_ctx *ctx; - struct hpre_dfx *dfx; - int id; - - ctx = hpre_req->ctx; - id = hpre_alloc_req_id(ctx); - if (unlikely(id < 0)) - return -EINVAL; - - ctx->req_list[id] = hpre_req; - hpre_req->req_id = id; + struct hpre_ctx *ctx = hpre_req->ctx; + struct hpre_dfx *dfx = ctx->hpre->debug.dfx; - dfx = ctx->hpre->debug.dfx; if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value)) ktime_get_ts64(&hpre_req->req_time); - - return id; -} - -static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req) -{ - struct hpre_ctx *ctx = hpre_req->ctx; - int id = 
hpre_req->req_id; - - if (hpre_req->req_id >= 0) { - hpre_req->req_id = HPRE_INVLD_REQ_ID; - ctx->req_list[id] = NULL; - hpre_free_req_id(ctx, id); - } } static struct hisi_qp *hpre_get_qp_and_start(u8 type) @@ -357,26 +309,19 @@ static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, void **kreq) { - struct hpre_asym_request *req; unsigned int err, done, alg; - int id; #define HPRE_NO_HW_ERR 0 #define HPRE_HW_TASK_DONE 3 #define HREE_HW_ERR_MASK GENMASK(10, 0) #define HREE_SQE_DONE_MASK GENMASK(1, 0) #define HREE_ALG_TYPE_MASK GENMASK(4, 0) - id = (int)le16_to_cpu(sqe->tag); - req = ctx->req_list[id]; - hpre_rm_req_from_ctx(req); - *kreq = req; + *kreq = (void *)le64_to_cpu(sqe->tag); err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) & HREE_HW_ERR_MASK; - done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) & HREE_SQE_DONE_MASK; - if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)) return 0; @@ -387,34 +332,9 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, return -EINVAL; } -static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen) -{ - struct hpre *hpre; - - if (!ctx || !qp || qlen < 0) - return -EINVAL; - - spin_lock_init(&ctx->req_lock); - ctx->qp = qp; - ctx->dev = &qp->qm->pdev->dev; - - hpre = container_of(ctx->qp->qm, struct hpre, qm); - ctx->hpre = hpre; - ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL); - if (!ctx->req_list) - return -ENOMEM; - ctx->key_sz = 0; - ctx->crt_g2_mode = false; - idr_init(&ctx->req_idr); - - return 0; -} - static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all) { if (is_clear_all) { - idr_destroy(&ctx->req_idr); - kfree(ctx->req_list); hisi_qm_free_qps(&ctx->qp, 1); } @@ -484,29 +404,22 @@ static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp) static void hpre_alg_cb(struct hisi_qp *qp, void *resp) { - struct hpre_ctx *ctx = qp->qp_ctx; - struct hpre_dfx *dfx = ctx->hpre->debug.dfx; + struct hpre_asym_request *h_req; struct hpre_sqe *sqe = resp; - struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)]; - if (unlikely(!req)) { - atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value); + h_req = (struct hpre_asym_request *)le64_to_cpu(sqe->tag); + if (unlikely(!h_req)) { + pr_err("Failed to get request, and qp_id is %u\n", qp->qp_id); return; } - req->cb(ctx, resp); -} - -static void hpre_stop_qp_and_put(struct hisi_qp *qp) -{ - hisi_qm_stop_qp(qp); - hisi_qm_free_qps(&qp, 1); + h_req->cb(h_req->ctx, resp); } static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) { struct hisi_qp *qp; - int ret; + struct hpre *hpre; qp = hpre_get_qp_and_start(type); if (IS_ERR(qp)) { @@ -516,19 +429,21 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) qp->qp_ctx = ctx; qp->req_cb = hpre_alg_cb; + spin_lock_init(&ctx->req_lock); + ctx->qp = qp; + ctx->dev = &qp->qm->pdev->dev; + hpre = container_of(ctx->qp->qm, struct hpre, qm); + ctx->hpre = hpre; + ctx->key_sz = 0; + ctx->crt_g2_mode = false; - ret = hpre_ctx_set(ctx, qp, qp->sq_depth); - if (ret) - hpre_stop_qp_and_put(qp); - - return ret; + return 0; } static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) { struct hpre_asym_request *h_req; struct hpre_sqe *msg; - int req_id; void *tmp; if (is_rsa) { @@ -568,11 +483,8 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; h_req->ctx = ctx; - req_id = hpre_add_req_to_ctx(h_req); - if 
(req_id < 0) - return -EBUSY; - - msg->tag = cpu_to_le16((u16)req_id); + hpre_dfx_add_req_time(h_req); + msg->tag = cpu_to_le64((uintptr_t)h_req); return 0; } @@ -638,7 +550,6 @@ static int hpre_dh_compute_value(struct kpp_request *req) return -EINPROGRESS; clear_all: - hpre_rm_req_from_ctx(hpre_req); hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); return ret; @@ -920,7 +831,6 @@ static int hpre_rsa_enc(struct akcipher_request *req) return -EINPROGRESS; clear_all: - hpre_rm_req_from_ctx(hpre_req); hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); return ret; @@ -974,7 +884,6 @@ static int hpre_rsa_dec(struct akcipher_request *req) return -EINPROGRESS; clear_all: - hpre_rm_req_from_ctx(hpre_req); hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); return ret; @@ -1459,7 +1368,7 @@ static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params) return 0; } -static bool hpre_key_is_zero(char *key, unsigned short key_sz) +static bool hpre_key_is_zero(const char *key, unsigned short key_sz) { int i; @@ -1604,7 +1513,6 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx, { struct hpre_asym_request *h_req; struct hpre_sqe *msg; - int req_id; void *tmp; if (req->dst_len < ctx->key_sz << 1) { @@ -1626,11 +1534,8 @@ static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx, msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; h_req->ctx = ctx; - req_id = hpre_add_req_to_ctx(h_req); - if (req_id < 0) - return -EBUSY; - - msg->tag = cpu_to_le16((u16)req_id); + hpre_dfx_add_req_time(h_req); + msg->tag = cpu_to_le64((uintptr_t)h_req); return 0; } @@ -1728,7 +1633,6 @@ static int hpre_ecdh_compute_value(struct kpp_request *req) return -EINPROGRESS; clear_all: - hpre_rm_req_from_ctx(hpre_req); hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); return ret; } @@ -1984,7 +1888,6 @@ static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx, { struct hpre_asym_request *h_req; struct hpre_sqe *msg; - int req_id; void *tmp; if (unlikely(req->dst_len < ctx->key_sz)) { @@ -2006,11 +1909,8 @@ static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx, msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; h_req->ctx = ctx; - req_id = hpre_add_req_to_ctx(h_req); - if (req_id < 0) - return -EBUSY; - - msg->tag = cpu_to_le16((u16)req_id); + hpre_dfx_add_req_time(h_req); + msg->tag = cpu_to_le64((uintptr_t)h_req); return 0; } @@ -2147,7 +2047,6 @@ static int hpre_curve25519_compute_value(struct kpp_request *req) return -EINPROGRESS; clear_all: - hpre_rm_req_from_ctx(hpre_req); hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); return ret; } -- 2.43.0
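Widening tag from __le16 (plus a 16-bit reserved word) to __le64 does not grow the descriptor because the alignment padding shrinks from seven to six reserved 32-bit words, as the _HPRE_SQE_ALIGN_EXT change shows. A quick compile-time check of that size bookkeeping, using simplified stand-in structs that model only the tail of the SQE (in the real hpre_sqe the 64-bit tag also stays naturally aligned behind the preceding __le64 key/in/out fields):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Old descriptor tail: 16-bit tag + 16-bit reserved + 7 x 32-bit padding. */
struct old_tail {
    uint16_t tag;
    uint16_t resv2;
    uint32_t rsvd1[7];
};

/* New descriptor tail: 64-bit tag + 6 x 32-bit padding. */
struct new_tail {
    uint64_t tag;
    uint32_t rsvd1[6];
};

/* Both tails stay 32 bytes, so the SQE size seen by hardware is unchanged. */
static_assert(sizeof(struct old_tail) == 32, "old tail must be 32 bytes");
static_assert(sizeof(struct new_tail) == 32, "new tail must be 32 bytes");

int main(void)
{
    printf("old tail: %zu bytes, new tail: %zu bytes\n",
           sizeof(struct old_tail), sizeof(struct new_tail));
    return 0;
}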
From: JiangShui Yang <yangjiangshui@h-partners.com> driver inclusion category: cleanup bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- Use the mainline patch to replace it. This reverts commit 1435c045dd96c51426a0b0a0ba9c43148fb89dae. Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 312 +++----------------- drivers/crypto/hisilicon/qm.c | 4 +- 2 files changed, 47 insertions(+), 269 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 84c8d3e2cc93..48f909957983 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -94,7 +94,6 @@ struct hpre_dh_ctx { char *g; /* m */ dma_addr_t dma_g; - struct crypto_kpp *soft_tfm; }; struct hpre_ecdh_ctx { @@ -105,7 +104,6 @@ struct hpre_ecdh_ctx { /* low address: x->y */ unsigned char *g; dma_addr_t dma_g; - struct crypto_kpp *soft_tfm; }; struct hpre_curve25519_ctx { @@ -116,7 +114,6 @@ struct hpre_curve25519_ctx { /* gx coordinate */ unsigned char *g; dma_addr_t dma_g; - struct crypto_kpp *soft_tfm; }; struct hpre_ctx { @@ -136,7 +133,6 @@ struct hpre_ctx { unsigned int curve_id; /* for high performance core */ u8 enable_hpcore; - bool fallback; }; struct hpre_asym_request { @@ -181,7 +177,7 @@ static struct hisi_qp *hpre_get_qp_and_start(u8 type) qp = hpre_create_qp(type); if (!qp) { - pr_info_ratelimited("Can not create hpre qp, alloc soft tfm!\n"); + pr_err("Can not create hpre qp!\n"); return ERR_PTR(-ENODEV); } @@ -422,10 +418,8 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) struct hpre *hpre; qp = hpre_get_qp_and_start(type); - if (IS_ERR(qp)) { - ctx->qp = NULL; - return -ENODEV; - } + if (IS_ERR(qp)) + return PTR_ERR(qp); qp->qp_ctx = ctx; qp->req_cb = hpre_alg_cb; @@ -555,48 +549,6 @@ static int hpre_dh_compute_value(struct kpp_request *req) return ret; } -static struct kpp_request *hpre_dh_prepare_fb_req(struct kpp_request *req) -{ - struct kpp_request *fb_req = kpp_request_ctx(req); - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - - kpp_request_set_tfm(fb_req, ctx->dh.soft_tfm); - kpp_request_set_callback(fb_req, req->base.flags, req->base.complete, req->base.data); - kpp_request_set_input(fb_req, req->src, req->src_len); - kpp_request_set_output(fb_req, req->dst, req->dst_len); - - return fb_req; -} - -static int hpre_dh_generate_public_key(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - struct kpp_request *fb_req; - - if (ctx->fallback) { - fb_req = hpre_dh_prepare_fb_req(req); - return crypto_kpp_generate_public_key(fb_req); - } - - return hpre_dh_compute_value(req); -} - -static int hpre_dh_compute_shared_secret(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - struct kpp_request *fb_req; - - if (ctx->fallback) { - fb_req = hpre_dh_prepare_fb_req(req); - return crypto_kpp_compute_shared_secret(fb_req); - } - - return hpre_dh_compute_value(req); -} - static int hpre_is_dh_params_length_valid(unsigned int key_sz) { #define _HPRE_DH_GRP1 768 @@ -623,6 +575,13 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params) struct device *dev = ctx->dev; unsigned int sz; + if (params->p_size > HPRE_DH_MAX_P_SZ) + return -EINVAL; + + if 
(hpre_is_dh_params_length_valid(params->p_size << + HPRE_BITS_2_BYTES_SHIFT)) + return -EINVAL; + sz = ctx->key_sz = params->p_size; ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1, &ctx->dh.dma_xa_p, GFP_KERNEL); @@ -655,9 +614,6 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) struct device *dev = ctx->dev; unsigned int sz = ctx->key_sz; - if (!ctx->qp) - return; - if (is_clear_all) hisi_qm_stop_qp(ctx->qp); @@ -686,13 +642,6 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf, if (crypto_dh_decode_key(buf, len, ¶ms) < 0) return -EINVAL; - if (!ctx->qp) - goto set_soft_secret; - - if (hpre_is_dh_params_length_valid(params.p_size << - HPRE_BITS_2_BYTES_SHIFT)) - goto set_soft_secret; - /* Free old secret if any */ hpre_dh_clear_ctx(ctx, false); @@ -703,55 +652,27 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf, memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key, params.key_size); - ctx->fallback = false; return 0; err_clear_ctx: hpre_dh_clear_ctx(ctx, false); return ret; -set_soft_secret: - ctx->fallback = true; - return crypto_kpp_set_secret(ctx->dh.soft_tfm, buf, len); } static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - if (ctx->fallback) - return crypto_kpp_maxsize(ctx->dh.soft_tfm); - return ctx->key_sz; } static int hpre_dh_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - const char *alg = kpp_alg_name(tfm); - unsigned int reqsize; - int ret; - - ctx->dh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->dh.soft_tfm)) { - pr_err("Failed to alloc dh tfm!\n"); - return PTR_ERR(ctx->dh.soft_tfm); - } - crypto_kpp_set_flags(ctx->dh.soft_tfm, crypto_kpp_get_flags(tfm)); + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - reqsize = max(sizeof(struct hpre_asym_request) + hpre_align_pd(), - sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->dh.soft_tfm)); - kpp_set_reqsize(tfm, reqsize); - - ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); - if (ret && ret != -ENODEV) { - crypto_free_kpp(ctx->dh.soft_tfm); - return ret; - } else if (ret == -ENODEV) { - ctx->fallback = true; - } - - return 0; + return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); } static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) @@ -759,7 +680,6 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); hpre_dh_clear_ctx(ctx, true); - crypto_free_kpp(ctx->dh.soft_tfm); } static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len) @@ -799,8 +719,9 @@ static int hpre_rsa_enc(struct akcipher_request *req) struct hpre_sqe *msg = &hpre_req->req; int ret; - /* For unsupported key size and unavailable devices, use soft tfm instead */ - if (ctx->fallback) { + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); ret = crypto_akcipher_encrypt(req); akcipher_request_set_tfm(req, tfm); @@ -845,8 +766,9 @@ static int hpre_rsa_dec(struct akcipher_request *req) struct hpre_sqe *msg = &hpre_req->req; int ret; - /* For unsupported key size and unavailable devices, use soft tfm instead */ - if (ctx->fallback) { + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); ret = 
crypto_akcipher_decrypt(req); akcipher_request_set_tfm(req, tfm); @@ -899,10 +821,8 @@ static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value, ctx->key_sz = vlen; /* if invalid key size provided, we use software tfm */ - if (!hpre_rsa_key_size_is_support(ctx->key_sz)) { - ctx->fallback = true; + if (!hpre_rsa_key_size_is_support(ctx->key_sz)) return 0; - } ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1, &ctx->rsa.dma_pubkey, @@ -1037,9 +957,6 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) unsigned int half_key_sz = ctx->key_sz >> 1; struct device *dev = ctx->dev; - if (!ctx->qp) - return; - if (is_clear_all) hisi_qm_stop_qp(ctx->qp); @@ -1122,7 +1039,6 @@ static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key, goto free; } - ctx->fallback = false; return 0; free: @@ -1140,9 +1056,6 @@ static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, if (ret) return ret; - if (!ctx->qp) - return 0; - return hpre_rsa_setkey(ctx, key, keylen, false); } @@ -1156,9 +1069,6 @@ static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, if (ret) return ret; - if (!ctx->qp) - return 0; - return hpre_rsa_setkey(ctx, key, keylen, true); } @@ -1166,8 +1076,9 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm) { struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); - /* For unsupported key size and unavailable devices, use soft tfm instead */ - if (ctx->fallback) + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) return crypto_akcipher_maxsize(ctx->rsa.soft_tfm); return ctx->key_sz; @@ -1188,14 +1099,10 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) hpre_align_pd()); ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); - if (ret && ret != -ENODEV) { + if (ret) crypto_free_akcipher(ctx->rsa.soft_tfm); - return ret; - } else if (ret == -ENODEV) { - ctx->fallback = true; - } - return 0; + return ret; } static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) @@ -1409,9 +1316,6 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, struct ecdh params; int ret; - if (ctx->fallback) - return crypto_kpp_set_secret(ctx->ecdh.soft_tfm, buf, len); - if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0) { dev_err(dev, "failed to decode ecdh key!\n"); return -EINVAL; @@ -1637,82 +1541,23 @@ static int hpre_ecdh_compute_value(struct kpp_request *req) return ret; } -static int hpre_ecdh_generate_public_key(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - int ret; - - if (ctx->fallback) { - kpp_request_set_tfm(req, ctx->ecdh.soft_tfm); - ret = crypto_kpp_generate_public_key(req); - kpp_request_set_tfm(req, tfm); - return ret; - } - - return hpre_ecdh_compute_value(req); -} - -static int hpre_ecdh_compute_shared_secret(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - int ret; - - if (ctx->fallback) { - kpp_request_set_tfm(req, ctx->ecdh.soft_tfm); - ret = crypto_kpp_compute_shared_secret(req); - kpp_request_set_tfm(req, tfm); - return ret; - } - - return hpre_ecdh_compute_value(req); -} - static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - if (ctx->fallback) - return crypto_kpp_maxsize(ctx->ecdh.soft_tfm); - /* max size is the pub_key_size, include x and y */ return ctx->key_sz << 1; } -static int 
hpre_ecdh_init_tfm(struct crypto_kpp *tfm) -{ - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - const char *alg = kpp_alg_name(tfm); - int ret; - - ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); - if (!ret) { - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - return 0; - } else if (ret && ret != -ENODEV) { - return ret; - } - - ctx->ecdh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->ecdh.soft_tfm)) { - pr_err("Failed to alloc %s tfm!\n", alg); - return PTR_ERR(ctx->ecdh.soft_tfm); - } - - crypto_kpp_set_flags(ctx->ecdh.soft_tfm, crypto_kpp_get_flags(tfm)); - ctx->fallback = true; - - return 0; -} - static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); ctx->curve_id = ECC_CURVE_NIST_P192; - return hpre_ecdh_init_tfm(tfm); + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) @@ -1722,7 +1567,9 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) ctx->curve_id = ECC_CURVE_NIST_P256; ctx->enable_hpcore = 1; - return hpre_ecdh_init_tfm(tfm); + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) @@ -1731,18 +1578,15 @@ static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) ctx->curve_id = ECC_CURVE_NIST_P384; - return hpre_ecdh_init_tfm(tfm); + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_kpp(ctx->ecdh.soft_tfm); - return; - } - hpre_ecc_clear_ctx(ctx, true, true); } @@ -1808,9 +1652,6 @@ static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, struct device *dev = ctx->dev; int ret = -EINVAL; - if (ctx->fallback) - return crypto_kpp_set_secret(ctx->curve25519.soft_tfm, buf, len); - if (len != CURVE25519_KEY_SIZE || !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) { dev_err(dev, "key is null or key len is not 32bytes!\n"); @@ -2051,83 +1892,26 @@ static int hpre_curve25519_compute_value(struct kpp_request *req) return ret; } -static int hpre_curve25519_generate_public_key(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - int ret; - - if (ctx->fallback) { - kpp_request_set_tfm(req, ctx->curve25519.soft_tfm); - ret = crypto_kpp_generate_public_key(req); - kpp_request_set_tfm(req, tfm); - return ret; - } - - return hpre_curve25519_compute_value(req); -} - -static int hpre_curve25519_compute_shared_secret(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - int ret; - - if (ctx->fallback) { - kpp_request_set_tfm(req, ctx->curve25519.soft_tfm); - ret = crypto_kpp_compute_shared_secret(req); - kpp_request_set_tfm(req, tfm); - return ret; - } - - return hpre_curve25519_compute_value(req); -} - static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - if (ctx->fallback) - return crypto_kpp_maxsize(ctx->curve25519.soft_tfm); - return ctx->key_sz; } static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = 
kpp_tfm_ctx(tfm); - const char *alg = kpp_alg_name(tfm); - int ret; - - ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); - if (!ret) { - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - return 0; - } else if (ret && ret != -ENODEV) { - return ret; - } - - ctx->curve25519.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->curve25519.soft_tfm)) { - pr_err("Failed to alloc curve25519 tfm!\n"); - return PTR_ERR(ctx->curve25519.soft_tfm); - } - crypto_kpp_set_flags(ctx->curve25519.soft_tfm, crypto_kpp_get_flags(tfm)); - ctx->fallback = true; + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - return 0; + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_kpp(ctx->curve25519.soft_tfm); - return; - } - hpre_ecc_clear_ctx(ctx, true, false); } @@ -2147,14 +1931,13 @@ static struct akcipher_alg rsa = { .cra_name = "rsa", .cra_driver_name = "hpre-rsa", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; static struct kpp_alg dh = { .set_secret = hpre_dh_set_secret, - .generate_public_key = hpre_dh_generate_public_key, - .compute_shared_secret = hpre_dh_compute_shared_secret, + .generate_public_key = hpre_dh_compute_value, + .compute_shared_secret = hpre_dh_compute_value, .max_size = hpre_dh_max_size, .init = hpre_dh_init_tfm, .exit = hpre_dh_exit_tfm, @@ -2164,15 +1947,14 @@ static struct kpp_alg dh = { .cra_name = "dh", .cra_driver_name = "hpre-dh", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; static struct kpp_alg ecdh_curves[] = { { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_generate_public_key, - .compute_shared_secret = hpre_ecdh_compute_shared_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p192_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -2182,12 +1964,11 @@ static struct kpp_alg ecdh_curves[] = { .cra_name = "ecdh-nist-p192", .cra_driver_name = "hpre-ecdh-nist-p192", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }, { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_generate_public_key, - .compute_shared_secret = hpre_ecdh_compute_shared_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p256_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -2197,12 +1978,11 @@ static struct kpp_alg ecdh_curves[] = { .cra_name = "ecdh-nist-p256", .cra_driver_name = "hpre-ecdh-nist-p256", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }, { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_generate_public_key, - .compute_shared_secret = hpre_ecdh_compute_shared_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p384_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -2212,15 +1992,14 @@ static struct kpp_alg ecdh_curves[] = { .cra_name = "ecdh-nist-p384", .cra_driver_name = "hpre-ecdh-nist-p384", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, } }; static struct kpp_alg curve25519_alg = { .set_secret = hpre_curve25519_set_secret, - .generate_public_key = 
hpre_curve25519_generate_public_key, - .compute_shared_secret = hpre_curve25519_compute_shared_secret, + .generate_public_key = hpre_curve25519_compute_value, + .compute_shared_secret = hpre_curve25519_compute_value, .max_size = hpre_curve25519_max_size, .init = hpre_curve25519_init_tfm, .exit = hpre_curve25519_exit_tfm, @@ -2230,7 +2009,6 @@ static struct kpp_alg curve25519_alg = { .cra_name = "curve25519", .cra_driver_name = "hpre-curve25519", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index c9d4745cb789..d0fbf9407a7e 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3804,8 +3804,8 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, mutex_unlock(&qm_list->lock); if (ret) - pr_info_ratelimited("Too busy to create qps, node[%d], alg[%u], qp[%d]!\n", - node, alg_type, qp_num); + pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", + node, alg_type, qp_num); err: free_list(&head); -- 2.43.0
maillist inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- Originally, when a queue was requested, it could only be configured with the default algorithm type of 0. Now, when multiple tfms use the same queue, the queue must be selected based on its attributes to meet the requirements of tfm tasks. So the algorithm type attribute of each queue needs to be distinguished. For example, a queue used for compression in ZIP cannot be used for decompression tasks. Fixes: 3f1ec97aacf1 ("crypto: hisilicon/qm - Put device finding logic into QM") Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/hpre/hpre_main.c | 2 +- drivers/crypto/hisilicon/qm.c | 8 ++++---- drivers/crypto/hisilicon/sec2/sec_crypto.c | 1 - drivers/crypto/hisilicon/sec2/sec_main.c | 21 ++++++++++++++++----- drivers/crypto/hisilicon/zip/zip.h | 2 +- drivers/crypto/hisilicon/zip/zip_crypto.c | 13 +++++++++---- drivers/crypto/hisilicon/zip/zip_main.c | 4 ++-- include/linux/hisi_acc_qm.h | 3 +-- 8 files changed, 34 insertions(+), 20 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 1bd362d4704e..430031c442e1 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -464,7 +464,7 @@ struct hisi_qp *hpre_create_qp(u8 type) * type: 0 - RSA/DH. algorithm supported in V2, * 1 - ECC algorithm in V3. */ - ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp); + ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, &type, node, &qp); if (!ret) return qp; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index d0fbf9407a7e..615a618ade89 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3771,7 +3771,7 @@ static int hisi_qm_sort_devices(int node, struct list_head *head, * not meet the requirements will return error.
*/ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, - u8 alg_type, int node, struct hisi_qp **qps) + u8 *alg_type, int node, struct hisi_qp **qps) { struct hisi_qm_resource *tmp; int ret = -ENODEV; @@ -3789,7 +3789,7 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, list_for_each_entry(tmp, &head, list) { for (i = 0; i < qp_num; i++) { - qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); + qps[i] = hisi_qm_create_qp(tmp->qm, alg_type[i]); if (IS_ERR(qps[i])) { hisi_qm_free_qps(qps, i); break; @@ -3804,8 +3804,8 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, mutex_unlock(&qm_list->lock); if (ret) - pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", - node, alg_type, qp_num); + pr_info("Failed to create qps, node[%d], qp[%d]!\n", + node, qp_num); err: free_list(&head); diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index f472477d38a0..56158538a43f 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -626,7 +626,6 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id) qp_ctx = &ctx->qp_ctx[qp_ctx_id]; qp = ctx->qps[qp_ctx_id]; - qp->req_type = 0; qp->qp_ctx = qp_ctx; qp_ctx->qp = qp; qp_ctx->ctx = ctx; diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 1e79b7471d1b..30b6e29bb467 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -417,18 +417,29 @@ struct hisi_qp **sec_create_qps(void) int node = cpu_to_node(raw_smp_processor_id()); u32 ctx_num = ctx_q_num; struct hisi_qp **qps; + u8 *type; int ret; qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL); if (!qps) return NULL; - ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps); - if (!ret) - return qps; + /* The type of SEC is all 0, so just allocated by kcalloc */ + type = kcalloc(ctx_num, sizeof(u8), GFP_KERNEL); + if (!type) { + kfree(qps); + return NULL; + } - kfree(qps); - return NULL; + ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, type, node, qps); + if (ret) { + kfree(type); + kfree(qps); + return NULL; + } + + kfree(type); + return qps; } u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low) diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h index 9fb2a9c01132..b83f228281ab 100644 --- a/drivers/crypto/hisilicon/zip/zip.h +++ b/drivers/crypto/hisilicon/zip/zip.h @@ -99,7 +99,7 @@ enum zip_cap_table_type { ZIP_CORE5_BITMAP, }; -int zip_create_qps(struct hisi_qp **qps, int qp_num, int node); +int zip_create_qps(struct hisi_qp **qps, int qp_num, int node, u8 *alg_type); int hisi_zip_register_to_crypto(struct hisi_qm *qm); void hisi_zip_unregister_from_crypto(struct hisi_qm *qm); bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg); diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index b4a656e0177d..8250a33ba586 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -66,6 +66,7 @@ struct hisi_zip_qp_ctx { struct hisi_acc_sgl_pool *sgl_pool; struct hisi_zip *zip_dev; struct hisi_zip_ctx *ctx; + u8 req_type; }; struct hisi_zip_sqe_ops { @@ -245,7 +246,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx, goto err_unmap_input; } - hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req); + hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp_ctx->req_type, req); /* send 
command to start a task */ atomic64_inc(&dfx->send_cnt); @@ -360,7 +361,6 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx, struct device *dev = &qp->qm->pdev->dev; int ret; - qp->req_type = req_type; qp->alg_type = alg_type; qp->qp_ctx = qp_ctx; @@ -397,10 +397,15 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int { struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL }; struct hisi_zip_qp_ctx *qp_ctx; + u8 alg_type[HZIP_CTX_Q_NUM]; struct hisi_zip *hisi_zip; int ret, i, j; - ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node); + /* alg_type = 0 for compress, 1 for decompress in hw sqe */ + for (i = 0; i < HZIP_CTX_Q_NUM; i++) + alg_type[i] = i; + + ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node, alg_type); if (ret) { pr_err("failed to create zip qps (%d)!\n", ret); return -ENODEV; @@ -409,7 +414,6 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm); for (i = 0; i < HZIP_CTX_Q_NUM; i++) { - /* alg_type = 0 for compress, 1 for decompress in hw sqe */ qp_ctx = &hisi_zip_ctx->qp_ctx[i]; qp_ctx->ctx = hisi_zip_ctx; ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type); @@ -422,6 +426,7 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int } qp_ctx->zip_dev = hisi_zip; + qp_ctx->req_type = req_type; } hisi_zip_ctx->ops = &hisi_zip_ops; diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 956539f3a494..cb55ae8e6ce3 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -445,12 +445,12 @@ static const struct pci_device_id hisi_zip_dev_ids[] = { }; MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); -int zip_create_qps(struct hisi_qp **qps, int qp_num, int node) +int zip_create_qps(struct hisi_qp **qps, int qp_num, int node, u8 *alg_type) { if (node == NUMA_NO_NODE) node = cpu_to_node(raw_smp_processor_id()); - return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps); + return hisi_qm_alloc_qps_node(&zip_devices, qp_num, alg_type, node, qps); } bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg) diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 82f1ab92fad4..135f6d3bbe96 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -458,7 +458,6 @@ struct hisi_qp { u16 sq_depth; u16 cq_depth; u8 alg_type; - u8 req_type; struct qm_dma qdma; void *sqe; @@ -583,7 +582,7 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool); int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, - u8 alg_type, int node, struct hisi_qp **qps); + u8 *alg_type, int node, struct hisi_qp **qps); void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num); void hisi_qm_dev_shutdown(struct pci_dev *pdev); void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list); -- 2.43.0
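After this patch hisi_qm_alloc_qps_node() takes one algorithm type per requested queue rather than a single value for all of them, which is what lets zip request a compression queue (type 0) and a decompression queue (type 1) in one call while sec passes an all-zero array. A toy model of that per-queue type contract, with a hypothetical fake_alloc_qps() standing in for the allocator purely for illustration:

#include <stdio.h>

enum { NR_CTX_QUEUES = 2 };

struct fake_qp {
    unsigned char alg_type;
};

/* Stand-in for hisi_qm_alloc_qps_node(): one type consumed per queue. */
static int fake_alloc_qps(int qp_num, const unsigned char *alg_type,
                          struct fake_qp *qps)
{
    for (int i = 0; i < qp_num; i++)
        qps[i].alg_type = alg_type[i];
    return 0;
}

int main(void)
{
    unsigned char alg_type[NR_CTX_QUEUES];
    struct fake_qp qps[NR_CTX_QUEUES];

    /* As in the zip driver: type 0 for compress, type 1 for decompress. */
    for (int i = 0; i < NR_CTX_QUEUES; i++)
        alg_type[i] = i;

    fake_alloc_qps(NR_CTX_QUEUES, alg_type, qps);
    for (int i = 0; i < NR_CTX_QUEUES; i++)
        printf("queue %d gets alg_type %u\n", i, qps[i].alg_type);
    return 0;
}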
maillist inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- When a single queue is used by multiple tfms, the protection of shared resources by the individual module drivers is no longer sufficient. The hisi_qp_send operation needs to be protected by the lock in the qp. Fixes: 5fdb4b345cfb ("crypto: hisilicon - add a lock for the qp send operation") Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 4 ---- drivers/crypto/hisilicon/qm.c | 16 ++++++++++++---- drivers/crypto/hisilicon/zip/zip_crypto.c | 3 --- include/linux/hisi_acc_qm.h | 1 + 4 files changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 48f909957983..ff1bf8fb803a 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -120,7 +120,6 @@ struct hpre_ctx { struct hisi_qp *qp; struct device *dev; struct hpre *hpre; - spinlock_t req_lock; unsigned int key_sz; bool crt_g2_mode; union { @@ -423,7 +422,6 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) qp->qp_ctx = ctx; qp->req_cb = hpre_alg_cb; - spin_lock_init(&ctx->req_lock); ctx->qp = qp; ctx->dev = &qp->qm->pdev->dev; hpre = container_of(ctx->qp->qm, struct hpre, qm); @@ -491,9 +489,7 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg) do { atomic64_inc(&dfx[HPRE_SEND_CNT].value); - spin_lock_bh(&ctx->req_lock); ret = hisi_qp_send(ctx->qp, msg); - spin_unlock_bh(&ctx->req_lock); if (ret != -EBUSY) break; atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 615a618ade89..1b9cd3ce918c 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -2473,26 +2473,33 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); int hisi_qp_send(struct hisi_qp *qp, const void *msg) { struct hisi_qp_status *qp_status = &qp->qp_status; - u16 sq_tail = qp_status->sq_tail; - u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; - void *sqe = qm_get_avail_sqe(qp); + u16 sq_tail, sq_tail_next; + void *sqe; + spin_lock_bh(&qp->qp_lock); if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || atomic_read(&qp->qm->status.flags) == QM_STOP || qp->is_resetting)) { + spin_unlock_bh(&qp->qp_lock); dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); return -EAGAIN; } - if (!sqe) + sqe = qm_get_avail_sqe(qp); + if (!sqe) { + spin_unlock_bh(&qp->qp_lock); return -EBUSY; + } + sq_tail = qp_status->sq_tail; + sq_tail_next = (sq_tail + 1) % qp->sq_depth; memcpy(sqe, msg, qp->qm->sqe_size); qp->msg[sq_tail] = msg; qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); atomic_inc(&qp->qp_status.used); qp_status->sq_tail = sq_tail_next; + spin_unlock_bh(&qp->qp_lock); return 0; } @@ -3110,6 +3117,7 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, qp->qm = qm; qp->qp_id = id; + spin_lock_init(&qp->qp_lock); spin_lock_init(&qp->backlog.lock); INIT_LIST_HEAD(&qp->backlog.list); diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 8250a33ba586..2f9035c016f3 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -217,7 +217,6 @@ static int
hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx, { struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; - struct hisi_zip_req_q *req_q = &qp_ctx->req_q; struct acomp_req *a_req = req->req; struct hisi_qp *qp = qp_ctx->qp; struct device *dev = &qp->qm->pdev->dev; @@ -250,9 +249,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx, /* send command to start a task */ atomic64_inc(&dfx->send_cnt); - spin_lock_bh(&req_q->req_lock); ret = hisi_qp_send(qp, &zip_sqe); - spin_unlock_bh(&req_q->req_lock); if (unlikely(ret < 0)) { atomic64_inc(&dfx->send_busy_cnt); ret = -EAGAIN; diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 135f6d3bbe96..a358f5631219 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -477,6 +477,7 @@ struct hisi_qp { u16 pasid; struct uacce_queue *uacce_q; + spinlock_t qp_lock; struct instance_backlog backlog; const void **msg; }; -- 2.43.0
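To illustrate the locking model above, here is a minimal standalone C sketch, not the driver code: struct ring and ring_send() are made-up stand-ins for hisi_qp and hisi_qp_send(), and a pthread mutex stands in for qp->qp_lock. The point is only that the queue serializes its own tail update, so callers such as hpre_send() and hisi_zip_do_work() no longer need a per-context lock around the send.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define DEPTH    8
#define SQE_SIZE 16

struct ring {
	pthread_mutex_t lock;          /* owned by the queue, like qp->qp_lock */
	unsigned int tail, used;
	char sqe[DEPTH][SQE_SIZE];
};

/* Send one message; the ring serializes itself, callers need no extra lock. */
static int ring_send(struct ring *r, const void *msg)
{
	int ret = 0;

	pthread_mutex_lock(&r->lock);
	if (r->used == DEPTH) {
		ret = -1;              /* queue full, like -EBUSY */
	} else {
		memcpy(r->sqe[r->tail], msg, SQE_SIZE);
		r->tail = (r->tail + 1) % DEPTH;
		r->used++;
	}
	pthread_mutex_unlock(&r->lock);
	return ret;
}

int main(void)
{
	struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };
	char msg[SQE_SIZE] = "hello";

	printf("send: %d\n", ring_send(&r, msg));
	return 0;
}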
maillist inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- Consolidate the creation and start of qp into the function hisi_qm_alloc_qps_node. This change eliminates the need for each module to perform these steps in two separate phases (creation and start). Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 40 ++---------- drivers/crypto/hisilicon/qm.c | 70 ++++++++++++++++----- drivers/crypto/hisilicon/sec2/sec_crypto.c | 8 --- drivers/crypto/hisilicon/zip/zip_crypto.c | 43 ++----------- include/linux/hisi_acc_qm.h | 1 - 5 files changed, 66 insertions(+), 96 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index ff1bf8fb803a..f1121f0f3a4e 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -169,27 +169,6 @@ static void hpre_dfx_add_req_time(struct hpre_asym_request *hpre_req) ktime_get_ts64(&hpre_req->req_time); } -static struct hisi_qp *hpre_get_qp_and_start(u8 type) -{ - struct hisi_qp *qp; - int ret; - - qp = hpre_create_qp(type); - if (!qp) { - pr_err("Can not create hpre qp!\n"); - return ERR_PTR(-ENODEV); - } - - ret = hisi_qm_start_qp(qp, 0); - if (ret < 0) { - hisi_qm_free_qps(&qp, 1); - pci_err(qp->qm->pdev, "Can not start qp!\n"); - return ERR_PTR(-EINVAL); - } - - return qp; -} - static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req, struct scatterlist *data, unsigned int len, int is_src, dma_addr_t *tmp) @@ -329,9 +308,8 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all) { - if (is_clear_all) { + if (is_clear_all) hisi_qm_free_qps(&ctx->qp, 1); - } ctx->crt_g2_mode = false; ctx->key_sz = 0; @@ -416,11 +394,10 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) struct hisi_qp *qp; struct hpre *hpre; - qp = hpre_get_qp_and_start(type); - if (IS_ERR(qp)) - return PTR_ERR(qp); + qp = hpre_create_qp(type); + if (!qp) + return -ENODEV; - qp->qp_ctx = ctx; qp->req_cb = hpre_alg_cb; ctx->qp = qp; ctx->dev = &qp->qm->pdev->dev; @@ -610,9 +587,6 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) struct device *dev = ctx->dev; unsigned int sz = ctx->key_sz; - if (is_clear_all) - hisi_qm_stop_qp(ctx->qp); - if (ctx->dh.g) { dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g); ctx->dh.g = NULL; @@ -953,9 +927,6 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) unsigned int half_key_sz = ctx->key_sz >> 1; struct device *dev = ctx->dev; - if (is_clear_all) - hisi_qm_stop_qp(ctx->qp); - if (ctx->rsa.pubkey) { dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.pubkey, ctx->rsa.dma_pubkey); @@ -1126,9 +1097,6 @@ static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all, unsigned int sz = ctx->key_sz; unsigned int shift = sz << 1; - if (is_clear_all) - hisi_qm_stop_qp(ctx->qp); - if (is_ecdh && ctx->ecdh.p) { /* ecdh: p->a->k->b */ memzero_explicit(ctx->ecdh.p + shift, sz); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 1b9cd3ce918c..fd92e7133986 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3704,6 +3704,14 @@ static void qm_dev_err_uninit(struct 
hisi_qm *qm) qm->err_ini->hw_err_disable(qm); } +static void qm_release_qp_nolock(struct hisi_qp *qp) +{ + struct hisi_qm *qm = qp->qm; + + qm->qp_in_used--; + idr_remove(&qm->qp_idr, qp->qp_id); +} + /** * hisi_qm_free_qps() - free multiple queue pairs. * @qps: The queue pairs need to be freed. @@ -3716,8 +3724,15 @@ void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) if (!qps || qp_num <= 0) return; - for (i = qp_num - 1; i >= 0; i--) - hisi_qm_release_qp(qps[i]); + down_write(&qps[0]->qm->qps_lock); + + for (i = qp_num - 1; i >= 0; i--) { + qm_stop_qp_nolock(qps[i]); + qm_release_qp_nolock(qps[i]); + } + + up_write(&qps[0]->qm->qps_lock); + qm_pm_put_sync(qps[0]->qm); } EXPORT_SYMBOL_GPL(hisi_qm_free_qps); @@ -3731,6 +3746,43 @@ static void free_list(struct list_head *head) } } +static int qm_get_and_start_qp(struct hisi_qm *qm, int qp_num, struct hisi_qp **qps, u8 *alg_type) +{ + int i, ret; + + ret = qm_pm_get_sync(qm); + if (ret) + return ret; + + down_write(&qm->qps_lock); + for (i = 0; i < qp_num; i++) { + qps[i] = qm_create_qp_nolock(qm, alg_type[i]); + if (IS_ERR(qps[i])) { + ret = -ENODEV; + goto stop_and_free; + } + + ret = qm_start_qp_nolock(qps[i], 0); + if (ret) { + qm_release_qp_nolock(qps[i]); + goto stop_and_free; + } + } + up_write(&qm->qps_lock); + + return 0; + +stop_and_free: + for (i--; i >= 0; i--) { + qm_stop_qp_nolock(qps[i]); + qm_release_qp_nolock(qps[i]); + } + up_write(&qm->qps_lock); + qm_pm_put_sync(qm); + + return ret; +} + static int hisi_qm_sort_devices(int node, struct list_head *head, struct hisi_qm_list *qm_list) { @@ -3784,7 +3836,6 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, struct hisi_qm_resource *tmp; int ret = -ENODEV; LIST_HEAD(head); - int i; if (!qps || !qm_list || qp_num <= 0) return -EINVAL; @@ -3796,18 +3847,9 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, } list_for_each_entry(tmp, &head, list) { - for (i = 0; i < qp_num; i++) { - qps[i] = hisi_qm_create_qp(tmp->qm, alg_type[i]); - if (IS_ERR(qps[i])) { - hisi_qm_free_qps(qps, i); - break; - } - } - - if (i == qp_num) { - ret = 0; + ret = qm_get_and_start_qp(tmp->qm, qp_num, qps, alg_type); + if (!ret) break; - } } mutex_unlock(&qm_list->lock); diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 56158538a43f..1e304c2c52df 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -626,7 +626,6 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id) qp_ctx = &ctx->qp_ctx[qp_ctx_id]; qp = ctx->qps[qp_ctx_id]; - qp->qp_ctx = qp_ctx; qp_ctx->qp = qp; qp_ctx->ctx = ctx; @@ -644,14 +643,8 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id) if (ret) goto err_destroy_idr; - ret = hisi_qm_start_qp(qp, 0); - if (ret < 0) - goto err_resource_free; - return 0; -err_resource_free: - sec_free_qp_ctx_resource(ctx, qp_ctx); err_destroy_idr: idr_destroy(&qp_ctx->req_idr); return ret; @@ -660,7 +653,6 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id) static void sec_release_qp_ctx(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { - hisi_qm_stop_qp(qp_ctx->qp); sec_free_qp_ctx_resource(ctx, qp_ctx); idr_destroy(&qp_ctx->req_idr); } diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 2f9035c016f3..108ab667131d 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -352,32 +352,6 @@ static int 
hisi_zip_adecompress(struct acomp_req *acomp_req) return ret; } -static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx, - int alg_type, int req_type) -{ - struct device *dev = &qp->qm->pdev->dev; - int ret; - - qp->alg_type = alg_type; - qp->qp_ctx = qp_ctx; - - ret = hisi_qm_start_qp(qp, 0); - if (ret < 0) { - dev_err(dev, "failed to start qp (%d)!\n", ret); - return ret; - } - - qp_ctx->qp = qp; - - return 0; -} - -static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx) -{ - hisi_qm_stop_qp(qp_ctx->qp); - hisi_qm_free_qps(&qp_ctx->qp, 1); -} - static const struct hisi_zip_sqe_ops hisi_zip_ops = { .sqe_type = 0x3, .fill_addr = hisi_zip_fill_addr, @@ -396,7 +370,7 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int struct hisi_zip_qp_ctx *qp_ctx; u8 alg_type[HZIP_CTX_Q_NUM]; struct hisi_zip *hisi_zip; - int ret, i, j; + int ret, i; /* alg_type = 0 for compress, 1 for decompress in hw sqe */ for (i = 0; i < HZIP_CTX_Q_NUM; i++) @@ -413,17 +387,9 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int for (i = 0; i < HZIP_CTX_Q_NUM; i++) { qp_ctx = &hisi_zip_ctx->qp_ctx[i]; qp_ctx->ctx = hisi_zip_ctx; - ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type); - if (ret) { - for (j = i - 1; j >= 0; j--) - hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp); - - hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM); - return ret; - } - qp_ctx->zip_dev = hisi_zip; qp_ctx->req_type = req_type; + qp_ctx->qp = qps[i]; } hisi_zip_ctx->ops = &hisi_zip_ops; @@ -433,10 +399,13 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx) { + struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL }; int i; for (i = 0; i < HZIP_CTX_Q_NUM; i++) - hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]); + qps[i] = hisi_zip_ctx->qp_ctx[i].qp; + + hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM); } static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index a358f5631219..f2df7143f00c 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -467,7 +467,6 @@ struct hisi_qp { struct hisi_qp_status qp_status; struct hisi_qp_ops *hw_ops; - void *qp_ctx; void (*req_cb)(struct hisi_qp *qp, void *data); void (*event_cb)(struct hisi_qp *qp); -- 2.43.0
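The consolidated allocation flow can be pictured with a small standalone sketch (hypothetical create_qp()/start_qp()/stop_qp()/release_qp() helpers, not the QM API): every qp that was created and started before the first failure is stopped and released again, which is what qm_get_and_start_qp() now does under qps_lock on behalf of all modules.

#include <stdio.h>
#include <stdlib.h>

struct qp { int id; int started; };

static struct qp *create_qp(int id)  { struct qp *q = calloc(1, sizeof(*q)); if (q) q->id = id; return q; }
static int start_qp(struct qp *q)    { q->started = 1; return q->id == 2 ? -1 : 0; /* fail the 3rd qp to show rollback */ }
static void stop_qp(struct qp *q)    { q->started = 0; }
static void release_qp(struct qp *q) { free(q); }

/* Create and start num qps in one call; undo everything on the first failure. */
static int alloc_and_start(struct qp **qps, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		qps[i] = create_qp(i);
		if (!qps[i])
			goto rollback;
		if (start_qp(qps[i])) {
			release_qp(qps[i]);
			goto rollback;
		}
	}
	return 0;

rollback:
	for (i--; i >= 0; i--) {
		stop_qp(qps[i]);
		release_qp(qps[i]);
	}
	return -1;
}

int main(void)
{
	struct qp *qps[4];

	printf("alloc_and_start: %d\n", alloc_and_start(qps, 4));
	return 0;
}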
maillist inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- Add reference counting to queues. When all queues are occupied, tfm will reuse queues with the same algorithm type that have already been allocated in the kernel. The corresponding queue will be released when the reference count reaches 1. Reviewed-by: Longfang Liu <liulongfang@huawei.com> Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/qm.c | 65 +++++++++++++++++++++++++++-------- include/linux/hisi_acc_qm.h | 1 + 2 files changed, 52 insertions(+), 14 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index fd92e7133986..14382ce021b7 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -2106,7 +2106,38 @@ static void hisi_qm_unset_hw_reset(struct hisi_qp *qp) *addr = 0; } -static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) +static struct hisi_qp *find_shareable_qp(struct hisi_qm *qm, u8 alg_type, bool is_in_kernel) +{ + struct device *dev = &qm->pdev->dev; + struct hisi_qp *share_qp = NULL; + struct hisi_qp *qp; + u32 ref_count = ~0; + int i; + + if (!is_in_kernel) + goto queues_busy; + + for (i = 0; i < qm->qp_num; i++) { + qp = &qm->qp_array[i]; + if (qp->is_in_kernel && qp->alg_type == alg_type && qp->ref_count < ref_count) { + ref_count = qp->ref_count; + share_qp = qp; + } + } + + if (share_qp) { + share_qp->ref_count++; + return share_qp; + } + +queues_busy: + dev_info_ratelimited(dev, "All %u queues of QM are busy and no shareable queue\n", + qm->qp_num); + atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); + return ERR_PTR(-EBUSY); +} + +static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type, bool is_in_kernel) { struct device *dev = &qm->pdev->dev; struct hisi_qp *qp; @@ -2117,12 +2148,9 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) return ERR_PTR(-EPERM); } - if (qm->qp_in_used == qm->qp_num) { - dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", - qm->qp_num); - atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); - return ERR_PTR(-EBUSY); - } + /* Try to find a shareable queue when all queues are busy */ + if (qm->qp_in_used == qm->qp_num) + return find_shareable_qp(qm, alg_type, is_in_kernel); qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); if (qp_id < 0) { @@ -2138,10 +2166,10 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) qp->event_cb = NULL; qp->req_cb = NULL; - qp->qp_id = qp_id; qp->alg_type = alg_type; - qp->is_in_kernel = true; + qp->is_in_kernel = is_in_kernel; qm->qp_in_used++; + qp->ref_count = 1; return qp; } @@ -2163,7 +2191,7 @@ static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) return ERR_PTR(ret); down_write(&qm->qps_lock); - qp = qm_create_qp_nolock(qm, alg_type); + qp = qm_create_qp_nolock(qm, alg_type, false); up_write(&qm->qps_lock); if (IS_ERR(qp)) @@ -2562,7 +2590,6 @@ static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, qp->uacce_q = q; qp->event_cb = qm_qp_event_notifier; qp->pasid = arg; - qp->is_in_kernel = false; return 0; } @@ -3708,6 +3735,9 @@ static void qm_release_qp_nolock(struct hisi_qp *qp) { struct hisi_qm *qm = qp->qm; + if (--qp->ref_count) + return; + qm->qp_in_used--; 
idr_remove(&qm->qp_idr, qp->qp_id); } @@ -3727,7 +3757,9 @@ void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) down_write(&qps[0]->qm->qps_lock); for (i = qp_num - 1; i >= 0; i--) { - qm_stop_qp_nolock(qps[i]); + if (qps[i]->ref_count == 1) + qm_stop_qp_nolock(qps[i]); + qm_release_qp_nolock(qps[i]); } @@ -3756,12 +3788,15 @@ static int qm_get_and_start_qp(struct hisi_qm *qm, int qp_num, struct hisi_qp ** down_write(&qm->qps_lock); for (i = 0; i < qp_num; i++) { - qps[i] = qm_create_qp_nolock(qm, alg_type[i]); + qps[i] = qm_create_qp_nolock(qm, alg_type[i], true); if (IS_ERR(qps[i])) { ret = -ENODEV; goto stop_and_free; } + if (qps[i]->ref_count != 1) + continue; + ret = qm_start_qp_nolock(qps[i], 0); if (ret) { qm_release_qp_nolock(qps[i]); @@ -3774,7 +3809,9 @@ static int qm_get_and_start_qp(struct hisi_qm *qm, int qp_num, struct hisi_qp ** stop_and_free: for (i--; i >= 0; i--) { - qm_stop_qp_nolock(qps[i]); + if (qps[i]->ref_count == 1) + qm_stop_qp_nolock(qps[i]); + qm_release_qp_nolock(qps[i]); } up_write(&qm->qps_lock); diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index f2df7143f00c..d0141e045ccc 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -476,6 +476,7 @@ struct hisi_qp { u16 pasid; struct uacce_queue *uacce_q; + u32 ref_count; spinlock_t qp_lock; struct instance_backlog backlog; const void **msg; -- 2.43.0
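A compact standalone model of the sharing policy described above (struct qp, get_qp() and put_qp() are illustrative names, not the QM API): when no free queue is left, the in-kernel queue of the requested algorithm type with the fewest users is picked and its reference count bumped, and only the last put actually frees the queue.

#include <stdio.h>

#define NUM_QP 4

struct qp { int alg_type; int in_kernel; unsigned int ref; };

/* When no free qp is left, share the in-kernel qp of the same type with the fewest users. */
static struct qp *get_qp(struct qp *pool, int num, int free_left, int alg_type)
{
	struct qp *best = NULL;
	int i;

	if (free_left > 0)
		return NULL;           /* the real code allocates a fresh qp here */

	for (i = 0; i < num; i++) {
		struct qp *q = &pool[i];

		if (q->in_kernel && q->alg_type == alg_type &&
		    (!best || q->ref < best->ref))
			best = q;
	}
	if (best)
		best->ref++;
	return best;
}

/* Drop one reference; the last user actually frees the queue. */
static void put_qp(struct qp *q)
{
	if (--q->ref == 0)
		printf("qp released\n");
}

int main(void)
{
	struct qp pool[NUM_QP] = {
		{ .alg_type = 0, .in_kernel = 1, .ref = 1 },
		{ .alg_type = 0, .in_kernel = 1, .ref = 3 },
		{ .alg_type = 1, .in_kernel = 1, .ref = 1 },
		{ .alg_type = 0, .in_kernel = 0, .ref = 1 },
	};
	struct qp *q = get_qp(pool, NUM_QP, 0, 0);

	printf("shared qp ref after get: %u\n", q->ref);
	put_qp(q);
	printf("ref after put: %u\n", q->ref);
	return 0;
}

In the patch the same bookkeeping happens under qm->qps_lock, and a shared queue is only started by its first user and only stopped by its last one.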
maillist inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- Add device sorting criteria to prioritize devices with fewer references and closer NUMA distances. Devices that are fully occupied will not be prioritized for use. Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/qm.c | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 14382ce021b7..8727e1cb6a86 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3768,6 +3768,20 @@ void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) } EXPORT_SYMBOL_GPL(hisi_qm_free_qps); +static void qm_insert_sorted(struct list_head *head, struct hisi_qm_resource *res) +{ + struct hisi_qm_resource *tmp; + struct list_head *n = head; + + list_for_each_entry(tmp, head, list) { + if (res->distance < tmp->distance) { + n = &tmp->list; + break; + } + } + list_add_tail(&res->list, n); +} + static void free_list(struct list_head *head) { struct hisi_qm_resource *res, *tmp; @@ -3823,11 +3837,12 @@ static int qm_get_and_start_qp(struct hisi_qm *qm, int qp_num, struct hisi_qp ** static int hisi_qm_sort_devices(int node, struct list_head *head, struct hisi_qm_list *qm_list) { - struct hisi_qm_resource *res, *tmp; + struct hisi_qm_resource *res; struct hisi_qm *qm; - struct list_head *n; struct device *dev; int dev_node; + LIST_HEAD(non_full_list); + LIST_HEAD(full_list); list_for_each_entry(qm, &qm_list->list, list) { dev = &qm->pdev->dev; @@ -3842,16 +3857,16 @@ static int hisi_qm_sort_devices(int node, struct list_head *head, res->qm = qm; res->distance = node_distance(dev_node, node); - n = head; - list_for_each_entry(tmp, head, list) { - if (res->distance < tmp->distance) { - n = &tmp->list; - break; - } - } - list_add_tail(&res->list, n); + + if (qm->qp_in_used == qm->qp_num) + qm_insert_sorted(&full_list, res); + else + qm_insert_sorted(&non_full_list, res); } + list_splice_tail(&non_full_list, head); + list_splice_tail(&full_list, head); + return 0; } -- 2.43.0
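The resulting device ordering can be shown with a short standalone sketch; it uses qsort() on a flat array rather than the two sorted lists built by hisi_qm_sort_devices(), but it yields the same priority: devices with free queues come first, and within each group the smaller NUMA distance wins.

#include <stdio.h>
#include <stdlib.h>

struct dev_res { const char *name; int distance; int full; };

/* Non-full devices come first; within each group, smaller NUMA distance wins. */
static int cmp_res(const void *a, const void *b)
{
	const struct dev_res *x = a, *y = b;

	if (x->full != y->full)
		return x->full - y->full;
	return x->distance - y->distance;
}

int main(void)
{
	struct dev_res devs[] = {
		{ "zip0", 20, 1 },
		{ "zip1", 10, 0 },
		{ "zip2", 30, 0 },
		{ "zip3", 10, 1 },
	};
	int i, n = sizeof(devs) / sizeof(devs[0]);

	qsort(devs, n, sizeof(devs[0]), cmp_res);
	for (i = 0; i < n; i++)
		printf("%s dist=%d full=%d\n", devs[i].name, devs[i].distance, devs[i].full);
	return 0;
}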
driver inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- When the hardware queue resource alloc fail, use soft algorithm to complete the work. In addition, it is force to require that the software tfm allocation is successful before normal compression/deccompression can be conducted. Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/Kconfig | 1 + drivers/crypto/hisilicon/zip/zip_crypto.c | 83 +++++++++++++++++++++-- 2 files changed, 78 insertions(+), 6 deletions(-) diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 257c64809b48..5eee12f5f26c 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -57,6 +57,7 @@ config CRYPTO_DEV_HISI_ZIP depends on UACCE || UACCE=n depends on ACPI select CRYPTO_DEV_HISI_QM + select CRYPTO_DEFLATE help Support for HiSilicon ZIP Driver diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 108ab667131d..25493821ac2b 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -4,6 +4,7 @@ #include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/dma-mapping.h> +#include <linux/highmem.h> #include <linux/scatterlist.h> #include "zip.h" @@ -84,6 +85,8 @@ struct hisi_zip_sqe_ops { struct hisi_zip_ctx { struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM]; const struct hisi_zip_sqe_ops *ops; + struct crypto_comp *soft_tfm; + bool fallback; }; static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp) @@ -110,6 +113,38 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444); MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)"); +static int hisi_zip_fallback_do_work(struct crypto_comp *tfm, struct acomp_req *acomp_req, + bool is_decompress) +{ + void *input, *output; + const char *algo; + int err = 0; + int ret; + + input = kmap_local_page(sg_page(acomp_req->src)) + acomp_req->src->offset; + output = kmap_local_page(sg_page(acomp_req->dst)) + acomp_req->dst->offset; + + if (!is_decompress) + ret = crypto_comp_compress(tfm, input, acomp_req->slen, + output, &acomp_req->dlen); + else + ret = crypto_comp_decompress(tfm, input, acomp_req->slen, + output, &acomp_req->dlen); + if (ret) { + algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm)); + pr_err("failed to do fallback %s work, ret=%d\n", algo, ret); + err = -EIO; + } + + kunmap_local(output); + kunmap_local(input); + + if (acomp_req->base.complete) + acomp_request_complete(acomp_req, err); + + return ret; +} + static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx, struct acomp_req *req) { @@ -317,6 +352,9 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req) struct hisi_zip_req *req; int ret; + if (ctx->fallback) + return hisi_zip_fallback_do_work(ctx->soft_tfm, acomp_req, 0); + req = hisi_zip_create_req(qp_ctx, acomp_req); if (IS_ERR(req)) return PTR_ERR(req); @@ -338,6 +376,9 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req) struct hisi_zip_req *req; int ret; + if (ctx->fallback) + return hisi_zip_fallback_do_work(ctx->soft_tfm, acomp_req, 1); + req = hisi_zip_create_req(qp_ctx, acomp_req); if (IS_ERR(req)) return PTR_ERR(req); @@ -505,6 +546,29 @@ static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx, 
ctx->qp_ctx[i].qp->req_cb = fn; } +static int hisi_zip_fallback_init(struct hisi_zip_ctx *ctx, const char *alg_name) +{ + if (!IS_ERR_OR_NULL(ctx->soft_tfm)) + return 0; + + ctx->soft_tfm = crypto_alloc_comp(alg_name, 0, 0); + if (IS_ERR_OR_NULL(ctx->soft_tfm)) { + pr_err("could not alloc soft tfm %s\n", alg_name); + return PTR_ERR(ctx->soft_tfm); + } + + return 0; +} + +static void hisi_zip_fallback_uninit(struct hisi_zip_ctx *ctx) +{ + if (IS_ERR_OR_NULL(ctx->soft_tfm)) + return; + + crypto_free_comp(ctx->soft_tfm); + ctx->soft_tfm = NULL; +} + static int hisi_zip_acomp_init(struct crypto_acomp *tfm) { const char *alg_name = crypto_tfm_alg_name(&tfm->base); @@ -515,7 +579,7 @@ static int hisi_zip_acomp_init(struct crypto_acomp *tfm) ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node); if (ret) { pr_err("failed to init ctx (%d)!\n", ret); - return ret; + goto switch_to_soft; } dev = &ctx->qp_ctx[0].qp->qm->pdev->dev; @@ -540,16 +604,22 @@ static int hisi_zip_acomp_init(struct crypto_acomp *tfm) hisi_zip_release_req_q(ctx); err_ctx_exit: hisi_zip_ctx_exit(ctx); - return ret; +switch_to_soft: + ctx->fallback = true; + return hisi_zip_fallback_init(ctx, alg_name); } static void hisi_zip_acomp_exit(struct crypto_acomp *tfm) { struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); - hisi_zip_release_sgl_pool(ctx); - hisi_zip_release_req_q(ctx); - hisi_zip_ctx_exit(ctx); + if (!ctx->fallback) { + hisi_zip_release_sgl_pool(ctx); + hisi_zip_release_req_q(ctx); + hisi_zip_ctx_exit(ctx); + } + + hisi_zip_fallback_uninit(ctx); } static struct acomp_alg hisi_zip_acomp_deflate = { @@ -560,7 +630,8 @@ static struct acomp_alg hisi_zip_acomp_deflate = { .base = { .cra_name = "deflate", .cra_driver_name = "hisi-deflate-acomp", - .cra_flags = CRYPTO_ALG_ASYNC, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, .cra_module = THIS_MODULE, .cra_priority = HZIP_ALG_PRIORITY, .cra_ctxsize = sizeof(struct hisi_zip_ctx), -- 2.43.0
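The fallback switch itself is a plain dispatch on a per-context flag. A standalone sketch of that control flow (comp_ctx, ctx_init() and do_compress() are hypothetical names, not the zip driver code): context initialization still succeeds when no hardware queue can be obtained, and every request is then routed to the software path.

#include <stdio.h>

struct comp_ctx {
	int fallback;                  /* set when no hardware queue could be allocated */
	int (*hw_compress)(const char *src, char *dst);
	int (*sw_compress)(const char *src, char *dst);
};

static int hw_compress(const char *src, char *dst) { (void)src; (void)dst; return -1; /* hardware path unused here */ }
static int sw_compress(const char *src, char *dst) { dst[0] = src[0]; return 0; }

static int ctx_init(struct comp_ctx *ctx, int hw_ok)
{
	ctx->hw_compress = hw_compress;
	ctx->sw_compress = sw_compress;
	ctx->fallback = !hw_ok;        /* init still succeeds, work goes to software */
	return 0;
}

static int do_compress(struct comp_ctx *ctx, const char *src, char *dst)
{
	if (ctx->fallback)
		return ctx->sw_compress(src, dst);
	return ctx->hw_compress(src, dst);
}

int main(void)
{
	struct comp_ctx ctx;
	char out[4];

	ctx_init(&ctx, 0);             /* simulate hardware queue allocation failure */
	printf("compress via fallback: %d\n", do_compress(&ctx, "abc", out));
	return 0;
}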
From: Weili Qian <qianweili@huawei.com> maillist inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- When all hardware queues are busy and no shareable queue, new processes fail to apply for queues. To avoid affecting tasks, support fallback mechanism when hardware queues are unavailable. HPRE driver supports DH algorithm, limited to prime numbers up to 4K. It supports prime numbers larger than 4K via fallback mechanism. Fixes: 05e7b906aa7c ("crypto: hisilicon/hpre - add 'ECDH' algorithm") Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 238 ++++++++++++++++---- 1 file changed, 199 insertions(+), 39 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index f1121f0f3a4e..8fd809ba0411 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -94,6 +94,7 @@ struct hpre_dh_ctx { char *g; /* m */ dma_addr_t dma_g; + struct crypto_kpp *soft_tfm; }; struct hpre_ecdh_ctx { @@ -104,6 +105,7 @@ struct hpre_ecdh_ctx { /* low address: x->y */ unsigned char *g; dma_addr_t dma_g; + struct crypto_kpp *soft_tfm; }; struct hpre_curve25519_ctx { @@ -132,6 +134,7 @@ struct hpre_ctx { unsigned int curve_id; /* for high performance core */ u8 enable_hpcore; + bool fallback; }; struct hpre_asym_request { @@ -395,8 +398,10 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) struct hpre *hpre; qp = hpre_create_qp(type); - if (!qp) + if (!qp) { + ctx->qp = NULL; return -ENODEV; + } qp->req_cb = hpre_alg_cb; ctx->qp = qp; @@ -522,6 +527,48 @@ static int hpre_dh_compute_value(struct kpp_request *req) return ret; } +static struct kpp_request *hpre_dh_prepare_fb_req(struct kpp_request *req) +{ + struct kpp_request *fb_req = kpp_request_ctx(req); + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + kpp_request_set_tfm(fb_req, ctx->dh.soft_tfm); + kpp_request_set_callback(fb_req, req->base.flags, req->base.complete, req->base.data); + kpp_request_set_input(fb_req, req->src, req->src_len); + kpp_request_set_output(fb_req, req->dst, req->dst_len); + + return fb_req; +} + +static int hpre_dh_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct kpp_request *fb_req; + + if (ctx->fallback) { + fb_req = hpre_dh_prepare_fb_req(req); + return crypto_kpp_generate_public_key(fb_req); + } + + return hpre_dh_compute_value(req); +} + +static int hpre_dh_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct kpp_request *fb_req; + + if (ctx->fallback) { + fb_req = hpre_dh_prepare_fb_req(req); + return crypto_kpp_compute_shared_secret(fb_req); + } + + return hpre_dh_compute_value(req); +} + static int hpre_is_dh_params_length_valid(unsigned int key_sz) { #define _HPRE_DH_GRP1 768 @@ -548,13 +595,6 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params) struct device *dev = ctx->dev; unsigned int sz; - if (params->p_size > HPRE_DH_MAX_P_SZ) - return -EINVAL; - - if (hpre_is_dh_params_length_valid(params->p_size << - HPRE_BITS_2_BYTES_SHIFT)) - return -EINVAL; - sz = ctx->key_sz = 
params->p_size; ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1, &ctx->dh.dma_xa_p, GFP_KERNEL); @@ -587,6 +627,9 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) struct device *dev = ctx->dev; unsigned int sz = ctx->key_sz; + if (!ctx->qp) + return; + if (ctx->dh.g) { dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g); ctx->dh.g = NULL; @@ -612,6 +655,13 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf, if (crypto_dh_decode_key(buf, len, ¶ms) < 0) return -EINVAL; + if (!ctx->qp) + goto set_soft_secret; + + if (hpre_is_dh_params_length_valid(params.p_size << + HPRE_BITS_2_BYTES_SHIFT)) + goto set_soft_secret; + /* Free old secret if any */ hpre_dh_clear_ctx(ctx, false); @@ -622,27 +672,55 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf, memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key, params.key_size); + ctx->fallback = false; return 0; err_clear_ctx: hpre_dh_clear_ctx(ctx, false); return ret; +set_soft_secret: + ctx->fallback = true; + return crypto_kpp_set_secret(ctx->dh.soft_tfm, buf, len); } static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) + return crypto_kpp_maxsize(ctx->dh.soft_tfm); + return ctx->key_sz; } static int hpre_dh_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + const char *alg = kpp_alg_name(tfm); + unsigned int reqsize; + int ret; - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + ctx->dh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->dh.soft_tfm)) { + pr_err("Failed to alloc dh tfm!\n"); + return PTR_ERR(ctx->dh.soft_tfm); + } + + crypto_kpp_set_flags(ctx->dh.soft_tfm, crypto_kpp_get_flags(tfm)); + + reqsize = max(sizeof(struct hpre_asym_request) + hpre_align_pd(), + sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->dh.soft_tfm)); + kpp_set_reqsize(tfm, reqsize); - return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); + ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); + if (ret && ret != -ENODEV) { + crypto_free_kpp(ctx->dh.soft_tfm); + return ret; + } else if (ret == -ENODEV) { + ctx->fallback = true; + } + + return 0; } static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) @@ -650,6 +728,7 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); hpre_dh_clear_ctx(ctx, true); + crypto_free_kpp(ctx->dh.soft_tfm); } static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len) @@ -689,9 +768,8 @@ static int hpre_rsa_enc(struct akcipher_request *req) struct hpre_sqe *msg = &hpre_req->req; int ret; - /* For 512 and 1536 bits key size, use soft tfm instead */ - if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || - ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { + /* For unsupported key size and unavailable devices, use soft tfm instead */ + if (ctx->fallback) { akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); ret = crypto_akcipher_encrypt(req); akcipher_request_set_tfm(req, tfm); @@ -736,9 +814,8 @@ static int hpre_rsa_dec(struct akcipher_request *req) struct hpre_sqe *msg = &hpre_req->req; int ret; - /* For 512 and 1536 bits key size, use soft tfm instead */ - if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || - ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { + /* For unsupported key size and unavailable devices, use soft tfm instead */ + if (ctx->fallback) { akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); ret = crypto_akcipher_decrypt(req); akcipher_request_set_tfm(req, tfm); @@ -791,8 +868,10 @@ static int 
hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value, ctx->key_sz = vlen; /* if invalid key size provided, we use software tfm */ - if (!hpre_rsa_key_size_is_support(ctx->key_sz)) + if (!hpre_rsa_key_size_is_support(ctx->key_sz)) { + ctx->fallback = true; return 0; + } ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1, &ctx->rsa.dma_pubkey, @@ -927,6 +1006,9 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) unsigned int half_key_sz = ctx->key_sz >> 1; struct device *dev = ctx->dev; + if (!ctx->qp) + return; + if (ctx->rsa.pubkey) { dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.pubkey, ctx->rsa.dma_pubkey); @@ -1006,6 +1088,7 @@ static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key, goto free; } + ctx->fallback = false; return 0; free: @@ -1023,6 +1106,9 @@ static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, if (ret) return ret; + if (!ctx->qp) + return 0; + return hpre_rsa_setkey(ctx, key, keylen, false); } @@ -1036,6 +1122,9 @@ static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, if (ret) return ret; + if (!ctx->qp) + return 0; + return hpre_rsa_setkey(ctx, key, keylen, true); } @@ -1043,9 +1132,8 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm) { struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); - /* For 512 and 1536 bits key size, use soft tfm instead */ - if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || - ctx->key_sz == HPRE_RSA_1536BITS_KSZ) + /* For unsupported key size and unavailable devices, use soft tfm instead */ + if (ctx->fallback) return crypto_akcipher_maxsize(ctx->rsa.soft_tfm); return ctx->key_sz; @@ -1066,10 +1154,14 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) hpre_align_pd()); ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); - if (ret) + if (ret && ret != -ENODEV) { crypto_free_akcipher(ctx->rsa.soft_tfm); + return ret; + } else if (ret == -ENODEV) { + ctx->fallback = true; + } - return ret; + return 0; } static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) @@ -1280,6 +1372,9 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, struct ecdh params; int ret; + if (ctx->fallback) + return crypto_kpp_set_secret(ctx->ecdh.soft_tfm, buf, len); + if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0) { dev_err(dev, "failed to decode ecdh key!\n"); return -EINVAL; @@ -1505,23 +1600,82 @@ static int hpre_ecdh_compute_value(struct kpp_request *req) return ret; } +static int hpre_ecdh_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + int ret; + + if (ctx->fallback) { + kpp_request_set_tfm(req, ctx->ecdh.soft_tfm); + ret = crypto_kpp_generate_public_key(req); + kpp_request_set_tfm(req, tfm); + return ret; + } + + return hpre_ecdh_compute_value(req); +} + +static int hpre_ecdh_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + int ret; + + if (ctx->fallback) { + kpp_request_set_tfm(req, ctx->ecdh.soft_tfm); + ret = crypto_kpp_compute_shared_secret(req); + kpp_request_set_tfm(req, tfm); + return ret; + } + + return hpre_ecdh_compute_value(req); +} + static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) + return crypto_kpp_maxsize(ctx->ecdh.soft_tfm); + /* max size is the pub_key_size, include x and y */ return ctx->key_sz << 1; } +static int hpre_ecdh_init_tfm(struct crypto_kpp *tfm) +{ + 
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + const char *alg = kpp_alg_name(tfm); + int ret; + + ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); + if (!ret) { + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + return 0; + } else if (ret && ret != -ENODEV) { + return ret; + } + + ctx->ecdh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->ecdh.soft_tfm)) { + pr_err("Failed to alloc %s tfm!\n", alg); + return PTR_ERR(ctx->ecdh.soft_tfm); + } + + crypto_kpp_set_flags(ctx->ecdh.soft_tfm, crypto_kpp_get_flags(tfm)); + ctx->fallback = true; + + return 0; +} + static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); ctx->curve_id = ECC_CURVE_NIST_P192; - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - - return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); + return hpre_ecdh_init_tfm(tfm); } static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) @@ -1531,9 +1685,7 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) ctx->curve_id = ECC_CURVE_NIST_P256; ctx->enable_hpcore = 1; - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - - return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); + return hpre_ecdh_init_tfm(tfm); } static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) @@ -1542,15 +1694,18 @@ static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) ctx->curve_id = ECC_CURVE_NIST_P384; - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - - return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); + return hpre_ecdh_init_tfm(tfm); } static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) { + crypto_free_kpp(ctx->ecdh.soft_tfm); + return; + } + hpre_ecc_clear_ctx(ctx, true, true); } @@ -1895,13 +2050,14 @@ static struct akcipher_alg rsa = { .cra_name = "rsa", .cra_driver_name = "hpre-rsa", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; static struct kpp_alg dh = { .set_secret = hpre_dh_set_secret, - .generate_public_key = hpre_dh_compute_value, - .compute_shared_secret = hpre_dh_compute_value, + .generate_public_key = hpre_dh_generate_public_key, + .compute_shared_secret = hpre_dh_compute_shared_secret, .max_size = hpre_dh_max_size, .init = hpre_dh_init_tfm, .exit = hpre_dh_exit_tfm, @@ -1911,14 +2067,15 @@ static struct kpp_alg dh = { .cra_name = "dh", .cra_driver_name = "hpre-dh", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; static struct kpp_alg ecdh_curves[] = { { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_compute_value, - .compute_shared_secret = hpre_ecdh_compute_value, + .generate_public_key = hpre_ecdh_generate_public_key, + .compute_shared_secret = hpre_ecdh_compute_shared_secret, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p192_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -1928,11 +2085,12 @@ static struct kpp_alg ecdh_curves[] = { .cra_name = "ecdh-nist-p192", .cra_driver_name = "hpre-ecdh-nist-p192", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }, { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_compute_value, - .compute_shared_secret = hpre_ecdh_compute_value, + .generate_public_key = hpre_ecdh_generate_public_key, + .compute_shared_secret = hpre_ecdh_compute_shared_secret, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p256_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -1942,11 
+2100,12 @@ static struct kpp_alg ecdh_curves[] = { .cra_name = "ecdh-nist-p256", .cra_driver_name = "hpre-ecdh-nist-p256", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }, { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_compute_value, - .compute_shared_secret = hpre_ecdh_compute_value, + .generate_public_key = hpre_ecdh_generate_public_key, + .compute_shared_secret = hpre_ecdh_compute_shared_secret, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p384_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -1956,6 +2115,7 @@ static struct kpp_alg ecdh_curves[] = { .cra_name = "ecdh-nist-p384", .cra_driver_name = "hpre-ecdh-nist-p384", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, } }; -- 2.43.0
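One detail worth noting in the DH path above is the request-size handling: the tfm reserves the larger of the hardware request context and the fallback kpp_request, so the same per-request memory can serve either path. A trivial standalone sketch of that max-of-both sizing (hw_priv and sub_req are made-up placeholders, not the driver structures):

#include <stdio.h>
#include <stdlib.h>

struct hw_priv { char sqe[64];  };     /* stands in for the hardware request context */
struct sub_req { char priv[96]; };     /* stands in for the fallback tfm's sub-request */

/* The per-request context must be big enough for either path. */
static size_t reqsize(void)
{
	size_t hw = sizeof(struct hw_priv);
	size_t sw = sizeof(struct sub_req);

	return hw > sw ? hw : sw;
}

int main(void)
{
	void *req_ctx = calloc(1, reqsize());

	if (!req_ctx)
		return 1;
	printf("reserved %zu bytes, fits hw (%zu) and fallback (%zu)\n",
	       reqsize(), sizeof(struct hw_priv), sizeof(struct sub_req));
	free(req_ctx);
	return 0;
}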
From: Qi Tao <taoqi10@huawei.com> maillist inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- When all hardware queues are busy and no shareable queue, new processes fail to apply for queues. To avoid affecting tasks, support fallback mechanism when hardware queues are unavailable. Fixes: c16a70c1f253 ("crypto: hisilicon/sec - add new algorithm mode for AEAD") Signed-off-by: Qi Tao <taoqi10@huawei.com> Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/sec2/sec_crypto.c | 62 ++++++++++++++++------ 1 file changed, 47 insertions(+), 15 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 1e304c2c52df..d8cca441db27 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -663,10 +663,8 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) int i, ret; ctx->qps = sec_create_qps(); - if (!ctx->qps) { - pr_err("Can not create sec qps!\n"); + if (!ctx->qps) return -ENODEV; - } sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm); ctx->sec = sec; @@ -702,6 +700,9 @@ static void sec_ctx_base_uninit(struct sec_ctx *ctx) { int i; + if (!ctx->qps) + return; + for (i = 0; i < ctx->sec->ctx_q_num; i++) sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); @@ -713,6 +714,9 @@ static int sec_cipher_init(struct sec_ctx *ctx) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + if (!ctx->qps) + return 0; + c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL); if (!c_ctx->c_key) @@ -725,6 +729,9 @@ static void sec_cipher_uninit(struct sec_ctx *ctx) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; + if (!ctx->qps) + return; + memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key, c_ctx->c_key_dma); @@ -746,6 +753,9 @@ static void sec_auth_uninit(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + if (!ctx->qps) + return; + memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE); dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, a_ctx->a_key, a_ctx->a_key_dma); @@ -783,7 +793,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm) } ret = sec_ctx_base_init(ctx); - if (ret) + if (ret && ret != -ENODEV) return ret; ret = sec_cipher_init(ctx); @@ -892,6 +902,9 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, struct device *dev = ctx->dev; int ret; + if (!ctx->qps) + goto set_soft_key; + if (c_mode == SEC_CMODE_XTS) { ret = xts_verify_key(tfm, key, keylen); if (ret) { @@ -922,13 +935,14 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } memcpy(c_ctx->c_key, key, keylen); - if (c_ctx->fbtfm) { - ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); - if (ret) { - dev_err(dev, "failed to set fallback skcipher key!\n"); - return ret; - } + +set_soft_key: + ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); + if (ret) { + dev_err(dev, "failed to set fallback skcipher key!\n"); + return ret; } + return 0; } @@ -1392,6 +1406,9 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, struct crypto_authenc_keys keys; int ret; + if (!ctx->qps) + return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); + ctx->a_ctx.a_alg = a_alg; ctx->c_ctx.c_alg = c_alg; c_ctx->c_mode = c_mode; @@ -2048,6 +2065,9 @@ static int 
sec_skcipher_ctx_init(struct crypto_skcipher *tfm) if (ret) return ret; + if (!ctx->qps) + return 0; + if (ctx->sec->qm.ver < QM_HW_V3) { ctx->type_supported = SEC_BD_TYPE2; ctx->req_op = &sec_skcipher_req_ops; @@ -2056,7 +2076,7 @@ static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm) ctx->req_op = &sec_skcipher_req_ops_v3; } - return ret; + return 0; } static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm) @@ -2124,7 +2144,7 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) int ret; ret = sec_aead_init(tfm); - if (ret) { + if (ret && ret != -ENODEV) { pr_err("hisi_sec2: aead init error!\n"); return ret; } @@ -2166,7 +2186,7 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm) int ret; ret = sec_aead_init(tfm); - if (ret) { + if (ret && ret != -ENODEV) { dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n"); return ret; } @@ -2311,6 +2331,9 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) bool need_fallback = false; int ret; + if (!ctx->qps) + goto soft_crypto; + if (!sk_req->cryptlen) { if (ctx->c_ctx.c_mode == SEC_CMODE_XTS) return -EINVAL; @@ -2328,9 +2351,12 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) return -EINVAL; if (unlikely(ctx->c_ctx.fallback || need_fallback)) - return sec_skcipher_soft_crypto(ctx, sk_req, encrypt); + goto soft_crypto; return ctx->req_op->process(ctx, req); + +soft_crypto: + return sec_skcipher_soft_crypto(ctx, sk_req, encrypt); } static int sec_skcipher_encrypt(struct skcipher_request *sk_req) @@ -2538,6 +2564,9 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) bool need_fallback = false; int ret; + if (!ctx->qps) + goto soft_crypto; + req->flag = a_req->base.flags; req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; @@ -2548,11 +2577,14 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) ret = sec_aead_param_check(ctx, req, &need_fallback); if (unlikely(ret)) { if (need_fallback) - return sec_aead_soft_crypto(ctx, a_req, encrypt); + goto soft_crypto; return -EINVAL; } return ctx->req_op->process(ctx, req); + +soft_crypto: + return sec_aead_soft_crypto(ctx, a_req, encrypt); } static int sec_aead_encrypt(struct aead_request *a_req) -- 2.43.0
FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://gitee.com/openeuler/kernel/pulls/19494 Mailing list address: https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/TL4...