[PATCH OLK-6.6 0/5] crypto: hisilicon - support algorithm fallback

From: Weili Qian <qianweili@huawei.com>

Weili Qian (4):
  uacce: use WARN_ON_ONCE instead of WARN_ON
  uacce: add queue lock for uacce_get_ss_dma
  crypto: hisilicon/hpre - support the hpre algorithm fallback
  crypto: hisilicon/qm - check the value obtained from the hardware

Wenkai Lin (1):
  crypto: hisilicon - implement full backlog support for sec

 drivers/crypto/hisilicon/debugfs.c          |   1 +
 drivers/crypto/hisilicon/hpre/hpre_crypto.c | 312 ++++-
 drivers/crypto/hisilicon/qm.c               |  51 +-
 drivers/crypto/hisilicon/sec2/sec.h         |  62 ++-
 drivers/crypto/hisilicon/sec2/sec_crypto.c  | 525 ++++++++++++++------
 drivers/misc/uacce/uacce.c                  |  36 +-
 6 files changed, 764 insertions(+), 223 deletions(-)

--
2.33.0

From: Weili Qian <qianweili@huawei.com>

driver inclusion
category: cleanup
bugzilla: https://gitee.com/openeuler/kernel/issues/ICESZH
CVE: NA

----------------------------------------------------------------------

In uacce_free_dma_buffers(), if the loop is executed many times, many
duplicate logs will be printed when the WARN_ON condition is met.
Therefore, use WARN_ON_ONCE instead of WARN_ON.

Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/misc/uacce/uacce.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index f1d74a089459..33ae826f9396 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -180,7 +180,7 @@ static void uacce_free_dma_buffers(struct uacce_queue *q)
 		return;

 	while (i < qfr->dma_list[0].total_num) {
-		WARN_ON(!qfr->dma_list[i].size || !qfr->dma_list[i].dma);
+		WARN_ON_ONCE(!qfr->dma_list[i].size || !qfr->dma_list[i].dma);
 		dev_dbg(pdev, "free dma qfr (index = %d)\n", i);
 		dma_free_coherent(pdev, qfr->dma_list[i].size,
 				  qfr->dma_list[i].kaddr,
--
2.33.0
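For reference, a minimal sketch of why the patch above prefers WARN_ON_ONCE(): inside a loop that can trip the same condition on every iteration, WARN_ON() emits one backtrace per hit, while WARN_ON_ONCE() logs only the first. The struct and function names below are illustrative, not the uacce code itself.

#include <linux/bug.h>
#include <linux/types.h>

/* Illustrative only: "slice" stands in for the real dma_list entries. */
struct slice {
        size_t size;
        dma_addr_t dma;
};

static void free_slices_sketch(struct slice *list, unsigned int total)
{
        unsigned int i;

        for (i = 0; i < total; i++) {
                /* At most one backtrace, even if every entry is inconsistent. */
                WARN_ON_ONCE(!list[i].size || !list[i].dma);
                /* ... free list[i] here ... */
        }
}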

From: Weili Qian <qianweili@huawei.com> driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ICESZH CVE: NA ---------------------------------------------------------------------- The slice memory accessed by uacce_get_ss_dma() is allocated by uacce_fops_mmap(), but uacce_get_ss_dma() uses the uacce lock, while uacce_fops_mmap() uses the queue lock. If uacce_fops_mmap() has not yet completed resource allocation, uacce_get_ss_dma() may access an empty address, causing system exceptions. Therefore, add queue lock for uacce_get_ss_dma(). Fixes: c0b0e89513ec ("uacce: support UACCE_MODE_NOIOMMU mode") Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/misc/uacce/uacce.c | 34 ++++++++++++++++++---------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c index 33ae826f9396..19a0fdfac47b 100644 --- a/drivers/misc/uacce/uacce.c +++ b/drivers/misc/uacce/uacce.c @@ -122,31 +122,29 @@ static long uacce_get_ss_dma(struct uacce_queue *q, void __user *arg) unsigned long slice_idx = 0; unsigned long dma, size; unsigned int max_idx; - long ret = -EFAULT; + long ret = -EINVAL; + if (copy_from_user(&slice_idx, arg, sizeof(unsigned long))) { + dev_err(&uacce->dev, "copy_from_user fail!\n"); + return -EFAULT; + } + + mutex_lock(&q->mutex); if (q->state == UACCE_Q_ZOMBIE) { dev_err(&uacce->dev, "queue is zombie!\n"); - ret = -EINVAL; - goto param_check; + goto unlock; } if (!q->qfrs[UACCE_QFRT_SS]) { dev_err(&uacce->dev, "no ss dma region!\n"); - ret = -EINVAL; - goto param_check; + goto unlock; } slice = q->qfrs[UACCE_QFRT_SS]->dma_list; - if (copy_from_user(&slice_idx, arg, sizeof(unsigned long))) { - dev_err(&uacce->dev, "copy_from_user fail!\n"); - goto param_check; - } - if (slice[0].total_num - 1 < slice_idx) { dev_err(&uacce->dev, "no ss slice idx %lu err, total %u!\n", slice_idx, slice[0].total_num); - ret = -EINVAL; - goto param_check; + goto unlock; } dma = slice[slice_idx].dma; @@ -156,17 +154,21 @@ static long uacce_get_ss_dma(struct uacce_queue *q, void __user *arg) dev_err(&uacce->dev, "%luth ss region[size = %lu] no exist, range[[0](size = %llu) -> [%u](size = %llu)]\n", slice_idx, size, slice[0].size, max_idx, slice[max_idx].size); ret = -ENODEV; - goto param_check; + goto unlock; } dma = dma | ((size >> UACCE_GRAN_SHIFT) & UACCE_GRAN_NUM_MASK); + ret = (long)(slice[0].total_num - 1 - slice_idx); + mutex_unlock(&q->mutex); + if (copy_to_user(arg, &dma, sizeof(unsigned long))) { dev_err(&uacce->dev, "copy_to_user fail!\n"); - goto param_check; + return -EFAULT; } - ret = (long)(slice[0].total_num - 1 - slice_idx); + return ret; -param_check: +unlock: + mutex_unlock(&q->mutex); return ret; } -- 2.33.0
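The fix above boils down to the ordering shown in this simplified sketch (the slice lookup is elided and get_ss_dma_sketch() is an illustrative name): user copies stay outside the lock, and every access to the SS region is serialized against uacce_fops_mmap() by the same q->mutex.

#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/uacce.h>

static long get_ss_dma_sketch(struct uacce_queue *q, void __user *arg)
{
        unsigned long idx, dma;
        long ret = -EINVAL;

        if (copy_from_user(&idx, arg, sizeof(idx)))
                return -EFAULT;                 /* no lock held yet */

        mutex_lock(&q->mutex);                  /* same lock as uacce_fops_mmap() */
        if (q->state == UACCE_Q_ZOMBIE || !q->qfrs[UACCE_QFRT_SS])
                goto unlock;
        /* real code: validate idx against the slice list and build dma */
        dma = idx;                              /* placeholder so the sketch compiles */
        ret = 0;
unlock:
        mutex_unlock(&q->mutex);

        if (!ret && copy_to_user(arg, &dma, sizeof(dma)))
                return -EFAULT;                 /* copied after dropping the lock */
        return ret;
}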

From: Wenkai Lin <linwenkai6@hisilicon.com> driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ICESZH CVE: NA ---------------------------------------------------------------------- This patch introduces a hierarchical backlog mechanism to cache user data in high-throughput encryption/decryption scenarios, the implementation addresses packet loss issues when hardware queues overflow during peak loads. First, we use sec_alloc_req_id to obtain an exclusive resource from the pre-allocated resource pool of each queue, if no resource is allocated, perform the DMA map operation on the request memory. When the task is ready, we will attempt to send it to the hardware, if the hardware queue is already full, we cache the request into the backlog linked list, then return an EBUSY status to the upper layer and instruct the packet-sending thread to pause transmission. Simultaneously, when the hardware completes previous tasks, it triggers the sec callback function, within this function, we reattempt to send the cached requests from the backlog list and wake up the sending thread until the hardware queue becomes fully occupied again. In addition, we need to handle such exceptions like the hardware is reset when packets are sent, we need to switch to the software computing task and release occupied resources. Fixes: c16a70c1f253 ("crypto: hisilicon/sec - add new algorithm mode for AEAD") Signed-off-by: Wenkai Lin <linwenkai6@hisilicon.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/sec2/sec.h | 62 ++- drivers/crypto/hisilicon/sec2/sec_crypto.c | 525 +++++++++++++++------ 2 files changed, 439 insertions(+), 148 deletions(-) diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 703920b49c7c..cf8daf39dd1e 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -7,6 +7,12 @@ #include <linux/hisi_acc_qm.h> #include "sec_crypto.h" +#define SEC_PBUF_SZ 512 +#define SEC_MAX_MAC_LEN 64 +#define SEC_IV_SIZE 24 +#define SEC_SGE_NR_NUM 4 +#define SEC_SGL_ALIGN_SIZE 64 + /* Algorithm resource per hardware SEC queue */ struct sec_alg_res { u8 *pbuf; @@ -20,6 +26,40 @@ struct sec_alg_res { u16 depth; }; +struct sec_hw_sge { + dma_addr_t buf; + void *page_ctrl; + __le32 len; + __le32 pad; + __le32 pad0; + __le32 pad1; +}; + +struct sec_hw_sgl { + dma_addr_t next_dma; + __le16 entry_sum_in_chain; + __le16 entry_sum_in_sgl; + __le16 entry_length_in_sgl; + __le16 pad0; + __le64 pad1[5]; + struct sec_hw_sgl *next; + struct sec_hw_sge sge_entries[SEC_SGE_NR_NUM]; +} __aligned(SEC_SGL_ALIGN_SIZE); + +struct sec_src_dst_buf { + struct sec_hw_sgl in; + struct sec_hw_sgl out; +}; + +struct sec_request_buf { + union { + struct sec_src_dst_buf data_buf; + __u8 pbuf[SEC_PBUF_SZ]; + }; + dma_addr_t in_dma; + dma_addr_t out_dma; +}; + /* Cipher request of SEC private */ struct sec_cipher_req { struct hisi_acc_hw_sgl *c_out; @@ -29,6 +69,7 @@ struct sec_cipher_req { struct skcipher_request *sk_req; u32 c_len; bool encrypt; + __u8 c_ivin_buf[SEC_IV_SIZE]; }; struct sec_aead_req { @@ -37,6 +78,13 @@ struct sec_aead_req { u8 *a_ivin; dma_addr_t a_ivin_dma; struct aead_request *aead_req; + __u8 a_ivin_buf[SEC_IV_SIZE]; + __u8 out_mac_buf[SEC_MAX_MAC_LEN]; +}; + +struct sec_instance_backlog { + struct list_head list; + spinlock_t lock; }; /* SEC request of Crypto */ @@ -55,15 +103,18 @@ struct sec_req { dma_addr_t in_dma; struct sec_cipher_req c_req; struct sec_aead_req aead_req; - 
struct list_head backlog_head; + struct crypto_async_request *base; int err_type; int req_id; u32 flag; /* Status of the SEC request */ - bool fake_busy; bool use_pbuf; + + struct list_head list; + struct sec_instance_backlog *backlog; + struct sec_request_buf buf; }; /** @@ -119,9 +170,11 @@ struct sec_qp_ctx { struct sec_alg_res *res; struct sec_ctx *ctx; spinlock_t req_lock; - struct list_head backlog; + spinlock_t id_lock; struct hisi_acc_sgl_pool *c_in_pool; struct hisi_acc_sgl_pool *c_out_pool; + struct sec_instance_backlog backlog; + u16 send_head; }; enum sec_alg_type { @@ -139,9 +192,6 @@ struct sec_ctx { /* Half queues for encipher, and half for decipher */ u32 hlf_q_num; - /* Threshold for fake busy, trigger to return -EBUSY to user */ - u32 fake_req_limit; - /* Current cyclic index to select a queue for encipher */ atomic_t enc_qcyclic; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 0428c50bea31..fb849178ec2e 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -67,7 +67,6 @@ #define SEC_MAX_CCM_AAD_LEN 65279 #define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth)) -#define SEC_PBUF_SZ 512 #define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ #define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE) #define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \ @@ -103,6 +102,7 @@ #define IV_CTR_INIT 0x1 #define IV_BYTE_OFFSET 0x8 #define SEC_GCM_MIN_AUTH_SZ 0x8 +#define SEC_RETRY_MAX_CNT 5U static DEFINE_MUTEX(sec_algs_lock); static unsigned int sec_available_devs; @@ -117,40 +117,19 @@ struct sec_aead { struct aead_alg alg; }; -/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */ -static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req) -{ - if (req->c_req.encrypt) - return (u32)atomic_inc_return(&ctx->enc_qcyclic) % - ctx->hlf_q_num; - - return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num + - ctx->hlf_q_num; -} - -static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req) -{ - if (req->c_req.encrypt) - atomic_dec(&ctx->enc_qcyclic); - else - atomic_dec(&ctx->dec_qcyclic); -} +static int sec_aead_soft_crypto(struct sec_ctx *ctx, + struct aead_request *aead_req, + bool encrypt); +static int sec_skcipher_soft_crypto(struct sec_ctx *ctx, + struct skcipher_request *sreq, bool encrypt); static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) { int req_id; - spin_lock_bh(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->id_lock); req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC); - spin_unlock_bh(&qp_ctx->req_lock); - if (unlikely(req_id < 0)) { - dev_err(req->ctx->dev, "alloc req id fail!\n"); - return req_id; - } - - req->qp_ctx = qp_ctx; - qp_ctx->req_list[req_id] = req; - + spin_unlock_bh(&qp_ctx->id_lock); return req_id; } @@ -164,12 +143,9 @@ static void sec_free_req_id(struct sec_req *req) return; } - qp_ctx->req_list[req_id] = NULL; - req->qp_ctx = NULL; - - spin_lock_bh(&qp_ctx->req_lock); + spin_lock_bh(&qp_ctx->id_lock); idr_remove(&qp_ctx->req_idr, req_id); - spin_unlock_bh(&qp_ctx->req_lock); + spin_unlock_bh(&qp_ctx->id_lock); } static u8 pre_parse_finished_bd(struct bd_status *status, void *resp) @@ -230,6 +206,75 @@ static int sec_cb_status_check(struct sec_req *req, return 0; } +static int qp_send_message(struct sec_req *req) +{ + struct sec_qp_ctx *qp_ctx = req->qp_ctx; + int ret; + + if (atomic_read(&qp_ctx->qp->qp_status.used) == 
qp_ctx->qp->sq_depth - 1) + return -EBUSY; + + spin_lock_bh(&qp_ctx->req_lock); + if (atomic_read(&qp_ctx->qp->qp_status.used) == qp_ctx->qp->sq_depth - 1) { + spin_unlock_bh(&qp_ctx->req_lock); + return -EBUSY; + } + + if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) { + req->sec_sqe.type2.tag = cpu_to_le16((u16)qp_ctx->send_head); + qp_ctx->req_list[qp_ctx->send_head] = req; + } + + ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); + if (ret) { + spin_unlock_bh(&qp_ctx->req_lock); + atomic64_inc(&req->ctx->sec->debug.dfx.send_busy_cnt); + return ret; + } + if (qp_ctx->ctx->type_supported == SEC_BD_TYPE2) + qp_ctx->send_head = (qp_ctx->send_head + 1) % qp_ctx->qp->sq_depth; + + spin_unlock_bh(&qp_ctx->req_lock); + + atomic64_inc(&req->ctx->sec->debug.dfx.send_cnt); + return -EINPROGRESS; +} + +static void sec_alg_send_backlog(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) +{ + struct sec_req *req, *tmp; + int ret = 0; + + spin_lock_bh(&qp_ctx->backlog.lock); + list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) { + ret = qp_send_message(req); + if (ret != -EINPROGRESS) + break; + + list_del(&req->list); + crypto_request_complete(req->base, -EINPROGRESS); + } + if (unlikely(ret != -EINPROGRESS && ret != -EBUSY)) { + list_for_each_entry_safe(req, tmp, &qp_ctx->backlog.list, list) { + list_del(&req->list); + ctx->req_op->buf_unmap(ctx, req); + if (req->req_id >= 0) + sec_free_req_id(req); + + if (ctx->alg_type == SEC_AEAD) + ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req, + req->c_req.encrypt); + else + ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req, + req->c_req.encrypt); + /* Wake up the busy thread first, then return the errno. */ + crypto_request_complete(req->base, -EINPROGRESS); + crypto_request_complete(req->base, ret); + } + } + spin_unlock_bh(&qp_ctx->backlog.lock); +} + static void sec_req_cb(struct hisi_qp *qp, void *resp) { struct sec_qp_ctx *qp_ctx = qp->qp_ctx; @@ -274,40 +319,54 @@ static void sec_req_cb(struct hisi_qp *qp, void *resp) ctx->req_op->callback(ctx, req, err); } -static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) +static int sec_alg_send_message_retry(struct sec_req *req) { - struct sec_qp_ctx *qp_ctx = req->qp_ctx; + int ctr = 0; int ret; - if (ctx->fake_req_limit <= - atomic_read(&qp_ctx->qp->qp_status.used) && - !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) - return -EBUSY; + do { + ret = qp_send_message(req); + } while (ret == -EBUSY && ctr++ < SEC_RETRY_MAX_CNT); - spin_lock_bh(&qp_ctx->req_lock); - ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); - if (ctx->fake_req_limit <= - atomic_read(&qp_ctx->qp->qp_status.used) && !ret) { - req->fake_busy = true; - spin_unlock_bh(&qp_ctx->req_lock); - atomic64_inc(&ctx->sec->debug.dfx.send_cnt); - atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); + return ret; +} + +static int sec_alg_try_enqueue(struct sec_req *req) +{ + /* Check if any request is already backlogged */ + if (!list_empty(&req->backlog->list)) return -EBUSY; - } - spin_unlock_bh(&qp_ctx->req_lock); - if (unlikely(ret == -EBUSY)) - return -ENOBUFS; + /* Try to enqueue to HW ring */ + return qp_send_message(req); +} - if (likely(!ret)) { - ret = -EINPROGRESS; - atomic64_inc(&ctx->sec->debug.dfx.send_cnt); - } + +static int sec_alg_send_message_maybacklog(struct sec_req *req) +{ + int ret; + + ret = sec_alg_try_enqueue(req); + if (ret != -EBUSY) + return ret; + + spin_lock_bh(&req->backlog->lock); + ret = sec_alg_try_enqueue(req); + if (ret == -EBUSY) + list_add_tail(&req->list, &req->backlog->list); + 
spin_unlock_bh(&req->backlog->lock); return ret; } -/* Get DMA memory resources */ +static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req) +{ + if (req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG) + return sec_alg_send_message_maybacklog(req); + + return sec_alg_send_message_retry(req); +} + static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res) { u16 q_depth = res->depth; @@ -559,6 +618,10 @@ static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id) spin_lock_init(&qp_ctx->req_lock); idr_init(&qp_ctx->req_idr); + spin_lock_init(&qp_ctx->backlog.lock); + spin_lock_init(&qp_ctx->id_lock); + INIT_LIST_HEAD(&qp_ctx->backlog.list); + qp_ctx->send_head = 0; ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx); if (ret) @@ -602,9 +665,6 @@ static int sec_ctx_base_init(struct sec_ctx *ctx) ctx->hlf_q_num = sec->ctx_q_num >> 1; ctx->pbuf_supported = sec->qm.use_iommu; - - /* Half of queue depth is taken as fake requests limit in the queue. */ - ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1; ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), GFP_KERNEL); if (!ctx->qp_ctx) { @@ -706,7 +766,7 @@ static int sec_skcipher_init(struct crypto_skcipher *tfm) int ret; ctx->alg_type = SEC_SKCIPHER; - crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); + crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct sec_req)); ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { pr_err("get error skcipher iv size!\n"); @@ -883,24 +943,25 @@ GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR) static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *src) { - struct sec_aead_req *a_req = &req->aead_req; - struct aead_request *aead_req = a_req->aead_req; + struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct sec_request_buf *buf = &req->buf; struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; struct crypto_aead *tfm; + u8 *mac_offset, *pbuf; size_t authsize; - u8 *mac_offset; if (ctx->alg_type == SEC_AEAD) copy_size = aead_req->cryptlen + aead_req->assoclen; else copy_size = c_req->c_len; - pbuf_length = sg_copy_to_buffer(src, sg_nents(src), - qp_ctx->res[req_id].pbuf, copy_size); + + pbuf = req->req_id < 0 ? 
buf->pbuf : qp_ctx->res[req_id].pbuf; + pbuf_length = sg_copy_to_buffer(src, sg_nents(src), pbuf, copy_size); if (unlikely(pbuf_length != copy_size)) { dev_err(dev, "copy src data to pbuf error!\n"); return -EINVAL; @@ -908,8 +969,17 @@ static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { tfm = crypto_aead_reqtfm(aead_req); authsize = crypto_aead_authsize(tfm); - mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize; - memcpy(a_req->out_mac, mac_offset, authsize); + mac_offset = pbuf + copy_size - authsize; + memcpy(req->aead_req.out_mac, mac_offset, authsize); + } + + if (req->req_id < 0) { + buf->in_dma = dma_map_single(dev, buf->pbuf, SEC_PBUF_SZ, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, buf->in_dma))) + return -ENOMEM; + + buf->out_dma = buf->in_dma; + return 0; } req->in_dma = qp_ctx->res[req_id].pbuf_dma; @@ -924,6 +994,7 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; + struct sec_request_buf *buf = &req->buf; int copy_size, pbuf_length; int req_id = req->req_id; @@ -932,10 +1003,16 @@ static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, else copy_size = c_req->c_len; - pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), - qp_ctx->res[req_id].pbuf, copy_size); + if (req->req_id < 0) + pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), buf->pbuf, copy_size); + else + pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf, + copy_size); if (unlikely(pbuf_length != copy_size)) dev_err(ctx->dev, "copy pbuf data to dst error!\n"); + + if (req->req_id < 0) + dma_unmap_single(ctx->dev, buf->in_dma, SEC_PBUF_SZ, DMA_BIDIRECTIONAL); } static int sec_aead_mac_init(struct sec_aead_req *req) @@ -957,8 +1034,88 @@ static int sec_aead_mac_init(struct sec_aead_req *req) return 0; } -static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, - struct scatterlist *src, struct scatterlist *dst) +static void fill_sg_to_hw_sge(struct scatterlist *sgl, struct sec_hw_sge *hw_sge) +{ + hw_sge->buf = sg_dma_address(sgl); + hw_sge->len = cpu_to_le32(sg_dma_len(sgl)); + hw_sge->page_ctrl = sg_virt(sgl); +} + +static int sec_cipher_to_hw_sgl(struct device *dev, struct scatterlist *src, + struct sec_hw_sgl *src_in, dma_addr_t *hw_sgl_dma, + int dma_dir) +{ + struct sec_hw_sge *curr_hw_sge = src_in->sge_entries; + u32 i, sg_n, sg_n_mapped; + struct scatterlist *sg; + u32 sge_var = 0; + + sg_n = sg_nents(src); + sg_n_mapped = dma_map_sg(dev, src, sg_n, dma_dir); + if (unlikely(!sg_n_mapped)) { + dev_err(dev, "dma mapping for SG error!\n"); + return -EINVAL; + } else if (unlikely(sg_n_mapped > SEC_SGE_NR_NUM)) { + dev_err(dev, "the number of entries in input scatterlist error!\n"); + dma_unmap_sg(dev, src, sg_n, dma_dir); + return -EINVAL; + } + + for_each_sg(src, sg, sg_n_mapped, i) { + fill_sg_to_hw_sge(sg, curr_hw_sge); + curr_hw_sge++; + sge_var++; + } + + src_in->entry_sum_in_sgl = cpu_to_le16(sge_var); + src_in->entry_sum_in_chain = cpu_to_le16(SEC_SGE_NR_NUM); + src_in->entry_length_in_sgl = cpu_to_le16(SEC_SGE_NR_NUM); + *hw_sgl_dma = dma_map_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir); + if (unlikely(dma_mapping_error(dev, *hw_sgl_dma))) { + dma_unmap_sg(dev, src, sg_n, dma_dir); + return -ENOMEM; + } + + return 0; +} + +static void sec_cipher_put_hw_sgl(struct device 
*dev, struct scatterlist *src, + dma_addr_t src_in, int dma_dir) +{ + dma_unmap_single(dev, src_in, sizeof(struct sec_hw_sgl), dma_dir); + dma_unmap_sg(dev, src, sg_nents(src), dma_dir); +} + +static int sec_cipher_map_sgl(struct device *dev, struct sec_req *req, + struct scatterlist *src, struct scatterlist *dst) +{ + struct sec_hw_sgl *src_in = &req->buf.data_buf.in; + struct sec_hw_sgl *dst_out = &req->buf.data_buf.out; + int ret; + + if (dst == src) { + ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, + DMA_BIDIRECTIONAL); + req->buf.out_dma = req->buf.in_dma; + return ret; + } + + ret = sec_cipher_to_hw_sgl(dev, src, src_in, &req->buf.in_dma, DMA_TO_DEVICE); + if (unlikely(ret)) + return ret; + + ret = sec_cipher_to_hw_sgl(dev, dst, dst_out, &req->buf.out_dma, + DMA_FROM_DEVICE); + if (unlikely(ret)) { + sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE); + return ret; + } + + return 0; +} + +static int sec_cipher_map_inner(struct sec_ctx *ctx, struct sec_req *req, + struct scatterlist *src, struct scatterlist *dst) { struct sec_cipher_req *c_req = &req->c_req; struct sec_aead_req *a_req = &req->aead_req; @@ -977,10 +1134,9 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, a_req->out_mac_dma = res->pbuf_dma + SEC_PBUF_MAC_OFFSET; } - ret = sec_cipher_pbuf_map(ctx, req, src); - - return ret; + return sec_cipher_pbuf_map(ctx, req, src); } + c_req->c_ivin = res->c_ivin; c_req->c_ivin_dma = res->c_ivin_dma; if (ctx->alg_type == SEC_AEAD) { @@ -1027,19 +1183,105 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, return 0; } +static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, + struct scatterlist *src, struct scatterlist *dst) +{ + struct sec_aead_req *a_req = &req->aead_req; + struct sec_cipher_req *c_req = &req->c_req; + bool is_aead = (ctx->alg_type == SEC_AEAD); + struct device *dev = ctx->dev; + int ret = -ENOMEM; + + if (req->req_id >= 0) + return sec_cipher_map_inner(ctx, req, src, dst); + + c_req->c_ivin = c_req->c_ivin_buf; + c_req->c_ivin_dma = dma_map_single(dev, c_req->c_ivin, + SEC_IV_SIZE, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, c_req->c_ivin_dma))) + return -ENOMEM; + + if (is_aead) { + a_req->a_ivin = a_req->a_ivin_buf; + a_req->out_mac = a_req->out_mac_buf; + a_req->a_ivin_dma = dma_map_single(dev, a_req->a_ivin, + SEC_IV_SIZE, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, a_req->a_ivin_dma))) + goto free_c_ivin_dma; + + a_req->out_mac_dma = dma_map_single(dev, a_req->out_mac, + SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, a_req->out_mac_dma))) + goto free_a_ivin_dma; + } + if (req->use_pbuf) { + ret = sec_cipher_pbuf_map(ctx, req, src); + if (unlikely(ret)) + goto free_out_mac_dma; + + return 0; + } + + if (!c_req->encrypt && is_aead) { + ret = sec_aead_mac_init(a_req); + if (unlikely(ret)) { + dev_err(dev, "fail to init mac data for ICV!\n"); + goto free_out_mac_dma; + } + } + + ret = sec_cipher_map_sgl(dev, req, src, dst); + if (unlikely(ret)) { + dev_err(dev, "fail to dma map input sgl buffers!\n"); + goto free_out_mac_dma; + } + + return 0; + +free_out_mac_dma: + if (is_aead) + dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL); +free_a_ivin_dma: + if (is_aead) + dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE); +free_c_ivin_dma: + dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE); + return ret; +} + static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req, 
struct scatterlist *src, struct scatterlist *dst) { + struct sec_aead_req *a_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; struct device *dev = ctx->dev; + if (req->req_id >= 0) { + if (req->use_pbuf) { + sec_cipher_pbuf_unmap(ctx, req, dst); + } else { + if (dst != src) + hisi_acc_sg_buf_unmap(dev, src, req->in); + hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out); + } + return; + } + if (req->use_pbuf) { sec_cipher_pbuf_unmap(ctx, req, dst); } else { - if (dst != src) - hisi_acc_sg_buf_unmap(dev, src, req->in); + if (dst != src) { + sec_cipher_put_hw_sgl(dev, dst, req->buf.out_dma, DMA_FROM_DEVICE); + sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_TO_DEVICE); + } else { + sec_cipher_put_hw_sgl(dev, src, req->buf.in_dma, DMA_BIDIRECTIONAL); + } + } - hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out); + dma_unmap_single(dev, c_req->c_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE); + if (ctx->alg_type == SEC_AEAD) { + dma_unmap_single(dev, a_req->a_ivin_dma, SEC_IV_SIZE, DMA_TO_DEVICE); + dma_unmap_single(dev, a_req->out_mac_dma, SEC_MAX_MAC_LEN, DMA_BIDIRECTIONAL); } } @@ -1257,8 +1499,15 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma); sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); - sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma); - sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); + if (req->req_id < 0) { + sec_sqe->type2.data_src_addr = cpu_to_le64(req->buf.in_dma); + sec_sqe->type2.data_dst_addr = cpu_to_le64(req->buf.out_dma); + } else { + sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma); + sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); + } + if (sec_sqe->type2.data_src_addr != sec_sqe->type2.data_dst_addr) + de = 0x1 << SEC_DE_OFFSET; sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) << SEC_CMODE_OFFSET); @@ -1284,13 +1533,10 @@ static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) sec_sqe->sdm_addr_type |= da_type; scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET; - if (req->in_dma != c_req->c_out_dma) - de = 0x1 << SEC_DE_OFFSET; sec_sqe->sds_sa_type = (de | scene | sa_type); sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len); - sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id); return 0; } @@ -1307,8 +1553,15 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma); sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); - sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma); - sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma); + if (req->req_id < 0) { + sec_sqe3->data_src_addr = cpu_to_le64(req->buf.in_dma); + sec_sqe3->data_dst_addr = cpu_to_le64(req->buf.out_dma); + } else { + sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma); + sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma); + } + if (sec_sqe3->data_src_addr != sec_sqe3->data_dst_addr) + bd_param |= 0x1 << SEC_DE_OFFSET_V3; sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) | c_ctx->c_mode; @@ -1334,8 +1587,6 @@ static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) } bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3; - if (req->in_dma != c_req->c_out_dma) - bd_param |= 0x1 << SEC_DE_OFFSET_V3; bd_param |= SEC_BD_TYPE3; sec_sqe3->bd_param = cpu_to_le32(bd_param); @@ -1367,15 +1618,12 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) size_t sz; u8 *iv; - if (req->c_req.encrypt) - sgl = 
alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst; - else - sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src; - if (alg_type == SEC_SKCIPHER) { + sgl = req->c_req.encrypt ? sk_req->dst : sk_req->src; iv = sk_req->iv; cryptlen = sk_req->cryptlen; } else { + sgl = req->c_req.encrypt ? aead_req->dst : aead_req->src; iv = aead_req->iv; cryptlen = aead_req->cryptlen; } @@ -1386,9 +1634,7 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) if (unlikely(sz != iv_size)) dev_err(req->ctx->dev, "copy output iv error!\n"); } else { - sz = cryptlen / iv_size; - if (cryptlen % iv_size) - sz += 1; + sz = (cryptlen + iv_size - 1) / iv_size; ctr_iv_inc(iv, iv_size, sz); } } @@ -1396,21 +1642,18 @@ static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, int err) { - struct skcipher_request *sk_req = req->c_req.sk_req; + struct sec_qp_ctx *qp_ctx = req->qp_ctx; - sec_free_req_id(req); + if (req->req_id >= 0) + sec_free_req_id(req); /* IV output at encrypto of CBC/CTR mode */ if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt) sec_update_iv(req, SEC_SKCIPHER); - if (req->fake_busy) { - skcipher_request_complete(sk_req, -EINPROGRESS); - atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); - } - - skcipher_request_complete(sk_req, err); + crypto_request_complete(req->base, err); + sec_alg_send_backlog(ctx, qp_ctx); } static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req) @@ -1649,18 +1892,14 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) struct aead_request *a_req = req->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); size_t authsize = crypto_aead_authsize(tfm); - struct sec_aead_req *aead_req = &req->aead_req; - struct sec_cipher_req *c_req = &req->c_req; + struct sec_qp_ctx *qp_ctx = req->qp_ctx; size_t sz; - if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) - sec_update_iv(req, SEC_AEAD); + if (!err && req->c_req.encrypt) { + if (c->c_ctx.c_mode == SEC_CMODE_CBC) + sec_update_iv(req, SEC_AEAD); - /* Copy output mac */ - if (!err && c_req->encrypt) { - struct scatterlist *sgl = a_req->dst; - - sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac, + sz = sg_pcopy_from_buffer(a_req->dst, sg_nents(a_req->dst), req->aead_req.out_mac, authsize, a_req->cryptlen + a_req->assoclen); if (unlikely(sz != authsize)) { dev_err(c->dev, "copy out mac err!\n"); @@ -1668,43 +1907,39 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) } } - sec_free_req_id(req); - - if (req->fake_busy) { - aead_request_complete(a_req, -EINPROGRESS); - atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt); - } + if (req->req_id >= 0) + sec_free_req_id(req); - aead_request_complete(a_req, err); + crypto_request_complete(req->base, err); + sec_alg_send_backlog(c, qp_ctx); } -static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) +static void sec_request_uninit(struct sec_req *req) { - sec_free_req_id(req); - sec_free_queue_id(ctx, req); + if (req->req_id >= 0) + sec_free_req_id(req); } static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) { struct sec_qp_ctx *qp_ctx; - int queue_id; - - /* To load balance */ - queue_id = sec_alloc_queue_id(ctx, req); - qp_ctx = &ctx->qp_ctx[queue_id]; + int i; - req->req_id = sec_alloc_req_id(req, qp_ctx); - if (unlikely(req->req_id < 0)) { - sec_free_queue_id(ctx, 
req); - return req->req_id; + for (i = 0; i < ctx->sec->ctx_q_num; i++) { + qp_ctx = &ctx->qp_ctx[i]; + req->req_id = sec_alloc_req_id(req, qp_ctx); + if (req->req_id >= 0) + break; } + req->qp_ctx = qp_ctx; + req->backlog = &qp_ctx->backlog; + return 0; } static int sec_process(struct sec_ctx *ctx, struct sec_req *req) { - struct sec_cipher_req *c_req = &req->c_req; int ret; ret = sec_request_init(ctx, req); @@ -1721,8 +1956,7 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) sec_update_iv(req, ctx->alg_type); ret = ctx->req_op->bd_send(ctx, req); - if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || - (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { + if (unlikely((ret != -EBUSY && ret != -EINPROGRESS))) { dev_err_ratelimited(ctx->dev, "send sec request failed!\n"); goto err_send_req; } @@ -1733,16 +1967,23 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req) /* As failing, restore the IV from user */ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) { if (ctx->alg_type == SEC_SKCIPHER) - memcpy(req->c_req.sk_req->iv, c_req->c_ivin, + memcpy(req->c_req.sk_req->iv, req->c_req.c_ivin, ctx->c_ctx.ivsize); else - memcpy(req->aead_req.aead_req->iv, c_req->c_ivin, + memcpy(req->aead_req.aead_req->iv, req->c_req.c_ivin, ctx->c_ctx.ivsize); } sec_request_untransfer(ctx, req); + err_uninit_req: - sec_request_uninit(ctx, req); + sec_request_uninit(req); + if (ctx->alg_type == SEC_AEAD) + ret = sec_aead_soft_crypto(ctx, req->aead_req.aead_req, + req->c_req.encrypt); + else + ret = sec_skcipher_soft_crypto(ctx, req->c_req.sk_req, + req->c_req.encrypt); return ret; } @@ -1816,7 +2057,7 @@ static int sec_aead_init(struct crypto_aead *tfm) struct sec_ctx *ctx = crypto_aead_ctx(tfm); int ret; - crypto_aead_set_reqsize(tfm, sizeof(struct sec_req)); + crypto_aead_set_reqsize_dma(tfm, sizeof(struct sec_req)); ctx->alg_type = SEC_AEAD; ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); if (ctx->c_ctx.ivsize < SEC_AIV_SIZE || @@ -2053,7 +2294,7 @@ static int sec_skcipher_soft_crypto(struct sec_ctx *ctx, static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); - struct sec_req *req = skcipher_request_ctx(sk_req); + struct sec_req *req = skcipher_request_ctx_dma(sk_req); struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); bool need_fallback = false; int ret; @@ -2068,7 +2309,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) req->c_req.sk_req = sk_req; req->c_req.encrypt = encrypt; req->ctx = ctx; - req->fake_busy = false; + req->base = &sk_req->base; ret = sec_skcipher_param_check(ctx, req, &need_fallback); if (unlikely(ret)) @@ -2279,7 +2520,7 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) { struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); - struct sec_req *req = aead_request_ctx(a_req); + struct sec_req *req = aead_request_ctx_dma(a_req); struct sec_ctx *ctx = crypto_aead_ctx(tfm); size_t sz = crypto_aead_authsize(tfm); bool need_fallback = false; @@ -2289,7 +2530,7 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; - req->fake_busy = false; + req->base = &a_req->base; req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz); ret = sec_aead_param_check(ctx, req, &need_fallback); -- 2.33.0
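A condensed view of the backlog flow introduced above, as a sketch rather than the real driver code: the function names enqueue_or_backlog() and drain_backlog() are illustrative (the patch's actual helpers are sec_alg_send_message_maybacklog() and sec_alg_send_backlog()), the types and the other helpers are the ones added by the patch, and the error/fallback path is omitted.

/* Would sit in sec_crypto.c next to the patch's helpers. */

/* Submit to the hardware ring, or park the request on the per-qp backlog. */
static int enqueue_or_backlog(struct sec_req *req)
{
        int ret;

        ret = qp_send_message(req);             /* -EINPROGRESS on success */
        if (ret != -EBUSY)
                return ret;

        spin_lock_bh(&req->backlog->lock);
        ret = qp_send_message(req);             /* re-check under the lock */
        if (ret == -EBUSY)
                list_add_tail(&req->list, &req->backlog->list);
        spin_unlock_bh(&req->backlog->lock);

        return ret;                             /* -EBUSY tells the sender to pause */
}

/* Called from the completion callback once the hardware frees a slot. */
static void drain_backlog(struct sec_instance_backlog *bl)
{
        struct sec_req *req, *tmp;

        spin_lock_bh(&bl->lock);
        list_for_each_entry_safe(req, tmp, &bl->list, list) {
                if (qp_send_message(req) != -EINPROGRESS)
                        break;                  /* ring is full again, stop */
                list_del(&req->list);
                /* wake the submitter that previously got -EBUSY */
                crypto_request_complete(req->base, -EINPROGRESS);
        }
        spin_unlock_bh(&bl->lock);
}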

From: Weili Qian <qianweili@huawei.com> driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ICESZH CVE: NA ---------------------------------------------------------------------- There is an upper limit on the number of hardware queues. When all hardware queues are busy, new processes fail to apply for queues. To avoid affecting tasks, support fallback mechanism when hardware queues are unavailable. HPRE driver supports DH algorithm, limited to prime numbers up to 4K. It supports prime numbers larger than 4K via fallback mechanism. Fixes: 90274769cf79 ("crypto: hisilicon/hpre - add 'CURVE25519' algorithm") Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 312 +++++++++++++++++--- drivers/crypto/hisilicon/qm.c | 4 +- 2 files changed, 269 insertions(+), 47 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 96b89e78db2f..4d3efc74b8f3 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -94,6 +94,7 @@ struct hpre_dh_ctx { char *g; /* m */ dma_addr_t dma_g; + struct crypto_kpp *soft_tfm; }; struct hpre_ecdh_ctx { @@ -104,6 +105,7 @@ struct hpre_ecdh_ctx { /* low address: x->y */ unsigned char *g; dma_addr_t dma_g; + struct crypto_kpp *soft_tfm; }; struct hpre_curve25519_ctx { @@ -114,6 +116,7 @@ struct hpre_curve25519_ctx { /* gx coordinate */ unsigned char *g; dma_addr_t dma_g; + struct crypto_kpp *soft_tfm; }; struct hpre_ctx { @@ -135,6 +138,7 @@ struct hpre_ctx { unsigned int curve_id; /* for high performance core */ u8 enable_hpcore; + bool fallback; }; struct hpre_asym_request { @@ -225,7 +229,7 @@ static struct hisi_qp *hpre_get_qp_and_start(u8 type) qp = hpre_create_qp(type); if (!qp) { - pr_err("Can not create hpre qp!\n"); + pr_info_ratelimited("Can not create hpre qp, alloc soft tfm!\n"); return ERR_PTR(-ENODEV); } @@ -505,8 +509,10 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) int ret; qp = hpre_get_qp_and_start(type); - if (IS_ERR(qp)) - return PTR_ERR(qp); + if (IS_ERR(qp)) { + ctx->qp = NULL; + return -ENODEV; + } qp->qp_ctx = ctx; qp->req_cb = hpre_alg_cb; @@ -638,6 +644,48 @@ static int hpre_dh_compute_value(struct kpp_request *req) return ret; } +static struct kpp_request *hpre_dh_prepare_fb_req(struct kpp_request *req) +{ + struct kpp_request *fb_req = kpp_request_ctx(req); + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + + kpp_request_set_tfm(fb_req, ctx->dh.soft_tfm); + kpp_request_set_callback(fb_req, req->base.flags, req->base.complete, req->base.data); + kpp_request_set_input(fb_req, req->src, req->src_len); + kpp_request_set_output(fb_req, req->dst, req->dst_len); + + return fb_req; +} + +static int hpre_dh_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct kpp_request *fb_req; + + if (ctx->fallback) { + fb_req = hpre_dh_prepare_fb_req(req); + return crypto_kpp_generate_public_key(fb_req); + } + + return hpre_dh_compute_value(req); +} + +static int hpre_dh_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + struct kpp_request *fb_req; + + if (ctx->fallback) { + fb_req = hpre_dh_prepare_fb_req(req); + return crypto_kpp_compute_shared_secret(fb_req); + } + 
+ return hpre_dh_compute_value(req); +} + static int hpre_is_dh_params_length_valid(unsigned int key_sz) { #define _HPRE_DH_GRP1 768 @@ -664,13 +712,6 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params) struct device *dev = ctx->dev; unsigned int sz; - if (params->p_size > HPRE_DH_MAX_P_SZ) - return -EINVAL; - - if (hpre_is_dh_params_length_valid(params->p_size << - HPRE_BITS_2_BYTES_SHIFT)) - return -EINVAL; - sz = ctx->key_sz = params->p_size; ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1, &ctx->dh.dma_xa_p, GFP_KERNEL); @@ -703,6 +744,9 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) struct device *dev = ctx->dev; unsigned int sz = ctx->key_sz; + if (!ctx->qp) + return; + if (is_clear_all) hisi_qm_stop_qp(ctx->qp); @@ -731,6 +775,13 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf, if (crypto_dh_decode_key(buf, len, ¶ms) < 0) return -EINVAL; + if (!ctx->qp) + goto set_soft_secret; + + if (hpre_is_dh_params_length_valid(params.p_size << + HPRE_BITS_2_BYTES_SHIFT)) + goto set_soft_secret; + /* Free old secret if any */ hpre_dh_clear_ctx(ctx, false); @@ -741,27 +792,55 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf, memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key, params.key_size); + ctx->fallback = false; return 0; err_clear_ctx: hpre_dh_clear_ctx(ctx, false); return ret; +set_soft_secret: + ctx->fallback = true; + return crypto_kpp_set_secret(ctx->dh.soft_tfm, buf, len); } static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) + return crypto_kpp_maxsize(ctx->dh.soft_tfm); + return ctx->key_sz; } static int hpre_dh_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + const char *alg = kpp_alg_name(tfm); + unsigned int reqsize; + int ret; + + ctx->dh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->dh.soft_tfm)) { + pr_err("Failed to alloc dh tfm!\n"); + return PTR_ERR(ctx->dh.soft_tfm); + } - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + crypto_kpp_set_flags(ctx->dh.soft_tfm, crypto_kpp_get_flags(tfm)); - return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); + reqsize = max(sizeof(struct hpre_asym_request) + hpre_align_pd(), + sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->dh.soft_tfm)); + kpp_set_reqsize(tfm, reqsize); + + ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); + if (ret && ret != -ENODEV) { + crypto_free_kpp(ctx->dh.soft_tfm); + return ret; + } else if (ret == -ENODEV) { + ctx->fallback = true; + } + + return 0; } static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) @@ -769,6 +848,7 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); hpre_dh_clear_ctx(ctx, true); + crypto_free_kpp(ctx->dh.soft_tfm); } static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len) @@ -808,9 +888,8 @@ static int hpre_rsa_enc(struct akcipher_request *req) struct hpre_sqe *msg = &hpre_req->req; int ret; - /* For 512 and 1536 bits key size, use soft tfm instead */ - if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || - ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { + /* For unsupported key size and unavailable devices, use soft tfm instead */ + if (ctx->fallback) { akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); ret = crypto_akcipher_encrypt(req); akcipher_request_set_tfm(req, tfm); @@ -856,9 +935,8 @@ static int hpre_rsa_dec(struct akcipher_request *req) struct hpre_sqe *msg = &hpre_req->req; int 
ret; - /* For 512 and 1536 bits key size, use soft tfm instead */ - if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || - ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { + /* For unsupported key size and unavailable devices, use soft tfm instead */ + if (ctx->fallback) { akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); ret = crypto_akcipher_decrypt(req); akcipher_request_set_tfm(req, tfm); @@ -912,8 +990,10 @@ static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value, ctx->key_sz = vlen; /* if invalid key size provided, we use software tfm */ - if (!hpre_rsa_key_size_is_support(ctx->key_sz)) + if (!hpre_rsa_key_size_is_support(ctx->key_sz)) { + ctx->fallback = true; return 0; + } ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1, &ctx->rsa.dma_pubkey, @@ -1048,6 +1128,9 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) unsigned int half_key_sz = ctx->key_sz >> 1; struct device *dev = ctx->dev; + if (!ctx->qp) + return; + if (is_clear_all) hisi_qm_stop_qp(ctx->qp); @@ -1130,6 +1213,7 @@ static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key, goto free; } + ctx->fallback = false; return 0; free: @@ -1147,6 +1231,9 @@ static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, if (ret) return ret; + if (!ctx->qp) + return 0; + return hpre_rsa_setkey(ctx, key, keylen, false); } @@ -1160,6 +1247,9 @@ static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, if (ret) return ret; + if (!ctx->qp) + return 0; + return hpre_rsa_setkey(ctx, key, keylen, true); } @@ -1167,9 +1257,8 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm) { struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); - /* For 512 and 1536 bits key size, use soft tfm instead */ - if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || - ctx->key_sz == HPRE_RSA_1536BITS_KSZ) + /* For unsupported key size and unavailable devices, use soft tfm instead */ + if (ctx->fallback) return crypto_akcipher_maxsize(ctx->rsa.soft_tfm); return ctx->key_sz; @@ -1190,10 +1279,14 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) hpre_align_pd()); ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); - if (ret) + if (ret && ret != -ENODEV) { crypto_free_akcipher(ctx->rsa.soft_tfm); + return ret; + } else if (ret == -ENODEV) { + ctx->fallback = true; + } - return ret; + return 0; } static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) @@ -1407,6 +1500,9 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, struct ecdh params; int ret; + if (ctx->fallback) + return crypto_kpp_set_secret(ctx->ecdh.soft_tfm, buf, len); + if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0) { dev_err(dev, "failed to decode ecdh key!\n"); return -EINVAL; @@ -1637,23 +1733,82 @@ static int hpre_ecdh_compute_value(struct kpp_request *req) return ret; } +static int hpre_ecdh_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + int ret; + + if (ctx->fallback) { + kpp_request_set_tfm(req, ctx->ecdh.soft_tfm); + ret = crypto_kpp_generate_public_key(req); + kpp_request_set_tfm(req, tfm); + return ret; + } + + return hpre_ecdh_compute_value(req); +} + +static int hpre_ecdh_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + int ret; + + if (ctx->fallback) { + kpp_request_set_tfm(req, ctx->ecdh.soft_tfm); + ret = crypto_kpp_compute_shared_secret(req); + kpp_request_set_tfm(req, tfm); + return ret; + } + + return 
hpre_ecdh_compute_value(req); +} + static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) + return crypto_kpp_maxsize(ctx->ecdh.soft_tfm); + /* max size is the pub_key_size, include x and y */ return ctx->key_sz << 1; } +static int hpre_ecdh_init_tfm(struct crypto_kpp *tfm) +{ + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + const char *alg = kpp_alg_name(tfm); + int ret; + + ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); + if (!ret) { + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + return 0; + } else if (ret && ret != -ENODEV) { + return ret; + } + + ctx->ecdh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->ecdh.soft_tfm)) { + pr_err("Failed to alloc %s tfm!\n", alg); + return PTR_ERR(ctx->ecdh.soft_tfm); + } + + crypto_kpp_set_flags(ctx->ecdh.soft_tfm, crypto_kpp_get_flags(tfm)); + ctx->fallback = true; + + return 0; +} + static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); ctx->curve_id = ECC_CURVE_NIST_P192; - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - - return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); + return hpre_ecdh_init_tfm(tfm); } static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) @@ -1663,9 +1818,7 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) ctx->curve_id = ECC_CURVE_NIST_P256; ctx->enable_hpcore = 1; - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - - return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); + return hpre_ecdh_init_tfm(tfm); } static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) @@ -1674,15 +1827,18 @@ static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) ctx->curve_id = ECC_CURVE_NIST_P384; - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - - return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); + return hpre_ecdh_init_tfm(tfm); } static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) { + crypto_free_kpp(ctx->ecdh.soft_tfm); + return; + } + hpre_ecc_clear_ctx(ctx, true, true); } @@ -1748,6 +1904,9 @@ static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, struct device *dev = ctx->dev; int ret = -EINVAL; + if (ctx->fallback) + return crypto_kpp_set_secret(ctx->curve25519.soft_tfm, buf, len); + if (len != CURVE25519_KEY_SIZE || !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) { dev_err(dev, "key is null or key len is not 32bytes!\n"); @@ -1993,26 +2152,83 @@ static int hpre_curve25519_compute_value(struct kpp_request *req) return ret; } +static int hpre_curve25519_generate_public_key(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + int ret; + + if (ctx->fallback) { + kpp_request_set_tfm(req, ctx->curve25519.soft_tfm); + ret = crypto_kpp_generate_public_key(req); + kpp_request_set_tfm(req, tfm); + return ret; + } + + return hpre_curve25519_compute_value(req); +} + +static int hpre_curve25519_compute_shared_secret(struct kpp_request *req) +{ + struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); + struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + int ret; + + if (ctx->fallback) { + kpp_request_set_tfm(req, ctx->curve25519.soft_tfm); + ret = crypto_kpp_compute_shared_secret(req); + kpp_request_set_tfm(req, tfm); + return ret; + } + + return hpre_curve25519_compute_value(req); +} + static 
unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) + return crypto_kpp_maxsize(ctx->curve25519.soft_tfm); + return ctx->key_sz; } static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + const char *alg = kpp_alg_name(tfm); + int ret; + + ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); + if (!ret) { + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + return 0; + } else if (ret && ret != -ENODEV) { + return ret; + } + + ctx->curve25519.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); + if (IS_ERR(ctx->curve25519.soft_tfm)) { + pr_err("Failed to alloc curve25519 tfm!\n"); + return PTR_ERR(ctx->curve25519.soft_tfm); + } - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + crypto_kpp_set_flags(ctx->curve25519.soft_tfm, crypto_kpp_get_flags(tfm)); + ctx->fallback = true; - return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); + return 0; } static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); + if (ctx->fallback) { + crypto_free_kpp(ctx->dh.soft_tfm); + return; + } + hpre_ecc_clear_ctx(ctx, true, false); } @@ -2032,13 +2248,14 @@ static struct akcipher_alg rsa = { .cra_name = "rsa", .cra_driver_name = "hpre-rsa", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; static struct kpp_alg dh = { .set_secret = hpre_dh_set_secret, - .generate_public_key = hpre_dh_compute_value, - .compute_shared_secret = hpre_dh_compute_value, + .generate_public_key = hpre_dh_generate_public_key, + .compute_shared_secret = hpre_dh_compute_shared_secret, .max_size = hpre_dh_max_size, .init = hpre_dh_init_tfm, .exit = hpre_dh_exit_tfm, @@ -2048,14 +2265,15 @@ static struct kpp_alg dh = { .cra_name = "dh", .cra_driver_name = "hpre-dh", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; static struct kpp_alg ecdh_curves[] = { { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_compute_value, - .compute_shared_secret = hpre_ecdh_compute_value, + .generate_public_key = hpre_ecdh_generate_public_key, + .compute_shared_secret = hpre_ecdh_compute_shared_secret, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p192_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -2065,11 +2283,12 @@ static struct kpp_alg ecdh_curves[] = { .cra_name = "ecdh-nist-p192", .cra_driver_name = "hpre-ecdh-nist-p192", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }, { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_compute_value, - .compute_shared_secret = hpre_ecdh_compute_value, + .generate_public_key = hpre_ecdh_generate_public_key, + .compute_shared_secret = hpre_ecdh_compute_shared_secret, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p256_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -2079,11 +2298,12 @@ static struct kpp_alg ecdh_curves[] = { .cra_name = "ecdh-nist-p256", .cra_driver_name = "hpre-ecdh-nist-p256", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }, { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_compute_value, - .compute_shared_secret = hpre_ecdh_compute_value, + .generate_public_key = hpre_ecdh_generate_public_key, + .compute_shared_secret = hpre_ecdh_compute_shared_secret, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p384_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -2093,14 +2313,15 @@ static struct kpp_alg ecdh_curves[] = { 
.cra_name = "ecdh-nist-p384", .cra_driver_name = "hpre-ecdh-nist-p384", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, } }; static struct kpp_alg curve25519_alg = { .set_secret = hpre_curve25519_set_secret, - .generate_public_key = hpre_curve25519_compute_value, - .compute_shared_secret = hpre_curve25519_compute_value, + .generate_public_key = hpre_curve25519_generate_public_key, + .compute_shared_secret = hpre_curve25519_compute_shared_secret, .max_size = hpre_curve25519_max_size, .init = hpre_curve25519_init_tfm, .exit = hpre_curve25519_exit_tfm, @@ -2110,6 +2331,7 @@ static struct kpp_alg curve25519_alg = { .cra_name = "curve25519", .cra_driver_name = "hpre-curve25519", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index d7a7826cb016..06d8e7aa048b 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3748,8 +3748,8 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, mutex_unlock(&qm_list->lock); if (ret) - pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", - node, alg_type, qp_num); + pr_info_ratelimited("Too busy to create qps, node[%d], alg[%u], qp[%d]!\n", + node, alg_type, qp_num); err: free_list(&head); -- 2.33.0
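The fallback plumbing added above follows the same pattern for each kpp algorithm. The sketch below condenses it, using the ecdh context as the example; the *_sketch names are illustrative, while the helpers, flags and fields (hpre_ctx_init(), crypto_alloc_kpp() with CRYPTO_ALG_NEED_FALLBACK, ctx->fallback, ctx->ecdh.soft_tfm) are taken from the diff.

/* Would sit in hpre_crypto.c alongside the patch's init/dispatch helpers. */

/* Init time: no hardware queue (-ENODEV) means allocate a soft tfm. */
static int init_tfm_sketch(struct crypto_kpp *tfm)
{
        struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
        int ret;

        ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
        if (!ret)
                return 0;                       /* hardware path available */
        if (ret != -ENODEV)
                return ret;

        ctx->ecdh.soft_tfm = crypto_alloc_kpp(kpp_alg_name(tfm), 0,
                                              CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->ecdh.soft_tfm))
                return PTR_ERR(ctx->ecdh.soft_tfm);

        ctx->fallback = true;
        return 0;
}

/* Request time: dispatch to the soft tfm when no hardware queue exists. */
static int generate_public_key_sketch(struct kpp_request *req)
{
        struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
        struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
        int ret;

        if (!ctx->fallback)
                return hpre_ecdh_compute_value(req);

        kpp_request_set_tfm(req, ctx->ecdh.soft_tfm);
        ret = crypto_kpp_generate_public_key(req);
        kpp_request_set_tfm(req, tfm);          /* restore the hpre tfm */
        return ret;
}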

From: Weili Qian <qianweili@huawei.com> driver inclusion category: bugfix bugzilla: https://gitee.com/openeuler/kernel/issues/ICESZH CVE: NA ---------------------------------------------------------------------- When hardware is abnormal, the values obtained from the hardware may be incorrect. Add checks to prevent unexpected errors when using these values. And remove the redundant vfs_num clearing. Fixes: 3e1d2c52b204 ("crypto: hisilicon - check _PS0 and _PR0 method") Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/debugfs.c | 1 + drivers/crypto/hisilicon/qm.c | 47 +++++++++++++++++++++++------- 2 files changed, 37 insertions(+), 11 deletions(-) diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 45e130b901eb..17eb236e9ee4 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -888,6 +888,7 @@ static int qm_diff_regs_init(struct hisi_qm *qm, dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); ret = PTR_ERR(qm->debug.acc_diff_regs); qm->debug.acc_diff_regs = NULL; + qm->debug.qm_diff_regs = NULL; return ret; } diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 06d8e7aa048b..3ba5de946d53 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -80,6 +80,8 @@ #define QM_EQ_OVERFLOW 1 #define QM_CQE_ERROR 2 +#define QM_XQC_MIN_DEPTH 1024 + #define QM_XQ_DEPTH_SHIFT 16 #define QM_XQ_DEPTH_MASK GENMASK(15, 0) @@ -3091,6 +3093,7 @@ static inline bool is_iommu_used(struct device *dev) static void hisi_qm_pre_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; + struct acpi_device *adev; if (qm->ver == QM_HW_V1) qm->ops = &qm_hw_ops_v1; @@ -3107,9 +3110,16 @@ static void hisi_qm_pre_init(struct hisi_qm *qm) init_rwsem(&qm->qps_lock); qm->qp_in_used = 0; qm->use_iommu = is_iommu_used(&pdev->dev); + /* + * If the firmware does not support power manageable, + * clear the flag to avoid entering the suspend and resume process later. + */ if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { - if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) - dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); + adev = ACPI_COMPANION(&pdev->dev); + if (!adev || !acpi_device_power_manageable(adev)) { + dev_info(&pdev->dev, "_PS0 and _PR0 are not defined!\n"); + clear_bit(QM_SUPPORT_RPM, &qm->caps); + } } } @@ -3814,7 +3824,6 @@ static int qm_clear_vft_config(struct hisi_qm *qm) if (ret) return ret; } - qm->vfs_num = 0; return 0; } @@ -4749,6 +4758,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm) qm_restart_prepare(qm); qm_dev_err_init(qm); qm_disable_axi_error(qm); + qm_restart_done(qm); if (qm->err_ini->open_axi_master_ooo) qm->err_ini->open_axi_master_ooo(qm); @@ -4773,8 +4783,6 @@ static int qm_controller_reset_done(struct hisi_qm *qm) pci_err(pdev, "failed to start by vfs in soft reset!\n"); qm_enable_axi_error(qm); qm_cmd_init(qm); - qm_restart_done(qm); - qm_reset_bit_clear(qm); return 0; @@ -5410,7 +5418,15 @@ static int qm_init_eq_work(struct hisi_qm *qm) if (!qm->poll_data) return ret; + /* Check the minimum value to avoid array out of boundary errors. 
*/ qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); + if (qm->eq_depth < QM_XQC_MIN_DEPTH || qm->aeq_depth < QM_XQC_MIN_DEPTH) { + dev_err(&qm->pdev->dev, "invalid, eq depth %u, aeq depth %u, the min value %u!\n", + qm->eq_depth, qm->aeq_depth, QM_XQC_MIN_DEPTH); + kfree(qm->poll_data); + return -EINVAL; + } + eq_depth = qm->eq_depth >> 1; for (i = 0; i < qm->qp_num; i++) { qm->poll_data[i].qp_finish_id = kcalloc(eq_depth, sizeof(u16), GFP_KERNEL); @@ -5595,9 +5611,11 @@ static int qm_get_hw_caps(struct hisi_qm *qm) if (val) set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); - if (qm->ver >= QM_HW_V3) { - val = readl(qm->io_base + QM_FUNC_CAPS_REG); - qm->cap_ver = val & QM_CAPBILITY_VERSION; + val = readl(qm->io_base + QM_FUNC_CAPS_REG); + qm->cap_ver = val & QM_CAPBILITY_VERSION; + if (qm->cap_ver == QM_CAPBILITY_VERSION) { + dev_err(&qm->pdev->dev, "Device is abnormal, return directly!\n"); + return -EINVAL; } /* Get PF/VF common capbility */ @@ -5787,17 +5805,24 @@ static int hisi_qp_alloc_memory(struct hisi_qm *qm) if (!qm->qp_array) return -ENOMEM; + /* Check the minimum value to avoid division by zero later. */ qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); + if (sq_depth < QM_XQC_MIN_DEPTH || cq_depth < QM_XQC_MIN_DEPTH) { + dev_err(dev, "invalid, sq depth %u, cq depth %u, the min value %u!\n", + sq_depth, cq_depth, QM_XQC_MIN_DEPTH); + kfree(qm->qp_array); + return -EINVAL; + } /* one more page for device or qp statuses */ qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; for (i = 0; i < qm->qp_num; i++) { ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); - if (ret) + if (ret) { + dev_err(dev, "failed to allocate qp dma buf size=%zx)\n", qp_dma_size); goto err_init_qp_mem; - - dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size); + } } return 0; -- 2.33.0
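The depth checks added above all have the same shape; here is a condensed sketch. check_depths_sketch() is an illustrative name (the real checks sit inline in qm_init_eq_work() and hisi_qp_alloc_memory()), while qm_get_xqc_depth(), QM_QP_DEPTH_CAP and QM_XQC_MIN_DEPTH come from qm.c and the patch.

/* Would sit in qm.c, where the helpers below are defined. */
static int check_depths_sketch(struct hisi_qm *qm)
{
        u16 sq_depth, cq_depth;

        /* Values are read back from hardware capability registers. */
        qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);

        /* Reject implausible values so later indexing and divisions stay safe. */
        if (sq_depth < QM_XQC_MIN_DEPTH || cq_depth < QM_XQC_MIN_DEPTH) {
                dev_err(&qm->pdev->dev,
                        "invalid sq depth %u, cq depth %u, min %u\n",
                        sq_depth, cq_depth, QM_XQC_MIN_DEPTH);
                return -EINVAL;
        }

        return 0;
}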

FeedBack: The patch(es) you have sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/16707
Mailing list address: https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/EJZ...