From: Yu'an Wang <wangyuan46@huawei.com>
driver inclusion
category: Bugfix
bugzilla: NA
CVE: NA
Check the CRYPTO_TFM_REQ_MAY_BACKLOG flag in the crypto driver so that the
number of queued tasks can be limited: when the SEC queue has already reached
the fake-busy limit, a request that does not carry this flag is rejected with
-EBUSY instead of being put on the backlog.
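For reference, a minimal caller-side sketch of the flag this patch starts
honouring (not part of the patch; demo_skcipher_encrypt() is a hypothetical
helper built only on the generic crypto API): with CRYPTO_TFM_REQ_MAY_BACKLOG
set, -EBUSY from the driver means the request was parked on the backlog and
will still complete asynchronously, while without the flag a full queue now
fails the request immediately with -EBUSY.

#include <linux/crypto.h>
#include <crypto/skcipher.h>

/* Hypothetical helper, for illustration only. */
static int demo_skcipher_encrypt(struct crypto_skcipher *tfm,
				 struct scatterlist *src,
				 struct scatterlist *dst,
				 unsigned int len, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	int ret;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/*
	 * MAY_BACKLOG asks the driver to queue the request when its
	 * hardware queue is full; without it, a full SEC queue now
	 * returns -EBUSY straight away.
	 */
	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY until completion */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	return ret;
}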
Signed-off-by: Yu'an Wang <wangyuan46@huawei.com>
Reviewed-by: Longfang Liu <liulongfang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/crypto/hisilicon/sec2/sec.h        |  1 +
 drivers/crypto/hisilicon/sec2/sec_crypto.c | 11 ++++++++---
 2 files changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index f6d1878edcc5c..51b93e25b750b 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -43,6 +43,7 @@ struct sec_req {
 
 	int err_type;
 	int req_id;
+	u32 flag;
 
 	/* Status of the SEC request */
 	bool fake_busy;
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 68fb0e5ef761d..482aa8d26640e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -171,10 +171,13 @@ static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
 	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
 	int ret;
 
-	mutex_lock(&qp_ctx->req_lock);
+	if (ctx->fake_req_limit <=
+	    atomic_read(&qp_ctx->qp->qp_status.used) &&
+	    !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		return -EBUSY;
 
+	mutex_lock(&qp_ctx->req_lock);
 	ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
-
 	if (ctx->fake_req_limit <=
 	    atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
 		list_add_tail(&req->backlog_head, &qp_ctx->backlog);
@@ -917,7 +920,8 @@ static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
 		sec_update_iv(req, ctx->alg_type);
 
 	ret = ctx->req_op->bd_send(ctx, req);
-	if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
+	    (ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
 		dev_err_ratelimited(SEC_CTX_DEV(ctx),
 			"send sec request failed!\n");
 		goto err_send_req;
@@ -1009,6 +1013,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
 	if (!sk_req->cryptlen)
 		return 0;
 
+	req->flag = sk_req->base.flags;
 	req->c_req.sk_req = sk_req;
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
From: Yu'an Wang <wangyuan46@huawei.com>
driver inclusion
category: Feature
bugzilla: NA
CVE: NA
Delete several unused definitions and APIs:

1. sq_head in struct hisi_qp_status is not used in any decision, so remove it
   together with qm_sq_head_update().
2. The HiSilicon crypto drivers only support asynchronous requests in kernel
   mode, so the hisi_qp_wait() logic is abandoned; a sketch of the asynchronous
   path that remains is shown below.
3. CONFIG_CRYPTO_QM_UACCE is redundant, so the option is removed and the UACCE
   support it guarded is built unconditionally.
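The asynchronous path that remains looks roughly like the sketch below
(illustrative only: struct my_msg, my_req_done() and my_submit() are made-up
names, while hisi_qp_send(), qp->req_cb and qp->qp_ctx are the driver's real
interfaces from qm.h). The submitter queues the SQE and completion is reported
from the QM completion-queue poll path through req_cb, so a blocking
hisi_qp_wait() has no users.

#include <linux/completion.h>
#include "qm.h"

/* Hypothetical per-request bookkeeping, for illustration only. */
struct my_msg {
	struct completion done;
};

/* Completion callback, invoked from the QM CQ poll path. */
static void my_req_done(struct hisi_qp *qp, void *resp)
{
	struct my_msg *msg = qp->qp_ctx;

	complete(&msg->done);
}

/* Submit one SQE; the result is reported through my_req_done(). */
static int my_submit(struct hisi_qp *qp, struct my_msg *msg, const void *sqe)
{
	init_completion(&msg->done);
	qp->qp_ctx = msg;
	qp->req_cb = my_req_done;

	return hisi_qp_send(qp, sqe);	/* no synchronous hisi_qp_wait() needed */
}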
Signed-off-by: Yu'an Wang <wangyuan46@huawei.com>
Reviewed-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 drivers/crypto/hisilicon/Kconfig          |  6 --
 drivers/crypto/hisilicon/hpre/hpre_main.c | 12 +---
 drivers/crypto/hisilicon/qm.c             | 76 +----
 drivers/crypto/hisilicon/qm.h             | 20 +-----
 drivers/crypto/hisilicon/rde/rde_main.c   |  5 --
 drivers/crypto/hisilicon/sec2/sec_main.c  |  4 --
 drivers/crypto/hisilicon/zip/zip_main.c   |  9 +--
 7 files changed, 7 insertions(+), 125 deletions(-)
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 95cb605baf885..a40648d7acafc 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -16,13 +16,7 @@ config CRYPTO_DEV_HISI_SEC
 config CRYPTO_DEV_HISI_QM
 	tristate
 	depends on ARM64 && PCI
-
-config CRYPTO_QM_UACCE
-	bool "enable UACCE support for all acceleartor with Hisi QM"
-	depends on CRYPTO_DEV_HISI_QM
 	select UACCE
-	help
-	  Support UACCE interface in Hisi QM.
 
 config CRYPTO_DEV_HISI_ZIP
 	tristate "Support for HISI ZIP Driver"
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 6c5dc53c92fa6..1a980f255ad48 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -182,7 +182,6 @@ static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
 	"invalid_req_cnt"
 };
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 static int uacce_mode_set(const char *val, const struct kernel_param *kp)
 {
 	return mode_set(val, kp);
@@ -196,7 +195,6 @@ static const struct kernel_param_ops uacce_mode_ops = {
 static int uacce_mode = UACCE_MODE_NOUACCE;
 module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
 MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-#endif
 
 static int pf_q_num_set(const char *val, const struct kernel_param *kp)
 {
@@ -786,10 +784,8 @@ static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
 {
 	int ret;
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	qm->algs = "rsa\ndh\n";
 	qm->uacce_mode = uacce_mode;
-#endif
 	qm->pdev = pdev;
 	ret = hisi_qm_pre_init(qm, pf_q_num, HPRE_PF_DEF_Q_BASE);
 	if (ret)
@@ -960,10 +956,8 @@ static void hpre_remove(struct pci_dev *pdev)
 	struct hisi_qm *qm = pci_get_drvdata(pdev);
 	int ret;
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
-	if (uacce_mode != UACCE_MODE_NOUACCE)
-		hisi_qm_remove_wait_delay(qm, &hpre_devices);
-#endif
+	hisi_qm_remove_wait_delay(qm, &hpre_devices);
+
 	hpre_algs_unregister();
 	hisi_qm_del_from_list(qm, &hpre_devices);
 	if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
@@ -990,10 +984,8 @@ static void hpre_remove(struct pci_dev *pdev)
 static const struct pci_error_handlers hpre_err_handler = {
 	.error_detected = hisi_qm_dev_err_detected,
 	.slot_reset = hisi_qm_dev_slot_reset,
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	.reset_prepare = hisi_qm_reset_prepare,
 	.reset_done = hisi_qm_reset_done,
-#endif
 };
 
 static struct pci_driver hpre_pci_driver = {
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index a6bf95cb7baf6..8fa855fd23875 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -12,7 +12,6 @@
 #include <linux/log2.h>
 #include <linux/seq_file.h>
 #include "qm.h"
-#include "qm_usr_if.h"
 
 /* eq/aeq irq enable */
 #define QM_VF_AEQ_INT_SOURCE		0x0
@@ -509,14 +508,6 @@ static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
 	return QM_IRQ_NUM_VF_V2;
 }
 
-static void qm_sq_head_update(struct hisi_qp *qp)
-{
-	if (qp->qp_status.sq_head == QM_Q_DEPTH - 1)
-		qp->qp_status.sq_head = 0;
-	else
-		qp->qp_status.sq_head++;
-}
-
 static void qm_cq_head_update(struct hisi_qp *qp)
 {
 	if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
@@ -531,7 +522,7 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
 {
 	struct qm_cqe *cqe;
 
-	if (atomic_read(&qp->qp_status.flags) == QP_STOP)
+	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
 		return;
 
 	if (qp->event_cb)
@@ -548,25 +539,11 @@ static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
 				cqe = qp->cqe + qp->qp_status.cq_head;
 				qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
 				      qp->qp_status.cq_head, 0);
-				qm_sq_head_update(qp);
 				atomic_dec(&qp->qp_status.used);
 			}
 			/* set c_flag */
 			qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
 			      qp->qp_status.cq_head, 1);
-		} else {
-			if (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
-				dma_rmb();
-				complete(&qp->completion);
-				qm_cq_head_update(qp);
-				cqe = qp->cqe + qp->qp_status.cq_head;
-				qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
-				      qp->qp_status.cq_head, 0);
-				/* set c_flag */
-				qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
-				      qp->qp_status.cq_head, 1);
-				atomic_dec(&qp->qp_status.used);
-			}
 		}
 	}
 }
@@ -747,7 +724,6 @@ static void qm_init_qp_status(struct hisi_qp *qp)
 	struct hisi_qp_status *qp_status = &qp->qp_status;
 
 	qp_status->sq_tail = 0;
-	qp_status->sq_head = 0;
 	qp_status->cq_head = 0;
 	qp_status->cqc_phase = true;
 	atomic_set(&qp_status->used, 0);
@@ -1778,7 +1754,6 @@ static struct hisi_qp *hisi_qm_create_qp_nolock(struct hisi_qm *qm,
 	qp->alg_type = alg_type;
 	qp->c_flag = 1;
 	qp->is_in_kernel = true;
-	init_completion(&qp->completion);
 	qm->free_qp_num--;
 	atomic_set(&qp->qp_status.flags, QP_INIT);
 
@@ -2122,28 +2097,6 @@
 }
 EXPORT_SYMBOL_GPL(hisi_qp_send);
 
-/**
- * hisi_qp_wait() - Wait a task in qp to finish.
- * @qp: The qp which will wait.
- *
- * This function will block and wait task finish in qp, or return -ETIME for
- * timeout.
- *
- * This function should be called after hisi_qp_send.
- */
-int hisi_qp_wait(struct hisi_qp *qp)
-{
-	if (wait_for_completion_timeout(&qp->completion,
-					msecs_to_jiffies(TASK_TIMEOUT)) == 0) {
-		atomic_dec(&qp->qp_status.used);
-		dev_err(&qp->qm->pdev->dev, "QM task timeout\n");
-		return -ETIME;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(hisi_qp_wait);
-
 static void hisi_qm_cache_wb(struct hisi_qm *qm)
 {
 	unsigned int val;
@@ -2170,7 +2123,6 @@ int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
 }
 EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 static void qm_qp_event_notifier(struct hisi_qp *qp)
 {
 	uacce_wake_up(qp->uacce_q);
@@ -2229,7 +2181,6 @@ static int hisi_qm_uacce_get_queue(struct uacce *uacce, unsigned long arg,
 	qp->event_cb = qm_qp_event_notifier;
 	qp->pasid = arg;
 	qp->is_in_kernel = false;
-	init_waitqueue_head(&wd_q->wait);
 
 	up_write(&qm->qps_lock);
 	return 0;
@@ -2465,7 +2416,6 @@ static int qm_unregister_uacce(struct hisi_qm *qm)
 
 	return 0;
 }
-#endif
 
 /**
  * hisi_qm_frozen() - Try to froze QM to cut continuous queue request. If
@@ -2586,11 +2536,8 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
 		off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
 	} while (0)
 
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	if (qm->use_uacce)
 		hisi_qm_uacce_memory_init(qm);
-#endif
 
 	idr_init(&qm->qp_idr);
 	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
@@ -2654,10 +2601,8 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
 		goto err_request_mem_regions;
 	}
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
 	qm->size = pci_resource_len(qm->pdev, PCI_BAR_2);
-#endif
 	qm->io_base = devm_ioremap(dev, pci_resource_start(pdev, PCI_BAR_2),
 				   pci_resource_len(qm->pdev, PCI_BAR_2));
 	if (!qm->io_base) {
@@ -2724,7 +2669,6 @@ int hisi_qm_init(struct hisi_qm *qm)
 		return -EINVAL;
 	}
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	if (qm->use_uacce) {
 		dev_info(dev, "qm register to uacce\n");
 		ret = qm_register_uacce(qm);
@@ -2733,7 +2677,6 @@ int hisi_qm_init(struct hisi_qm *qm)
 			return ret;
 		}
 	}
-#endif
 
 	ret = hisi_qm_pci_init(qm);
 	if (ret)
@@ -2771,10 +2714,8 @@ int hisi_qm_init(struct hisi_qm *qm)
 err_irq_register:
 	hisi_qm_pci_uninit(qm);
 err_pci_init:
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	if (qm->use_uacce)
 		qm_unregister_uacce(qm);
-#endif
 
 	return ret;
 }
@@ -2810,13 +2751,10 @@ void hisi_qm_uninit(struct hisi_qm *qm)
 
 	qm_irq_unregister(qm);
 	hisi_qm_pci_uninit(qm);
-
 	up_write(&qm->qps_lock);
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	if (qm->use_uacce)
 		uacce_unregister(&qm->uacce);
-#endif
 }
 EXPORT_SYMBOL_GPL(hisi_qm_uninit);
@@ -3138,15 +3076,12 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
 
 	if (qm->status.stop_reason == QM_SOFT_RESET ||
 	    qm->status.stop_reason == QM_FLR) {
-#ifdef CONFIG_CRYPTO_QM_UACCE
 		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
-#endif
 		ret = qm_stop_started_qp(qm);
 		if (ret < 0)
 			goto err_unlock;
-#ifdef CONFIG_CRYPTO_QM_UACCE
+
 		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
-#endif
 	}
 
 	/* Mask eq and aeq irq */
@@ -3833,7 +3768,6 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
 		return ret;
 	}
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	if (qm->use_uacce) {
 		ret = uacce_hw_err_isolate(&qm->uacce);
 		if (ret) {
@@ -3841,7 +3775,6 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
 			return ret;
 		}
 	}
-#endif
 
 	return 0;
 }
@@ -4107,22 +4040,17 @@ int hisi_qm_controller_reset(struct hisi_qm *qm)
 	return 0;
 
 err_prepare:
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	pci_info(pdev, "Controller reset_prepare failed\n");
 	writel(MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + MASTER_GLOBAL_CTRL);
 	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
 	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
-#endif
 
 err_reset:
 	pci_info(pdev, "Controller reset failed\n");
 	clear_bit(QM_DEV_RESET_STATUS, &qm->hw_status);
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	/* if resetting fails, isolate the device */
 	if (qm->use_uacce && !qm->uacce.is_vf)
 		atomic_set(&qm->uacce.isolate->is_isolate, 1);
-#endif
 
 	return ret;
 }
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 73e231fac1a6c..d7d23d1ec34c5 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -10,11 +10,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
 #include <linux/uacce.h>
-#endif
-
 #include "qm_usr_if.h"
 
 #define QNUM_V1			4096
@@ -333,13 +329,12 @@ struct hisi_qm {
 	bool use_sva;
 	bool is_frozen;
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	resource_size_t phys_base;
 	resource_size_t size;
 	struct uacce uacce;
 	const char *algs;
 	int uacce_mode;
-#endif
+
 	struct workqueue_struct *wq;
 	struct work_struct work;
 	/* design for module not support aer, such as rde */
@@ -350,7 +345,6 @@ struct hisi_qp_status {
 	atomic_t used;
 	atomic_t send_ref;
 	u16 sq_tail;
-	u16 sq_head;
 	u16 cq_head;
 	bool cqc_phase;
 	atomic_t flags;
@@ -373,7 +367,6 @@ struct hisi_qp {
 	dma_addr_t cqe_dma;
 
 	struct hisi_qp_status qp_status;
-	struct completion completion;
 	struct hisi_qp_ops *hw_ops;
 	void *qp_ctx;
 	void (*req_cb)(struct hisi_qp *qp, void *data);
@@ -382,11 +375,8 @@ struct hisi_qp {
 	struct hisi_qm *qm;
 	bool is_resetting;
 	bool is_in_kernel;
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	u16 pasid;
 	struct uacce_queue *uacce_q;
-#endif
 };
 
 static inline int q_num_set(const char *val, const struct kernel_param *kp,
@@ -444,7 +434,6 @@ static inline int vf_num_set(const char *val, const struct kernel_param *kp)
 	return param_set_int(val, kp);
 }
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 static inline int mode_set(const char *val, const struct kernel_param *kp)
 {
 	u32 n;
@@ -460,7 +449,6 @@ static inline int mode_set(const char *val, const struct kernel_param *kp)
 
 	return param_set_int(val, kp);
 }
-#endif
 
 static inline void hisi_qm_add_to_list(struct hisi_qm *qm,
 				       struct hisi_qm_list *qm_list)
@@ -495,7 +483,6 @@ static inline int hisi_qm_pre_init(struct hisi_qm *qm,
 
 	pci_set_drvdata(pdev, qm);
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	switch (qm->uacce_mode) {
 	case UACCE_MODE_NOUACCE:
 		qm->use_uacce = false;
@@ -507,9 +494,7 @@ static inline int hisi_qm_pre_init(struct hisi_qm *qm,
 		pci_err(pdev, "uacce mode error!\n");
 		return -EINVAL;
 	}
-#else
-	qm->use_uacce = false;
-#endif
+
 	if (qm->fun_type == QM_HW_PF) {
 		qm->qp_base = def_q_num;
 		qm->qp_num = pf_q_num;
@@ -534,7 +519,6 @@ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
 int hisi_qm_stop_qp(struct hisi_qp *qp);
 void hisi_qm_release_qp(struct hisi_qp *qp);
 int hisi_qp_send(struct hisi_qp *qp, const void *msg);
-int hisi_qp_wait(struct hisi_qp *qp);
 int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
 int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
 void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
diff --git a/drivers/crypto/hisilicon/rde/rde_main.c b/drivers/crypto/hisilicon/rde/rde_main.c
index 946532fe06699..f3f70079aa773 100644
--- a/drivers/crypto/hisilicon/rde/rde_main.c
+++ b/drivers/crypto/hisilicon/rde/rde_main.c
@@ -188,7 +188,6 @@ static struct debugfs_reg32 hrde_ooo_dfx_regs[] = {
 	{"HRDE_AM_CURR_WR_TXID_STS_2", 0x300178ull},
 };
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 static int uacce_mode_set(const char *val, const struct kernel_param *kp)
 {
 	return mode_set(val, kp);
@@ -202,7 +201,6 @@ static const struct kernel_param_ops uacce_mode_ops = {
 static int uacce_mode = UACCE_MODE_NOUACCE;
 module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
 MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-#endif
 
 static int pf_q_num_set(const char *val, const struct kernel_param *kp)
 {
@@ -681,11 +679,8 @@ static int hisi_rde_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
 {
 	int ret;
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	qm->algs = "ec\n";
 	qm->uacce_mode = uacce_mode;
-#endif
-
 	qm->pdev = pdev;
 	ret = hisi_qm_pre_init(qm, pf_q_num, HRDE_PF_DEF_Q_BASE);
 	if (ret)
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index deda0f679ff65..58f726ba022f9 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -206,7 +206,6 @@ struct hisi_qp **sec_create_qps(void)
 	return NULL;
 }
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 static int uacce_mode_set(const char *val, const struct kernel_param *kp)
 {
 	return mode_set(val, kp);
@@ -220,7 +219,6 @@ static const struct kernel_param_ops sec_uacce_mode_ops = {
 static u32 uacce_mode = UACCE_MODE_NOUACCE;
 module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
 MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-#endif
 
 static int pf_q_num_set(const char *val, const struct kernel_param *kp)
 {
@@ -815,10 +813,8 @@ static int sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
 {
 	int ret;
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	qm->algs = "sec\ncipher\ndigest\naead\n";
 	qm->uacce_mode = uacce_mode;
-#endif
 	qm->pdev = pdev;
 	ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
 	if (ret)
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 49dbf90daa980..17bbab6675536 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -217,7 +217,6 @@ static struct debugfs_reg32 hzip_dfx_regs[] = {
 	{"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
 };
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 static int uacce_mode_set(const char *val, const struct kernel_param *kp)
 {
 	return mode_set(val, kp);
@@ -231,7 +230,6 @@ static const struct kernel_param_ops uacce_mode_ops = {
 static int uacce_mode = UACCE_MODE_NOUACCE;
 module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
 MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-#endif
 
 static int pf_q_num_set(const char *val, const struct kernel_param *kp)
 {
@@ -815,10 +813,8 @@ static int hisi_zip_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
 {
 	int ret;
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
 	qm->algs = "zlib\ngzip\nxts(sm4)\nxts(aes)\n";
 	qm->uacce_mode = uacce_mode;
-#endif
 	qm->pdev = pdev;
 	ret = hisi_qm_pre_init(qm, pf_q_num, HZIP_PF_DEF_Q_BASE);
 	if (ret)
@@ -916,10 +912,7 @@ static void hisi_zip_remove(struct pci_dev *pdev)
 {
 	struct hisi_qm *qm = pci_get_drvdata(pdev);
 
-#ifdef CONFIG_CRYPTO_QM_UACCE
-	if (uacce_mode != UACCE_MODE_NOUACCE)
-		hisi_qm_remove_wait_delay(qm, &zip_devices);
-#endif
+	hisi_qm_remove_wait_delay(qm, &zip_devices);
 
 	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
 		hisi_qm_sriov_disable(pdev, NULL);