From: JiangShui Yang <yangjiangshui@h-partners.com> driver inclusion category: cleanup bugzilla: https://gitee.com/openeuler/kernel/issues/IDAQ94 CVE: NA ---------------------------------------------------------------------- Use the mainline patch to replace it. This reverts commit 1435c045dd96c51426a0b0a0ba9c43148fb89dae. Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com> --- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 312 +++----------------- drivers/crypto/hisilicon/qm.c | 4 +- 2 files changed, 47 insertions(+), 269 deletions(-) diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 84c8d3e2cc93..48f909957983 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -94,7 +94,6 @@ struct hpre_dh_ctx { char *g; /* m */ dma_addr_t dma_g; - struct crypto_kpp *soft_tfm; }; struct hpre_ecdh_ctx { @@ -105,7 +104,6 @@ struct hpre_ecdh_ctx { /* low address: x->y */ unsigned char *g; dma_addr_t dma_g; - struct crypto_kpp *soft_tfm; }; struct hpre_curve25519_ctx { @@ -116,7 +114,6 @@ struct hpre_curve25519_ctx { /* gx coordinate */ unsigned char *g; dma_addr_t dma_g; - struct crypto_kpp *soft_tfm; }; struct hpre_ctx { @@ -136,7 +133,6 @@ struct hpre_ctx { unsigned int curve_id; /* for high performance core */ u8 enable_hpcore; - bool fallback; }; struct hpre_asym_request { @@ -181,7 +177,7 @@ static struct hisi_qp *hpre_get_qp_and_start(u8 type) qp = hpre_create_qp(type); if (!qp) { - pr_info_ratelimited("Can not create hpre qp, alloc soft tfm!\n"); + pr_err("Can not create hpre qp!\n"); return ERR_PTR(-ENODEV); } @@ -422,10 +418,8 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) struct hpre *hpre; qp = hpre_get_qp_and_start(type); - if (IS_ERR(qp)) { - ctx->qp = NULL; - return -ENODEV; - } + if (IS_ERR(qp)) + return PTR_ERR(qp); qp->qp_ctx = ctx; qp->req_cb = hpre_alg_cb; @@ -555,48 +549,6 @@ static int hpre_dh_compute_value(struct kpp_request 
*req) return ret; } -static struct kpp_request *hpre_dh_prepare_fb_req(struct kpp_request *req) -{ - struct kpp_request *fb_req = kpp_request_ctx(req); - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - - kpp_request_set_tfm(fb_req, ctx->dh.soft_tfm); - kpp_request_set_callback(fb_req, req->base.flags, req->base.complete, req->base.data); - kpp_request_set_input(fb_req, req->src, req->src_len); - kpp_request_set_output(fb_req, req->dst, req->dst_len); - - return fb_req; -} - -static int hpre_dh_generate_public_key(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - struct kpp_request *fb_req; - - if (ctx->fallback) { - fb_req = hpre_dh_prepare_fb_req(req); - return crypto_kpp_generate_public_key(fb_req); - } - - return hpre_dh_compute_value(req); -} - -static int hpre_dh_compute_shared_secret(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - struct kpp_request *fb_req; - - if (ctx->fallback) { - fb_req = hpre_dh_prepare_fb_req(req); - return crypto_kpp_compute_shared_secret(fb_req); - } - - return hpre_dh_compute_value(req); -} - static int hpre_is_dh_params_length_valid(unsigned int key_sz) { #define _HPRE_DH_GRP1 768 @@ -623,6 +575,13 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params) struct device *dev = ctx->dev; unsigned int sz; + if (params->p_size > HPRE_DH_MAX_P_SZ) + return -EINVAL; + + if (hpre_is_dh_params_length_valid(params->p_size << + HPRE_BITS_2_BYTES_SHIFT)) + return -EINVAL; + sz = ctx->key_sz = params->p_size; ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1, &ctx->dh.dma_xa_p, GFP_KERNEL); @@ -655,9 +614,6 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) struct device *dev = ctx->dev; unsigned int sz = ctx->key_sz; - if (!ctx->qp) - return; - if (is_clear_all) hisi_qm_stop_qp(ctx->qp); @@ -686,13 +642,6 @@ static 
int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf, if (crypto_dh_decode_key(buf, len, &params) < 0) return -EINVAL; - if (!ctx->qp) - goto set_soft_secret; - - if (hpre_is_dh_params_length_valid(params.p_size << - HPRE_BITS_2_BYTES_SHIFT)) - goto set_soft_secret; - /* Free old secret if any */ hpre_dh_clear_ctx(ctx, false); @@ -703,55 +652,27 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf, memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key, params.key_size); - ctx->fallback = false; return 0; err_clear_ctx: hpre_dh_clear_ctx(ctx, false); return ret; -set_soft_secret: - ctx->fallback = true; - return crypto_kpp_set_secret(ctx->dh.soft_tfm, buf, len); } static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - if (ctx->fallback) - return crypto_kpp_maxsize(ctx->dh.soft_tfm); - return ctx->key_sz; } static int hpre_dh_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - const char *alg = kpp_alg_name(tfm); - unsigned int reqsize; - int ret; - - ctx->dh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->dh.soft_tfm)) { - pr_err("Failed to alloc dh tfm!\n"); - return PTR_ERR(ctx->dh.soft_tfm); - } - crypto_kpp_set_flags(ctx->dh.soft_tfm, crypto_kpp_get_flags(tfm)); + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - reqsize = max(sizeof(struct hpre_asym_request) + hpre_align_pd(), - sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->dh.soft_tfm)); - kpp_set_reqsize(tfm, reqsize); - - ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); - if (ret && ret != -ENODEV) { - crypto_free_kpp(ctx->dh.soft_tfm); - return ret; - } else if (ret == -ENODEV) { - ctx->fallback = true; - } - - return 0; + return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); } static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) @@ -759,7 +680,6 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); 
hpre_dh_clear_ctx(ctx, true); - crypto_free_kpp(ctx->dh.soft_tfm); } static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len) @@ -799,8 +719,9 @@ static int hpre_rsa_enc(struct akcipher_request *req) struct hpre_sqe *msg = &hpre_req->req; int ret; - /* For unsupported key size and unavailable devices, use soft tfm instead */ - if (ctx->fallback) { + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); ret = crypto_akcipher_encrypt(req); akcipher_request_set_tfm(req, tfm); @@ -845,8 +766,9 @@ static int hpre_rsa_dec(struct akcipher_request *req) struct hpre_sqe *msg = &hpre_req->req; int ret; - /* For unsupported key size and unavailable devices, use soft tfm instead */ - if (ctx->fallback) { + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); ret = crypto_akcipher_decrypt(req); akcipher_request_set_tfm(req, tfm); @@ -899,10 +821,8 @@ static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value, ctx->key_sz = vlen; /* if invalid key size provided, we use software tfm */ - if (!hpre_rsa_key_size_is_support(ctx->key_sz)) { - ctx->fallback = true; + if (!hpre_rsa_key_size_is_support(ctx->key_sz)) return 0; - } ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1, &ctx->rsa.dma_pubkey, @@ -1037,9 +957,6 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) unsigned int half_key_sz = ctx->key_sz >> 1; struct device *dev = ctx->dev; - if (!ctx->qp) - return; - if (is_clear_all) hisi_qm_stop_qp(ctx->qp); @@ -1122,7 +1039,6 @@ static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key, goto free; } - ctx->fallback = false; return 0; free: @@ -1140,9 +1056,6 @@ static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, if 
(ret) return ret; - if (!ctx->qp) - return 0; - return hpre_rsa_setkey(ctx, key, keylen, false); } @@ -1156,9 +1069,6 @@ static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, if (ret) return ret; - if (!ctx->qp) - return 0; - return hpre_rsa_setkey(ctx, key, keylen, true); } @@ -1166,8 +1076,9 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm) { struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); - /* For unsupported key size and unavailable devices, use soft tfm instead */ - if (ctx->fallback) + /* For 512 and 1536 bits key size, use soft tfm instead */ + if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || + ctx->key_sz == HPRE_RSA_1536BITS_KSZ) return crypto_akcipher_maxsize(ctx->rsa.soft_tfm); return ctx->key_sz; @@ -1188,14 +1099,10 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) hpre_align_pd()); ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); - if (ret && ret != -ENODEV) { + if (ret) crypto_free_akcipher(ctx->rsa.soft_tfm); - return ret; - } else if (ret == -ENODEV) { - ctx->fallback = true; - } - return 0; + return ret; } static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) @@ -1409,9 +1316,6 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, struct ecdh params; int ret; - if (ctx->fallback) - return crypto_kpp_set_secret(ctx->ecdh.soft_tfm, buf, len); - if (crypto_ecdh_decode_key(buf, len, &params) < 0) { dev_err(dev, "failed to decode ecdh key!\n"); return -EINVAL; } @@ -1637,82 +1541,23 @@ static int hpre_ecdh_compute_value(struct kpp_request *req) return ret; } -static int hpre_ecdh_generate_public_key(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - int ret; - - if (ctx->fallback) { - kpp_request_set_tfm(req, ctx->ecdh.soft_tfm); - ret = crypto_kpp_generate_public_key(req); - kpp_request_set_tfm(req, tfm); - return ret; - } - - return hpre_ecdh_compute_value(req); -} - -static int hpre_ecdh_compute_shared_secret(struct 
kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - int ret; - - if (ctx->fallback) { - kpp_request_set_tfm(req, ctx->ecdh.soft_tfm); - ret = crypto_kpp_compute_shared_secret(req); - kpp_request_set_tfm(req, tfm); - return ret; - } - - return hpre_ecdh_compute_value(req); -} - static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - if (ctx->fallback) - return crypto_kpp_maxsize(ctx->ecdh.soft_tfm); - /* max size is the pub_key_size, include x and y */ return ctx->key_sz << 1; } -static int hpre_ecdh_init_tfm(struct crypto_kpp *tfm) -{ - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - const char *alg = kpp_alg_name(tfm); - int ret; - - ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); - if (!ret) { - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - return 0; - } else if (ret && ret != -ENODEV) { - return ret; - } - - ctx->ecdh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->ecdh.soft_tfm)) { - pr_err("Failed to alloc %s tfm!\n", alg); - return PTR_ERR(ctx->ecdh.soft_tfm); - } - - crypto_kpp_set_flags(ctx->ecdh.soft_tfm, crypto_kpp_get_flags(tfm)); - ctx->fallback = true; - - return 0; -} - static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); ctx->curve_id = ECC_CURVE_NIST_P192; - return hpre_ecdh_init_tfm(tfm); + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) @@ -1722,7 +1567,9 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) ctx->curve_id = ECC_CURVE_NIST_P256; ctx->enable_hpcore = 1; - return hpre_ecdh_init_tfm(tfm); + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static int 
hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) @@ -1731,18 +1578,15 @@ static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) ctx->curve_id = ECC_CURVE_NIST_P384; - return hpre_ecdh_init_tfm(tfm); + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); + + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_kpp(ctx->ecdh.soft_tfm); - return; - } - hpre_ecc_clear_ctx(ctx, true, true); } @@ -1808,9 +1652,6 @@ static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, struct device *dev = ctx->dev; int ret = -EINVAL; - if (ctx->fallback) - return crypto_kpp_set_secret(ctx->curve25519.soft_tfm, buf, len); - if (len != CURVE25519_KEY_SIZE || !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) { dev_err(dev, "key is null or key len is not 32bytes!\n"); @@ -2051,83 +1892,26 @@ static int hpre_curve25519_compute_value(struct kpp_request *req) return ret; } -static int hpre_curve25519_generate_public_key(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - int ret; - - if (ctx->fallback) { - kpp_request_set_tfm(req, ctx->curve25519.soft_tfm); - ret = crypto_kpp_generate_public_key(req); - kpp_request_set_tfm(req, tfm); - return ret; - } - - return hpre_curve25519_compute_value(req); -} - -static int hpre_curve25519_compute_shared_secret(struct kpp_request *req) -{ - struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); - struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - int ret; - - if (ctx->fallback) { - kpp_request_set_tfm(req, ctx->curve25519.soft_tfm); - ret = crypto_kpp_compute_shared_secret(req); - kpp_request_set_tfm(req, tfm); - return ret; - } - - return hpre_curve25519_compute_value(req); -} - static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - if 
(ctx->fallback) - return crypto_kpp_maxsize(ctx->curve25519.soft_tfm); - return ctx->key_sz; } static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - const char *alg = kpp_alg_name(tfm); - int ret; - - ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); - if (!ret) { - kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - return 0; - } else if (ret && ret != -ENODEV) { - return ret; - } - - ctx->curve25519.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(ctx->curve25519.soft_tfm)) { - pr_err("Failed to alloc curve25519 tfm!\n"); - return PTR_ERR(ctx->curve25519.soft_tfm); - } - crypto_kpp_set_flags(ctx->curve25519.soft_tfm, crypto_kpp_get_flags(tfm)); - ctx->fallback = true; + kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); - return 0; + return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); - if (ctx->fallback) { - crypto_free_kpp(ctx->curve25519.soft_tfm); - return; - } - hpre_ecc_clear_ctx(ctx, true, false); } @@ -2147,14 +1931,13 @@ static struct akcipher_alg rsa = { .cra_name = "rsa", .cra_driver_name = "hpre-rsa", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; static struct kpp_alg dh = { .set_secret = hpre_dh_set_secret, - .generate_public_key = hpre_dh_generate_public_key, - .compute_shared_secret = hpre_dh_compute_shared_secret, + .generate_public_key = hpre_dh_compute_value, + .compute_shared_secret = hpre_dh_compute_value, .max_size = hpre_dh_max_size, .init = hpre_dh_init_tfm, .exit = hpre_dh_exit_tfm, @@ -2164,15 +1947,14 @@ static struct kpp_alg dh = { .cra_name = "dh", .cra_driver_name = "hpre-dh", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; static struct kpp_alg ecdh_curves[] = { { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_generate_public_key, - 
.compute_shared_secret = hpre_ecdh_compute_shared_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p192_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -2182,12 +1964,11 @@ static struct kpp_alg ecdh_curves[] = { .cra_name = "ecdh-nist-p192", .cra_driver_name = "hpre-ecdh-nist-p192", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }, { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_generate_public_key, - .compute_shared_secret = hpre_ecdh_compute_shared_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p256_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -2197,12 +1978,11 @@ static struct kpp_alg ecdh_curves[] = { .cra_name = "ecdh-nist-p256", .cra_driver_name = "hpre-ecdh-nist-p256", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }, { .set_secret = hpre_ecdh_set_secret, - .generate_public_key = hpre_ecdh_generate_public_key, - .compute_shared_secret = hpre_ecdh_compute_shared_secret, + .generate_public_key = hpre_ecdh_compute_value, + .compute_shared_secret = hpre_ecdh_compute_value, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p384_init_tfm, .exit = hpre_ecdh_exit_tfm, @@ -2212,15 +1992,14 @@ static struct kpp_alg ecdh_curves[] = { .cra_name = "ecdh-nist-p384", .cra_driver_name = "hpre-ecdh-nist-p384", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, } }; static struct kpp_alg curve25519_alg = { .set_secret = hpre_curve25519_set_secret, - .generate_public_key = hpre_curve25519_generate_public_key, - .compute_shared_secret = hpre_curve25519_compute_shared_secret, + .generate_public_key = hpre_curve25519_compute_value, + .compute_shared_secret = hpre_curve25519_compute_value, .max_size = hpre_curve25519_max_size, .init = hpre_curve25519_init_tfm, 
.exit = hpre_curve25519_exit_tfm, @@ -2230,7 +2009,6 @@ static struct kpp_alg curve25519_alg = { .cra_name = "curve25519", .cra_driver_name = "hpre-curve25519", .cra_module = THIS_MODULE, - .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index c9d4745cb789..d0fbf9407a7e 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3804,8 +3804,8 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, mutex_unlock(&qm_list->lock); if (ret) - pr_info_ratelimited("Too busy to create qps, node[%d], alg[%u], qp[%d]!\n", - node, alg_type, qp_num); + pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", + node, alg_type, qp_num); err: free_list(&head); -- 2.43.0