
From: Weili Qian <qianweili@huawei.com>

driver inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICESZH
CVE: NA

----------------------------------------------------------------------

There is an upper limit on the number of hardware queues. When all
hardware queues are busy, new processes fail to apply for a queue. To
avoid affecting tasks, support a fallback mechanism for when hardware
queues are unavailable.

The HPRE driver supports the DH algorithm only for prime numbers up to
4K bits; primes larger than 4K are likewise handled via the fallback
mechanism.

Fixes: 90274769cf79 ("crypto: hisilicon/hpre - add 'CURVE25519' algorithm")
Signed-off-by: Weili Qian <qianweili@huawei.com>
Signed-off-by: JiangShui Yang <yangjiangshui@h-partners.com>
---
 drivers/crypto/hisilicon/hpre/hpre_crypto.c | 312 +++++++++++++++++---
 drivers/crypto/hisilicon/qm.c               |   4 +-
 2 files changed, 269 insertions(+), 47 deletions(-)

diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 96b89e78db2f..4d3efc74b8f3 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -94,6 +94,7 @@ struct hpre_dh_ctx {
 
 	char *g; /* m */
 	dma_addr_t dma_g;
+	struct crypto_kpp *soft_tfm;
 };
 
 struct hpre_ecdh_ctx {
@@ -104,6 +105,7 @@ struct hpre_ecdh_ctx {
 	/* low address: x->y */
 	unsigned char *g;
 	dma_addr_t dma_g;
+	struct crypto_kpp *soft_tfm;
 };
 
 struct hpre_curve25519_ctx {
@@ -114,6 +116,7 @@
 	/* gx coordinate */
 	unsigned char *g;
 	dma_addr_t dma_g;
+	struct crypto_kpp *soft_tfm;
 };
 
 struct hpre_ctx {
@@ -135,6 +138,7 @@
 	unsigned int curve_id;
 	/* for high performance core */
 	u8 enable_hpcore;
+	bool fallback;
 };
 
 struct hpre_asym_request {
@@ -225,7 +229,7 @@ static struct hisi_qp *hpre_get_qp_and_start(u8 type)
 
 	qp = hpre_create_qp(type);
 	if (!qp) {
-		pr_err("Can not create hpre qp!\n");
+		pr_info_ratelimited("Cannot create hpre qp, alloc soft tfm!\n");
 		return ERR_PTR(-ENODEV);
 	}
 
@@ -505,8 +509,10 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
 	int ret;
 
 	qp = hpre_get_qp_and_start(type);
-	if (IS_ERR(qp))
-		return PTR_ERR(qp);
+	if (IS_ERR(qp)) {
+		ctx->qp = NULL;
+		return -ENODEV;
+	}
 
 	qp->qp_ctx = ctx;
 	qp->req_cb = hpre_alg_cb;
@@ -638,6 +644,48 @@ static int hpre_dh_compute_value(struct kpp_request *req)
 	return ret;
 }
 
+static struct kpp_request *hpre_dh_prepare_fb_req(struct kpp_request *req)
+{
+	struct kpp_request *fb_req = kpp_request_ctx(req);
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+
+	kpp_request_set_tfm(fb_req, ctx->dh.soft_tfm);
+	kpp_request_set_callback(fb_req, req->base.flags, req->base.complete, req->base.data);
+	kpp_request_set_input(fb_req, req->src, req->src_len);
+	kpp_request_set_output(fb_req, req->dst, req->dst_len);
+
+	return fb_req;
+}
+
+static int hpre_dh_generate_public_key(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct kpp_request *fb_req;
+
+	if (ctx->fallback) {
+		fb_req = hpre_dh_prepare_fb_req(req);
+		return crypto_kpp_generate_public_key(fb_req);
+	}
+
+	return hpre_dh_compute_value(req);
+}
+
+static int hpre_dh_compute_shared_secret(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+	struct kpp_request *fb_req;
+
+	if (ctx->fallback) {
+		fb_req = hpre_dh_prepare_fb_req(req);
+		return crypto_kpp_compute_shared_secret(fb_req);
+	}
+
+	return hpre_dh_compute_value(req);
+}
+
 static int hpre_is_dh_params_length_valid(unsigned int key_sz)
 {
 #define _HPRE_DH_GRP1		768
@@ -664,13 +712,6 @@ static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
 	struct device *dev = ctx->dev;
 	unsigned int sz;
 
-	if (params->p_size > HPRE_DH_MAX_P_SZ)
-		return -EINVAL;
-
-	if (hpre_is_dh_params_length_valid(params->p_size <<
-					   HPRE_BITS_2_BYTES_SHIFT))
-		return -EINVAL;
-
 	sz = ctx->key_sz = params->p_size;
 	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
 					  &ctx->dh.dma_xa_p, GFP_KERNEL);
@@ -703,6 +744,9 @@ static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
 	struct device *dev = ctx->dev;
 	unsigned int sz = ctx->key_sz;
 
+	if (!ctx->qp)
+		return;
+
 	if (is_clear_all)
 		hisi_qm_stop_qp(ctx->qp);
 
@@ -731,6 +775,13 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
 	if (crypto_dh_decode_key(buf, len, &params) < 0)
 		return -EINVAL;
 
+	if (!ctx->qp)
+		goto set_soft_secret;
+
+	if (hpre_is_dh_params_length_valid(params.p_size <<
+					   HPRE_BITS_2_BYTES_SHIFT))
+		goto set_soft_secret;
+
 	/* Free old secret if any */
 	hpre_dh_clear_ctx(ctx, false);
 
@@ -741,27 +792,55 @@ static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
 	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size),
 	       params.key, params.key_size);
 
+	ctx->fallback = false;
 	return 0;
 
 err_clear_ctx:
 	hpre_dh_clear_ctx(ctx, false);
 	return ret;
+set_soft_secret:
+	ctx->fallback = true;
+	return crypto_kpp_set_secret(ctx->dh.soft_tfm, buf, len);
 }
 
 static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
 {
 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
 
+	if (ctx->fallback)
+		return crypto_kpp_maxsize(ctx->dh.soft_tfm);
+
 	return ctx->key_sz;
 }
 
 static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
 {
 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+	const char *alg = kpp_alg_name(tfm);
+	unsigned int reqsize;
+	int ret;
+
+	ctx->dh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->dh.soft_tfm)) {
+		pr_err("Failed to alloc dh tfm!\n");
+		return PTR_ERR(ctx->dh.soft_tfm);
+	}
 
-	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
+	crypto_kpp_set_flags(ctx->dh.soft_tfm, crypto_kpp_get_flags(tfm));
 
-	return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
+	reqsize = max(sizeof(struct hpre_asym_request) + hpre_align_pd(),
+		      sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->dh.soft_tfm));
+	kpp_set_reqsize(tfm, reqsize);
+
+	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
+	if (ret && ret != -ENODEV) {
+		crypto_free_kpp(ctx->dh.soft_tfm);
+		return ret;
+	} else if (ret == -ENODEV) {
+		ctx->fallback = true;
+	}
+
+	return 0;
 }
 
 static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
@@ -769,6 +848,7 @@ static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
 
 	hpre_dh_clear_ctx(ctx, true);
+	crypto_free_kpp(ctx->dh.soft_tfm);
 }
 
 static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
@@ -808,9 +888,8 @@ static int hpre_rsa_enc(struct akcipher_request *req)
 	struct hpre_sqe *msg = &hpre_req->req;
 	int ret;
 
-	/* For 512 and 1536 bits key size, use soft tfm instead */
-	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
-	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
+	/* For unsupported key sizes or unavailable devices, use soft tfm instead */
+	if (ctx->fallback) {
 		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
 		ret = crypto_akcipher_encrypt(req);
 		akcipher_request_set_tfm(req, tfm);
@@ -856,9 +935,8 @@ static int hpre_rsa_dec(struct akcipher_request *req)
 	struct hpre_sqe *msg = &hpre_req->req;
 	int ret;
 
-	/* For 512 and 1536 bits key size, use soft tfm instead */
-	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
-	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
+	/* For unsupported key sizes or unavailable devices, use soft tfm instead */
+	if (ctx->fallback) {
 		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
 		ret = crypto_akcipher_decrypt(req);
 		akcipher_request_set_tfm(req, tfm);
@@ -912,8 +990,10 @@ static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
 	ctx->key_sz = vlen;
 
 	/* if invalid key size provided, we use software tfm */
-	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
+	if (!hpre_rsa_key_size_is_support(ctx->key_sz)) {
+		ctx->fallback = true;
 		return 0;
+	}
 
 	ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
 					     &ctx->rsa.dma_pubkey,
@@ -1048,6 +1128,9 @@ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
 	unsigned int half_key_sz = ctx->key_sz >> 1;
 	struct device *dev = ctx->dev;
 
+	if (!ctx->qp)
+		return;
+
 	if (is_clear_all)
 		hisi_qm_stop_qp(ctx->qp);
 
@@ -1130,6 +1213,7 @@ static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
 		goto free;
 	}
 
+	ctx->fallback = false;
 	return 0;
 
 free:
@@ -1147,6 +1231,9 @@ static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
 	if (ret)
 		return ret;
 
+	if (!ctx->qp)
+		return 0;
+
 	return hpre_rsa_setkey(ctx, key, keylen, false);
 }
 
@@ -1160,6 +1247,9 @@ static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
 	if (ret)
 		return ret;
 
+	if (!ctx->qp)
+		return 0;
+
 	return hpre_rsa_setkey(ctx, key, keylen, true);
 }
 
@@ -1167,9 +1257,8 @@ static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
 {
 	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
 
-	/* For 512 and 1536 bits key size, use soft tfm instead */
-	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
-	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
+	/* For unsupported key sizes or unavailable devices, use soft tfm instead */
+	if (ctx->fallback)
 		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
 
 	return ctx->key_sz;
@@ -1190,10 +1279,14 @@ static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
 			     hpre_align_pd());
 
 	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
-	if (ret)
+	if (ret && ret != -ENODEV) {
 		crypto_free_akcipher(ctx->rsa.soft_tfm);
+		return ret;
+	} else if (ret == -ENODEV) {
+		ctx->fallback = true;
+	}
 
-	return ret;
+	return 0;
 }
 
 static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
@@ -1407,6 +1500,9 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
 	struct ecdh params;
 	int ret;
 
+	if (ctx->fallback)
+		return crypto_kpp_set_secret(ctx->ecdh.soft_tfm, buf, len);
+
 	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
 		dev_err(dev, "failed to decode ecdh key!\n");
 		return -EINVAL;
@@ -1637,23 +1733,82 @@ static int hpre_ecdh_compute_value(struct kpp_request *req)
 	return ret;
 }
 
+static int hpre_ecdh_generate_public_key(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+	int ret;
+
+	if (ctx->fallback) {
+		kpp_request_set_tfm(req, ctx->ecdh.soft_tfm);
+		ret = crypto_kpp_generate_public_key(req);
+		kpp_request_set_tfm(req, tfm);
+		return ret;
+	}
+
+	return hpre_ecdh_compute_value(req);
+}
+
+static int hpre_ecdh_compute_shared_secret(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+	int ret;
+
+	if (ctx->fallback) {
+		kpp_request_set_tfm(req, ctx->ecdh.soft_tfm);
+		ret = crypto_kpp_compute_shared_secret(req);
+		kpp_request_set_tfm(req, tfm);
+		return ret;
+	}
+
+	return hpre_ecdh_compute_value(req);
+}
+
 static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
 {
 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
 
+	if (ctx->fallback)
+		return crypto_kpp_maxsize(ctx->ecdh.soft_tfm);
+
 	/* max size is the pub_key_size, include x and y */
 	return ctx->key_sz << 1;
 }
 
+static int hpre_ecdh_init_tfm(struct crypto_kpp *tfm)
+{
+	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+	const char *alg = kpp_alg_name(tfm);
+	int ret;
+
+	ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+	if (!ret) {
+		kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
+		return 0;
+	} else if (ret != -ENODEV) {
+		return ret;
+	}
+
+	ctx->ecdh.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->ecdh.soft_tfm)) {
+		pr_err("Failed to alloc %s tfm!\n", alg);
+		return PTR_ERR(ctx->ecdh.soft_tfm);
+	}
+
+	crypto_kpp_set_flags(ctx->ecdh.soft_tfm, crypto_kpp_get_flags(tfm));
+	ctx->fallback = true;
+
+	return 0;
+}
+
 static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
 {
 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
 
 	ctx->curve_id = ECC_CURVE_NIST_P192;
 
-	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
-
-	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+	return hpre_ecdh_init_tfm(tfm);
 }
 
 static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
@@ -1663,9 +1818,7 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
 	ctx->curve_id = ECC_CURVE_NIST_P256;
 	ctx->enable_hpcore = 1;
 
-	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
-
-	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+	return hpre_ecdh_init_tfm(tfm);
 }
 
 static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
@@ -1674,15 +1827,18 @@ static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
 
 	ctx->curve_id = ECC_CURVE_NIST_P384;
 
-	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
-
-	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+	return hpre_ecdh_init_tfm(tfm);
 }
 
 static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
 {
 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
 
+	if (ctx->fallback) {
+		crypto_free_kpp(ctx->ecdh.soft_tfm);
+		return;
+	}
+
 	hpre_ecc_clear_ctx(ctx, true, true);
 }
 
@@ -1748,6 +1904,9 @@ static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
 	struct device *dev = ctx->dev;
 	int ret = -EINVAL;
 
+	if (ctx->fallback)
+		return crypto_kpp_set_secret(ctx->curve25519.soft_tfm, buf, len);
+
 	if (len != CURVE25519_KEY_SIZE ||
 	    !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
 		dev_err(dev, "key is null or key len is not 32bytes!\n");
@@ -1993,26 +2152,83 @@ static int hpre_curve25519_compute_value(struct kpp_request *req)
 	return ret;
 }
 
+static int hpre_curve25519_generate_public_key(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+	int ret;
+
+	if (ctx->fallback) {
+		kpp_request_set_tfm(req, ctx->curve25519.soft_tfm);
+		ret = crypto_kpp_generate_public_key(req);
+		kpp_request_set_tfm(req, tfm);
+		return ret;
+	}
+
+	return hpre_curve25519_compute_value(req);
+}
+
+static int hpre_curve25519_compute_shared_secret(struct kpp_request *req)
+{
+	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+	int ret;
+
+	if (ctx->fallback) {
+		kpp_request_set_tfm(req, ctx->curve25519.soft_tfm);
+		ret = crypto_kpp_compute_shared_secret(req);
+		kpp_request_set_tfm(req, tfm);
+		return ret;
+	}
+
+	return hpre_curve25519_compute_value(req);
+}
+
 static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
 {
 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
 
+	if (ctx->fallback)
+		return crypto_kpp_maxsize(ctx->curve25519.soft_tfm);
+
 	return ctx->key_sz;
 }
 
 static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
 {
 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+	const char *alg = kpp_alg_name(tfm);
+	int ret;
+
+	ret = hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+	if (!ret) {
+		kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
+		return 0;
+	} else if (ret != -ENODEV) {
+		return ret;
+	}
+
+	ctx->curve25519.soft_tfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->curve25519.soft_tfm)) {
+		pr_err("Failed to alloc curve25519 tfm!\n");
+		return PTR_ERR(ctx->curve25519.soft_tfm);
+	}
 
-	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
+	crypto_kpp_set_flags(ctx->curve25519.soft_tfm, crypto_kpp_get_flags(tfm));
+	ctx->fallback = true;
 
-	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
+	return 0;
 }
 
 static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
 {
 	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
 
+	if (ctx->fallback) {
+		crypto_free_kpp(ctx->curve25519.soft_tfm);
+		return;
+	}
+
 	hpre_ecc_clear_ctx(ctx, true, false);
 }
 
@@ -2032,13 +2248,14 @@ static struct akcipher_alg rsa = {
 		.cra_name = "rsa",
 		.cra_driver_name = "hpre-rsa",
 		.cra_module = THIS_MODULE,
+		.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
 	},
 };
 
 static struct kpp_alg dh = {
 	.set_secret = hpre_dh_set_secret,
-	.generate_public_key = hpre_dh_compute_value,
-	.compute_shared_secret = hpre_dh_compute_value,
+	.generate_public_key = hpre_dh_generate_public_key,
+	.compute_shared_secret = hpre_dh_compute_shared_secret,
 	.max_size = hpre_dh_max_size,
 	.init = hpre_dh_init_tfm,
 	.exit = hpre_dh_exit_tfm,
@@ -2048,14 +2265,15 @@ static struct kpp_alg dh = {
 		.cra_name = "dh",
 		.cra_driver_name = "hpre-dh",
 		.cra_module = THIS_MODULE,
+		.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
 	},
 };
 
 static struct kpp_alg ecdh_curves[] = {
 	{
 		.set_secret = hpre_ecdh_set_secret,
-		.generate_public_key = hpre_ecdh_compute_value,
-		.compute_shared_secret = hpre_ecdh_compute_value,
+		.generate_public_key = hpre_ecdh_generate_public_key,
+		.compute_shared_secret = hpre_ecdh_compute_shared_secret,
 		.max_size = hpre_ecdh_max_size,
 		.init = hpre_ecdh_nist_p192_init_tfm,
 		.exit = hpre_ecdh_exit_tfm,
@@ -2065,11 +2283,12 @@ static struct kpp_alg ecdh_curves[] = {
 			.cra_name = "ecdh-nist-p192",
 			.cra_driver_name = "hpre-ecdh-nist-p192",
 			.cra_module = THIS_MODULE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
 		},
 	}, {
 		.set_secret = hpre_ecdh_set_secret,
-		.generate_public_key = hpre_ecdh_compute_value,
-		.compute_shared_secret = hpre_ecdh_compute_value,
+		.generate_public_key = hpre_ecdh_generate_public_key,
+		.compute_shared_secret = hpre_ecdh_compute_shared_secret,
 		.max_size = hpre_ecdh_max_size,
 		.init = hpre_ecdh_nist_p256_init_tfm,
 		.exit = hpre_ecdh_exit_tfm,
@@ -2079,11 +2298,12 @@ static struct kpp_alg ecdh_curves[] = {
 			.cra_name = "ecdh-nist-p256",
 			.cra_driver_name = "hpre-ecdh-nist-p256",
 			.cra_module = THIS_MODULE,
+			.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
 		},
 	}, {
 		.set_secret = hpre_ecdh_set_secret,
-		.generate_public_key = hpre_ecdh_compute_value,
-		.compute_shared_secret = hpre_ecdh_compute_value,
+		.generate_public_key = hpre_ecdh_generate_public_key,
+		.compute_shared_secret = hpre_ecdh_compute_shared_secret,
 		.max_size = hpre_ecdh_max_size,
 		.init = hpre_ecdh_nist_p384_init_tfm,
 		.exit = hpre_ecdh_exit_tfm,
@@ -2093,14 +2313,15 @@
.cra_name = "ecdh-nist-p384", .cra_driver_name = "hpre-ecdh-nist-p384", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, } }; static struct kpp_alg curve25519_alg = { .set_secret = hpre_curve25519_set_secret, - .generate_public_key = hpre_curve25519_compute_value, - .compute_shared_secret = hpre_curve25519_compute_value, + .generate_public_key = hpre_curve25519_generate_public_key, + .compute_shared_secret = hpre_curve25519_compute_shared_secret, .max_size = hpre_curve25519_max_size, .init = hpre_curve25519_init_tfm, .exit = hpre_curve25519_exit_tfm, @@ -2110,6 +2331,7 @@ static struct kpp_alg curve25519_alg = { .cra_name = "curve25519", .cra_driver_name = "hpre-curve25519", .cra_module = THIS_MODULE, + .cra_flags = CRYPTO_ALG_NEED_FALLBACK, }, }; diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index d7a7826cb016..06d8e7aa048b 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -3748,8 +3748,8 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, mutex_unlock(&qm_list->lock); if (ret) - pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", - node, alg_type, qp_num); + pr_info_ratelimited("Too busy to create qps, node[%d], alg[%u], qp[%d]!\n", + node, alg_type, qp_num); err: free_list(&head); -- 2.33.0