Date: 17 Apr '20
From: Joe Perches <joe(a)perches.com>
[ Upstream commit 20faba848752901de23a4d45a1174d64d2069dde ]
Arguments are supposed to be ordered high then low.
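As a standalone illustration (not part of this patch), the following sketch
expands GENMASK_ULL() by hand, using the definition from include/linux/bits.h
with BITS_PER_LONG_LONG taken as 64; it shows why the reversed arguments made
the check a no-op:

#include <stdio.h>

/* Same shape as the kernel's GENMASK_ULL(); 64 stands in for
 * BITS_PER_LONG_LONG in this user-space sketch. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) - (1ULL << (l)) + 1) & (~0ULL >> (64 - 1 - (h))))

int main(void)
{
	/* Correct order: high bit, then low bit -> the low 16 bits are set. */
	printf("GENMASK_ULL(15, 0) = %#llx\n", GENMASK_ULL(15, 0)); /* 0xffff */

	/* Reversed order, as in the old check: the mask collapses to 0, so
	 * WARN_ON_ONCE(col->target_address & mask) could never trigger. */
	printf("GENMASK_ULL(0, 15) = %#llx\n", GENMASK_ULL(0, 15)); /* 0x0 */

	return 0;
}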
Signed-off-by: Joe Perches <joe(a)perches.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Acked-by: Marc Zyngier <marc.zyngier(a)arm.com>
Link: https://lkml.kernel.org/r/ab5deb4fc3cd604cb620054770b7d00016d736bc.15627348…
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/irqchip/irq-gic-v3-its.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 4a9c14f..860f3ef 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -215,7 +215,7 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
static struct its_collection *valid_col(struct its_collection *col)
{
- if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
+ if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
return NULL;
return col;
--
1.8.3
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
1. Delete sec_usr_if.h, move the SEC hardware structure definitions into
sec_crypto.h, and normalize the two structure types.
2. In sec_main.c, remove fusion_limit/fusion_time, because this part of the
logic is no longer used. Also simplify the debugfs logic by not checking
some return codes, since failures there do not affect driver loading. The
probe flow is optimized as well: add sec_iommu_used_check, rework
sec_probe_init, implement sec_qm_pre_init, and so on.
3. In sec.h, define the sec_ctx structure, which holds the queue, cipher,
request and related context.
4. In sec_crypto.c, encapsulate independent interfaces such as init/uninit,
map/unmap, callback, resource alloc/free, encrypt/decrypt, hardware
descriptor filling and key setting (see the request-flow sketch below).
This removes the fusion logic and makes it easier to add new algorithms.
We also drop DES support because of its weak keys.
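For readers of this patch, here is a simplified, illustrative sketch (not
driver code) of how a request is meant to flow through the new sec_req_op
callbacks added below; locking, pbuf handling and error details are omitted,
and the helper name is hypothetical:

/* Hypothetical helper showing the intended call order; the real driver
 * splits this between sec_request_transfer() and the send path. */
static int sec_req_flow_sketch(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	ret = ctx->req_op->buf_map(ctx, req);	/* DMA-map src/dst SGLs */
	if (ret)
		return ret;

	ctx->req_op->do_transfer(ctx, req);	/* copy the IV into the request buffer */

	ret = ctx->req_op->bd_fill(ctx, req);	/* build the type2 BD (hardware SQE) */
	if (ret)
		goto unmap;

	ret = ctx->req_op->bd_send(ctx, req);	/* enqueue; returns -EINPROGRESS or -EBUSY */
	if (ret != -EBUSY && ret != -EINPROGRESS)
		goto unmap;

	return ret;	/* the completion path ends in ctx->req_op->callback() */

unmap:
	ctx->req_op->buf_unmap(ctx, req);
	return ret;
}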
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Ye Kai <yekai13(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/qm.c | 3 +-
drivers/crypto/hisilicon/sec2/sec.h | 166 ++-
drivers/crypto/hisilicon/sec2/sec_crypto.c | 1770 +++++++++++-----------------
drivers/crypto/hisilicon/sec2/sec_crypto.h | 237 +++-
drivers/crypto/hisilicon/sec2/sec_main.c | 541 ++++-----
drivers/crypto/hisilicon/sec2/sec_usr_if.h | 179 ---
6 files changed, 1246 insertions(+), 1650 deletions(-)
delete mode 100644 drivers/crypto/hisilicon/sec2/sec_usr_if.h
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index d3429e7..8b49902 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1389,6 +1389,7 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
+
dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
kfree(sqc);
@@ -1598,7 +1599,7 @@ static int hisi_qm_stop_qp_nolock(struct hisi_qp *qp)
else
flush_work(&qp->qm->work);
- /* wait for increase used count in qp send and last poll qp finish */
+ /* wait for the used count to increase in qp send and the last qp poll to finish */
udelay(WAIT_PERIOD);
if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
qp_stop_fail_cb(qp);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index f85dd06..e3b581a 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -1,19 +1,124 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#ifndef HISI_SEC_H
-#define HISI_SEC_H
+#ifndef __HISI_SEC_V2_H
+#define __HISI_SEC_V2_H
#include <linux/list.h>
+
#include "../qm.h"
-#include "sec_usr_if.h"
+#include "sec_crypto.h"
+
+/* Algorithm resource per hardware SEC queue */
+struct sec_alg_res {
+ u8 *pbuf;
+ dma_addr_t pbuf_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
+};
+
+/* Cipher request of SEC private */
+struct sec_cipher_req {
+ struct hisi_acc_hw_sgl *c_in;
+ dma_addr_t c_in_dma;
+ struct hisi_acc_hw_sgl *c_out;
+ dma_addr_t c_out_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+ struct skcipher_request *sk_req;
+ u32 c_len;
+ bool encrypt;
+};
+
+/* SEC request of Crypto */
+struct sec_req {
+ struct sec_sqe sec_sqe;
+ struct sec_ctx *ctx;
+ struct sec_qp_ctx *qp_ctx;
+
+ struct sec_cipher_req c_req;
+
+ int err_type;
+ int req_id;
+
+ /* Status of the SEC request */
+ bool fake_busy;
+};
+
+/**
+ * struct sec_req_op - Operations for SEC request
+ * @buf_map: DMA map the SGL buffers of the request
+ * @buf_unmap: DMA unmap the SGL buffers of the request
+ * @bd_fill: Fill the SEC queue BD
+ * @bd_send: Send the SEC BD into the hardware queue
+ * @callback: Call back for the request
+ * @process: Main processing logic of Skcipher
+ */
+struct sec_req_op {
+ int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
+ int (*process)(struct sec_ctx *ctx, struct sec_req *req);
+};
+
+/* SEC cipher context which cipher's relatives */
+struct sec_cipher_ctx {
+ u8 *c_key;
+ dma_addr_t c_key_dma;
+ sector_t iv_offset;
+ u32 c_gran_size;
+ u32 ivsize;
+ u8 c_mode;
+ u8 c_alg;
+ u8 c_key_len;
+};
-#undef pr_fmt
-#define pr_fmt(fmt) "hisi_sec: " fmt
+/* SEC queue context which defines queue's relatives */
+struct sec_qp_ctx {
+ struct hisi_qp *qp;
+ struct sec_req *req_list[QM_Q_DEPTH];
+ struct idr req_idr;
+ struct sec_alg_res res[QM_Q_DEPTH];
+ struct sec_ctx *ctx;
+ struct mutex req_lock;
+ struct hisi_acc_sgl_pool *c_in_pool;
+ struct hisi_acc_sgl_pool *c_out_pool;
+ atomic_t pending_reqs;
+};
+enum sec_alg_type {
+ SEC_SKCIPHER,
+ SEC_AEAD
+};
+
+/* SEC Crypto TFM context which defines queue and cipher .etc relatives */
+struct sec_ctx {
+ struct sec_qp_ctx *qp_ctx;
+ struct sec_dev *sec;
+ const struct sec_req_op *req_op;
+ struct hisi_qp **qps;
+
+ /* Half queues for encipher, and half for decipher */
+ u32 hlf_q_num;
+
+ /* Threshold for fake busy, trigger to return -EBUSY to user */
+ u32 fake_req_limit;
+
+ /* Current cyclic index to select a queue for encipher */
+ atomic_t enc_qcyclic;
+
+ /* Current cyclic index to select a queue for decipher */
+ atomic_t dec_qcyclic;
-#define FUSION_LIMIT_DEF 1
-#define FUSION_LIMIT_MAX 64
-#define FUSION_TMOUT_NSEC_DEF (400 * 1000)
+ enum sec_alg_type alg_type;
+ bool pbuf_supported;
+ bool use_pbuf;
+ struct sec_cipher_ctx c_ctx;
+};
enum sec_endian {
SEC_LE = 0,
@@ -21,32 +126,37 @@ enum sec_endian {
SEC_64BE
};
-struct hisi_sec_ctrl;
+enum sec_debug_file_index {
+ SEC_CURRENT_QM,
+ SEC_CLEAR_ENABLE,
+ SEC_DEBUG_FILE_NUM,
+};
+
+struct sec_debug_file {
+ enum sec_debug_file_index index;
+ spinlock_t lock;
+ struct hisi_qm *qm;
+};
-struct hisi_sec_dfx {
- u64 send_cnt;
- u64 send_by_tmout;
- u64 send_by_full;
- u64 recv_cnt;
- u64 get_task_cnt;
- u64 put_task_cnt;
- u64 gran_task_cnt;
- u64 thread_cnt;
- u64 fake_busy_cnt;
- u64 busy_comp_cnt;
+struct sec_dfx {
+ atomic64_t send_cnt;
+ atomic64_t recv_cnt;
};
-struct hisi_sec {
+struct sec_debug {
+ struct sec_dfx dfx;
+ struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
+};
+
+struct sec_dev {
struct hisi_qm qm;
- struct hisi_sec_dfx sec_dfx;
- struct hisi_sec_ctrl *ctrl;
- int ctx_q_num;
- int fusion_limit;
- int fusion_tmout_nsec;
+ struct sec_debug debug;
+ u32 ctx_q_num;
+ bool iommu_used;
};
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
-struct hisi_sec *find_sec_device(int node);
-
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 0643955..52448d0 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -1,384 +1,329 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#include <linux/crypto.h>
-#include <linux/hrtimer.h>
-#include <linux/dma-mapping.h>
-#include <linux/ktime.h>
-
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
#include "sec.h"
#include "sec_crypto.h"
-static atomic_t sec_active_devs;
-
-#define SEC_ASYNC
-
-#define SEC_INVLD_REQ_ID (-1)
-#define SEC_PRIORITY 4001
-#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
-#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
-#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
-#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
-
-#define BUF_MAP_PER_SGL 64
-#define SEC_FUSION_BD
-
-enum C_ALG {
- C_ALG_DES = 0x0,
- C_ALG_3DES = 0x1,
- C_ALG_AES = 0x2,
- C_ALG_SM4 = 0x3,
-};
-
-enum C_MODE {
- C_MODE_ECB = 0x0,
- C_MODE_CBC = 0x1,
- C_MODE_CTR = 0x4,
- C_MODE_CCM = 0x5,
- C_MODE_GCM = 0x6,
- C_MODE_XTS = 0x7,
- C_MODE_CBC_CS = 0x9,
-};
-
-enum CKEY_LEN {
- CKEY_LEN_128_BIT = 0x0,
- CKEY_LEN_192_BIT = 0x1,
- CKEY_LEN_256_BIT = 0x2,
- CKEY_LEN_DES = 0x1,
- CKEY_LEN_3DES_3KEY = 0x1,
- CKEY_LEN_3DES_2KEY = 0x3,
-};
-
-enum SEC_BD_TYPE {
- BD_TYPE1 = 0x1,
- BD_TYPE2 = 0x2,
-};
-
-enum SEC_CIPHER_TYPE {
- SEC_CIPHER_ENC = 0x1,
- SEC_CIPHER_DEC = 0x2,
-};
-
-enum SEC_ADDR_TYPE {
- PBUF = 0x0,
- SGL = 0x1,
- PRP = 0x2,
-};
-
-enum SEC_CI_GEN {
- CI_GEN_BY_ADDR = 0x0,
- CI_GEN_BY_LBA = 0X3,
-};
-
-enum SEC_SCENE {
- SCENE_IPSEC = 0x0,
- SCENE_STORAGE = 0x5,
-};
-
-enum {
- SEC_NO_FUSION = 0x0,
- SEC_IV_FUSION = 0x1,
- SEC_FUSION_BUTT
-};
-
-enum SEC_REQ_OPS_TYPE {
- SEC_OPS_SKCIPHER_ALG = 0x0,
- SEC_OPS_MULTI_IV = 0x1,
- SEC_OPS_BUTT
-};
-
-struct cipher_res {
- struct skcipher_request_ctx **sk_reqs;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
- struct scatterlist *src;
- struct scatterlist *dst;
-};
-
-struct hisi_sec_cipher_req {
- struct hisi_acc_hw_sgl *c_in;
- dma_addr_t c_in_dma;
- struct hisi_acc_hw_sgl *c_out;
- dma_addr_t c_out_dma;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
- struct skcipher_request *sk_req;
- struct scatterlist *src;
- struct scatterlist *dst;
- u32 c_len;
- u32 gran_num;
- u64 lba;
- bool encrypt;
-};
-
-struct hisi_sec_ctx;
-struct hisi_sec_qp_ctx;
-
-struct hisi_sec_req {
- struct hisi_sec_sqe sec_sqe;
- struct hisi_sec_ctx *ctx;
- struct hisi_sec_qp_ctx *qp_ctx;
- void **priv;
- struct hisi_sec_cipher_req c_req;
- ktime_t st_time;
- int err_type;
- int req_id;
- int req_cnt;
- int fusion_num;
- int fake_busy;
-};
+#define SEC_PRIORITY 4001
+#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
+#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
+#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
+#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
+
+/* SEC sqe(bd) bit operational relative MACRO */
+#define SEC_DE_OFFSET 1
+#define SEC_CI_GEN_OFFSET 6
+#define SEC_CIPHER_OFFSET 4
+#define SEC_SCENE_OFFSET 3
+#define SEC_DST_SGL_OFFSET 2
+#define SEC_SRC_SGL_OFFSET 7
+#define SEC_CKEY_OFFSET 9
+#define SEC_CMODE_OFFSET 12
+#define SEC_AKEY_OFFSET 5
+#define SEC_AEAD_ALG_OFFSET 11
+#define SEC_AUTH_OFFSET 6
+
+#define SEC_FLAG_OFFSET 7
+#define SEC_FLAG_MASK 0x0780
+#define SEC_TYPE_MASK 0x0F
+#define SEC_DONE_MASK 0x0001
+
+#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_SGL_SGE_NR 128
+#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+#define SEC_CIPHER_AUTH 0xfe
+#define SEC_AUTH_CIPHER 0x1
+#define SEC_MAX_MAC_LEN 64
+#define SEC_MAX_AAD_LEN 65535
+#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+
+#define SEC_PBUF_SZ 512
+#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
+#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
+#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
+ SEC_MAX_MAC_LEN * 2)
+#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
+#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
+ SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
+ SEC_PBUF_LEFT_SZ)
+
+#define SEC_SQE_LEN_RATE 4
+#define SEC_SQE_CFLAG 2
+#define SEC_SQE_AEAD_FLAG 3
+#define SEC_SQE_DONE 0x1
-struct hisi_sec_req_op {
- int fusion_type;
- int (*get_res)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*queue_alloc)(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
- int (*queue_free)(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
- int (*buf_map)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*buf_unmap)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*do_transfer)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*bd_fill)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*bd_send)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*callback)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
-};
-
-struct hisi_sec_cipher_ctx {
- u8 *c_key;
- dma_addr_t c_key_dma;
- sector_t iv_offset;
- u32 c_gran_size;
- u8 c_mode;
- u8 c_alg;
- u8 c_key_len;
-};
-
-struct hisi_sec_qp_ctx {
- struct hisi_qp *qp;
- struct hisi_sec_req **req_list;
- struct hisi_sec_req *fusion_req;
- unsigned long *req_bitmap;
- void *priv_req_res;
- struct hisi_sec_ctx *ctx;
- struct mutex req_lock;
- atomic_t req_cnt;
- struct hisi_sec_sqe *sqe_list;
- struct hisi_acc_sgl_pool *c_in_pool;
- struct hisi_acc_sgl_pool *c_out_pool;
- int fusion_num;
- int fusion_limit;
-};
+static atomic_t sec_active_devs;
-struct hisi_sec_ctx {
- struct hisi_sec_qp_ctx *qp_ctx;
- struct hisi_sec *sec;
- struct device *dev;
- struct hisi_sec_req_op *req_op;
- struct hisi_qp **qps;
- struct hrtimer timer;
- struct work_struct work;
- atomic_t thread_cnt;
- int req_fake_limit;
- int req_limit;
- int q_num;
- int enc_q_num;
- atomic_t enc_qid;
- atomic_t dec_qid;
- struct hisi_sec_cipher_ctx c_ctx;
- int fusion_tmout_nsec;
- int fusion_limit;
- u64 enc_fusion_num;
- u64 dec_fusion_num;
- bool is_fusion;
-};
+/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
+static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ return atomic_inc_return(&ctx->enc_qcyclic) % ctx->hlf_q_num;
-#define DES_WEAK_KEY_NUM 4
-u64 des_weak_key[DES_WEAK_KEY_NUM] = {0x0101010101010101, 0xFEFEFEFEFEFEFEFE,
- 0xE0E0E0E0F1F1F1F1, 0x1F1F1F1F0E0E0E0E};
+ return atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
+ ctx->hlf_q_num;
+}
-static void hisi_sec_req_cb(struct hisi_qp *qp, void *);
+static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ atomic_dec(&ctx->enc_qcyclic);
+ else
+ atomic_dec(&ctx->dec_qcyclic);
+}
-static int hisi_sec_alloc_req_id(struct hisi_sec_req *req,
- struct hisi_sec_qp_ctx *qp_ctx)
+static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
- struct hisi_sec_ctx *ctx = req->ctx;
int req_id;
- req_id = find_first_zero_bit(qp_ctx->req_bitmap, ctx->req_limit);
- if (req_id >= ctx->req_limit || req_id < 0) {
- dev_err(ctx->dev, "no free req id\n");
- return -ENOBUFS;
+ mutex_lock(&qp_ctx->req_lock);
+
+ req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
+ 0, QM_Q_DEPTH, GFP_ATOMIC);
+ mutex_unlock(&qp_ctx->req_lock);
+ if (unlikely(req_id < 0)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+ return req_id;
}
- set_bit(req_id, qp_ctx->req_bitmap);
- qp_ctx->req_list[req_id] = req;
- req->req_id = req_id;
req->qp_ctx = qp_ctx;
-
- return 0;
+ qp_ctx->req_list[req_id] = req;
+ return req_id;
}
-static void hisi_sec_free_req_id(struct hisi_sec_qp_ctx *qp_ctx, int req_id)
+static void sec_free_req_id(struct sec_req *req)
{
- if (req_id < 0 || req_id >= qp_ctx->ctx->req_limit) {
- pr_err("invalid req_id[%d]\n", req_id);
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int req_id = req->req_id;
+
+ if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
return;
}
qp_ctx->req_list[req_id] = NULL;
+ req->qp_ctx = NULL;
mutex_lock(&qp_ctx->req_lock);
- clear_bit(req_id, qp_ctx->req_bitmap);
- atomic_dec(&qp_ctx->req_cnt);
+ idr_remove(&qp_ctx->req_idr, req_id);
mutex_unlock(&qp_ctx->req_lock);
}
-static int sec_request_transfer(struct hisi_sec_ctx *, struct hisi_sec_req *);
-static int sec_request_send(struct hisi_sec_ctx *, struct hisi_sec_req *);
-
-void qp_ctx_work_process(struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
- struct hisi_sec_req *req;
- struct hisi_sec_ctx *ctx;
- ktime_t cur_time = ktime_get();
- int ret;
-
- mutex_lock(&qp_ctx->req_lock);
-
- req = qp_ctx->fusion_req;
- if (req == NULL) {
- mutex_unlock(&qp_ctx->req_lock);
+ struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct sec_sqe *bd = resp;
+ struct sec_ctx *ctx;
+ struct sec_req *req;
+ u16 done, flag;
+ int err = 0;
+ u8 type;
+
+ type = bd->type_cipher_auth & SEC_TYPE_MASK;
+ if (unlikely(type != SEC_BD_TYPE2)) {
+ pr_err("err bd type [%d]\n", type);
return;
}
- ctx = req->ctx;
- if (ctx == NULL || req->fusion_num == qp_ctx->fusion_limit) {
- mutex_unlock(&qp_ctx->req_lock);
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ if (unlikely(!req)) {
+ atomic_inc(&qp->qp_status.used);
return;
}
- if (cur_time - qp_ctx->fusion_req->st_time < ctx->fusion_tmout_nsec) {
- mutex_unlock(&qp_ctx->req_lock);
- return;
+ req->err_type = bd->type2.error_type;
+ ctx = req->ctx;
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (unlikely(req->err_type || done != SEC_SQE_DONE ||
+ (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG))) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ err = -EIO;
}
- qp_ctx->fusion_req = NULL;
+ atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
+
+ ctx->req_op->buf_unmap(ctx, req);
+
+ ctx->req_op->callback(ctx, req, err);
+}
+
+static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ret;
+ mutex_lock(&qp_ctx->req_lock);
+ ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
mutex_unlock(&qp_ctx->req_lock);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- ret = sec_request_transfer(ctx, req);
- if (ret)
- goto err_free_req;
-
- ret = sec_request_send(ctx, req);
- __sync_add_and_fetch(&ctx->sec->sec_dfx.send_by_tmout, 1);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err(ctx->dev, "[%s][%d] ret[%d]\n", __func__,
- __LINE__, ret);
- goto err_unmap_req;
- }
+ if (unlikely(ret == -EBUSY))
+ return -ENOBUFS;
- return;
+ if (!ret) {
+ if (req->fake_busy)
+ ret = -EBUSY;
+ else
+ ret = -EINPROGRESS;
+ }
-err_unmap_req:
- ctx->req_op->buf_unmap(ctx, req);
-err_free_req:
- hisi_sec_free_req_id(qp_ctx, req->req_id);
- atomic_dec(&ctx->thread_cnt);
+ return ret;
}
-void ctx_work_process(struct work_struct *work)
+/* Get DMA memory resources */
+static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
- struct hisi_sec_ctx *ctx;
int i;
- ctx = container_of(work, struct hisi_sec_ctx, work);
- for (i = 0; i < ctx->q_num; i++)
- qp_ctx_work_process(&ctx->qp_ctx[i]);
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+
+ return 0;
}
-static enum hrtimer_restart hrtimer_handler(struct hrtimer *timer)
+static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
- struct hisi_sec_ctx *ctx;
- ktime_t tim;
+ if (res->c_ivin)
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin, res->c_ivin_dma);
+}
- ctx = container_of(timer, struct hisi_sec_ctx, timer);
- tim = ktime_set(0, ctx->fusion_tmout_nsec);
+static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->pbuf)
+ dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ res->pbuf, res->pbuf_dma);
+}
- if (ctx->sec->qm.wq)
- queue_work(ctx->sec->qm.wq, &ctx->work);
- else
- schedule_work(&ctx->work);
+/*
+ * To improve performance, pbuffer is used for
+ * small packets (< 576 bytes) when the IOMMU is in use.
+ */
+static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int pbuf_page_offset;
+ int i, j, k;
- hrtimer_forward(timer, timer->base->get_time(), tim);
+ res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ, &res->pbuf_dma,
+ GFP_KERNEL);
+ if (!res->pbuf)
+ return -ENOMEM;
- return HRTIMER_RESTART;
+ /*
+ * SEC_PBUF_PKG contains data pbuf, iv and
+ * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
+ * Every PAGE contains six SEC_PBUF_PKG
+ * The sec_qp_ctx contains QM_Q_DEPTH numbers of SEC_PBUF_PKG
+ * So we need SEC_PBUF_PAGE_NUM numbers of PAGE
+ * for the SEC_TOTAL_PBUF_SZ
+ */
+ for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+ pbuf_page_offset = PAGE_SIZE * i;
+ for (j = 0; j < SEC_PBUF_NUM; j++) {
+ k = i * SEC_PBUF_NUM + j;
+ if (k == QM_Q_DEPTH)
+ break;
+ res[k].pbuf = res->pbuf +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ res[k].pbuf_dma = res->pbuf_dma +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ }
+ }
+ return 0;
}
-static int hisi_sec_create_qp_ctx(struct hisi_sec_ctx *ctx,
- int qp_ctx_id, int req_type)
+static int sec_alg_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
- struct hisi_sec_qp_ctx *qp_ctx;
- struct device *dev = ctx->dev;
- struct hisi_qp *qp;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_alg_res *res = qp_ctx->res;
int ret;
+ ret = sec_alloc_civ_resource(dev, res);
+ if (ret)
+ return ret;
+
+ if (ctx->pbuf_supported) {
+ ret = sec_alloc_pbuf_resource(dev, res);
+ if (ret) {
+ dev_err(dev, "fail to alloc pbuf dma resource!\n");
+ goto alloc_fail;
+ }
+ }
+ return 0;
+alloc_fail:
+ sec_free_civ_resource(dev, res);
+
+ return ret;
+}
+
+static void sec_alg_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ sec_free_civ_resource(dev, qp_ctx->res);
+
+ if (ctx->pbuf_supported)
+ sec_free_pbuf_resource(dev, qp_ctx->res);
+}
+
+static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id, int alg_type)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_qp_ctx *qp_ctx;
+ struct hisi_qp *qp;
+ int ret = -ENOMEM;
+
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
qp = ctx->qps[qp_ctx_id];
- qp->req_type = req_type;
+ qp->req_type = 0;
qp->qp_ctx = qp_ctx;
-#ifdef SEC_ASYNC
- qp->req_cb = hisi_sec_req_cb;
-#endif
+ qp->req_cb = sec_req_cb;
qp_ctx->qp = qp;
- qp_ctx->fusion_num = 0;
- qp_ctx->fusion_req = NULL;
- qp_ctx->fusion_limit = ctx->fusion_limit;
qp_ctx->ctx = ctx;
mutex_init(&qp_ctx->req_lock);
- atomic_set(&qp_ctx->req_cnt, 0);
-
- qp_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(QM_Q_DEPTH), sizeof(long),
- GFP_ATOMIC);
- if (!qp_ctx->req_bitmap)
- return -ENOMEM;
-
- qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
- if (!qp_ctx->req_list) {
- ret = -ENOMEM;
- goto err_free_req_bitmap;
- }
-
- qp_ctx->sqe_list = kcalloc(ctx->fusion_limit,
- sizeof(struct hisi_sec_sqe), GFP_KERNEL);
- if (!qp_ctx->sqe_list) {
- ret = -ENOMEM;
- goto err_free_req_list;
- }
+ atomic_set(&qp_ctx->pending_reqs, 0);
+ idr_init(&qp_ctx->req_idr);
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- FUSION_LIMIT_MAX);
+ SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
- ret = PTR_ERR(qp_ctx->c_in_pool);
- goto err_free_sqe_list;
+ dev_err(dev, "fail to create sgl pool for input!\n");
+ goto err_destroy_idr;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- FUSION_LIMIT_MAX);
+ SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_out_pool)) {
- ret = PTR_ERR(qp_ctx->c_out_pool);
+ dev_err(dev, "fail to create sgl pool for output!\n");
goto err_free_c_in_pool;
}
- ret = ctx->req_op->queue_alloc(ctx, qp_ctx);
+ ret = sec_alg_resource_alloc(ctx, qp_ctx);
if (ret)
goto err_free_c_out_pool;
@@ -389,304 +334,153 @@ static int hisi_sec_create_qp_ctx(struct hisi_sec_ctx *ctx,
return 0;
err_queue_free:
- ctx->req_op->queue_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_free_sqe_list:
- kfree(qp_ctx->sqe_list);
-err_free_req_list:
- kfree(qp_ctx->req_list);
-err_free_req_bitmap:
- kfree(qp_ctx->req_bitmap);
+err_destroy_idr:
+ idr_destroy(&qp_ctx->req_idr);
return ret;
}
-static void hisi_sec_release_qp_ctx(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = ctx->dev;
+ struct device *dev = SEC_CTX_DEV(ctx);
hisi_qm_stop_qp(qp_ctx->qp);
- ctx->req_op->queue_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
+
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
- kfree(qp_ctx->req_bitmap);
- kfree(qp_ctx->req_list);
- kfree(qp_ctx->sqe_list);
-}
-
-static int __hisi_sec_ctx_init(struct hisi_sec_ctx *ctx, int qlen)
-{
- if (!ctx || qlen < 0)
- return -EINVAL;
-
- ctx->req_limit = qlen;
- ctx->req_fake_limit = qlen / 2;
- atomic_set(&ctx->thread_cnt, 0);
- atomic_set(&ctx->enc_qid, 0);
- atomic_set(&ctx->dec_qid, ctx->enc_q_num);
- if (ctx->fusion_limit > 1 && ctx->fusion_tmout_nsec > 0) {
- ktime_t tim = ktime_set(0, ctx->fusion_tmout_nsec);
-
- hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ctx->timer.function = hrtimer_handler;
- hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
- INIT_WORK(&ctx->work, ctx_work_process);
- }
-
- return 0;
-}
-
-static void hisi_sec_get_fusion_param(struct hisi_sec_ctx *ctx,
- struct hisi_sec *sec)
-{
- if (ctx->is_fusion) {
- ctx->fusion_tmout_nsec = sec->fusion_tmout_nsec;
- ctx->fusion_limit = sec->fusion_limit;
- } else {
- ctx->fusion_tmout_nsec = 0;
- ctx->fusion_limit = 1;
- }
+ idr_destroy(&qp_ctx->req_idr);
}
-static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
+static int sec_ctx_base_init(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx;
- struct hisi_sec *sec;
+ struct sec_dev *sec;
int i, ret;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct hisi_sec_req));
-
ctx->qps = sec_create_qps();
if (!ctx->qps) {
pr_err("Can not create sec qps!\n");
return -ENODEV;
}
- sec = container_of(ctx->qps[0]->qm, struct hisi_sec, qm);
+ sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
+ ctx->hlf_q_num = sec->ctx_q_num >> 1;
- ctx->dev = &sec->qm.pdev->dev;
-
- ctx->q_num = sec->ctx_q_num;
-
- ctx->enc_q_num = ctx->q_num / 2;
- ctx->qp_ctx = kcalloc(ctx->q_num, sizeof(struct hisi_sec_qp_ctx),
- GFP_KERNEL);
- if (!ctx->qp_ctx) {
- dev_err(ctx->dev, "failed to alloc qp_ctx");
+ if (ctx->sec->iommu_used)
+ ctx->pbuf_supported = true;
+ else
+ ctx->pbuf_supported = false;
+ ctx->use_pbuf = false;
+
+ /* Half of queue depth is taken as fake requests limit in the queue. */
+ ctx->fake_req_limit = QM_Q_DEPTH >> 1;
+ ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
+ GFP_KERNEL);
+ if (!ctx->qp_ctx)
return -ENOMEM;
- }
-
- hisi_sec_get_fusion_param(ctx, sec);
- for (i = 0; i < ctx->q_num; i++) {
- ret = hisi_sec_create_qp_ctx(ctx, i, 0);
+ for (i = 0; i < sec->ctx_q_num; i++) {
+ ret = sec_create_qp_ctx(ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
-
- c_ctx = &ctx->c_ctx;
- c_ctx->c_key = dma_alloc_coherent(ctx->dev,
- SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL);
-
- if (!ctx->c_ctx.c_key) {
- ret = -ENOMEM;
- goto err_sec_release_qp_ctx;
- }
-
- return __hisi_sec_ctx_init(ctx, QM_Q_DEPTH);
-
+ return 0;
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
- hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
+
return ret;
}
-static void hisi_sec_cipher_ctx_exit(struct crypto_skcipher *tfm)
+static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx;
- int i = 0;
-
- c_ctx = &ctx->c_ctx;
-
- if (ctx->fusion_limit > 1 && ctx->fusion_tmout_nsec > 0)
- hrtimer_cancel(&ctx->timer);
-
- if (c_ctx->c_key) {
- memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key,
- c_ctx->c_key_dma);
- c_ctx->c_key = NULL;
- }
+ int i;
- for (i = 0; i < ctx->q_num; i++)
- hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ for (i = 0; i < ctx->sec->ctx_q_num; i++)
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
- sec_destroy_qps(ctx->qps, ctx->q_num);
+ sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
kfree(ctx->qp_ctx);
}
-static int hisi_sec_skcipher_get_res(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_queue_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
-static int hisi_sec_skcipher_queue_free(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
-static int hisi_sec_skcipher_buf_map(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_buf_unmap(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_copy_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_base(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_storage(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_multi_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_bd_send_asyn(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_callback(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-
-struct hisi_sec_req_op sec_req_ops_tbl[] = {
- {
- .fusion_type = SEC_NO_FUSION,
- .get_res = hisi_sec_skcipher_get_res,
- .queue_alloc = hisi_sec_skcipher_queue_alloc,
- .queue_free = hisi_sec_skcipher_queue_free,
- .buf_map = hisi_sec_skcipher_buf_map,
- .buf_unmap = hisi_sec_skcipher_buf_unmap,
- .do_transfer = hisi_sec_skcipher_copy_iv,
- .bd_fill = hisi_sec_skcipher_bd_fill_base,
- .bd_send = hisi_sec_bd_send_asyn,
- .callback = hisi_sec_skcipher_callback,
- }, {
- .fusion_type = SEC_IV_FUSION,
- .get_res = hisi_sec_skcipher_get_res,
- .queue_alloc = hisi_sec_skcipher_queue_alloc,
- .queue_free = hisi_sec_skcipher_queue_free,
- .buf_map = hisi_sec_skcipher_buf_map,
- .buf_unmap = hisi_sec_skcipher_buf_unmap,
- .do_transfer = hisi_sec_skcipher_copy_iv,
- .bd_fill = hisi_sec_skcipher_bd_fill_multi_iv,
- .bd_send = hisi_sec_bd_send_asyn,
- .callback = hisi_sec_skcipher_callback,
- }
-};
-
-static int hisi_sec_cipher_ctx_init_alg(struct crypto_skcipher *tfm)
+static int sec_cipher_init(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- ctx->req_op = &sec_req_ops_tbl[SEC_OPS_SKCIPHER_ALG];
- ctx->is_fusion = ctx->req_op->fusion_type;
+ c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key)
+ return -ENOMEM;
- return hisi_sec_cipher_ctx_init(tfm);
+ return 0;
}
-static int hisi_sec_cipher_ctx_init_multi_iv(struct crypto_skcipher *tfm)
+static void sec_cipher_uninit(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- ctx->req_op = &sec_req_ops_tbl[SEC_OPS_MULTI_IV];
- ctx->is_fusion = ctx->req_op->fusion_type;
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- return hisi_sec_cipher_ctx_init(tfm);
+ memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
}
-static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp)
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
- struct hisi_sec_sqe *sec_sqe = (struct hisi_sec_sqe *)resp;
- struct hisi_sec_qp_ctx *qp_ctx = qp->qp_ctx;
- struct device *dev = &qp->qm->pdev->dev;
- struct hisi_sec_req *req;
- struct hisi_sec_dfx *dfx;
- u32 req_id;
-
- if (sec_sqe->type == 1) {
- req_id = sec_sqe->type1.tag;
- req = qp_ctx->req_list[req_id];
-
- req->err_type = sec_sqe->type1.error_type;
- if (req->err_type || sec_sqe->type1.done != 0x1 ||
- sec_sqe->type1.flag != 0x2) {
- dev_err_ratelimited(dev,
- "err_type[%d] done[%d] flag[%d]\n",
- req->err_type,
- sec_sqe->type1.done,
- sec_sqe->type1.flag);
- }
- } else if (sec_sqe->type == 2) {
- req_id = sec_sqe->type2.tag;
- req = qp_ctx->req_list[req_id];
-
- req->err_type = sec_sqe->type2.error_type;
- if (req->err_type || sec_sqe->type2.done != 0x1 ||
- sec_sqe->type2.flag != 0x2) {
- dev_err_ratelimited(dev,
- "err_type[%d] done[%d] flag[%d]\n",
- req->err_type,
- sec_sqe->type2.done,
- sec_sqe->type2.flag);
- }
- } else {
- dev_err_ratelimited(dev, "err bd type [%d]\n", sec_sqe->type);
- return;
- }
-
- dfx = &req->ctx->sec->sec_dfx;
-
- req->ctx->req_op->buf_unmap(req->ctx, req);
- req->ctx->req_op->callback(req->ctx, req);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- __sync_add_and_fetch(&dfx->recv_cnt, 1);
-}
+ ctx->alg_type = SEC_SKCIPHER;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+ return -EINVAL;
+ }
-static int sec_des_weak_key(const u64 *key, const u32 keylen)
-{
- int i;
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
- for (i = 0; i < DES_WEAK_KEY_NUM; i++)
- if (*key == des_weak_key[i])
- return 1;
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
return 0;
+err_cipher_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
}
-static int sec_skcipher_des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const u8 *key)
+static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
- if (keylen != DES_KEY_SIZE)
- return -EINVAL;
-
- if (sec_des_weak_key((const u64 *)key, keylen))
- return -EKEYREJECTED;
-
- c_ctx->c_key_len = CKEY_LEN_DES;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- return 0;
+ sec_cipher_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
}
-static int sec_skcipher_3des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const enum C_MODE c_mode)
+static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
{
switch (keylen) {
case SEC_DES3_2KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_3DES_2KEY;
+ c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
break;
case SEC_DES3_3KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_3DES_3KEY;
+ c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
break;
default:
return -EINVAL;
@@ -695,32 +489,35 @@ static int sec_skcipher_3des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
return 0;
}
-static int sec_skcipher_aes_sm4_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const enum C_MODE c_mode)
+static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
{
- if (c_mode == C_MODE_XTS) {
+ if (c_mode == SEC_CMODE_XTS) {
switch (keylen) {
case SEC_XTS_MIN_KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_128_BIT;
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
break;
case SEC_XTS_MAX_KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_256_BIT;
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
break;
default:
+ pr_err("hisi_sec2: xts mode key error!\n");
return -EINVAL;
}
} else {
switch (keylen) {
case AES_KEYSIZE_128:
- c_ctx->c_key_len = CKEY_LEN_128_BIT;
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
break;
case AES_KEYSIZE_192:
- c_ctx->c_key_len = CKEY_LEN_192_BIT;
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
break;
case AES_KEYSIZE_256:
- c_ctx->c_key_len = CKEY_LEN_256_BIT;
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
break;
default:
+ pr_err("hisi_sec2: aes key error!\n");
return -EINVAL;
}
}
@@ -729,38 +526,40 @@ static int sec_skcipher_aes_sm4_setkey(struct hisi_sec_cipher_ctx *c_ctx,
}
static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
- const u32 keylen, const enum C_ALG c_alg, const enum C_MODE c_mode)
+ const u32 keylen, const enum sec_calg c_alg,
+ const enum sec_cmode c_mode)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
int ret;
- if (c_mode == C_MODE_XTS) {
+ if (c_mode == SEC_CMODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
- if (ret)
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
return ret;
+ }
}
c_ctx->c_alg = c_alg;
c_ctx->c_mode = c_mode;
switch (c_alg) {
- case C_ALG_DES:
- ret = sec_skcipher_des_setkey(c_ctx, keylen, key);
- break;
- case C_ALG_3DES:
+ case SEC_CALG_3DES:
ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
break;
- case C_ALG_AES:
- case C_ALG_SM4:
+ case SEC_CALG_AES:
+ case SEC_CALG_SM4:
ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
break;
default:
return -EINVAL;
}
- if (ret)
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
return ret;
+ }
memcpy(c_ctx->c_key, key, keylen);
@@ -769,639 +568,423 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
- u32 keylen)\
+ u32 keylen) \
{ \
return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \
}
-GEN_SEC_SETKEY_FUNC(aes_ecb, C_ALG_AES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(aes_cbc, C_ALG_AES, C_MODE_CBC)
-GEN_SEC_SETKEY_FUNC(sm4_cbc, C_ALG_SM4, C_MODE_CBC)
-
-GEN_SEC_SETKEY_FUNC(des_ecb, C_ALG_DES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(des_cbc, C_ALG_DES, C_MODE_CBC)
-GEN_SEC_SETKEY_FUNC(3des_ecb, C_ALG_3DES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(3des_cbc, C_ALG_3DES, C_MODE_CBC)
-
-GEN_SEC_SETKEY_FUNC(aes_xts, C_ALG_AES, C_MODE_XTS)
-GEN_SEC_SETKEY_FUNC(sm4_xts, C_ALG_SM4, C_MODE_XTS)
+GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
-static int hisi_sec_get_async_ret(int ret, int req_cnt, int req_fake_limit)
-{
- if (ret == 0) {
- if (req_cnt >= req_fake_limit)
- ret = -EBUSY;
- else
- ret = -EINPROGRESS;
- } else {
- if (ret == -EBUSY)
- ret = -ENOBUFS;
- }
+GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
- return ret;
-}
+GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
+GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-static int hisi_sec_skcipher_get_res(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct cipher_res *c_res = (struct cipher_res *)qp_ctx->priv_req_res;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
int req_id = req->req_id;
- c_req->c_ivin = c_res[req_id].c_ivin;
- c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
- req->priv = (void **)c_res[req_id].sk_reqs;
- c_req->src = c_res[req_id].src;
- c_req->dst = c_res[req_id].dst;
-
- return 0;
-}
-
-static int hisi_sec_skcipher_queue_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
-{
- struct cipher_res *c_res;
- int req_num = ctx->fusion_limit;
- int alloc_num = QM_Q_DEPTH * ctx->fusion_limit;
- int buf_map_num = QM_Q_DEPTH * ctx->fusion_limit;
- struct device *dev = ctx->dev;
- int i, ret;
-
- c_res = kcalloc(QM_Q_DEPTH, sizeof(struct cipher_res), GFP_KERNEL);
- if (!c_res)
- return -ENOMEM;
-
- qp_ctx->priv_req_res = (void *)c_res;
-
- c_res[0].sk_reqs = kcalloc(alloc_num,
- sizeof(struct skcipher_request_ctx *), GFP_KERNEL);
- if (!c_res[0].sk_reqs) {
- ret = -ENOMEM;
- goto err_free_c_res;
- }
-
- c_res[0].c_ivin = dma_alloc_coherent(dev,
- SEC_IV_SIZE * alloc_num, &c_res[0].c_ivin_dma, GFP_KERNEL);
- if (!c_res[0].c_ivin) {
- ret = -ENOMEM;
- goto err_free_sk_reqs;
- }
+ copy_size = c_req->c_len;
- c_res[0].src = kcalloc(buf_map_num, sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!c_res[0].src) {
- ret = -ENOMEM;
- goto err_free_c_ivin;
+ pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
+ qp_ctx->res[req_id].pbuf, copy_size);
+ if (unlikely(pbuf_length != copy_size)) {
+ dev_err(dev, "copy src data to pbuf error!\n");
+ return -EINVAL;
}
- c_res[0].dst = kcalloc(buf_map_num, sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!c_res[0].dst) {
- ret = -ENOMEM;
- goto err_free_src;
+ c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
+ if (!c_req->c_in_dma) {
+ dev_err(dev, "fail to set pbuffer address!\n");
+ return -ENOMEM;
}
- for (i = 1; i < QM_Q_DEPTH; i++) {
- c_res[i].sk_reqs = c_res[0].sk_reqs + i * req_num;
- c_res[i].c_ivin = c_res[0].c_ivin
- + i * req_num * SEC_IV_SIZE;
- c_res[i].c_ivin_dma = c_res[0].c_ivin_dma
- + i * req_num * SEC_IV_SIZE;
- c_res[i].src = c_res[0].src + i * req_num;
- c_res[i].dst = c_res[0].dst + i * req_num;
- }
+ c_req->c_out_dma = c_req->c_in_dma;
return 0;
-
-err_free_src:
- kfree(c_res[0].src);
-err_free_c_ivin:
- dma_free_coherent(dev, SEC_IV_SIZE * alloc_num, c_res[0].c_ivin,
- c_res[0].c_ivin_dma);
-err_free_sk_reqs:
- kfree(c_res[0].sk_reqs);
-err_free_c_res:
- kfree(c_res);
-
- return ret;
}
-static int hisi_sec_skcipher_queue_free(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *dst)
{
- struct cipher_res *c_res = (struct cipher_res *)qp_ctx->priv_req_res;
- struct device *dev = ctx->dev;
- int alloc_num = QM_Q_DEPTH * ctx->fusion_limit;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
+ int req_id = req->req_id;
- kfree(c_res[0].dst);
- kfree(c_res[0].src);
- dma_free_coherent(dev, SEC_IV_SIZE * alloc_num, c_res[0].c_ivin,
- c_res[0].c_ivin_dma);
- kfree(c_res[0].sk_reqs);
- kfree(c_res);
+ copy_size = c_req->c_len;
- return 0;
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
+ qp_ctx->res[req_id].pbuf, copy_size);
+ if (unlikely(pbuf_length != copy_size))
+ dev_err(dev, "copy pbuf data to dst error!\n");
}
-static int hisi_sec_skcipher_buf_map(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct device *dev = ctx->dev;
- struct skcipher_request *sk_next;
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int src_nents, src_nents_sum, copyed_src_nents;
- int dst_nents, dst_nents_sum, copyed_dst_nents;
- int i, ret, buf_map_limit;
-
- src_nents_sum = 0;
- dst_nents_sum = 0;
- for (i = 0; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- if (sk_next == NULL) {
- dev_err(ctx->dev, "nullptr at [%d]\n", i);
- return -EFAULT;
- }
- src_nents_sum += sg_nents(sk_next->src);
- dst_nents_sum += sg_nents(sk_next->dst);
- if (sk_next->src == sk_next->dst && i > 0) {
- dev_err(ctx->dev, "err: src == dst\n");
- return -EFAULT;
- }
- }
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
- buf_map_limit = FUSION_LIMIT_MAX;
- if (src_nents_sum > buf_map_limit || dst_nents_sum > buf_map_limit) {
- dev_err(ctx->dev, "src[%d] or dst[%d] bigger than %d\n",
- src_nents_sum, dst_nents_sum, buf_map_limit);
- return -ENOBUFS;
- }
-
- copyed_src_nents = 0;
- copyed_dst_nents = 0;
- for (i = 0; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- src_nents = sg_nents(sk_next->src);
- dst_nents = sg_nents(sk_next->dst);
-
- if (i != req->fusion_num - 1) {
- sg_unmark_end(&sk_next->src[src_nents - 1]);
- sg_unmark_end(&sk_next->dst[dst_nents - 1]);
- }
-
- memcpy(c_req->src + copyed_src_nents, sk_next->src,
- src_nents * sizeof(struct scatterlist));
- memcpy(c_req->dst + copyed_dst_nents, sk_next->dst,
- dst_nents * sizeof(struct scatterlist));
+ if (ctx->use_pbuf)
+ return sec_cipher_pbuf_map(ctx, req, src);
- copyed_src_nents += src_nents;
- copyed_dst_nents += dst_nents;
- }
-
- c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, c_req->src,
- qp_ctx->c_in_pool, req->req_id, &c_req->c_in_dma);
+ c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
+ qp_ctx->c_in_pool,
+ req->req_id,
+ &c_req->c_in_dma);
- if (IS_ERR(c_req->c_in))
+ if (IS_ERR(c_req->c_in)) {
+ dev_err(dev, "fail to dma map input sgl buffers!\n");
return PTR_ERR(c_req->c_in);
+ }
- if (c_req->dst == c_req->src) {
+ if (dst == src) {
c_req->c_out = c_req->c_in;
c_req->c_out_dma = c_req->c_in_dma;
} else {
- c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, c_req->dst,
- qp_ctx->c_out_pool, req->req_id, &c_req->c_out_dma);
+ c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
+ qp_ctx->c_out_pool,
+ req->req_id,
+ &c_req->c_out_dma);
+
if (IS_ERR(c_req->c_out)) {
- ret = PTR_ERR(c_req->c_out);
- goto err_unmap_src;
+ dev_err(dev, "fail to dma map output sgl buffers!\n");
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+ return PTR_ERR(c_req->c_out);
}
}
return 0;
-
-err_unmap_src:
- hisi_acc_sg_buf_unmap(dev, c_req->src, c_req->c_in);
-
- return ret;
}
-static int hisi_sec_skcipher_buf_unmap(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct device *dev = ctx->dev;
-
- if (c_req->dst != c_req->src)
- hisi_acc_sg_buf_unmap(dev, c_req->src, c_req->c_in);
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct device *dev = SEC_CTX_DEV(ctx);
- hisi_acc_sg_buf_unmap(dev, c_req->dst, c_req->c_out);
+ if (ctx->use_pbuf) {
+ sec_cipher_pbuf_unmap(ctx, req, dst);
+ } else {
+ if (dst != src)
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
- return 0;
+ hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+ }
}
-static int hisi_sec_skcipher_copy_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct skcipher_request *sk_req =
- (struct skcipher_request *)req->priv[0];
- struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
- struct skcipher_request *sk_next;
- int i, iv_size;
-
- c_req->c_len = sk_req->cryptlen;
-
- iv_size = crypto_skcipher_ivsize(atfm);
- if (iv_size > SEC_IV_SIZE)
- return -EINVAL;
+ struct skcipher_request *sq = req->c_req.sk_req;
- memcpy(c_req->c_ivin, sk_req->iv, iv_size);
-
- if (ctx->is_fusion) {
- for (i = 1; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- memcpy(c_req->c_ivin + i * iv_size, sk_next->iv,
- iv_size);
- }
-
- c_req->gran_num = req->fusion_num;
- c_ctx->c_gran_size = sk_req->cryptlen;
- }
-
- return 0;
+ return sec_cipher_map(ctx, req, sq->src, sq->dst);
}
-static int hisi_sec_skcipher_bd_fill_storage(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
+ struct skcipher_request *sq = req->c_req.sk_req;
- if (!c_req->c_len)
- return -EINVAL;
-
- sec_sqe->type1.c_key_addr_l = lower_32_bits(c_ctx->c_key_dma);
- sec_sqe->type1.c_key_addr_h = upper_32_bits(c_ctx->c_key_dma);
- sec_sqe->type1.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
- sec_sqe->type1.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
- sec_sqe->type1.data_src_addr_l = lower_32_bits(c_req->c_in_dma);
- sec_sqe->type1.data_src_addr_h = upper_32_bits(c_req->c_in_dma);
- sec_sqe->type1.data_dst_addr_l = lower_32_bits(c_req->c_out_dma);
- sec_sqe->type1.data_dst_addr_h = upper_32_bits(c_req->c_out_dma);
-
- sec_sqe->type1.c_mode = c_ctx->c_mode;
- sec_sqe->type1.c_alg = c_ctx->c_alg;
- sec_sqe->type1.c_key_len = c_ctx->c_key_len;
-
- sec_sqe->src_addr_type = SGL;
- sec_sqe->dst_addr_type = SGL;
- sec_sqe->type = BD_TYPE1;
- sec_sqe->scene = SCENE_STORAGE;
- sec_sqe->de = c_req->c_in_dma != c_req->c_out_dma;
-
- if (c_req->encrypt)
- sec_sqe->cipher = SEC_CIPHER_ENC;
- else
- sec_sqe->cipher = SEC_CIPHER_DEC;
-
- if (c_ctx->c_mode == C_MODE_XTS)
- sec_sqe->type1.ci_gen = CI_GEN_BY_LBA;
-
- sec_sqe->type1.cipher_gran_size = c_ctx->c_gran_size;
- sec_sqe->type1.gran_num = c_req->gran_num;
- __sync_fetch_and_add(&ctx->sec->sec_dfx.gran_task_cnt, c_req->gran_num);
- sec_sqe->type1.block_size = c_req->c_len;
-
- sec_sqe->type1.lba_l = lower_32_bits(c_req->lba);
- sec_sqe->type1.lba_h = upper_32_bits(c_req->lba);
-
- sec_sqe->type1.tag = req->req_id;
-
- return 0;
+ sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}
-static int hisi_sec_skcipher_bd_fill_multi_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
int ret;
- ret = hisi_sec_skcipher_bd_fill_storage(ctx, req);
- if (ret)
+ ret = ctx->req_op->buf_map(ctx, req);
+ if (unlikely(ret))
return ret;
- req->sec_sqe.type1.ci_gen = CI_GEN_BY_ADDR;
-
- return 0;
-}
-
-static int hisi_sec_skcipher_bd_fill_base(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
-{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
-
- if (!c_req->c_len)
- return -EINVAL;
-
- sec_sqe->type2.c_key_addr_l = lower_32_bits(c_ctx->c_key_dma);
- sec_sqe->type2.c_key_addr_h = upper_32_bits(c_ctx->c_key_dma);
- sec_sqe->type2.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
- sec_sqe->type2.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
- sec_sqe->type2.data_src_addr_l = lower_32_bits(c_req->c_in_dma);
- sec_sqe->type2.data_src_addr_h = upper_32_bits(c_req->c_in_dma);
- sec_sqe->type2.data_dst_addr_l = lower_32_bits(c_req->c_out_dma);
- sec_sqe->type2.data_dst_addr_h = upper_32_bits(c_req->c_out_dma);
+ ctx->req_op->do_transfer(ctx, req);
- sec_sqe->type2.c_mode = c_ctx->c_mode;
- sec_sqe->type2.c_alg = c_ctx->c_alg;
- sec_sqe->type2.c_key_len = c_ctx->c_key_len;
-
- sec_sqe->src_addr_type = SGL;
- sec_sqe->dst_addr_type = SGL;
- sec_sqe->type = BD_TYPE2;
- sec_sqe->scene = SCENE_IPSEC;
- sec_sqe->de = c_req->c_in_dma != c_req->c_out_dma;
+ ret = ctx->req_op->bd_fill(ctx, req);
+ if (unlikely(ret))
+ goto unmap_req_buf;
- __sync_fetch_and_add(&ctx->sec->sec_dfx.gran_task_cnt, 1);
+ return ret;
- if (c_req->encrypt)
- sec_sqe->cipher = SEC_CIPHER_ENC;
- else
- sec_sqe->cipher = SEC_CIPHER_DEC;
+unmap_req_buf:
+ ctx->req_op->buf_unmap(ctx, req);
- sec_sqe->type2.c_len = c_req->c_len;
- sec_sqe->type2.tag = req->req_id;
+ return ret;
+}
- return 0;
+static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
+{
+ ctx->req_op->buf_unmap(ctx, req);
}
-static int hisi_sec_bd_send_asyn(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int req_cnt = req->req_cnt;
- int ret;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_alg_res *res = &req->qp_ctx->res[req->req_id];
- mutex_lock(&qp_ctx->req_lock);
- ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
- if (ret == 0)
- ctx->sec->sec_dfx.send_cnt++;
- mutex_unlock(&qp_ctx->req_lock);
+ if (ctx->use_pbuf) {
+ c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
+ c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
+ } else {
+ c_req->c_ivin = res->c_ivin;
+ c_req->c_ivin_dma = res->c_ivin_dma;
+ }
- return hisi_sec_get_async_ret(ret, req_cnt, ctx->req_fake_limit);
+ memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
-static void hisi_sec_skcipher_complete(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req, int err_code)
+static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
- struct skcipher_request **sk_reqs =
- (struct skcipher_request **)req->priv;
- int i, req_fusion_num;
-
- if (ctx->is_fusion == SEC_NO_FUSION)
- req_fusion_num = 1;
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ u8 scene, sa_type, da_type;
+ u8 bd_type, cipher;
+ u8 de = 0;
+
+ memset(sec_sqe, 0, sizeof(struct sec_sqe));
+
+ sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
+ sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
+ SEC_CMODE_OFFSET);
+ sec_sqe->type2.c_alg = c_ctx->c_alg;
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
+ SEC_CKEY_OFFSET);
+
+ bd_type = SEC_BD_TYPE2;
+ if (c_req->encrypt)
+ cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
else
- req_fusion_num = req->fusion_num;
+ cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
+ sec_sqe->type_cipher_auth = bd_type | cipher;
- for (i = 0; i < req_fusion_num; i++)
- sk_reqs[i]->base.complete(&sk_reqs[i]->base, err_code);
-
- /* free sk_reqs if this request is completed */
- if (err_code != -EINPROGRESS)
- __sync_add_and_fetch(&ctx->sec->sec_dfx.put_task_cnt,
- req_fusion_num);
+ if (ctx->use_pbuf)
+ sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
else
- __sync_add_and_fetch(&ctx->sec->sec_dfx.busy_comp_cnt,
- req_fusion_num);
-}
-
-static int hisi_sec_skcipher_callback(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
-{
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int req_id = req->req_id;
+ sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+ scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
+ if (c_req->c_in_dma != c_req->c_out_dma)
+ de = 0x1 << SEC_DE_OFFSET;
- if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
- hisi_sec_skcipher_complete(ctx, req, -EINPROGRESS);
+ sec_sqe->sds_sa_type = (de | scene | sa_type);
- hisi_sec_skcipher_complete(ctx, req, req->err_type);
+ /* Just set DST address type */
+ if (ctx->use_pbuf)
+ da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
+ else
+ da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+ sec_sqe->sdm_addr_type |= da_type;
- hisi_sec_free_req_id(qp_ctx, req_id);
+ sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
+ sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
return 0;
}
-static int sec_get_issue_id_range(atomic_t *qid, int start, int end)
+static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
- int issue_id;
- int issue_len = end - start;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ u32 iv_size = req->ctx->c_ctx.ivsize;
+ struct scatterlist *sgl;
+ unsigned int cryptlen;
+ size_t sz;
+ u8 *iv;
+
+ if (req->c_req.encrypt)
+ sgl = sk_req->dst;
+ else
+ sgl = sk_req->src;
- issue_id = (atomic_inc_return(qid) - start) % issue_len + start;
- if (issue_id % issue_len == 0 && atomic_read(qid) > issue_len)
- atomic_sub(issue_len, qid);
+ iv = sk_req->iv;
+ cryptlen = sk_req->cryptlen;
- return issue_id;
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
+ cryptlen - iv_size);
+ if (unlikely(sz != iv_size))
+ dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
-static inline int sec_get_issue_id(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
+ int err)
{
- int issue_id;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- if (req->c_req.encrypt == 1)
- issue_id = sec_get_issue_id_range(&ctx->enc_qid, 0,
- ctx->enc_q_num);
- else
- issue_id = sec_get_issue_id_range(&ctx->dec_qid, ctx->enc_q_num,
- ctx->q_num);
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
- return issue_id;
-}
+ /* IV output at encrypto of CBC mode */
+ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req, SEC_SKCIPHER);
-static inline void hisi_sec_inc_thread_cnt(struct hisi_sec_ctx *ctx)
-{
- int thread_cnt = atomic_inc_return(&ctx->thread_cnt);
+ if (req->fake_busy)
+ sk_req->base.complete(&sk_req->base, -EINPROGRESS);
- if (thread_cnt > ctx->sec->sec_dfx.thread_cnt)
- ctx->sec->sec_dfx.thread_cnt = thread_cnt;
+ sk_req->base.complete(&sk_req->base, err);
}
-static struct hisi_sec_req *sec_request_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *in_req, int *fusion_send, int *fake_busy)
+static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_qp_ctx *qp_ctx;
- struct hisi_sec_req *req;
- int issue_id, ret;
-
- __sync_add_and_fetch(&ctx->sec->sec_dfx.get_task_cnt, 1);
-
- issue_id = sec_get_issue_id(ctx, in_req);
- hisi_sec_inc_thread_cnt(ctx);
-
- qp_ctx = &ctx->qp_ctx[issue_id];
-
- mutex_lock(&qp_ctx->req_lock);
-
- if (in_req->c_req.sk_req->src == in_req->c_req.sk_req->dst) {
- *fusion_send = 1;
- } else if (qp_ctx->fusion_req &&
- qp_ctx->fusion_req->fusion_num < qp_ctx->fusion_limit) {
- req = qp_ctx->fusion_req;
-
- *fake_busy = req->fake_busy;
- __sync_add_and_fetch(&ctx->sec->sec_dfx.fake_busy_cnt,
- *fake_busy);
-
- req->priv[req->fusion_num] = in_req->c_req.sk_req;
- req->fusion_num++;
- in_req->fusion_num = req->fusion_num;
- if (req->fusion_num == qp_ctx->fusion_limit) {
- *fusion_send = 1;
- qp_ctx->fusion_req = NULL;
- }
- mutex_unlock(&qp_ctx->req_lock);
- return req;
- }
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- req = in_req;
-
- if (hisi_sec_alloc_req_id(req, qp_ctx)) {
- mutex_unlock(&qp_ctx->req_lock);
- return NULL;
- }
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
+ sec_free_queue_id(ctx, req);
+}
- req->fake_busy = 0;
+static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx;
+ int queue_id;
- req->req_cnt = atomic_inc_return(&qp_ctx->req_cnt);
- if (req->req_cnt >= ctx->req_fake_limit) {
- req->fake_busy = 1;
- *fake_busy = 1;
- __sync_add_and_fetch(&ctx->sec->sec_dfx.fake_busy_cnt, 1);
- }
+ /* To load balance */
+ queue_id = sec_alloc_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[queue_id];
- ret = ctx->req_op->get_res(ctx, req);
- if (ret) {
- dev_err(ctx->dev, "req_op get_res failed\n");
- mutex_unlock(&qp_ctx->req_lock);
- goto err_free_req_id;
+ req->req_id = sec_alloc_req_id(req, qp_ctx);
+ if (unlikely(req->req_id < 0)) {
+ sec_free_queue_id(ctx, req);
+ return req->req_id;
}
- if (ctx->fusion_limit <= 1 || ctx->fusion_tmout_nsec == 0)
- *fusion_send = 1;
-
- if (ctx->is_fusion && *fusion_send == 0)
- qp_ctx->fusion_req = req;
-
- req->fusion_num = 1;
-
- req->priv[0] = in_req->c_req.sk_req;
- req->st_time = ktime_get();
-
- mutex_unlock(&qp_ctx->req_lock);
-
- return req;
+ if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
+ req->fake_busy = true;
+ else
+ req->fake_busy = false;
-err_free_req_id:
- hisi_sec_free_req_id(qp_ctx, req->req_id);
- return NULL;
+ return 0;
}
-static int sec_request_transfer(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
+ struct sec_cipher_req *c_req = &req->c_req;
int ret;
- ret = ctx->req_op->buf_map(ctx, req);
- if (ret)
+ ret = sec_request_init(ctx, req);
+ if (unlikely(ret))
return ret;
- ret = ctx->req_op->do_transfer(ctx, req);
- if (ret)
- goto unmap_req_buf;
+ ret = sec_request_transfer(ctx, req);
+ if (unlikely(ret))
+ goto err_uninit_req;
- memset(&req->sec_sqe, 0, sizeof(struct hisi_sec_sqe));
- ret = ctx->req_op->bd_fill(ctx, req);
- if (ret)
- goto unmap_req_buf;
+ /* Output IV as decrypto */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ sec_update_iv(req, ctx->alg_type);
- return 0;
+ ret = ctx->req_op->bd_send(ctx, req);
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx),
+ "send sec request failed!\n");
+ goto err_send_req;
+ }
-unmap_req_buf:
- ctx->req_op->buf_unmap(ctx, req);
return ret;
-}
-
-static int sec_request_send(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req)
-{
- int ret;
- ret = ctx->req_op->bd_send(ctx, req);
+err_send_req:
+ /* As failing, restore the IV from user */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
+ if (ctx->alg_type == SEC_SKCIPHER)
+ memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
+ ctx->c_ctx.ivsize);
+ }
- if (ret == 0 || ret == -EBUSY || ret == -EINPROGRESS)
- atomic_dec(&ctx->thread_cnt);
+ sec_request_untransfer(ctx, req);
+err_uninit_req:
+ sec_request_uninit(ctx, req);
return ret;
}
-static int sec_io_proc(struct hisi_sec_ctx *ctx, struct hisi_sec_req *in_req)
+static const struct sec_req_op sec_skcipher_req_ops = {
+ .buf_map = sec_skcipher_sgl_map,
+ .buf_unmap = sec_skcipher_sgl_unmap,
+ .do_transfer = sec_skcipher_copy_iv,
+ .bd_fill = sec_skcipher_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_skcipher_callback,
+ .process = sec_process,
+};
+
+static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
- struct hisi_sec_req *req;
- int fusion_send = 0;
- int fake_busy = 0;
- int ret;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- in_req->fusion_num = 1;
+ ctx->req_op = &sec_skcipher_req_ops;
- req = sec_request_alloc(ctx, in_req, &fusion_send, &fake_busy);
+ return sec_skcipher_init(tfm);
+}
- if (!req) {
- dev_err_ratelimited(ctx->dev, "sec_request_alloc failed\n");
- return -ENOMEM;
- }
+static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
+{
+ sec_skcipher_uninit(tfm);
+}
- if (ctx->is_fusion && fusion_send == 0)
- return fake_busy ? -EBUSY : -EINPROGRESS;
+static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ struct skcipher_request *sk_req = sreq->c_req.sk_req;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ u8 c_alg = ctx->c_ctx.c_alg;
- ret = sec_request_transfer(ctx, req);
- if (ret) {
- dev_err_ratelimited(ctx->dev, "sec_transfer ret[%d]\n", ret);
- goto err_free_req;
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
+ dev_err(dev, "skcipher input param error!\n");
+ return -EINVAL;
}
+ sreq->c_req.c_len = sk_req->cryptlen;
- ret = sec_request_send(ctx, req);
- __sync_add_and_fetch(&ctx->sec->sec_dfx.send_by_full, 1);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err_ratelimited(ctx->dev, "sec_send ret[%d]\n", ret);
- goto err_unmap_req;
- }
+ if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
+ ctx->use_pbuf = true;
- return ret;
+ if (c_alg == SEC_CALG_3DES) {
+ if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
+ dev_err(dev, "skcipher 3des input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
+ if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "skcipher aes input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
-err_unmap_req:
- ctx->req_op->buf_unmap(ctx, req);
-err_free_req:
- hisi_sec_free_req_id(req->qp_ctx, req->req_id);
- atomic_dec(&ctx->thread_cnt);
- return ret;
+ dev_err(dev, "skcipher algorithm error!\n");
+ return -EINVAL;
}
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
- struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(atfm);
- struct hisi_sec_req *req = skcipher_request_ctx(sk_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
+ struct sec_req *req = skcipher_request_ctx(sk_req);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- if (!sk_req->src || !sk_req->dst || !sk_req->cryptlen)
- return -EINVAL;
+ if (!sk_req->cryptlen)
+ return 0;
- req->c_req.sk_req = sk_req;
+ req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
- req->ctx = ctx;
+ req->ctx = ctx;
+
+ ret = sec_skcipher_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
- return sec_io_proc(ctx, req);
+ return ctx->req_op->process(ctx, req);
}
static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
@@ -1415,7 +998,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
}
#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
- sec_max_key_size, hisi_sec_cipher_ctx_init_func, blk_size, iv_size)\
+ sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
.base = {\
.cra_name = sec_cra_name,\
@@ -1423,12 +1006,11 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.cra_priority = SEC_PRIORITY,\
.cra_flags = CRYPTO_ALG_ASYNC,\
.cra_blocksize = blk_size,\
- .cra_ctxsize = sizeof(struct hisi_sec_ctx),\
- .cra_alignmask = 0,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
},\
- .init = hisi_sec_cipher_ctx_init_func,\
- .exit = hisi_sec_cipher_ctx_exit,\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
.setkey = sec_set_key,\
.decrypt = sec_skcipher_decrypt,\
.encrypt = sec_skcipher_encrypt,\
@@ -1437,75 +1019,55 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.ivsize = iv_size,\
},
-#define SEC_SKCIPHER_NORMAL_ALG(name, key_func, min_key_size, \
+#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
max_key_size, blk_size, iv_size) \
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
- hisi_sec_cipher_ctx_init_alg, blk_size, iv_size)
+ sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-#define SEC_SKCIPHER_FUSION_ALG(name, key_func, min_key_size, \
- max_key_size, blk_size, iv_size) \
- SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
- hisi_sec_cipher_ctx_init_multi_iv, blk_size, iv_size)
-
-static struct skcipher_alg sec_normal_algs[] = {
- SEC_SKCIPHER_NORMAL_ALG("ecb(aes)", sec_setkey_aes_ecb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(aes)", sec_setkey_aes_cbc,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("xts(aes)", sec_setkey_aes_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("ecb(des)", sec_setkey_des_ecb,
- DES_KEY_SIZE, DES_KEY_SIZE, DES_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(des)", sec_setkey_des_cbc,
- DES_KEY_SIZE, DES_KEY_SIZE, DES_BLOCK_SIZE, DES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
- DES3_EDE_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
-};
+static struct skcipher_alg sec_skciphers[] = {
+ SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-static struct skcipher_alg sec_fusion_algs[] = {
- SEC_SKCIPHER_FUSION_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_FUSION_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
};
-int hisi_sec_register_to_crypto(int fusion_limit)
+int sec_register_to_crypto(void)
{
/* To avoid repeat register */
- if (atomic_add_return(1, &sec_active_devs) == 1) {
- if (fusion_limit == 1)
- return crypto_register_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- return crypto_register_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
- }
+ if (atomic_add_return(1, &sec_active_devs) == 1)
+ return crypto_register_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
return 0;
}
-void hisi_sec_unregister_from_crypto(int fusion_limit)
+void sec_unregister_from_crypto(void)
{
- if (atomic_sub_return(1, &sec_active_devs) == 0) {
- if (fusion_limit == 1)
- crypto_unregister_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- crypto_unregister_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
- }
+ if (atomic_sub_return(1, &sec_active_devs) == 0)
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
}
-
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index bffbeba..221257e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -1,13 +1,238 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#ifndef HISI_SEC_CRYPTO_H
-#define HISI_SEC_CRYPTO_H
+#ifndef __HISI_SEC_V2_CRYPTO_H
+#define __HISI_SEC_V2_CRYPTO_H
-#define SEC_IV_SIZE 24
-#define SEC_MAX_KEY_SIZE 64
+#define SEC_IV_SIZE 24
+#define SEC_MAX_KEY_SIZE 64
+#define SEC_MAX_AUTH_KEY_SIZE 64
-int hisi_sec_register_to_crypto(int fusion_limit);
-void hisi_sec_unregister_from_crypto(int fusion_limit);
+#define SEC_COMM_SCENE 0
+enum sec_calg {
+ SEC_CALG_3DES = 0x1,
+ SEC_CALG_AES = 0x2,
+ SEC_CALG_SM4 = 0x3,
+};
+
+enum sec_hash_alg {
+ SEC_A_HMAC_SHA1 = 0x10,
+ SEC_A_HMAC_SHA256 = 0x11,
+ SEC_A_HMAC_SHA512 = 0x15,
+};
+
+enum sec_mac_len {
+ SEC_HMAC_SHA1_MAC = 20,
+ SEC_HMAC_SHA256_MAC = 32,
+ SEC_HMAC_SHA512_MAC = 64,
+};
+
+enum sec_cmode {
+ SEC_CMODE_ECB = 0x0,
+ SEC_CMODE_CBC = 0x1,
+ SEC_CMODE_CTR = 0x4,
+ SEC_CMODE_XTS = 0x7,
+};
+
+enum sec_ckey_type {
+ SEC_CKEY_128BIT = 0x0,
+ SEC_CKEY_192BIT = 0x1,
+ SEC_CKEY_256BIT = 0x2,
+ SEC_CKEY_3DES_3KEY = 0x1,
+ SEC_CKEY_3DES_2KEY = 0x3,
+};
+
+enum sec_bd_type {
+ SEC_BD_TYPE1 = 0x1,
+ SEC_BD_TYPE2 = 0x2,
+};
+
+enum sec_auth {
+ SEC_NO_AUTH = 0x0,
+ SEC_AUTH_TYPE1 = 0x1,
+ SEC_AUTH_TYPE2 = 0x2,
+};
+
+enum sec_cipher_dir {
+ SEC_CIPHER_ENC = 0x1,
+ SEC_CIPHER_DEC = 0x2,
+};
+
+enum sec_addr_type {
+ SEC_PBUF = 0x0,
+ SEC_SGL = 0x1,
+ SEC_PRP = 0x2,
+};
+
+enum sec_ci_gen {
+ SEC_CI_GEN_BY_ADDR = 0x0,
+ SEC_CI_GEN_BY_LBA = 0X3,
+};
+
+enum sec_scene {
+ SEC_SCENE_IPSEC = 0x1,
+ SEC_SCENE_STORAGE = 0x5,
+};
+
+enum sec_work_mode {
+ SEC_NO_FUSION = 0x0,
+ SEC_IV_FUSION = 0x1,
+ SEC_FUSION_BUTT
+};
+
+enum sec_req_ops_type {
+ SEC_OPS_SKCIPHER_ALG = 0x0,
+ SEC_OPS_DMCRYPT = 0x1,
+ SEC_OPS_MULTI_IV = 0x2,
+ SEC_OPS_BUTT
+};
+
+struct sec_sqe_type2 {
+ /*
+ * mac_len: 0~4 bits
+ * a_key_len: 5~10 bits
+ * a_alg: 11~16 bits
+ */
+ __le32 mac_key_alg;
+
+ /*
+ * c_icv_len: 0~5 bits
+ * c_width: 6~8 bits
+ * c_key_len: 9~11 bits
+ * c_mode: 12~15 bits
+ */
+ __le16 icvw_kmode;
+
+ /* c_alg: 0~3 bits */
+ __u8 c_alg;
+ __u8 rsvd4;
+
+ /*
+ * a_len: 0~23 bits
+ * iv_offset_l: 24~31 bits
+ */
+ __le32 alen_ivllen;
+
+ /*
+ * c_len: 0~23 bits
+ * iv_offset_h: 24~31 bits
+ */
+ __le32 clen_ivhlen;
+
+ __le16 auth_src_offset;
+ __le16 cipher_src_offset;
+ __le16 cs_ip_header_offset;
+ __le16 cs_udp_header_offset;
+ __le16 pass_word_len;
+ __le16 dk_len;
+ __u8 salt3;
+ __u8 salt2;
+ __u8 salt1;
+ __u8 salt0;
+
+ __le16 tag;
+ __le16 rsvd5;
+
+ /*
+ * c_pad_type: 0~3 bits
+ * c_pad_len: 4~11 bits
+ * c_pad_data_type: 12~15 bits
+ */
+ __le16 cph_pad;
+
+ /* c_pad_len_field: 0~1 bits */
+ __le16 c_pad_len_field;
+
+ __le64 long_a_data_len;
+ __le64 a_ivin_addr;
+ __le64 a_key_addr;
+ __le64 mac_addr;
+ __le64 c_ivin_addr;
+ __le64 c_key_addr;
+
+ __le64 data_src_addr;
+ __le64 data_dst_addr;
+
+ /*
+ * done: 0 bit
+ * icv: 1~3 bits
+ * csc: 4~6 bits
+ * flag: 7-10 bits
+ * dif_check: 11~13 bits
+ */
+ __le16 done_flag;
+
+ __u8 error_type;
+ __u8 warning_type;
+ __u8 mac_i3;
+ __u8 mac_i2;
+ __u8 mac_i1;
+ __u8 mac_i0;
+ __le16 check_sum_i;
+ __u8 tls_pad_len_i;
+ __u8 rsvd12;
+ __le32 counter;
+};
+
+struct sec_sqe {
+ /*
+ * type: 0~3 bits
+ * cipher: 4~5 bits
+ * auth: 6~7 bit s
+ */
+ __u8 type_cipher_auth;
+
+ /*
+ * seq: 0 bit
+ * de: 1~2 bits
+ * scene: 3~6 bits
+ * src_addr_type: ~7 bit, with sdm_addr_type 0-1 bits
+ */
+ __u8 sds_sa_type;
+
+ /*
+ * src_addr_type: 0~1 bits, not used now,
+ * if support PRP, set this field, or set zero.
+ * dst_addr_type: 2~4 bits
+ * mac_addr_type: 5~7 bits
+ */
+ __u8 sdm_addr_type;
+ __u8 rsvd0;
+
+ /*
+ * nonce_len(type2): 0~3 bits
+ * huk(type2): 4 bit
+ * key_s(type2): 5 bit
+ * ci_gen: 6~7 bits
+ */
+ __u8 huk_key_ci;
+
+ /*
+ * ai_gen: 0~1 bits
+ * a_pad(type2): 2~3 bits
+ * c_s(type2): 4~5 bits
+ */
+ __u8 ai_apd_cs;
+
+ /*
+ * rhf(type2): 0 bit
+ * c_key_type: 1~2 bits
+ * a_key_type: 3~4 bits
+ * write_frame_len(type2): 5~7 bits
+ */
+ __u8 rca_key_frm;
+
+ /*
+ * cal_iv_addr_en(type2): 0 bit
+ * tls_up(type2): 1 bit
+ * inveld: 7 bit
+ */
+ __u8 iv_tls_ld;
+
+ struct sec_sqe_type2 type2;
+};
+
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index b4e5d57f..b3340c0 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -1,33 +1,30 @@
// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 2018-2019 HiSilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
+#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
-#include <linux/uacce.h>
+
#include "sec.h"
-#include "sec_crypto.h"
#define SEC_QUEUE_NUM_V1 4096
#define SEC_QUEUE_NUM_V2 1024
#define SEC_PF_PCI_DEVICE_ID 0xa255
#define SEC_VF_PCI_DEVICE_ID 0xa256
+#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd
+#define SEC_BD_ERR_CHK_EN3 0xffffbfff
+
#define SEC_SQE_SIZE 128
#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM 64
@@ -35,8 +32,6 @@
#define SEC_CTX_Q_NUM_DEF 24
#define SEC_CTX_Q_NUM_MAX 32
-#define SEC_AM_CFG_SIG_PORT_MAX_TRANS 0x300014
-#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
#define SEC_CTRL_CNT_CLR_CE 0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
#define SEC_ENGINE_PF_CFG_OFF 0x300000
@@ -44,13 +39,13 @@
#define SEC_CORE_INT_SOURCE 0x301010
#define SEC_CORE_INT_MASK 0x301000
#define SEC_CORE_INT_STATUS 0x301008
-#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
-#define SEC_CORE_ECC_INFO 0x301C14
-#define SEC_ECC_NUM(err_val) (((err_val) >> 16) & 0xFFFF)
-#define SEC_ECC_ADDR(err_val) ((err_val) & 0xFFFF)
+#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
+#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFFFF)
+#define SEC_ECC_ADDR(err) ((err) >> 0)
#define SEC_CORE_INT_DISABLE 0x0
#define SEC_CORE_INT_ENABLE 0x1ff
#define SEC_CORE_INT_CLEAR 0x1ff
+#define SEC_SAA_ENABLE 0x17f
#define SEC_RAS_CE_REG 0x301050
#define SEC_RAS_FE_REG 0x301054
@@ -64,6 +59,7 @@
#define SEC_CONTROL_REG 0x0200
#define SEC_TRNG_EN_SHIFT 8
+#define SEC_CLK_GATE_ENABLE BIT(3)
#define SEC_CLK_GATE_DISABLE (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
@@ -71,26 +67,24 @@
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
-#define SEC_SAA_EN_REG 0x270
-#define SEC_SAA_EN 0x17F
+#define SEC_SAA_EN_REG 0x0270
#define SEC_BD_ERR_CHK_EN_REG0 0x0380
#define SEC_BD_ERR_CHK_EN_REG1 0x0384
#define SEC_BD_ERR_CHK_EN_REG3 0x038c
-#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
-#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD
-#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
+#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
#define SEC_DBGFS_VAL_MAX_LEN 20
+#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
-struct hisi_sec_hw_error {
+struct sec_hw_error {
u32 int_msk;
const char *msg;
};
@@ -98,9 +92,8 @@ struct hisi_sec_hw_error {
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static struct hisi_qm_list sec_devices;
-static struct workqueue_struct *sec_wq;
-static const struct hisi_sec_hw_error sec_hw_error[] = {
+static const struct sec_hw_error sec_hw_errors[] = {
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
{.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
@@ -113,36 +106,13 @@ struct hisi_sec_hw_error {
{ /* sentinel */ }
};
-enum ctrl_debug_file_index {
- SEC_CURRENT_QM,
- SEC_CLEAR_ENABLE,
- SEC_DEBUG_FILE_NUM,
-};
-
-static const char * const ctrl_debug_file_name[] = {
+static const char * const sec_dbg_file_name[] = {
[SEC_CURRENT_QM] = "current_qm",
[SEC_CLEAR_ENABLE] = "clear_enable",
};
-struct ctrl_debug_file {
- enum ctrl_debug_file_index index;
- spinlock_t lock;
- struct hisi_sec_ctrl *ctrl;
-};
-
-/*
- * One SEC controller has one PF and multiple VFs, some global configurations
- * which PF has need this structure.
- *
- * Just relevant for PF.
- */
-struct hisi_sec_ctrl {
- struct hisi_sec *hisi_sec;
- struct ctrl_debug_file files[SEC_DEBUG_FILE_NUM];
-};
-
static struct debugfs_reg32 sec_dfx_regs[] = {
- {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
+ {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
{"SEC_SAA_EN ", 0x301270},
{"SEC_BD_LATENCY_MIN ", 0x301600},
{"SEC_BD_LATENCY_MAX ", 0x301608},
@@ -262,71 +232,12 @@ static int vfs_num_set(const char *val, const struct kernel_param *kp)
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
-static int sec_fusion_limit_set(const char *val, const struct kernel_param *kp)
-{
- u32 fusion_limit;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &fusion_limit);
- if (ret)
- return ret;
-
- if (!fusion_limit || fusion_limit > FUSION_LIMIT_MAX) {
- pr_err("fusion_limit[%u] is't at range(0, %d)", fusion_limit,
- FUSION_LIMIT_MAX);
- return -EINVAL;
- }
-
- return param_set_int(val, kp);
-}
-
-static const struct kernel_param_ops sec_fusion_limit_ops = {
- .set = sec_fusion_limit_set,
- .get = param_get_int,
-};
-static u32 fusion_limit = FUSION_LIMIT_DEF;
-
-module_param_cb(fusion_limit, &sec_fusion_limit_ops, &fusion_limit, 0444);
-MODULE_PARM_DESC(fusion_limit, "(1, acc_sgl_sge_nr of hisilicon QM)");
-
-static int sec_fusion_tmout_ns_set(const char *val,
- const struct kernel_param *kp)
-{
- u32 fusion_tmout_nsec;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &fusion_tmout_nsec);
- if (ret)
- return ret;
-
- if (fusion_tmout_nsec > NSEC_PER_SEC) {
- pr_err("fusion_tmout_nsec[%u] is too large", fusion_tmout_nsec);
- return -EINVAL;
- }
-
- return param_set_int(val, kp);
-}
-
-static const struct kernel_param_ops sec_fusion_time_ops = {
- .set = sec_fusion_tmout_ns_set,
- .get = param_get_int,
-};
-static u32 fusion_time = FUSION_TMOUT_NSEC_DEF; /* ns */
-module_param_cb(fusion_time, &sec_fusion_time_ops, &fusion_time, 0444);
-MODULE_PARM_DESC(fusion_time, "(0, NSEC_PER_SEC)");
-
-static const struct pci_device_id hisi_sec_dev_ids[] = {
+static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
{ 0, }
};
-MODULE_DEVICE_TABLE(pci, hisi_sec_dev_ids);
+MODULE_DEVICE_TABLE(pci, sec_dev_ids);
static u8 sec_get_endian(struct hisi_qm *qm)
{
@@ -390,9 +301,9 @@ static int sec_engine_init(struct hisi_qm *qm)
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
writel(SEC_SINGLE_PORT_MAX_TRANS,
- qm->io_base + SEC_AM_CFG_SIG_PORT_MAX_TRANS);
+ qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
- writel(SEC_SAA_EN, SEC_ADDR(qm, SEC_SAA_EN_REG));
+ writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
/* Enable sm4 extra mode, as ctr/ecb */
writel_relaxed(SEC_BD_ERR_CHK_EN0,
@@ -436,6 +347,7 @@ static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
return sec_engine_init(qm);
}
+/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
/* clear current_qm */
@@ -497,23 +409,16 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
-static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
+static u32 sec_current_qm_read(struct sec_debug_file *file)
{
- struct hisi_sec *hisi_sec = file->ctrl->hisi_sec;
-
- return &hisi_sec->qm;
-}
-
-static u32 current_qm_read(struct ctrl_debug_file *file)
-{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}
-static int current_qm_write(struct ctrl_debug_file *file, u32 val)
+static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
u32 vfq_num;
u32 tmp;
@@ -521,17 +426,17 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
return -EINVAL;
/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (val == 0) {
+ if (!val) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num;
- if (val == qm->vfs_num) {
+
+ if (val == qm->vfs_num)
qm->debug.curr_qm_qp_num =
qm->ctrl_q_num - qm->qp_num -
(qm->vfs_num - 1) * vfq_num;
- } else {
+ else
qm->debug.curr_qm_qp_num = vfq_num;
- }
}
writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
@@ -548,33 +453,33 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
return 0;
}
-static u32 clear_enable_read(struct ctrl_debug_file *file)
+static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
SEC_CTRL_CNT_CLR_CE_BIT;
}
-static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
+static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
u32 tmp;
if (val != 1 && val)
return -EINVAL;
tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
- ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
+ ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
return 0;
}
-static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
+static ssize_t sec_debug_read(struct file *filp, char __user *buf,
size_t count, loff_t *pos)
{
- struct ctrl_debug_file *file = filp->private_data;
+ struct sec_debug_file *file = filp->private_data;
char tbuf[SEC_DBGFS_VAL_MAX_LEN];
u32 val;
int ret;
@@ -583,10 +488,10 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
switch (file->index) {
case SEC_CURRENT_QM:
- val = current_qm_read(file);
+ val = sec_current_qm_read(file);
break;
case SEC_CLEAR_ENABLE:
- val = clear_enable_read(file);
+ val = sec_clear_enable_read(file);
break;
default:
spin_unlock_irq(&file->lock);
@@ -599,10 +504,10 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
-static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
{
- struct ctrl_debug_file *file = filp->private_data;
+ struct sec_debug_file *file = filp->private_data;
char tbuf[SEC_DBGFS_VAL_MAX_LEN];
unsigned long val;
int len, ret;
@@ -626,12 +531,12 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
switch (file->index) {
case SEC_CURRENT_QM:
- ret = current_qm_write(file, val);
+ ret = sec_current_qm_write(file, val);
if (ret)
goto err_input;
break;
case SEC_CLEAR_ENABLE:
- ret = clear_enable_write(file, val);
+ ret = sec_clear_enable_write(file, val);
if (ret)
goto err_input;
break;
@@ -649,30 +554,30 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
return ret;
}
-static const struct file_operations ctrl_debug_fops = {
+static const struct file_operations sec_dbg_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = ctrl_debug_read,
- .write = ctrl_debug_write,
+ .read = sec_debug_read,
+ .write = sec_debug_write,
};
-static int hisi_sec_core_debug_init(struct hisi_qm *qm)
+static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
- struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
+ *val = atomic64_read((atomic64_t *)data);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
+ NULL, "%lld\n");
+
+static int sec_core_debug_init(struct hisi_qm *qm)
+{
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
struct device *dev = &qm->pdev->dev;
- struct hisi_sec_dfx *dfx = &sec->sec_dfx;
+ struct sec_dfx *dfx = &sec->debug.dfx;
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
- char buf[SEC_DBGFS_VAL_MAX_LEN];
- int ret;
+ struct dentry *tmp_d;
- ret = snprintf(buf, SEC_DBGFS_VAL_MAX_LEN, "sec_dfx");
- if (ret < 0)
- return -ENOENT;
-
- tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
- if (!tmp_d)
- return -ENOENT;
+ tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -682,123 +587,69 @@ static int hisi_sec_core_debug_init(struct hisi_qm *qm)
regset->nregs = ARRAY_SIZE(sec_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
- tmp = debugfs_create_u64("send_by_tmout", 0444, tmp_d,
- &dfx->send_by_tmout);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("send_by_full", 0444, tmp_d,
- &dfx->send_by_full);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("get_task_cnt", 0444, tmp_d,
- &dfx->get_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("put_task_cnt", 0444, tmp_d,
- &dfx->put_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("gran_task_cnt", 0444, tmp_d,
- &dfx->gran_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("thread_cnt", 0444, tmp_d, &dfx->thread_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("fake_busy_cnt", 0444,
- tmp_d, &dfx->fake_busy_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file("send_cnt", 0444, tmp_d,
+ &dfx->send_cnt, &sec_atomic64_ops);
- tmp = debugfs_create_u64("busy_comp_cnt", 0444, tmp_d,
- &dfx->busy_comp_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file("recv_cnt", 0444, tmp_d,
+ &dfx->recv_cnt, &sec_atomic64_ops);
return 0;
}
-static int hisi_sec_ctrl_debug_init(struct hisi_qm *qm)
+static int sec_debug_init(struct hisi_qm *qm)
{
- struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
- struct dentry *tmp;
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
int i;
for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&sec->ctrl->files[i].lock);
- sec->ctrl->files[i].ctrl = sec->ctrl;
- sec->ctrl->files[i].index = i;
+ spin_lock_init(&sec->debug.files[i].lock);
+ sec->debug.files[i].index = i;
+ sec->debug.files[i].qm = qm;
- tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
+ debugfs_create_file(sec_dbg_file_name[i], 0600,
qm->debug.debug_root,
- sec->ctrl->files + i,
- &ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ sec->debug.files + i,
+ &sec_dbg_fops);
}
- return hisi_sec_core_debug_init(qm);
+ return sec_core_debug_init(qm);
}
-static int hisi_sec_debugfs_init(struct hisi_qm *qm)
+static int sec_debugfs_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- struct dentry *dev_d;
int ret;
- dev_d = debugfs_create_dir(dev_name(dev), sec_debugfs_root);
- if (!dev_d)
- return -ENOENT;
-
- qm->debug.debug_root = dev_d;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ sec_debugfs_root);
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
- ret = hisi_sec_ctrl_debug_init(qm);
+ ret = sec_debug_init(qm);
if (ret)
goto failed_to_create;
}
return 0;
- failed_to_create:
+failed_to_create:
debugfs_remove_recursive(sec_debugfs_root);
+
return ret;
}
-static void hisi_sec_debugfs_exit(struct hisi_qm *qm)
+static void sec_debugfs_exit(struct hisi_qm *qm)
{
debugfs_remove_recursive(qm->debug.debug_root);
-
- if (qm->fun_type == QM_HW_PF) {
- sec_debug_regs_clear(qm);
- qm->debug.curr_qm_qp_num = 0;
- }
}
static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- const struct hisi_sec_hw_error *errs = sec_hw_error;
+ const struct sec_hw_error *errs = sec_hw_errors;
struct device *dev = &qm->pdev->dev;
u32 err_val;
@@ -809,7 +660,7 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
err_val = readl(qm->io_base +
- SEC_CORE_ECC_INFO);
+ SEC_CORE_SRAM_ECC_ERR_INFO);
dev_err(dev, "multi ecc sram num=0x%x\n",
SEC_ECC_NUM(err_val));
}
@@ -837,19 +688,10 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
}
-static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
+static int sec_pf_probe_init(struct hisi_qm *qm)
{
- struct hisi_sec *hisi_sec = container_of(qm, struct hisi_sec, qm);
- struct hisi_sec_ctrl *ctrl;
int ret;
- ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return -ENOMEM;
-
- hisi_sec->ctrl = ctrl;
- ctrl->hisi_sec = hisi_sec;
-
switch (qm->ver) {
case QM_HW_V1:
qm->ctrl_q_num = SEC_QUEUE_NUM_V1;
@@ -868,7 +710,7 @@ static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
qm->err_ini.err_info.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
qm->err_ini.err_info.ce = QM_BASE_CE;
qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
- QM_ACC_WB_NOT_READY_TIMEOUT;
+ QM_ACC_WB_NOT_READY_TIMEOUT;
qm->err_ini.err_info.fe = 0;
qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
qm->err_ini.err_info.acpi_rst = "SRST";
@@ -884,42 +726,32 @@ static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
return ret;
hisi_qm_dev_err_init(qm);
- qm->err_ini.open_axi_master_ooo(qm);
sec_debug_regs_clear(qm);
return 0;
}
-static int hisi_sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+static int sec_probe_init(struct hisi_qm *qm)
{
int ret;
-#ifdef CONFIG_CRYPTO_QM_UACCE
- qm->algs = "sec\ncipher\ndigest\n";
- qm->uacce_mode = uacce_mode;
-#endif
- qm->pdev = pdev;
- ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
- if (ret)
- return ret;
- qm->sqe_size = SEC_SQE_SIZE;
- qm->dev_name = sec_name;
- qm->qm_list = &sec_devices;
- qm->wq = sec_wq;
-
- return 0;
-}
+ qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE |
+ WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(),
+ pci_name(qm->pdev));
+ if (!qm->wq) {
+ pci_err(qm->pdev, "fail to alloc workqueue\n");
+ return -ENOMEM;
+ }
-static int hisi_sec_probe_init(struct hisi_qm *qm)
-{
if (qm->fun_type == QM_HW_PF) {
- return hisi_sec_pf_probe_init(qm);
+ ret = sec_pf_probe_init(qm);
+ if (ret)
+ goto err_probe_uninit;
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
* so currently force PF to uses SEC_PF_DEF_Q_NUM, and force
* to trigger only one VF in v1 hardware.
- *
* v2 hardware has no such problem.
*/
if (qm->ver == QM_HW_V1) {
@@ -927,41 +759,92 @@ static int hisi_sec_probe_init(struct hisi_qm *qm)
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
} else if (qm->ver == QM_HW_V2) {
/* v2 starts to support get vft by mailbox */
- return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_probe_uninit;
}
+ } else {
+ ret = -ENODEV;
+ goto err_probe_uninit;
}
return 0;
+
+err_probe_uninit:
+ destroy_workqueue(qm->wq);
+ return ret;
+}
+
+static void sec_probe_uninit(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_PF)
+ hisi_qm_dev_err_uninit(qm);
+ destroy_workqueue(qm->wq);
+}
+
+static int sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ int ret;
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "sec\ncipher\ndigest\n";
+ qm->uacce_mode = uacce_mode;
+#endif
+ qm->pdev = pdev;
+ ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+
+ qm->qm_list = &sec_devices;
+ qm->sqe_size = SEC_SQE_SIZE;
+ qm->dev_name = sec_name;
+
+ return 0;
+}
+
+static void sec_iommu_used_check(struct sec_dev *sec)
+{
+ struct iommu_domain *domain;
+ struct device *dev = &sec->qm.pdev->dev;
+
+ domain = iommu_get_domain_for_dev(dev);
+
+ /* Check if iommu is used */
+ sec->iommu_used = false;
+ if (domain) {
+ if (domain->type & __IOMMU_DOMAIN_PAGING)
+ sec->iommu_used = true;
+ dev_info(dev, "SMMU Opened! the iommu type:= %d!\n",
+ domain->type);
+ }
}
-static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct hisi_sec *hisi_sec;
+ struct sec_dev *sec;
struct hisi_qm *qm;
int ret;
- hisi_sec = devm_kzalloc(&pdev->dev, sizeof(*hisi_sec), GFP_KERNEL);
- if (!hisi_sec)
+ sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
+ if (!sec)
return -ENOMEM;
- qm = &hisi_sec->qm;
+ qm = &sec->qm;
qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
- ret = hisi_sec_qm_pre_init(qm, pdev);
+ ret = sec_qm_pre_init(qm, pdev);
if (ret)
return ret;
- hisi_sec->ctx_q_num = ctx_q_num;
- hisi_sec->fusion_limit = fusion_limit;
- hisi_sec->fusion_tmout_nsec = fusion_time;
-
+ sec->ctx_q_num = ctx_q_num;
+ sec_iommu_used_check(sec);
ret = hisi_qm_init(qm);
if (ret) {
pci_err(pdev, "Failed to init qm (%d)!\n", ret);
return ret;
}
- ret = hisi_sec_probe_init(qm);
+ ret = sec_probe_init(qm);
if (ret) {
pci_err(pdev, "Failed to probe init (%d)!\n", ret);
goto err_qm_uninit;
@@ -970,18 +853,18 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret) {
pci_err(pdev, "Failed to start qm (%d)!\n", ret);
- goto err_qm_uninit;
+ goto err_probe_uninit;
}
- ret = hisi_sec_debugfs_init(qm);
+ ret = sec_debugfs_init(qm);
if (ret)
pci_warn(pdev, "Failed to init debugfs (%d)!\n", ret);
hisi_qm_add_to_list(qm, &sec_devices);
- ret = hisi_sec_register_to_crypto(fusion_limit);
+ ret = sec_register_to_crypto();
if (ret < 0) {
- pci_err(pdev, "Failed to register driver to crypto!\n");
+ pr_err("Failed to register driver to crypto!\n");
goto err_remove_from_list;
}
@@ -994,121 +877,115 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_crypto_unregister:
- hisi_sec_unregister_from_crypto(fusion_limit);
+ sec_unregister_from_crypto();
err_remove_from_list:
hisi_qm_del_from_list(qm, &sec_devices);
- hisi_sec_debugfs_exit(qm);
+ sec_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
+err_probe_uninit:
+ sec_probe_uninit(qm);
+
err_qm_uninit:
hisi_qm_uninit(qm);
return ret;
}
-static int hisi_sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
+static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
- if (num_vfs == 0)
- return hisi_qm_sriov_disable(pdev, &sec_devices);
- else
+ if (num_vfs)
return hisi_qm_sriov_enable(pdev, num_vfs);
+ else
+ return hisi_qm_sriov_disable(pdev, &sec_devices);
}
-static void hisi_sec_remove(struct pci_dev *pdev)
+static void sec_remove(struct pci_dev *pdev)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
- if (uacce_mode != UACCE_MODE_NOUACCE)
- hisi_qm_remove_wait_delay(qm, &sec_devices);
+ hisi_qm_remove_wait_delay(qm, &sec_devices);
+
+ sec_unregister_from_crypto();
+
+ hisi_qm_del_from_list(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
(void)hisi_qm_sriov_disable(pdev, NULL);
- hisi_sec_unregister_from_crypto(fusion_limit);
+ sec_debugfs_exit(qm);
- hisi_qm_del_from_list(qm, &sec_devices);
- hisi_sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hisi_qm_dev_err_uninit(qm);
+ sec_debug_regs_clear(qm);
+
+ sec_probe_uninit(qm);
hisi_qm_uninit(qm);
}
-static const struct pci_error_handlers hisi_sec_err_handler = {
+static const struct pci_error_handlers sec_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
- .slot_reset = hisi_qm_dev_slot_reset,
- .reset_prepare = hisi_qm_reset_prepare,
- .reset_done = hisi_qm_reset_done,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
-static struct pci_driver hisi_sec_pci_driver = {
+static struct pci_driver sec_pci_driver = {
.name = "hisi_sec2",
- .id_table = hisi_sec_dev_ids,
- .probe = hisi_sec_probe,
- .remove = hisi_sec_remove,
- .sriov_configure = hisi_sec_sriov_configure,
- .err_handler = &hisi_sec_err_handler,
+ .id_table = sec_dev_ids,
+ .probe = sec_probe,
+ .remove = sec_remove,
+ .err_handler = &sec_err_handler,
+ .sriov_configure = sec_sriov_configure,
.shutdown = hisi_qm_dev_shutdown,
};
-static void hisi_sec_register_debugfs(void)
+static void sec_register_debugfs(void)
{
if (!debugfs_initialized())
return;
sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
- if (IS_ERR_OR_NULL(sec_debugfs_root))
- sec_debugfs_root = NULL;
}
-static void hisi_sec_unregister_debugfs(void)
+static void sec_unregister_debugfs(void)
{
debugfs_remove_recursive(sec_debugfs_root);
}
-static int __init hisi_sec_init(void)
+static int __init sec_init(void)
{
int ret;
- sec_wq = alloc_workqueue("hisi_sec2", WQ_HIGHPRI | WQ_CPU_INTENSIVE |
- WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
-
- if (!sec_wq) {
- pr_err("Fallied to alloc workqueue\n");
- return -ENOMEM;
- }
-
INIT_LIST_HEAD(&sec_devices.list);
mutex_init(&sec_devices.lock);
sec_devices.check = NULL;
+ sec_register_debugfs();
- hisi_sec_register_debugfs();
-
- ret = pci_register_driver(&hisi_sec_pci_driver);
+ ret = pci_register_driver(&sec_pci_driver);
if (ret < 0) {
- hisi_sec_unregister_debugfs();
- if (sec_wq)
- destroy_workqueue(sec_wq);
+ sec_unregister_debugfs();
pr_err("Failed to register pci driver.\n");
+ return ret;
}
- return ret;
+ return 0;
}
-static void __exit hisi_sec_exit(void)
+static void __exit sec_exit(void)
{
- pci_unregister_driver(&hisi_sec_pci_driver);
- hisi_sec_unregister_debugfs();
- if (sec_wq)
- destroy_workqueue(sec_wq);
+ pci_unregister_driver(&sec_pci_driver);
+ sec_unregister_debugfs();
}
-module_init(hisi_sec_init);
-module_exit(hisi_sec_exit);
+module_init(sec_init);
+module_exit(sec_exit);
MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo(a)huawei.com>");
+MODULE_AUTHOR("Longfang Liu <liulongfang(a)huawei.com>");
MODULE_AUTHOR("Zhang Wei <zhangwei375(a)huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
diff --git a/drivers/crypto/hisilicon/sec2/sec_usr_if.h b/drivers/crypto/hisilicon/sec2/sec_usr_if.h
deleted file mode 100644
index 7c76e19..00000000
--- a/drivers/crypto/hisilicon/sec2/sec_usr_if.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/* Copyright (c) 2018-2019 HiSilicon Limited. */
-
-#ifndef HISI_SEC_USR_IF_H
-#define HISI_SEC_USR_IF_H
-
-struct hisi_sec_sqe_type1 {
- __u32 rsvd2:6;
- __u32 ci_gen:2;
- __u32 ai_gen:2;
- __u32 rsvd1:7;
- __u32 c_key_type:2;
- __u32 a_key_type:2;
- __u32 rsvd0:10;
- __u32 inveld:1;
-
- __u32 mac_len:6;
- __u32 a_key_len:5;
- __u32 a_alg:6;
- __u32 rsvd3:15;
- __u32 c_icv_len:6;
- __u32 c_width:3;
- __u32 c_key_len:3;
- __u32 c_mode:4;
- __u32 c_alg:4;
- __u32 rsvd4:12;
- __u32 auth_gran_size:24;
- __u32:8;
- __u32 cipher_gran_size:24;
- __u32:8;
- __u32 auth_src_offset:16;
- __u32 cipher_src_offset:16;
- __u32 gran_num:16;
- __u32 rsvd5:16;
- __u32 src_skip_data_len:24;
- __u32 rsvd6:8;
- __u32 dst_skip_data_len:24;
- __u32 rsvd7:8;
- __u32 tag:16;
- __u32 rsvd8:16;
- __u32 gen_page_pad_ctrl:4;
- __u32 gen_grd_ctrl:4;
- __u32 gen_ver_ctrl:4;
- __u32 gen_app_ctrl:4;
- __u32 gen_ver_val:8;
- __u32 gen_app_val:8;
- __u32 private_info;
- __u32 gen_ref_ctrl:4;
- __u32 page_pad_type:2;
- __u32 rsvd9:2;
- __u32 chk_grd_ctrl:4;
- __u32 chk_ref_ctrl:4;
- __u32 block_size:16;
- __u32 lba_l;
- __u32 lba_h;
- __u32 a_key_addr_l;
- __u32 a_key_addr_h;
- __u32 mac_addr_l;
- __u32 mac_addr_h;
- __u32 c_ivin_addr_l;
- __u32 c_ivin_addr_h;
- __u32 c_key_addr_l;
- __u32 c_key_addr_h;
- __u32 data_src_addr_l;
- __u32 data_src_addr_h;
- __u32 data_dst_addr_l;
- __u32 data_dst_addr_h;
- __u32 done:1;
- __u32 icv:3;
- __u32 rsvd11:3;
- __u32 flag:4;
- __u32 dif_check:3;
- __u32 rsvd10:2;
- __u32 error_type:8;
- __u32 warning_type:8;
- __u32 dw29;
- __u32 dw30;
- __u32 dw31;
-};
-
-struct hisi_sec_sqe_type2 {
- __u32 nonce_len:4;
- __u32 huk:1;
- __u32 key_s:1;
- __u32 ci_gen:2;
- __u32 ai_gen:2;
- __u32 a_pad:2;
- __u32 c_s:2;
- __u32 rsvd1:2;
- __u32 rhf:1;
- __u32 c_key_type:2;
- __u32 a_key_type:2;
- __u32 write_frame_len:3;
- __u32 cal_iv_addr_en:1;
- __u32 tls_up:1;
- __u32 rsvd0:5;
- __u32 inveld:1;
- __u32 mac_len:5;
- __u32 a_key_len:6;
- __u32 a_alg:6;
- __u32 rsvd3:15;
- __u32 c_icv_len:6;
- __u32 c_width:3;
- __u32 c_key_len:3;
- __u32 c_mode:4;
- __u32 c_alg:4;
- __u32 rsvd4:12;
- __u32 a_len:24;
- __u32 iv_offset_l:8;
- __u32 c_len:24;
- __u32 iv_offset_h:8;
- __u32 auth_src_offset:16;
- __u32 cipher_src_offset:16;
- __u32 cs_ip_header_offset:16;
- __u32 cs_udp_header_offset:16;
- __u32 pass_word_len:16;
- __u32 dk_len:16;
- __u32 salt3:8;
- __u32 salt2:8;
- __u32 salt1:8;
- __u32 salt0:8;
- __u32 tag:16;
- __u32 rsvd5:16;
- __u32 c_pad_type:4;
- __u32 c_pad_len:8;
- __u32 c_pad_data_type:4;
- __u32 c_pad_len_field:2;
- __u32 rsvd6:14;
- __u32 long_a_data_len_l;
- __u32 long_a_data_len_h;
- __u32 a_ivin_addr_l;
- __u32 a_ivin_addr_h;
- __u32 a_key_addr_l;
- __u32 a_key_addr_h;
- __u32 mac_addr_l;
- __u32 mac_addr_h;
- __u32 c_ivin_addr_l;
- __u32 c_ivin_addr_h;
- __u32 c_key_addr_l;
- __u32 c_key_addr_h;
- __u32 data_src_addr_l;
- __u32 data_src_addr_h;
- __u32 data_dst_addr_l;
- __u32 data_dst_addr_h;
- __u32 done:1;
- __u32 icv:3;
- __u32 rsvd11:3;
- __u32 flag:4;
- __u32 rsvd10:5;
- __u32 error_type:8;
- __u32 warning_type:8;
- __u32 mac_i3:8;
- __u32 mac_i2:8;
- __u32 mac_i1:8;
- __u32 mac_i0:8;
- __u32 check_sum_i:16;
- __u32 tls_pad_len_i:8;
- __u32 rsvd12:8;
- __u32 counter;
-};
-
-struct hisi_sec_sqe {
- __u32 type:4;
- __u32 cipher:2;
- __u32 auth:2;
- __u32 seq:1;
- __u32 de:2;
- __u32 scene:4;
- __u32 src_addr_type:3;
- __u32 dst_addr_type:3;
- __u32 mac_addr_type:3;
- __u32 rsvd0:8;
- union {
- struct hisi_sec_sqe_type1 type1;
- struct hisi_sec_sqe_type2 type2;
- };
-};
-
-#endif
--
1.8.3
[PATCH] arm64: kprobes: Recover pstate.D in single-step exception handler
by Yang Yingliang 17 Apr '20
17 Apr '20
From: Masami Hiramatsu <mhiramat(a)kernel.org>
mainline inclusion
from mainline-5.3-rc3
commit b3980e48528c
category: bugfix
bugzilla: 20080
CVE: NA
-------------------------------------------------
kprobes manipulates the interrupted PSTATE for single step, and
doesn't restore it. Thus, if we put a kprobe where pstate.D
(debug) is masked, the mask will be cleared after the kprobe hits.
Moreover, in the most complicated case, this can lead to a kernel
crash with the message below when a nested kprobe hits.
[ 152.118921] Unexpected kernel single-step exception at EL1
When the 1st kprobe hits, do_debug_exception() will be called.
At this point, debug exception (= pstate.D) must be masked (=1).
But if another kprobe hits before the single-step of the first kprobe
(e.g. inside the user pre_handler), it unmasks the debug exception
(pstate.D = 0) and returns.
Then, when the 1st kprobe sets up single-step, it saves the current
DAIF, masks DAIF, enables single-step, and restores DAIF.
However, since the "D" flag in DAIF has been cleared by the 2nd kprobe, the
single-step exception happens soon after restoring DAIF.
This was introduced by commit 7419333fa15e ("arm64: kprobe:
Always clear pstate.D in breakpoint exception handler").
To solve this issue, store all DAIF bits and restore them
after single stepping.
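In other words, a minimal sketch of the intended save/restore (field and
macro names follow the hunks below; the surrounding kprobe state machine
and error handling are omitted):
	/* Before single step: keep a copy of all DAIF bits, not just PSR_I_BIT. */
	kcb->saved_irqflag = regs->pstate & DAIF_MASK;
	regs->pstate |= PSR_I_BIT;	/* mask interrupts */
	regs->pstate &= ~PSR_D_BIT;	/* unmask debug so the step exception can fire */
	/* ... single step executes ... */
	/* After single step: restore exactly what was saved, including PSTATE.D. */
	regs->pstate &= ~DAIF_MASK;
	regs->pstate |= kcb->saved_irqflag;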
Reported-by: Naresh Kamboju <naresh.kamboju(a)linaro.org>
Fixes: 7419333fa15e ("arm64: kprobe: Always clear pstate.D in breakpoint exception handler")
Reviewed-by: James Morse <james.morse(a)arm.com>
Tested-by: James Morse <james.morse(a)arm.com>
Signed-off-by: Masami Hiramatsu <mhiramat(a)kernel.org>
Signed-off-by: Will Deacon <will(a)kernel.org>
Signed-off-by: Wei Li <liwei391(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/arm64/include/asm/daifflags.h | 1 +
arch/arm64/kernel/probes/kprobes.c | 40 ++++++--------------------------------
2 files changed, 7 insertions(+), 34 deletions(-)
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 3441ca0..1230923 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -24,6 +24,7 @@
#define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
+#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2d63df1..fe9d207 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -30,6 +30,7 @@
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/daifflags.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
@@ -180,33 +181,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
}
/*
- * When PSTATE.D is set (masked), then software step exceptions can not be
- * generated.
- * SPSR's D bit shows the value of PSTATE.D immediately before the
- * exception was taken. PSTATE.D is set while entering into any exception
- * mode, however software clears it for any normal (none-debug-exception)
- * mode in the exception entry. Therefore, when we are entering into kprobe
- * breakpoint handler from any normal mode then SPSR.D bit is already
- * cleared, however it is set when we are entering from any debug exception
- * mode.
- * Since we always need to generate single step exception after a kprobe
- * breakpoint exception therefore we need to clear it unconditionally, when
- * we become sure that the current breakpoint exception is for kprobe.
- */
-static void __kprobes
-spsr_set_debug_flag(struct pt_regs *regs, int mask)
-{
- unsigned long spsr = regs->pstate;
-
- if (mask)
- spsr |= PSR_D_BIT;
- else
- spsr &= ~PSR_D_BIT;
-
- regs->pstate = spsr;
-}
-
-/*
* Interrupts need to be disabled before single-step mode is set, and not
* reenabled until after single-step mode ends.
* Without disabling interrupt on local CPU, there is a chance of
@@ -217,17 +191,17 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- kcb->saved_irqflag = regs->pstate;
+ kcb->saved_irqflag = regs->pstate & DAIF_MASK;
regs->pstate |= PSR_I_BIT;
+ /* Unmask PSTATE.D for enabling software step exceptions. */
+ regs->pstate &= ~PSR_D_BIT;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- if (kcb->saved_irqflag & PSR_I_BIT)
- regs->pstate |= PSR_I_BIT;
- else
- regs->pstate &= ~PSR_I_BIT;
+ regs->pstate &= ~DAIF_MASK;
+ regs->pstate |= kcb->saved_irqflag;
}
static void __kprobes
@@ -264,8 +238,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
set_ss_context(kcb, slot); /* mark pending ss */
- spsr_set_debug_flag(regs, 0);
-
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
kernel_enable_single_step(regs);
--
1.8.3
[PATCH 1/2] qm: optimize the maximum number of VF and delete invalid addr
by Yang Yingliang 17 Apr '20
17 Apr '20
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
In this patch, we try to optimize the way the maximum number of VFs
is set, which is designed for compatibility with the next hardware
standards. Then we remove the invalid address parameter definition
and assignment. Meanwhile, the return code judgment of the
debugfs-related functions is deleted, because this does not affect
the main function of the driver.
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Junxian Liu <liujunxian3(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/hpre/hpre.h | 1 -
drivers/crypto/hisilicon/hpre/hpre_main.c | 54 ++++++++++++++-----------------
drivers/crypto/hisilicon/qm.c | 22 ++++---------
drivers/crypto/hisilicon/qm.h | 6 ++--
drivers/crypto/hisilicon/rde/rde_main.c | 2 ++
drivers/crypto/hisilicon/zip/zip_main.c | 2 ++
6 files changed, 38 insertions(+), 49 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 3ac02ef..42b2f2a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -18,7 +18,6 @@ enum {
HPRE_CLUSTERS_NUM,
};
-
enum hpre_ctrl_dbgfs_file {
HPRE_CURRENT_QM,
HPRE_CLEAR_ENABLE,
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 4dc0d3e..f727158 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -435,8 +435,7 @@ static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
vfq_num = (qm->ctrl_q_num - qm->qp_num) / num_vfs;
if (val == num_vfs) {
qm->debug.curr_qm_qp_num =
- qm->ctrl_q_num - qm->qp_num -
- (num_vfs - 1) * vfq_num;
+ qm->ctrl_q_num - qm->qp_num - (num_vfs - 1) * vfq_num;
} else {
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -592,7 +591,7 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
- struct dentry *tmp, *file_dir;
+ struct dentry *file_dir;
struct hpre *hpre;
if (dir) {
@@ -609,10 +608,9 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
dbg->files[indx].debug = dbg;
dbg->files[indx].type = type;
dbg->files[indx].index = indx;
- tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
- dbg->files + indx, &hpre_ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+
+ debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+ dbg->files + indx, &hpre_ctrl_debug_fops);
return 0;
}
@@ -623,7 +621,6 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
- struct dentry *tmp;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -633,11 +630,7 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, qm->debug.debug_root,
- regset);
- if (!tmp)
- return -ENOENT;
-
+ debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
return 0;
}
@@ -648,7 +641,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
+ struct dentry *tmp_d;
int i, ret;
for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
@@ -657,8 +650,6 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
return -EINVAL;
tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
- if (!tmp_d)
- return -ENOENT;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -668,9 +659,8 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
regset->base = qm->io_base + hpre_cluster_offsets[i];
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
+
ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
i + HPRE_CLUSTER_CTRL);
if (ret)
@@ -705,14 +695,10 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
{
struct hpre *hpre = container_of(qm, struct hpre, qm);
struct device *dev = &qm->pdev->dev;
- struct dentry *dir;
int ret;
- dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
- if (!dir)
- return -ENOENT;
-
- qm->debug.debug_root = dir;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ hpre_debugfs_root);
ret = hisi_qm_debug_init(qm);
if (ret)
@@ -730,6 +716,11 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
return ret;
}
+static void hpre_debugfs_exit(struct hisi_qm *qm)
+{
+ debugfs_remove_recursive(qm->debug.debug_root);
+}
+
static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
int ret;
@@ -929,7 +920,8 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
- debugfs_remove_recursive(qm->debug.debug_root);
+
+ hpre_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
@@ -967,19 +959,23 @@ static void hpre_register_debugfs(void)
hpre_debugfs_root = NULL;
}
+static void hpre_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hpre_debugfs_root);
+}
+
static int __init hpre_init(void)
{
int ret;
INIT_LIST_HEAD(&hpre_devices.list);
mutex_init(&hpre_devices.lock);
- hpre_devices.check = NULL;
hpre_register_debugfs();
ret = pci_register_driver(&hpre_pci_driver);
if (ret) {
- debugfs_remove_recursive(hpre_debugfs_root);
+ hpre_unregister_debugfs();
pr_err("hpre: can't register hisi hpre driver.\n");
}
@@ -989,7 +985,7 @@ static int __init hpre_init(void)
static void __exit hpre_exit(void)
{
pci_unregister_driver(&hpre_pci_driver);
- debugfs_remove_recursive(hpre_debugfs_root);
+ hpre_unregister_debugfs();
}
module_init(hpre_init);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 6a5337a..d3429e7 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1428,6 +1428,7 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
qp->c_flag << QM_CQ_FLAG_SHIFT);
ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
+
dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
kfree(cqc);
@@ -2330,6 +2331,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
+
dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
kfree(aeqc);
@@ -2384,13 +2386,6 @@ static int __hisi_qm_start(struct hisi_qm *qm)
QM_INIT_BUF(qm, sqc, qm->qp_num);
QM_INIT_BUF(qm, cqc, qm->qp_num);
-#ifdef CONFIG_CRYPTO_QM_UACCE
- /* get reserved dma memory */
- qm->reserve = qm->qdma.va + off;
- qm->reserve_dma = qm->qdma.dma + off;
- off += PAGE_SIZE;
-#endif
-
ret = qm_eq_aeq_ctx_cfg(qm);
if (ret)
return ret;
@@ -2681,7 +2676,7 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
*/
int hisi_qm_debug_init(struct hisi_qm *qm)
{
- struct dentry *qm_d, *qm_regs;
+ struct dentry *qm_d;
int i, ret;
qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
@@ -2697,12 +2692,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- qm_regs = debugfs_create_file("regs", 0444, qm->debug.qm_d, qm,
- &qm_regs_fops);
- if (IS_ERR(qm_regs)) {
- ret = -ENOENT;
- goto failed_to_create;
- }
+ debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
return 0;
@@ -3038,7 +3028,9 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
int pre_existing_vfs, num_vfs, ret;
+ int total_vfs;
+ total_vfs = pci_sriov_get_totalvfs(pdev);
pre_existing_vfs = pci_num_vf(pdev);
if (pre_existing_vfs) {
pci_err(pdev,
@@ -3046,7 +3038,7 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
return 0;
}
- num_vfs = min_t(int, max_vfs, QM_MAX_VFS_NUM);
+ num_vfs = min_t(int, max_vfs, total_vfs);
ret = qm_vf_q_assign(qm, num_vfs);
if (ret) {
pci_err(pdev, "Can't assign queues for VF!\n");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 36e888f..79e29ee 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -19,7 +19,7 @@
#define QNUM_V1 4096
#define QNUM_V2 1024
-#define QM_MAX_VFS_NUM 63
+#define QM_MAX_VFS_NUM_V2 63
/* qm user domain */
#define QM_ARUSER_M_CFG_1 0x100088
#define AXUSER_SNOOP_ENABLE BIT(30)
@@ -322,9 +322,7 @@ struct hisi_qm {
resource_size_t size;
struct uacce uacce;
const char *algs;
- void *reserve;
int uacce_mode;
- dma_addr_t reserve_dma;
#endif
struct workqueue_struct *wq;
struct work_struct work;
@@ -423,7 +421,7 @@ static inline int vf_num_set(const char *val, const struct kernel_param *kp)
if (ret < 0)
return ret;
- if (n > QM_MAX_VFS_NUM)
+ if (n > QM_MAX_VFS_NUM_V2)
return -ERANGE;
return param_set_int(val, kp);
diff --git a/drivers/crypto/hisilicon/rde/rde_main.c b/drivers/crypto/hisilicon/rde/rde_main.c
index 318d4a0..946532f 100644
--- a/drivers/crypto/hisilicon/rde/rde_main.c
+++ b/drivers/crypto/hisilicon/rde/rde_main.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
+#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -20,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
+
#include "rde.h"
#define HRDE_QUEUE_NUM_V1 4096
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 54681dc..83e2869 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -858,8 +858,10 @@ static void hisi_zip_remove(struct pci_dev *pdev)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
+#ifdef CONFIG_CRYPTO_QM_UACCE
if (uacce_mode != UACCE_MODE_NOUACCE)
hisi_qm_remove_wait_delay(qm, &zip_devices);
+#endif
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev, NULL);
--
1.8.3

[PATCH 1/4] Revert "dm crypt: use WQ_HIGHPRI for the IO and crypt workqueues"
by Yang Yingliang 17 Apr '20
17 Apr '20
From: Mike Snitzer <snitzer(a)redhat.com>
mainline inclusion
from mainline-5.5-rc1
commit f612b2132db529feac4f965f28a1b9258ea7c22b
category: bugfix
bugzilla: 25149
CVE: NA
---------------------------
This reverts commit a1b89132dc4f61071bdeaab92ea958e0953380a1.
Revert required hand-patching due to subsequent changes that were
applied since commit a1b89132dc4f61071bdeaab92ea958e0953380a1.
Requires: ed0302e83098d ("dm crypt: make workqueue names device-specific")
Cc: stable(a)vger.kernel.org
Bug: https://bugzilla.kernel.org/show_bug.cgi?id=199857
Reported-by: Vito Caputo <vcaputo(a)pengaru.com>
Signed-off-by: Mike Snitzer <snitzer(a)redhat.com>
Signed-off-by: Sun Ke <sunke32(a)huawei.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/md/dm-crypt.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f68b9bd..d451f98 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -3996,17 +3996,16 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
else
- cc->crypt_queue = alloc_workqueue("kcryptd",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
num_online_cpus());
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
--
1.8.3

17 Apr '20
From: Bob Moore <robert.moore(a)intel.com>
mainline inclusion
from mainline-v5.5-rc1
commit 197aba2090e3
category: bugfix
bugzilla: 25975
CVE: NA
-------------------------------------------------
ACPICA commit 7bc16c650317001bc82d4bae227b888a49c51f5e
Avoid possible overflow from get_tick_count. Also, cast math
using ACPI_100NSEC_PER_SEC to uint64.
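To see why the cast placement matters, here is a small standalone
demonstration (made-up values, not ACPICA code): with two 32-bit
operands the multiplication is carried out in 32 bits and wraps before
the result is widened, while casting one operand to a 64-bit type
first makes the multiplication itself 64-bit.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t iterations = 0xFFFF;            /* example loop limit */
	uint32_t per_sec = 10 * 1000 * 1000;     /* 100ns units per second */

	/* multiply happens in 32 bits and wraps, then the result is cast */
	uint64_t wrong = (uint64_t)(iterations * per_sec);
	/* one operand widened first, multiply happens in 64 bits */
	uint64_t right = (uint64_t)iterations * per_sec;

	printf("wrong: %llu, right: %llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}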
Link: https://github.com/acpica/acpica/commit/7bc16c65
Signed-off-by: Bob Moore <robert.moore(a)intel.com>
Signed-off-by: Erik Schmauss <erik.schmauss(a)intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki(a)intel.com>
Signed-off-by: Xiongfeng Wang <wangxiongfeng2(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/acpi/acpica/dscontrol.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 0da9626..ebaa74d 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -85,7 +85,7 @@
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
control_state->control.loop_timeout = acpi_os_get_timer() +
- (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
+ ((u64)acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
/* Push the control state on this walk's control stack */
--
1.8.3

[PATCH 001/276] core: Don't skip generic XDP program execution for cloned SKBs
by Yang Yingliang 17 Apr '20
17 Apr '20
From: Toke Høiland-Jørgensen <toke(a)redhat.com>
[ Upstream commit ad1e03b2b3d4430baaa109b77bc308dc73050de3 ]
The current generic XDP handler skips execution of XDP programs entirely if
an SKB is marked as cloned. This leads to some surprising behaviour, as
packets can end up being cloned in various ways, which will make an XDP
program not see all the traffic on an interface.
This was discovered by a simple test case where an XDP program that always
returns XDP_DROP is installed on a veth device. When combining this with
the Scapy packet sniffer (which uses an AF_PACKET socket) on the sending
side, SKBs reliably end up in the cloned state, causing them to be passed
through to the receiving interface instead of being dropped. A minimal
reproducer script for this is included below.
This patch fixes the issue by simply triggering the existing linearisation
code for cloned SKBs instead of skipping the XDP program execution. This
behaviour is in line with the behaviour of the native XDP implementation
for the veth driver, which will reallocate and copy the SKB data if the SKB
is marked as shared.
Reproducer Python script (requires BCC and Scapy):
from scapy.all import TCP, IP, Ether, sendp, sniff, AsyncSniffer, Raw, UDP
from bcc import BPF
import time, sys, subprocess, shlex

SKB_MODE = (1 << 1)
DRV_MODE = (1 << 2)
PYTHON = sys.executable

def client():
    time.sleep(2)
    # Sniffing on the sender causes skb_cloned() to be set
    s = AsyncSniffer()
    s.start()
    for p in range(10):
        sendp(Ether(dst="aa:aa:aa:aa:aa:aa", src="cc:cc:cc:cc:cc:cc")/IP()/UDP()/Raw("Test"),
              verbose=False)
        time.sleep(0.1)
    s.stop()
    return 0

def server(mode):
    prog = BPF(text="int dummy_drop(struct xdp_md *ctx) {return XDP_DROP;}")
    func = prog.load_func("dummy_drop", BPF.XDP)
    prog.attach_xdp("a_to_b", func, mode)
    time.sleep(1)
    s = sniff(iface="a_to_b", count=10, timeout=15)
    if len(s):
        print(f"Got {len(s)} packets - should have gotten 0")
        return 1
    else:
        print("Got no packets - as expected")
        return 0

if len(sys.argv) < 2:
    print(f"Usage: {sys.argv[0]} <skb|drv>")
    sys.exit(1)

if sys.argv[1] == "client":
    sys.exit(client())
elif sys.argv[1] == "server":
    mode = SKB_MODE if sys.argv[2] == 'skb' else DRV_MODE
    sys.exit(server(mode))
else:
    try:
        mode = sys.argv[1]
        if mode not in ('skb', 'drv'):
            print(f"Usage: {sys.argv[0]} <skb|drv>")
            sys.exit(1)
        print(f"Running in {mode} mode")
        for cmd in [
                'ip netns add netns_a',
                'ip netns add netns_b',
                'ip -n netns_a link add a_to_b type veth peer name b_to_a netns netns_b',
                # Disable ipv6 to make sure there's no address autoconf traffic
                'ip netns exec netns_a sysctl -qw net.ipv6.conf.a_to_b.disable_ipv6=1',
                'ip netns exec netns_b sysctl -qw net.ipv6.conf.b_to_a.disable_ipv6=1',
                'ip -n netns_a link set dev a_to_b address aa:aa:aa:aa:aa:aa',
                'ip -n netns_b link set dev b_to_a address cc:cc:cc:cc:cc:cc',
                'ip -n netns_a link set dev a_to_b up',
                'ip -n netns_b link set dev b_to_a up']:
            subprocess.check_call(shlex.split(cmd))
        server = subprocess.Popen(shlex.split(f"ip netns exec netns_a {PYTHON} {sys.argv[0]} server {mode}"))
        client = subprocess.Popen(shlex.split(f"ip netns exec netns_b {PYTHON} {sys.argv[0]} client"))
        client.wait()
        server.wait()
        sys.exit(server.returncode)
    finally:
        subprocess.run(shlex.split("ip netns delete netns_a"))
        subprocess.run(shlex.split("ip netns delete netns_b"))
Fixes: d445516966dc ("net: xdp: support xdp generic on virtual devices")
Reported-by: Stepan Horacek <shoracek(a)redhat.com>
Suggested-by: Paolo Abeni <pabeni(a)redhat.com>
Signed-off-by: Toke Høiland-Jørgensen <toke(a)redhat.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
net/core/dev.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 1c0224e..c1a3baf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4306,14 +4306,14 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
*/
- if (skb_cloned(skb) || skb_is_tc_redirected(skb))
+ if (skb_is_tc_redirected(skb))
return XDP_PASS;
/* XDP packets must be linear and must have sufficient headroom
* of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
* native XDP provides, thus we need to do it here as well.
*/
- if (skb_is_nonlinear(skb) ||
+ if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
int troom = skb->tail + skb->data_len - skb->end;
--
1.8.3
From: Al Viro <viro(a)zeniv.linux.org.uk>
mainline inclusion
from mainline-5.1-rc1
commit 35ac1184244f1329783e1d897f74926d8bb1103a
category: bugfix
bugzilla: 30217
CVE: NA
---------------------------
* make the reference from superblock to cgroup_root counting -
do cgroup_put() in cgroup_kill_sb() whether we'd done
percpu_ref_kill() or not; matching grab is done when we allocate
a new root. That gives the same refcounting rules for all callers
of cgroup_do_mount() - a reference to cgroup_root has been grabbed
by caller and it either is transferred to new superblock or dropped.
* have cgroup_kill_sb() treat an already killed refcount as "just
don't bother killing it, then" (a consolidated sketch of the resulting
function follows this list).
* after successful cgroup_do_mount() have cgroup1_mount() recheck
if we'd raced with mount/umount from somebody else and cgroup_root
got killed. In that case we drop the superblock and bugger off
with -ERESTARTSYS, same as if we'd found it in the list already
dying.
* don't bother with delayed initialization of refcount - it's
unreliable and not needed. No need to prevent attempts to bump
the refcount if we find cgroup_root of another mount in progress -
sget will reuse an existing superblock just fine and if the
other sb manages to die before we get there, we'll catch
that immediately after cgroup_do_mount().
* don't bother with kernfs_pin_sb() - no need for doing that
either.
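Pieced together from the diff below, the resulting cgroup_kill_sb()
roughly reads as follows (a readability sketch, not a verbatim copy of
the file):

static void cgroup_kill_sb(struct super_block *sb)
{
	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);

	/* Start killing only a childless, non-default root that is not
	 * already dying; the superblock's reference on the root is dropped
	 * unconditionally, matching the grab done when the root was allocated.
	 */
	if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
	    !percpu_ref_is_dying(&root->cgrp.self.refcnt))
		percpu_ref_kill(&root->cgrp.self.refcnt);
	cgroup_put(&root->cgrp);
	kernfs_kill_sb(sb);
}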
Signed-off-by: Al Viro <viro(a)zeniv.linux.org.uk>
Signed-off-by: yangerkun <yangerkun(a)huawei.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
kernel/cgroup/cgroup-internal.h | 2 +-
kernel/cgroup/cgroup-v1.c | 58 +++++++++--------------------------------
kernel/cgroup/cgroup.c | 16 +++++-------
3 files changed, 21 insertions(+), 55 deletions(-)
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 75568fc..6f02be1 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -196,7 +196,7 @@ int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
void cgroup_free_root(struct cgroup_root *root);
void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts);
-int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags);
+int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
struct cgroup_root *root, unsigned long magic,
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index ff3e1aa..e66bb45 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1112,13 +1112,11 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
void *data, unsigned long magic,
struct cgroup_namespace *ns)
{
- struct super_block *pinned_sb = NULL;
struct cgroup_sb_opts opts;
struct cgroup_root *root;
struct cgroup_subsys *ss;
struct dentry *dentry;
int i, ret;
- bool new_root = false;
cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
@@ -1180,29 +1178,6 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
if (root->flags ^ opts.flags)
pr_warn("new mount options do not match the existing superblock, will be ignored\n");
- /*
- * We want to reuse @root whose lifetime is governed by its
- * ->cgrp. Let's check whether @root is alive and keep it
- * that way. As cgroup_kill_sb() can happen anytime, we
- * want to block it by pinning the sb so that @root doesn't
- * get killed before mount is complete.
- *
- * With the sb pinned, tryget_live can reliably indicate
- * whether @root can be reused. If it's being killed,
- * drain it. We can use wait_queue for the wait but this
- * path is super cold. Let's just sleep a bit and retry.
- */
- pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
- if (IS_ERR(pinned_sb) ||
- !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
- mutex_unlock(&cgroup_mutex);
- if (!IS_ERR_OR_NULL(pinned_sb))
- deactivate_super(pinned_sb);
- msleep(10);
- ret = restart_syscall();
- goto out_free;
- }
-
ret = 0;
goto out_unlock;
}
@@ -1228,15 +1203,20 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
ret = -ENOMEM;
goto out_unlock;
}
- new_root = true;
init_cgroup_root(root, &opts);
- ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD);
+ ret = cgroup_setup_root(root, opts.subsys_mask);
if (ret)
cgroup_free_root(root);
out_unlock:
+ if (!ret && !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
+ mutex_unlock(&cgroup_mutex);
+ msleep(10);
+ ret = restart_syscall();
+ goto out_free;
+ }
mutex_unlock(&cgroup_mutex);
out_free:
kfree(opts.release_agent);
@@ -1248,25 +1228,13 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
CGROUP_SUPER_MAGIC, ns);
- /*
- * There's a race window after we release cgroup_mutex and before
- * allocating a superblock. Make sure a concurrent process won't
- * be able to re-use the root during this window by delaying the
- * initialization of root refcnt.
- */
- if (new_root) {
- mutex_lock(&cgroup_mutex);
- percpu_ref_reinit(&root->cgrp.self.refcnt);
- mutex_unlock(&cgroup_mutex);
+ if (!IS_ERR(dentry) && percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
+ struct super_block *sb = dentry->d_sb;
+ dput(dentry);
+ deactivate_locked_super(sb);
+ msleep(10);
+ dentry = ERR_PTR(restart_syscall());
}
-
- /*
- * If @pinned_sb, we're reusing an existing root and holding an
- * extra ref on its sb. Mount is complete. Put the extra ref.
- */
- if (pinned_sb)
- deactivate_super(pinned_sb);
-
return dentry;
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 08bd40d..eaee21d 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1897,7 +1897,7 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}
-int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags)
+int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
{
LIST_HEAD(tmp_links);
struct cgroup *root_cgrp = &root->cgrp;
@@ -1914,7 +1914,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags)
root_cgrp->ancestor_ids[0] = ret;
ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,
- ref_flags, GFP_KERNEL);
+ 0, GFP_KERNEL);
if (ret)
goto out;
@@ -2091,18 +2091,16 @@ static void cgroup_kill_sb(struct super_block *sb)
struct cgroup_root *root = cgroup_root_from_kf(kf_root);
/*
- * If @root doesn't have any mounts or children, start killing it.
+ * If @root doesn't have any children, start killing it.
* This prevents new mounts by disabling percpu_ref_tryget_live().
* cgroup_mount() may wait for @root's release.
*
* And don't kill the default root.
*/
- if (!list_empty(&root->cgrp.self.children) ||
- root == &cgrp_dfl_root)
- cgroup_put(&root->cgrp);
- else
+ if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
+ !percpu_ref_is_dying(&root->cgrp.self.refcnt))
percpu_ref_kill(&root->cgrp.self.refcnt);
-
+ cgroup_put(&root->cgrp);
kernfs_kill_sb(sb);
}
@@ -5371,7 +5369,7 @@ int __init cgroup_init(void)
hash_add(css_set_table, &init_css_set.hlist,
css_set_hash(init_css_set.subsys));
- BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0, 0));
+ BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
mutex_unlock(&cgroup_mutex);
--
1.8.3

17 Apr '20
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
In this patch, we fix a problem caused by a wrong check of the 'used'
counter. When the accelerator driver registers with the crypto
subsystem, the self-test sends tasks to the hardware and the 'used'
counter is decremented in the interrupt thread, while the crypto exit
path calls hisi_qm_stop_qp_nolock() to stop the queue and reads the
same counter. In this scenario the counter can be read before it is
decremented, which leads to a NULL pointer dereference. So the
fault-handling path has to be distinguished from the normal stop path.
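Put together from the diff below, the intended stop flow is roughly
the following sketch (the example_stop_qp() wrapper is made up;
WAIT_PERIOD, qp_stop_fail_cb() and the is_resetting flag come from the
existing driver code):

static void example_stop_qp(struct hisi_qp *qp)
{
	/* flush any outstanding completion work before looking at 'used' */
	if (qp->qm->wq)
		flush_workqueue(qp->qm->wq);
	else
		flush_work(&qp->qm->work);

	/* wait for an in-flight qp send and the last poll to finish */
	udelay(WAIT_PERIOD);

	/* only the fault-handling (reset) path cleans up leftover requests;
	 * a normal stop must not call the failure callback.
	 */
	if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
		qp_stop_fail_cb(qp);
}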
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Junxian Liu <liujunxian3(a)huawei.com>
Reviewed-by: Shukun Tan <tanshukun1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/qm.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index e89a770..86f4a12 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1591,10 +1591,12 @@ static int hisi_qm_stop_qp_nolock(struct hisi_qp *qp)
if (qp->qm->wq)
flush_workqueue(qp->qm->wq);
+ else
+ flush_work(&qp->qm->work);
/* wait for increase used count in qp send and last poll qp finish */
udelay(WAIT_PERIOD);
- if (atomic_read(&qp->qp_status.used))
+ if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
qp_stop_fail_cb(qp);
dev_dbg(dev, "stop queue %u!", qp->qp_id);
--
1.8.3

[PATCH 1/9] nfsd: Ensure CLONE persists data and metadata changes to the target file
by Yang Yingliang 17 Apr '20
17 Apr '20
From: Trond Myklebust <trondmy(a)gmail.com>
mainline inclusion
from mainline-v5.5-rc1
commit a25e3726b32c746c0098125d4c7463bb84df72bb
category: bugfix
bugzilla: 27346
CVE: NA
-------------------------------------------------
The NFSv4.2 CLONE operation has implicit persistence requirements on the
target file, since there is no protocol requirement that the client issue
a separate operation to persist data.
For that reason, we should call vfs_fsync_range() on the destination file
after a successful call to vfs_clone_file_range().
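The resulting pattern, as a minimal sketch (the clone_and_persist()
helper name is made up; the real change is to nfsd4_clone_file_range()
in the diff below):

static int clone_and_persist(struct file *src, u64 src_pos,
			     struct file *dst, u64 dst_pos, u64 count,
			     bool sync)
{
	int ret;

	ret = vfs_clone_file_range(src, src_pos, dst, dst_pos, count);
	if (ret < 0)
		return ret;

	if (sync) {
		/* count == 0 means "to EOF", so sync everything from dst_pos */
		loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;

		ret = vfs_fsync_range(dst, dst_pos, dst_end, 0);
	}
	return ret;
}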
Fixes: ffa0160a1039 ("nfsd: implement the NFSv4.2 CLONE operation")
Signed-off-by: Trond Myklebust <trond.myklebust(a)hammerspace.com>
Cc: stable(a)vger.kernel.org # v4.5+
Signed-off-by: J. Bruce Fields <bfields(a)redhat.com>
Conflicts:
fs/nfsd/nfs4proc.c
fs/nfsd/vfs.c
42ec3d4c0218 ("vfs: make remap_file_range functions take and return bytes
completed")
2e5dfc99f2e6 ("vfs: combine the clone and dedupe into a single
remap_file_range")
Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5(a)huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/nfsd/nfs4proc.c | 3 ++-
fs/nfsd/vfs.c | 16 +++++++++++++---
fs/nfsd/vfs.h | 2 +-
3 files changed, 16 insertions(+), 5 deletions(-)
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index f35aa9f..1c3e6de 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1082,7 +1082,8 @@ static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh)
goto out;
status = nfsd4_clone_file_range(src, clone->cl_src_pos,
- dst, clone->cl_dst_pos, clone->cl_count);
+ dst, clone->cl_dst_pos, clone->cl_count,
+ EX_ISSYNC(cstate->current_fh.fh_export));
fput(dst);
fput(src);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 80ceded..90e97c8 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -552,10 +552,20 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
#endif
__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
- u64 dst_pos, u64 count)
+ u64 dst_pos, u64 count, bool sync)
{
- return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
- count));
+ int cloned;
+
+ cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count);
+ if (cloned < 0)
+ return nfserrno(cloned);
+ if (sync) {
+ loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
+ int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
+ if (status < 0)
+ return nfserrno(status);
+ }
+ return 0;
}
ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index db35124..02b0a14 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -58,7 +58,7 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *,
__be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
struct file *, loff_t, loff_t, int);
__be32 nfsd4_clone_file_range(struct file *, u64, struct file *,
- u64, u64);
+ u64, u64, bool);
#endif /* CONFIG_NFSD_V4 */
__be32 nfsd_create_locked(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
--
1.8.3