From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
1. Delete sec_usr_if.h, move the definitions of the SEC hardware
structures into sec_crypto.h and normalize two structure types.
2. In sec_main.c, remove fusion_limit/fusion_time, because this part of
the logic is never used. Also simplify the debugfs logic by not checking
some return codes, since failures there do not affect driver loading.
The probe flow is optimized as well, including adding sec_iommu_used_check,
modifying sec_probe_init, implementing sec_qm_pre_init and so on.
3. In sec.h, define the structure sec_ctx, which holds the queue, cipher,
request and other related context.
4. In sec_crypto.c, encapsulate independent interfaces such as init/
uninit/map/unmap/callback/alloc resource/free resource/encrypt/decrypt/
fill hardware descriptor/set key, which removes the fusion logic and
makes it easy to add new algorithms (a condensed sketch of the resulting
request flow is shown below). Meanwhile, remove DES algorithm support
because of its weak keys.
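
The skcipher path after this rework roughly runs as below. This is a
condensed sketch of the sec_req_op/sec_process code added in this patch,
with error handling and dfx counters trimmed, not the literal driver
source:

	static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
	{
		int ret;

		ret = sec_request_init(ctx, req);      /* pick a queue, alloc req id */
		if (ret)
			return ret;

		ret = ctx->req_op->buf_map(ctx, req);  /* DMA map src/dst buffers */
		if (ret)
			goto uninit;

		ctx->req_op->do_transfer(ctx, req);    /* e.g. copy the request IV */

		ret = ctx->req_op->bd_fill(ctx, req);  /* build the hardware BD */
		if (ret)
			goto unmap;

		/* on completion the irq path runs buf_unmap() and callback() */
		return ctx->req_op->bd_send(ctx, req);

	unmap:
		ctx->req_op->buf_unmap(ctx, req);
	uninit:
		sec_request_uninit(ctx, req);
		return ret;
	}
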
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Ye Kai <yekai13(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/qm.c | 3 +-
drivers/crypto/hisilicon/sec2/sec.h | 166 ++-
drivers/crypto/hisilicon/sec2/sec_crypto.c | 1770 +++++++++++-----------------
drivers/crypto/hisilicon/sec2/sec_crypto.h | 237 +++-
drivers/crypto/hisilicon/sec2/sec_main.c | 541 ++++-----
drivers/crypto/hisilicon/sec2/sec_usr_if.h | 179 ---
6 files changed, 1246 insertions(+), 1650 deletions(-)
delete mode 100644 drivers/crypto/hisilicon/sec2/sec_usr_if.h
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index d3429e7..8b49902 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1389,6 +1389,7 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
+
dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
kfree(sqc);
@@ -1598,7 +1599,7 @@ static int hisi_qm_stop_qp_nolock(struct hisi_qp *qp)
else
flush_work(&qp->qm->work);
- /* wait for increase used count in qp send and last poll qp finish */
+ /* wait for qp send to increase the used count and the last qp poll to finish */
udelay(WAIT_PERIOD);
if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
qp_stop_fail_cb(qp);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index f85dd06..e3b581a 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -1,19 +1,124 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#ifndef HISI_SEC_H
-#define HISI_SEC_H
+#ifndef __HISI_SEC_V2_H
+#define __HISI_SEC_V2_H
#include <linux/list.h>
+
#include "../qm.h"
-#include "sec_usr_if.h"
+#include "sec_crypto.h"
+
+/* Algorithm resource per hardware SEC queue */
+struct sec_alg_res {
+ u8 *pbuf;
+ dma_addr_t pbuf_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
+};
+
+/* SEC private cipher request */
+struct sec_cipher_req {
+ struct hisi_acc_hw_sgl *c_in;
+ dma_addr_t c_in_dma;
+ struct hisi_acc_hw_sgl *c_out;
+ dma_addr_t c_out_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+ struct skcipher_request *sk_req;
+ u32 c_len;
+ bool encrypt;
+};
+
+/* SEC request of Crypto */
+struct sec_req {
+ struct sec_sqe sec_sqe;
+ struct sec_ctx *ctx;
+ struct sec_qp_ctx *qp_ctx;
+
+ struct sec_cipher_req c_req;
+
+ int err_type;
+ int req_id;
+
+ /* Status of the SEC request */
+ bool fake_busy;
+};
+
+/**
+ * struct sec_req_op - Operations for SEC request
+ * @buf_map: DMA map the SGL buffers of the request
+ * @buf_unmap: DMA unmap the SGL buffers of the request
+ * @do_transfer: Transfer request data, e.g. copy the IV for the request
+ * @bd_fill: Fill the SEC queue BD
+ * @bd_send: Send the SEC BD into the hardware queue
+ * @callback: Callback for the request
+ * @process: Main processing logic of skcipher
+ */
+struct sec_req_op {
+ int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
+ int (*process)(struct sec_ctx *ctx, struct sec_req *req);
+};
+
+/* SEC cipher context which defines cipher's relatives */
+struct sec_cipher_ctx {
+ u8 *c_key;
+ dma_addr_t c_key_dma;
+ sector_t iv_offset;
+ u32 c_gran_size;
+ u32 ivsize;
+ u8 c_mode;
+ u8 c_alg;
+ u8 c_key_len;
+};
-#undef pr_fmt
-#define pr_fmt(fmt) "hisi_sec: " fmt
+/* SEC queue context which defines queue's relatives */
+struct sec_qp_ctx {
+ struct hisi_qp *qp;
+ struct sec_req *req_list[QM_Q_DEPTH];
+ struct idr req_idr;
+ struct sec_alg_res res[QM_Q_DEPTH];
+ struct sec_ctx *ctx;
+ struct mutex req_lock;
+ struct hisi_acc_sgl_pool *c_in_pool;
+ struct hisi_acc_sgl_pool *c_out_pool;
+ atomic_t pending_reqs;
+};
+
+enum sec_alg_type {
+ SEC_SKCIPHER,
+ SEC_AEAD
+};
+
+/* SEC Crypto TFM context which defines queue and cipher relatives */
+struct sec_ctx {
+ struct sec_qp_ctx *qp_ctx;
+ struct sec_dev *sec;
+ const struct sec_req_op *req_op;
+ struct hisi_qp **qps;
+
+ /* Half of the queues are for encipher, and the other half for decipher */
+ u32 hlf_q_num;
+
+ /* Threshold for fake busy: when reached, -EBUSY is returned to the user */
+ u32 fake_req_limit;
+
+ /* Current cyclic index to select a queue for encipher */
+ atomic_t enc_qcyclic;
+
+ /* Current cyclic index to select a queue for decipher */
+ atomic_t dec_qcyclic;
-#define FUSION_LIMIT_DEF 1
-#define FUSION_LIMIT_MAX 64
-#define FUSION_TMOUT_NSEC_DEF (400 * 1000)
+ enum sec_alg_type alg_type;
+ bool pbuf_supported;
+ bool use_pbuf;
+ struct sec_cipher_ctx c_ctx;
+};
enum sec_endian {
SEC_LE = 0,
@@ -21,32 +126,37 @@ enum sec_endian {
SEC_64BE
};
-struct hisi_sec_ctrl;
+enum sec_debug_file_index {
+ SEC_CURRENT_QM,
+ SEC_CLEAR_ENABLE,
+ SEC_DEBUG_FILE_NUM,
+};
+
+struct sec_debug_file {
+ enum sec_debug_file_index index;
+ spinlock_t lock;
+ struct hisi_qm *qm;
+};
-struct hisi_sec_dfx {
- u64 send_cnt;
- u64 send_by_tmout;
- u64 send_by_full;
- u64 recv_cnt;
- u64 get_task_cnt;
- u64 put_task_cnt;
- u64 gran_task_cnt;
- u64 thread_cnt;
- u64 fake_busy_cnt;
- u64 busy_comp_cnt;
+struct sec_dfx {
+ atomic64_t send_cnt;
+ atomic64_t recv_cnt;
};
-struct hisi_sec {
+struct sec_debug {
+ struct sec_dfx dfx;
+ struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
+};
+
+struct sec_dev {
struct hisi_qm qm;
- struct hisi_sec_dfx sec_dfx;
- struct hisi_sec_ctrl *ctrl;
- int ctx_q_num;
- int fusion_limit;
- int fusion_tmout_nsec;
+ struct sec_debug debug;
+ u32 ctx_q_num;
+ bool iommu_used;
};
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
-struct hisi_sec *find_sec_device(int node);
-
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 0643955..52448d0 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -1,384 +1,329 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#include <linux/crypto.h>
-#include <linux/hrtimer.h>
-#include <linux/dma-mapping.h>
-#include <linux/ktime.h>
-
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
#include "sec.h"
#include "sec_crypto.h"
-static atomic_t sec_active_devs;
-
-#define SEC_ASYNC
-
-#define SEC_INVLD_REQ_ID (-1)
-#define SEC_PRIORITY 4001
-#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
-#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
-#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
-#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
-
-#define BUF_MAP_PER_SGL 64
-#define SEC_FUSION_BD
-
-enum C_ALG {
- C_ALG_DES = 0x0,
- C_ALG_3DES = 0x1,
- C_ALG_AES = 0x2,
- C_ALG_SM4 = 0x3,
-};
-
-enum C_MODE {
- C_MODE_ECB = 0x0,
- C_MODE_CBC = 0x1,
- C_MODE_CTR = 0x4,
- C_MODE_CCM = 0x5,
- C_MODE_GCM = 0x6,
- C_MODE_XTS = 0x7,
- C_MODE_CBC_CS = 0x9,
-};
-
-enum CKEY_LEN {
- CKEY_LEN_128_BIT = 0x0,
- CKEY_LEN_192_BIT = 0x1,
- CKEY_LEN_256_BIT = 0x2,
- CKEY_LEN_DES = 0x1,
- CKEY_LEN_3DES_3KEY = 0x1,
- CKEY_LEN_3DES_2KEY = 0x3,
-};
-
-enum SEC_BD_TYPE {
- BD_TYPE1 = 0x1,
- BD_TYPE2 = 0x2,
-};
-
-enum SEC_CIPHER_TYPE {
- SEC_CIPHER_ENC = 0x1,
- SEC_CIPHER_DEC = 0x2,
-};
-
-enum SEC_ADDR_TYPE {
- PBUF = 0x0,
- SGL = 0x1,
- PRP = 0x2,
-};
-
-enum SEC_CI_GEN {
- CI_GEN_BY_ADDR = 0x0,
- CI_GEN_BY_LBA = 0X3,
-};
-
-enum SEC_SCENE {
- SCENE_IPSEC = 0x0,
- SCENE_STORAGE = 0x5,
-};
-
-enum {
- SEC_NO_FUSION = 0x0,
- SEC_IV_FUSION = 0x1,
- SEC_FUSION_BUTT
-};
-
-enum SEC_REQ_OPS_TYPE {
- SEC_OPS_SKCIPHER_ALG = 0x0,
- SEC_OPS_MULTI_IV = 0x1,
- SEC_OPS_BUTT
-};
-
-struct cipher_res {
- struct skcipher_request_ctx **sk_reqs;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
- struct scatterlist *src;
- struct scatterlist *dst;
-};
-
-struct hisi_sec_cipher_req {
- struct hisi_acc_hw_sgl *c_in;
- dma_addr_t c_in_dma;
- struct hisi_acc_hw_sgl *c_out;
- dma_addr_t c_out_dma;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
- struct skcipher_request *sk_req;
- struct scatterlist *src;
- struct scatterlist *dst;
- u32 c_len;
- u32 gran_num;
- u64 lba;
- bool encrypt;
-};
-
-struct hisi_sec_ctx;
-struct hisi_sec_qp_ctx;
-
-struct hisi_sec_req {
- struct hisi_sec_sqe sec_sqe;
- struct hisi_sec_ctx *ctx;
- struct hisi_sec_qp_ctx *qp_ctx;
- void **priv;
- struct hisi_sec_cipher_req c_req;
- ktime_t st_time;
- int err_type;
- int req_id;
- int req_cnt;
- int fusion_num;
- int fake_busy;
-};
+#define SEC_PRIORITY 4001
+#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
+#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
+#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
+#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
+
+/* SEC sqe(bd) bit operation related macros */
+#define SEC_DE_OFFSET 1
+#define SEC_CI_GEN_OFFSET 6
+#define SEC_CIPHER_OFFSET 4
+#define SEC_SCENE_OFFSET 3
+#define SEC_DST_SGL_OFFSET 2
+#define SEC_SRC_SGL_OFFSET 7
+#define SEC_CKEY_OFFSET 9
+#define SEC_CMODE_OFFSET 12
+#define SEC_AKEY_OFFSET 5
+#define SEC_AEAD_ALG_OFFSET 11
+#define SEC_AUTH_OFFSET 6
+
+#define SEC_FLAG_OFFSET 7
+#define SEC_FLAG_MASK 0x0780
+#define SEC_TYPE_MASK 0x0F
+#define SEC_DONE_MASK 0x0001
+
+#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_SGL_SGE_NR 128
+#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+#define SEC_CIPHER_AUTH 0xfe
+#define SEC_AUTH_CIPHER 0x1
+#define SEC_MAX_MAC_LEN 64
+#define SEC_MAX_AAD_LEN 65535
+#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+
+#define SEC_PBUF_SZ 512
+#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
+#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
+#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
+ SEC_MAX_MAC_LEN * 2)
+#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
+#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
+ SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
+ SEC_PBUF_LEFT_SZ)
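+/*
+ * Layout of one pbuf package: <data pbuf | IV | out_mac>, i.e.
+ * SEC_PBUF_SZ + SEC_IV_SIZE + 2 * SEC_MAX_MAC_LEN bytes. SEC_PBUF_NUM
+ * packages fit in each page, so QM_Q_DEPTH packages take
+ * SEC_PBUF_PAGE_NUM full pages plus SEC_PBUF_LEFT_SZ of a final page.
+ */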
+
+#define SEC_SQE_LEN_RATE 4
+#define SEC_SQE_CFLAG 2
+#define SEC_SQE_AEAD_FLAG 3
+#define SEC_SQE_DONE 0x1
-struct hisi_sec_req_op {
- int fusion_type;
- int (*get_res)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*queue_alloc)(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
- int (*queue_free)(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
- int (*buf_map)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*buf_unmap)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*do_transfer)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*bd_fill)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*bd_send)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*callback)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
-};
-
-struct hisi_sec_cipher_ctx {
- u8 *c_key;
- dma_addr_t c_key_dma;
- sector_t iv_offset;
- u32 c_gran_size;
- u8 c_mode;
- u8 c_alg;
- u8 c_key_len;
-};
-
-struct hisi_sec_qp_ctx {
- struct hisi_qp *qp;
- struct hisi_sec_req **req_list;
- struct hisi_sec_req *fusion_req;
- unsigned long *req_bitmap;
- void *priv_req_res;
- struct hisi_sec_ctx *ctx;
- struct mutex req_lock;
- atomic_t req_cnt;
- struct hisi_sec_sqe *sqe_list;
- struct hisi_acc_sgl_pool *c_in_pool;
- struct hisi_acc_sgl_pool *c_out_pool;
- int fusion_num;
- int fusion_limit;
-};
+static atomic_t sec_active_devs;
-struct hisi_sec_ctx {
- struct hisi_sec_qp_ctx *qp_ctx;
- struct hisi_sec *sec;
- struct device *dev;
- struct hisi_sec_req_op *req_op;
- struct hisi_qp **qps;
- struct hrtimer timer;
- struct work_struct work;
- atomic_t thread_cnt;
- int req_fake_limit;
- int req_limit;
- int q_num;
- int enc_q_num;
- atomic_t enc_qid;
- atomic_t dec_qid;
- struct hisi_sec_cipher_ctx c_ctx;
- int fusion_tmout_nsec;
- int fusion_limit;
- u64 enc_fusion_num;
- u64 dec_fusion_num;
- bool is_fusion;
-};
+/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
+static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ return atomic_inc_return(&ctx->enc_qcyclic) % ctx->hlf_q_num;
-#define DES_WEAK_KEY_NUM 4
-u64 des_weak_key[DES_WEAK_KEY_NUM] = {0x0101010101010101, 0xFEFEFEFEFEFEFEFE,
- 0xE0E0E0E0F1F1F1F1, 0x1F1F1F1F0E0E0E0E};
+ return atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
+ ctx->hlf_q_num;
+}
-static void hisi_sec_req_cb(struct hisi_qp *qp, void *);
+static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ atomic_dec(&ctx->enc_qcyclic);
+ else
+ atomic_dec(&ctx->dec_qcyclic);
+}
-static int hisi_sec_alloc_req_id(struct hisi_sec_req *req,
- struct hisi_sec_qp_ctx *qp_ctx)
+static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
- struct hisi_sec_ctx *ctx = req->ctx;
int req_id;
- req_id = find_first_zero_bit(qp_ctx->req_bitmap, ctx->req_limit);
- if (req_id >= ctx->req_limit || req_id < 0) {
- dev_err(ctx->dev, "no free req id\n");
- return -ENOBUFS;
+ mutex_lock(&qp_ctx->req_lock);
+
+ req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
+ 0, QM_Q_DEPTH, GFP_ATOMIC);
+ mutex_unlock(&qp_ctx->req_lock);
+ if (unlikely(req_id < 0)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+ return req_id;
}
- set_bit(req_id, qp_ctx->req_bitmap);
- qp_ctx->req_list[req_id] = req;
- req->req_id = req_id;
req->qp_ctx = qp_ctx;
-
- return 0;
+ qp_ctx->req_list[req_id] = req;
+ return req_id;
}
-static void hisi_sec_free_req_id(struct hisi_sec_qp_ctx *qp_ctx, int req_id)
+static void sec_free_req_id(struct sec_req *req)
{
- if (req_id < 0 || req_id >= qp_ctx->ctx->req_limit) {
- pr_err("invalid req_id[%d]\n", req_id);
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int req_id = req->req_id;
+
+ if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
return;
}
qp_ctx->req_list[req_id] = NULL;
+ req->qp_ctx = NULL;
mutex_lock(&qp_ctx->req_lock);
- clear_bit(req_id, qp_ctx->req_bitmap);
- atomic_dec(&qp_ctx->req_cnt);
+ idr_remove(&qp_ctx->req_idr, req_id);
mutex_unlock(&qp_ctx->req_lock);
}
-static int sec_request_transfer(struct hisi_sec_ctx *, struct hisi_sec_req *);
-static int sec_request_send(struct hisi_sec_ctx *, struct hisi_sec_req *);
-
-void qp_ctx_work_process(struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
- struct hisi_sec_req *req;
- struct hisi_sec_ctx *ctx;
- ktime_t cur_time = ktime_get();
- int ret;
-
- mutex_lock(&qp_ctx->req_lock);
-
- req = qp_ctx->fusion_req;
- if (req == NULL) {
- mutex_unlock(&qp_ctx->req_lock);
+ struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct sec_sqe *bd = resp;
+ struct sec_ctx *ctx;
+ struct sec_req *req;
+ u16 done, flag;
+ int err = 0;
+ u8 type;
+
+ type = bd->type_cipher_auth & SEC_TYPE_MASK;
+ if (unlikely(type != SEC_BD_TYPE2)) {
+ pr_err("err bd type [%d]\n", type);
return;
}
- ctx = req->ctx;
- if (ctx == NULL || req->fusion_num == qp_ctx->fusion_limit) {
- mutex_unlock(&qp_ctx->req_lock);
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ if (unlikely(!req)) {
+ atomic_inc(&qp->qp_status.used);
return;
}
- if (cur_time - qp_ctx->fusion_req->st_time < ctx->fusion_tmout_nsec) {
- mutex_unlock(&qp_ctx->req_lock);
- return;
+ req->err_type = bd->type2.error_type;
+ ctx = req->ctx;
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (unlikely(req->err_type || done != SEC_SQE_DONE ||
+ (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG))) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ err = -EIO;
}
- qp_ctx->fusion_req = NULL;
+ atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
+
+ ctx->req_op->buf_unmap(ctx, req);
+
+ ctx->req_op->callback(ctx, req, err);
+}
+
+static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ret;
+ mutex_lock(&qp_ctx->req_lock);
+ ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
mutex_unlock(&qp_ctx->req_lock);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- ret = sec_request_transfer(ctx, req);
- if (ret)
- goto err_free_req;
-
- ret = sec_request_send(ctx, req);
- __sync_add_and_fetch(&ctx->sec->sec_dfx.send_by_tmout, 1);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err(ctx->dev, "[%s][%d] ret[%d]\n", __func__,
- __LINE__, ret);
- goto err_unmap_req;
- }
+ if (unlikely(ret == -EBUSY))
+ return -ENOBUFS;
- return;
+ if (!ret) {
+ if (req->fake_busy)
+ ret = -EBUSY;
+ else
+ ret = -EINPROGRESS;
+ }
-err_unmap_req:
- ctx->req_op->buf_unmap(ctx, req);
-err_free_req:
- hisi_sec_free_req_id(qp_ctx, req->req_id);
- atomic_dec(&ctx->thread_cnt);
+ return ret;
}
-void ctx_work_process(struct work_struct *work)
+/* Get DMA memory resources */
+static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
- struct hisi_sec_ctx *ctx;
int i;
- ctx = container_of(work, struct hisi_sec_ctx, work);
- for (i = 0; i < ctx->q_num; i++)
- qp_ctx_work_process(&ctx->qp_ctx[i]);
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+
+ return 0;
}
-static enum hrtimer_restart hrtimer_handler(struct hrtimer *timer)
+static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
- struct hisi_sec_ctx *ctx;
- ktime_t tim;
+ if (res->c_ivin)
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin, res->c_ivin_dma);
+}
- ctx = container_of(timer, struct hisi_sec_ctx, timer);
- tim = ktime_set(0, ctx->fusion_tmout_nsec);
+static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->pbuf)
+ dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ res->pbuf, res->pbuf_dma);
+}
- if (ctx->sec->qm.wq)
- queue_work(ctx->sec->qm.wq, &ctx->work);
- else
- schedule_work(&ctx->work);
+/*
+ * To improve performance, pbuffer is used for
+ * small packets (< 576 bytes) when the IOMMU is in use.
+ */
+static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int pbuf_page_offset;
+ int i, j, k;
- hrtimer_forward(timer, timer->base->get_time(), tim);
+ res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ, &res->pbuf_dma,
+ GFP_KERNEL);
+ if (!res->pbuf)
+ return -ENOMEM;
- return HRTIMER_RESTART;
+ /*
+ * SEC_PBUF_PKG contains data pbuf, iv and
+ * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
+ * Every PAGE contains six SEC_PBUF_PKGs.
+ * The sec_qp_ctx contains QM_Q_DEPTH SEC_PBUF_PKGs,
+ * so SEC_PBUF_PAGE_NUM PAGEs are needed
+ * for the SEC_TOTAL_PBUF_SZ.
+ */
+ for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+ pbuf_page_offset = PAGE_SIZE * i;
+ for (j = 0; j < SEC_PBUF_NUM; j++) {
+ k = i * SEC_PBUF_NUM + j;
+ if (k == QM_Q_DEPTH)
+ break;
+ res[k].pbuf = res->pbuf +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ res[k].pbuf_dma = res->pbuf_dma +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ }
+ }
+ return 0;
}
-static int hisi_sec_create_qp_ctx(struct hisi_sec_ctx *ctx,
- int qp_ctx_id, int req_type)
+static int sec_alg_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
- struct hisi_sec_qp_ctx *qp_ctx;
- struct device *dev = ctx->dev;
- struct hisi_qp *qp;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_alg_res *res = qp_ctx->res;
int ret;
+ ret = sec_alloc_civ_resource(dev, res);
+ if (ret)
+ return ret;
+
+ if (ctx->pbuf_supported) {
+ ret = sec_alloc_pbuf_resource(dev, res);
+ if (ret) {
+ dev_err(dev, "fail to alloc pbuf dma resource!\n");
+ goto alloc_fail;
+ }
+ }
+ return 0;
+alloc_fail:
+ sec_free_civ_resource(dev, res);
+
+ return ret;
+}
+
+static void sec_alg_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ sec_free_civ_resource(dev, qp_ctx->res);
+
+ if (ctx->pbuf_supported)
+ sec_free_pbuf_resource(dev, qp_ctx->res);
+}
+
+static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id, int alg_type)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_qp_ctx *qp_ctx;
+ struct hisi_qp *qp;
+ int ret = -ENOMEM;
+
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
qp = ctx->qps[qp_ctx_id];
- qp->req_type = req_type;
+ qp->req_type = 0;
qp->qp_ctx = qp_ctx;
-#ifdef SEC_ASYNC
- qp->req_cb = hisi_sec_req_cb;
-#endif
+ qp->req_cb = sec_req_cb;
qp_ctx->qp = qp;
- qp_ctx->fusion_num = 0;
- qp_ctx->fusion_req = NULL;
- qp_ctx->fusion_limit = ctx->fusion_limit;
qp_ctx->ctx = ctx;
mutex_init(&qp_ctx->req_lock);
- atomic_set(&qp_ctx->req_cnt, 0);
-
- qp_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(QM_Q_DEPTH), sizeof(long),
- GFP_ATOMIC);
- if (!qp_ctx->req_bitmap)
- return -ENOMEM;
-
- qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
- if (!qp_ctx->req_list) {
- ret = -ENOMEM;
- goto err_free_req_bitmap;
- }
-
- qp_ctx->sqe_list = kcalloc(ctx->fusion_limit,
- sizeof(struct hisi_sec_sqe), GFP_KERNEL);
- if (!qp_ctx->sqe_list) {
- ret = -ENOMEM;
- goto err_free_req_list;
- }
+ atomic_set(&qp_ctx->pending_reqs, 0);
+ idr_init(&qp_ctx->req_idr);
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- FUSION_LIMIT_MAX);
+ SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
- ret = PTR_ERR(qp_ctx->c_in_pool);
- goto err_free_sqe_list;
+ dev_err(dev, "fail to create sgl pool for input!\n");
+ goto err_destroy_idr;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- FUSION_LIMIT_MAX);
+ SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_out_pool)) {
- ret = PTR_ERR(qp_ctx->c_out_pool);
+ dev_err(dev, "fail to create sgl pool for output!\n");
goto err_free_c_in_pool;
}
- ret = ctx->req_op->queue_alloc(ctx, qp_ctx);
+ ret = sec_alg_resource_alloc(ctx, qp_ctx);
if (ret)
goto err_free_c_out_pool;
@@ -389,304 +334,153 @@ static int hisi_sec_create_qp_ctx(struct hisi_sec_ctx *ctx,
return 0;
err_queue_free:
- ctx->req_op->queue_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_free_sqe_list:
- kfree(qp_ctx->sqe_list);
-err_free_req_list:
- kfree(qp_ctx->req_list);
-err_free_req_bitmap:
- kfree(qp_ctx->req_bitmap);
+err_destroy_idr:
+ idr_destroy(&qp_ctx->req_idr);
return ret;
}
-static void hisi_sec_release_qp_ctx(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = ctx->dev;
+ struct device *dev = SEC_CTX_DEV(ctx);
hisi_qm_stop_qp(qp_ctx->qp);
- ctx->req_op->queue_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
+
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
- kfree(qp_ctx->req_bitmap);
- kfree(qp_ctx->req_list);
- kfree(qp_ctx->sqe_list);
-}
-
-static int __hisi_sec_ctx_init(struct hisi_sec_ctx *ctx, int qlen)
-{
- if (!ctx || qlen < 0)
- return -EINVAL;
-
- ctx->req_limit = qlen;
- ctx->req_fake_limit = qlen / 2;
- atomic_set(&ctx->thread_cnt, 0);
- atomic_set(&ctx->enc_qid, 0);
- atomic_set(&ctx->dec_qid, ctx->enc_q_num);
- if (ctx->fusion_limit > 1 && ctx->fusion_tmout_nsec > 0) {
- ktime_t tim = ktime_set(0, ctx->fusion_tmout_nsec);
-
- hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ctx->timer.function = hrtimer_handler;
- hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
- INIT_WORK(&ctx->work, ctx_work_process);
- }
-
- return 0;
-}
-
-static void hisi_sec_get_fusion_param(struct hisi_sec_ctx *ctx,
- struct hisi_sec *sec)
-{
- if (ctx->is_fusion) {
- ctx->fusion_tmout_nsec = sec->fusion_tmout_nsec;
- ctx->fusion_limit = sec->fusion_limit;
- } else {
- ctx->fusion_tmout_nsec = 0;
- ctx->fusion_limit = 1;
- }
+ idr_destroy(&qp_ctx->req_idr);
}
-static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
+static int sec_ctx_base_init(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx;
- struct hisi_sec *sec;
+ struct sec_dev *sec;
int i, ret;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct hisi_sec_req));
-
ctx->qps = sec_create_qps();
if (!ctx->qps) {
pr_err("Can not create sec qps!\n");
return -ENODEV;
}
- sec = container_of(ctx->qps[0]->qm, struct hisi_sec, qm);
+ sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
+ ctx->hlf_q_num = sec->ctx_q_num >> 1;
- ctx->dev = &sec->qm.pdev->dev;
-
- ctx->q_num = sec->ctx_q_num;
-
- ctx->enc_q_num = ctx->q_num / 2;
- ctx->qp_ctx = kcalloc(ctx->q_num, sizeof(struct hisi_sec_qp_ctx),
- GFP_KERNEL);
- if (!ctx->qp_ctx) {
- dev_err(ctx->dev, "failed to alloc qp_ctx");
+ if (ctx->sec->iommu_used)
+ ctx->pbuf_supported = true;
+ else
+ ctx->pbuf_supported = false;
+ ctx->use_pbuf = false;
+
+ /* Half of the queue depth is taken as the fake request limit in the queue. */
+ ctx->fake_req_limit = QM_Q_DEPTH >> 1;
+ ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
+ GFP_KERNEL);
+ if (!ctx->qp_ctx)
return -ENOMEM;
- }
-
- hisi_sec_get_fusion_param(ctx, sec);
- for (i = 0; i < ctx->q_num; i++) {
- ret = hisi_sec_create_qp_ctx(ctx, i, 0);
+ for (i = 0; i < sec->ctx_q_num; i++) {
+ ret = sec_create_qp_ctx(ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
-
- c_ctx = &ctx->c_ctx;
- c_ctx->c_key = dma_alloc_coherent(ctx->dev,
- SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL);
-
- if (!ctx->c_ctx.c_key) {
- ret = -ENOMEM;
- goto err_sec_release_qp_ctx;
- }
-
- return __hisi_sec_ctx_init(ctx, QM_Q_DEPTH);
-
+ return 0;
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
- hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
+
return ret;
}
-static void hisi_sec_cipher_ctx_exit(struct crypto_skcipher *tfm)
+static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx;
- int i = 0;
-
- c_ctx = &ctx->c_ctx;
-
- if (ctx->fusion_limit > 1 && ctx->fusion_tmout_nsec > 0)
- hrtimer_cancel(&ctx->timer);
-
- if (c_ctx->c_key) {
- memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key,
- c_ctx->c_key_dma);
- c_ctx->c_key = NULL;
- }
+ int i;
- for (i = 0; i < ctx->q_num; i++)
- hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ for (i = 0; i < ctx->sec->ctx_q_num; i++)
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
- sec_destroy_qps(ctx->qps, ctx->q_num);
+ sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
kfree(ctx->qp_ctx);
}
-static int hisi_sec_skcipher_get_res(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_queue_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
-static int hisi_sec_skcipher_queue_free(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
-static int hisi_sec_skcipher_buf_map(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_buf_unmap(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_copy_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_base(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_storage(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_multi_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_bd_send_asyn(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_callback(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-
-struct hisi_sec_req_op sec_req_ops_tbl[] = {
- {
- .fusion_type = SEC_NO_FUSION,
- .get_res = hisi_sec_skcipher_get_res,
- .queue_alloc = hisi_sec_skcipher_queue_alloc,
- .queue_free = hisi_sec_skcipher_queue_free,
- .buf_map = hisi_sec_skcipher_buf_map,
- .buf_unmap = hisi_sec_skcipher_buf_unmap,
- .do_transfer = hisi_sec_skcipher_copy_iv,
- .bd_fill = hisi_sec_skcipher_bd_fill_base,
- .bd_send = hisi_sec_bd_send_asyn,
- .callback = hisi_sec_skcipher_callback,
- }, {
- .fusion_type = SEC_IV_FUSION,
- .get_res = hisi_sec_skcipher_get_res,
- .queue_alloc = hisi_sec_skcipher_queue_alloc,
- .queue_free = hisi_sec_skcipher_queue_free,
- .buf_map = hisi_sec_skcipher_buf_map,
- .buf_unmap = hisi_sec_skcipher_buf_unmap,
- .do_transfer = hisi_sec_skcipher_copy_iv,
- .bd_fill = hisi_sec_skcipher_bd_fill_multi_iv,
- .bd_send = hisi_sec_bd_send_asyn,
- .callback = hisi_sec_skcipher_callback,
- }
-};
-
-static int hisi_sec_cipher_ctx_init_alg(struct crypto_skcipher *tfm)
+static int sec_cipher_init(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- ctx->req_op = &sec_req_ops_tbl[SEC_OPS_SKCIPHER_ALG];
- ctx->is_fusion = ctx->req_op->fusion_type;
+ c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key)
+ return -ENOMEM;
- return hisi_sec_cipher_ctx_init(tfm);
+ return 0;
}
-static int hisi_sec_cipher_ctx_init_multi_iv(struct crypto_skcipher *tfm)
+static void sec_cipher_uninit(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- ctx->req_op = &sec_req_ops_tbl[SEC_OPS_MULTI_IV];
- ctx->is_fusion = ctx->req_op->fusion_type;
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- return hisi_sec_cipher_ctx_init(tfm);
+ memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
}
-static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp)
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
- struct hisi_sec_sqe *sec_sqe = (struct hisi_sec_sqe *)resp;
- struct hisi_sec_qp_ctx *qp_ctx = qp->qp_ctx;
- struct device *dev = &qp->qm->pdev->dev;
- struct hisi_sec_req *req;
- struct hisi_sec_dfx *dfx;
- u32 req_id;
-
- if (sec_sqe->type == 1) {
- req_id = sec_sqe->type1.tag;
- req = qp_ctx->req_list[req_id];
-
- req->err_type = sec_sqe->type1.error_type;
- if (req->err_type || sec_sqe->type1.done != 0x1 ||
- sec_sqe->type1.flag != 0x2) {
- dev_err_ratelimited(dev,
- "err_type[%d] done[%d] flag[%d]\n",
- req->err_type,
- sec_sqe->type1.done,
- sec_sqe->type1.flag);
- }
- } else if (sec_sqe->type == 2) {
- req_id = sec_sqe->type2.tag;
- req = qp_ctx->req_list[req_id];
-
- req->err_type = sec_sqe->type2.error_type;
- if (req->err_type || sec_sqe->type2.done != 0x1 ||
- sec_sqe->type2.flag != 0x2) {
- dev_err_ratelimited(dev,
- "err_type[%d] done[%d] flag[%d]\n",
- req->err_type,
- sec_sqe->type2.done,
- sec_sqe->type2.flag);
- }
- } else {
- dev_err_ratelimited(dev, "err bd type [%d]\n", sec_sqe->type);
- return;
- }
-
- dfx = &req->ctx->sec->sec_dfx;
-
- req->ctx->req_op->buf_unmap(req->ctx, req);
- req->ctx->req_op->callback(req->ctx, req);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- __sync_add_and_fetch(&dfx->recv_cnt, 1);
-}
+ ctx->alg_type = SEC_SKCIPHER;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+ return -EINVAL;
+ }
-static int sec_des_weak_key(const u64 *key, const u32 keylen)
-{
- int i;
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
- for (i = 0; i < DES_WEAK_KEY_NUM; i++)
- if (*key == des_weak_key[i])
- return 1;
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
return 0;
+err_cipher_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
}
-static int sec_skcipher_des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const u8 *key)
+static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
- if (keylen != DES_KEY_SIZE)
- return -EINVAL;
-
- if (sec_des_weak_key((const u64 *)key, keylen))
- return -EKEYREJECTED;
-
- c_ctx->c_key_len = CKEY_LEN_DES;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- return 0;
+ sec_cipher_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
}
-static int sec_skcipher_3des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const enum C_MODE c_mode)
+static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
{
switch (keylen) {
case SEC_DES3_2KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_3DES_2KEY;
+ c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
break;
case SEC_DES3_3KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_3DES_3KEY;
+ c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
break;
default:
return -EINVAL;
@@ -695,32 +489,35 @@ static int sec_skcipher_3des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
return 0;
}
-static int sec_skcipher_aes_sm4_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const enum C_MODE c_mode)
+static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
{
- if (c_mode == C_MODE_XTS) {
+ if (c_mode == SEC_CMODE_XTS) {
switch (keylen) {
case SEC_XTS_MIN_KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_128_BIT;
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
break;
case SEC_XTS_MAX_KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_256_BIT;
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
break;
default:
+ pr_err("hisi_sec2: xts mode key error!\n");
return -EINVAL;
}
} else {
switch (keylen) {
case AES_KEYSIZE_128:
- c_ctx->c_key_len = CKEY_LEN_128_BIT;
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
break;
case AES_KEYSIZE_192:
- c_ctx->c_key_len = CKEY_LEN_192_BIT;
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
break;
case AES_KEYSIZE_256:
- c_ctx->c_key_len = CKEY_LEN_256_BIT;
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
break;
default:
+ pr_err("hisi_sec2: aes key error!\n");
return -EINVAL;
}
}
@@ -729,38 +526,40 @@ static int sec_skcipher_aes_sm4_setkey(struct hisi_sec_cipher_ctx *c_ctx,
}
static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
- const u32 keylen, const enum C_ALG c_alg, const enum C_MODE c_mode)
+ const u32 keylen, const enum sec_calg c_alg,
+ const enum sec_cmode c_mode)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
int ret;
- if (c_mode == C_MODE_XTS) {
+ if (c_mode == SEC_CMODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
- if (ret)
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
return ret;
+ }
}
c_ctx->c_alg = c_alg;
c_ctx->c_mode = c_mode;
switch (c_alg) {
- case C_ALG_DES:
- ret = sec_skcipher_des_setkey(c_ctx, keylen, key);
- break;
- case C_ALG_3DES:
+ case SEC_CALG_3DES:
ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
break;
- case C_ALG_AES:
- case C_ALG_SM4:
+ case SEC_CALG_AES:
+ case SEC_CALG_SM4:
ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
break;
default:
return -EINVAL;
}
- if (ret)
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
return ret;
+ }
memcpy(c_ctx->c_key, key, keylen);
@@ -769,639 +568,423 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
- u32 keylen)\
+ u32 keylen) \
{ \
return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \
}
-GEN_SEC_SETKEY_FUNC(aes_ecb, C_ALG_AES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(aes_cbc, C_ALG_AES, C_MODE_CBC)
-GEN_SEC_SETKEY_FUNC(sm4_cbc, C_ALG_SM4, C_MODE_CBC)
-
-GEN_SEC_SETKEY_FUNC(des_ecb, C_ALG_DES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(des_cbc, C_ALG_DES, C_MODE_CBC)
-GEN_SEC_SETKEY_FUNC(3des_ecb, C_ALG_3DES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(3des_cbc, C_ALG_3DES, C_MODE_CBC)
-
-GEN_SEC_SETKEY_FUNC(aes_xts, C_ALG_AES, C_MODE_XTS)
-GEN_SEC_SETKEY_FUNC(sm4_xts, C_ALG_SM4, C_MODE_XTS)
+GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
-static int hisi_sec_get_async_ret(int ret, int req_cnt, int req_fake_limit)
-{
- if (ret == 0) {
- if (req_cnt >= req_fake_limit)
- ret = -EBUSY;
- else
- ret = -EINPROGRESS;
- } else {
- if (ret == -EBUSY)
- ret = -ENOBUFS;
- }
+GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
- return ret;
-}
+GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
+GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-static int hisi_sec_skcipher_get_res(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct cipher_res *c_res = (struct cipher_res *)qp_ctx->priv_req_res;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
int req_id = req->req_id;
- c_req->c_ivin = c_res[req_id].c_ivin;
- c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
- req->priv = (void **)c_res[req_id].sk_reqs;
- c_req->src = c_res[req_id].src;
- c_req->dst = c_res[req_id].dst;
-
- return 0;
-}
-
-static int hisi_sec_skcipher_queue_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
-{
- struct cipher_res *c_res;
- int req_num = ctx->fusion_limit;
- int alloc_num = QM_Q_DEPTH * ctx->fusion_limit;
- int buf_map_num = QM_Q_DEPTH * ctx->fusion_limit;
- struct device *dev = ctx->dev;
- int i, ret;
-
- c_res = kcalloc(QM_Q_DEPTH, sizeof(struct cipher_res), GFP_KERNEL);
- if (!c_res)
- return -ENOMEM;
-
- qp_ctx->priv_req_res = (void *)c_res;
-
- c_res[0].sk_reqs = kcalloc(alloc_num,
- sizeof(struct skcipher_request_ctx *), GFP_KERNEL);
- if (!c_res[0].sk_reqs) {
- ret = -ENOMEM;
- goto err_free_c_res;
- }
-
- c_res[0].c_ivin = dma_alloc_coherent(dev,
- SEC_IV_SIZE * alloc_num, &c_res[0].c_ivin_dma, GFP_KERNEL);
- if (!c_res[0].c_ivin) {
- ret = -ENOMEM;
- goto err_free_sk_reqs;
- }
+ copy_size = c_req->c_len;
- c_res[0].src = kcalloc(buf_map_num, sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!c_res[0].src) {
- ret = -ENOMEM;
- goto err_free_c_ivin;
+ pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
+ qp_ctx->res[req_id].pbuf, copy_size);
+ if (unlikely(pbuf_length != copy_size)) {
+ dev_err(dev, "copy src data to pbuf error!\n");
+ return -EINVAL;
}
- c_res[0].dst = kcalloc(buf_map_num, sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!c_res[0].dst) {
- ret = -ENOMEM;
- goto err_free_src;
+ c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
+ if (!c_req->c_in_dma) {
+ dev_err(dev, "fail to set pbuffer address!\n");
+ return -ENOMEM;
}
- for (i = 1; i < QM_Q_DEPTH; i++) {
- c_res[i].sk_reqs = c_res[0].sk_reqs + i * req_num;
- c_res[i].c_ivin = c_res[0].c_ivin
- + i * req_num * SEC_IV_SIZE;
- c_res[i].c_ivin_dma = c_res[0].c_ivin_dma
- + i * req_num * SEC_IV_SIZE;
- c_res[i].src = c_res[0].src + i * req_num;
- c_res[i].dst = c_res[0].dst + i * req_num;
- }
+ c_req->c_out_dma = c_req->c_in_dma;
return 0;
-
-err_free_src:
- kfree(c_res[0].src);
-err_free_c_ivin:
- dma_free_coherent(dev, SEC_IV_SIZE * alloc_num, c_res[0].c_ivin,
- c_res[0].c_ivin_dma);
-err_free_sk_reqs:
- kfree(c_res[0].sk_reqs);
-err_free_c_res:
- kfree(c_res);
-
- return ret;
}
-static int hisi_sec_skcipher_queue_free(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *dst)
{
- struct cipher_res *c_res = (struct cipher_res *)qp_ctx->priv_req_res;
- struct device *dev = ctx->dev;
- int alloc_num = QM_Q_DEPTH * ctx->fusion_limit;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
+ int req_id = req->req_id;
- kfree(c_res[0].dst);
- kfree(c_res[0].src);
- dma_free_coherent(dev, SEC_IV_SIZE * alloc_num, c_res[0].c_ivin,
- c_res[0].c_ivin_dma);
- kfree(c_res[0].sk_reqs);
- kfree(c_res);
+ copy_size = c_req->c_len;
- return 0;
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
+ qp_ctx->res[req_id].pbuf, copy_size);
+ if (unlikely(pbuf_length != copy_size))
+ dev_err(dev, "copy pbuf data to dst error!\n");
}
-static int hisi_sec_skcipher_buf_map(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct device *dev = ctx->dev;
- struct skcipher_request *sk_next;
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int src_nents, src_nents_sum, copyed_src_nents;
- int dst_nents, dst_nents_sum, copyed_dst_nents;
- int i, ret, buf_map_limit;
-
- src_nents_sum = 0;
- dst_nents_sum = 0;
- for (i = 0; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- if (sk_next == NULL) {
- dev_err(ctx->dev, "nullptr at [%d]\n", i);
- return -EFAULT;
- }
- src_nents_sum += sg_nents(sk_next->src);
- dst_nents_sum += sg_nents(sk_next->dst);
- if (sk_next->src == sk_next->dst && i > 0) {
- dev_err(ctx->dev, "err: src == dst\n");
- return -EFAULT;
- }
- }
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
- buf_map_limit = FUSION_LIMIT_MAX;
- if (src_nents_sum > buf_map_limit || dst_nents_sum > buf_map_limit) {
- dev_err(ctx->dev, "src[%d] or dst[%d] bigger than %d\n",
- src_nents_sum, dst_nents_sum, buf_map_limit);
- return -ENOBUFS;
- }
-
- copyed_src_nents = 0;
- copyed_dst_nents = 0;
- for (i = 0; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- src_nents = sg_nents(sk_next->src);
- dst_nents = sg_nents(sk_next->dst);
-
- if (i != req->fusion_num - 1) {
- sg_unmark_end(&sk_next->src[src_nents - 1]);
- sg_unmark_end(&sk_next->dst[dst_nents - 1]);
- }
-
- memcpy(c_req->src + copyed_src_nents, sk_next->src,
- src_nents * sizeof(struct scatterlist));
- memcpy(c_req->dst + copyed_dst_nents, sk_next->dst,
- dst_nents * sizeof(struct scatterlist));
+ if (ctx->use_pbuf)
+ return sec_cipher_pbuf_map(ctx, req, src);
- copyed_src_nents += src_nents;
- copyed_dst_nents += dst_nents;
- }
-
- c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, c_req->src,
- qp_ctx->c_in_pool, req->req_id, &c_req->c_in_dma);
+ c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
+ qp_ctx->c_in_pool,
+ req->req_id,
+ &c_req->c_in_dma);
- if (IS_ERR(c_req->c_in))
+ if (IS_ERR(c_req->c_in)) {
+ dev_err(dev, "fail to dma map input sgl buffers!\n");
return PTR_ERR(c_req->c_in);
+ }
- if (c_req->dst == c_req->src) {
+ if (dst == src) {
c_req->c_out = c_req->c_in;
c_req->c_out_dma = c_req->c_in_dma;
} else {
- c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, c_req->dst,
- qp_ctx->c_out_pool, req->req_id, &c_req->c_out_dma);
+ c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
+ qp_ctx->c_out_pool,
+ req->req_id,
+ &c_req->c_out_dma);
+
if (IS_ERR(c_req->c_out)) {
- ret = PTR_ERR(c_req->c_out);
- goto err_unmap_src;
+ dev_err(dev, "fail to dma map output sgl buffers!\n");
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+ return PTR_ERR(c_req->c_out);
}
}
return 0;
-
-err_unmap_src:
- hisi_acc_sg_buf_unmap(dev, c_req->src, c_req->c_in);
-
- return ret;
}
-static int hisi_sec_skcipher_buf_unmap(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct device *dev = ctx->dev;
-
- if (c_req->dst != c_req->src)
- hisi_acc_sg_buf_unmap(dev, c_req->src, c_req->c_in);
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct device *dev = SEC_CTX_DEV(ctx);
- hisi_acc_sg_buf_unmap(dev, c_req->dst, c_req->c_out);
+ if (ctx->use_pbuf) {
+ sec_cipher_pbuf_unmap(ctx, req, dst);
+ } else {
+ if (dst != src)
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
- return 0;
+ hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+ }
}
-static int hisi_sec_skcipher_copy_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct skcipher_request *sk_req =
- (struct skcipher_request *)req->priv[0];
- struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
- struct skcipher_request *sk_next;
- int i, iv_size;
-
- c_req->c_len = sk_req->cryptlen;
-
- iv_size = crypto_skcipher_ivsize(atfm);
- if (iv_size > SEC_IV_SIZE)
- return -EINVAL;
+ struct skcipher_request *sq = req->c_req.sk_req;
- memcpy(c_req->c_ivin, sk_req->iv, iv_size);
-
- if (ctx->is_fusion) {
- for (i = 1; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- memcpy(c_req->c_ivin + i * iv_size, sk_next->iv,
- iv_size);
- }
-
- c_req->gran_num = req->fusion_num;
- c_ctx->c_gran_size = sk_req->cryptlen;
- }
-
- return 0;
+ return sec_cipher_map(ctx, req, sq->src, sq->dst);
}
-static int hisi_sec_skcipher_bd_fill_storage(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
+ struct skcipher_request *sq = req->c_req.sk_req;
- if (!c_req->c_len)
- return -EINVAL;
-
- sec_sqe->type1.c_key_addr_l = lower_32_bits(c_ctx->c_key_dma);
- sec_sqe->type1.c_key_addr_h = upper_32_bits(c_ctx->c_key_dma);
- sec_sqe->type1.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
- sec_sqe->type1.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
- sec_sqe->type1.data_src_addr_l = lower_32_bits(c_req->c_in_dma);
- sec_sqe->type1.data_src_addr_h = upper_32_bits(c_req->c_in_dma);
- sec_sqe->type1.data_dst_addr_l = lower_32_bits(c_req->c_out_dma);
- sec_sqe->type1.data_dst_addr_h = upper_32_bits(c_req->c_out_dma);
-
- sec_sqe->type1.c_mode = c_ctx->c_mode;
- sec_sqe->type1.c_alg = c_ctx->c_alg;
- sec_sqe->type1.c_key_len = c_ctx->c_key_len;
-
- sec_sqe->src_addr_type = SGL;
- sec_sqe->dst_addr_type = SGL;
- sec_sqe->type = BD_TYPE1;
- sec_sqe->scene = SCENE_STORAGE;
- sec_sqe->de = c_req->c_in_dma != c_req->c_out_dma;
-
- if (c_req->encrypt)
- sec_sqe->cipher = SEC_CIPHER_ENC;
- else
- sec_sqe->cipher = SEC_CIPHER_DEC;
-
- if (c_ctx->c_mode == C_MODE_XTS)
- sec_sqe->type1.ci_gen = CI_GEN_BY_LBA;
-
- sec_sqe->type1.cipher_gran_size = c_ctx->c_gran_size;
- sec_sqe->type1.gran_num = c_req->gran_num;
- __sync_fetch_and_add(&ctx->sec->sec_dfx.gran_task_cnt, c_req->gran_num);
- sec_sqe->type1.block_size = c_req->c_len;
-
- sec_sqe->type1.lba_l = lower_32_bits(c_req->lba);
- sec_sqe->type1.lba_h = upper_32_bits(c_req->lba);
-
- sec_sqe->type1.tag = req->req_id;
-
- return 0;
+ sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}
-static int hisi_sec_skcipher_bd_fill_multi_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
int ret;
- ret = hisi_sec_skcipher_bd_fill_storage(ctx, req);
- if (ret)
+ ret = ctx->req_op->buf_map(ctx, req);
+ if (unlikely(ret))
return ret;
- req->sec_sqe.type1.ci_gen = CI_GEN_BY_ADDR;
-
- return 0;
-}
-
-static int hisi_sec_skcipher_bd_fill_base(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
-{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
-
- if (!c_req->c_len)
- return -EINVAL;
-
- sec_sqe->type2.c_key_addr_l = lower_32_bits(c_ctx->c_key_dma);
- sec_sqe->type2.c_key_addr_h = upper_32_bits(c_ctx->c_key_dma);
- sec_sqe->type2.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
- sec_sqe->type2.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
- sec_sqe->type2.data_src_addr_l = lower_32_bits(c_req->c_in_dma);
- sec_sqe->type2.data_src_addr_h = upper_32_bits(c_req->c_in_dma);
- sec_sqe->type2.data_dst_addr_l = lower_32_bits(c_req->c_out_dma);
- sec_sqe->type2.data_dst_addr_h = upper_32_bits(c_req->c_out_dma);
+ ctx->req_op->do_transfer(ctx, req);
- sec_sqe->type2.c_mode = c_ctx->c_mode;
- sec_sqe->type2.c_alg = c_ctx->c_alg;
- sec_sqe->type2.c_key_len = c_ctx->c_key_len;
-
- sec_sqe->src_addr_type = SGL;
- sec_sqe->dst_addr_type = SGL;
- sec_sqe->type = BD_TYPE2;
- sec_sqe->scene = SCENE_IPSEC;
- sec_sqe->de = c_req->c_in_dma != c_req->c_out_dma;
+ ret = ctx->req_op->bd_fill(ctx, req);
+ if (unlikely(ret))
+ goto unmap_req_buf;
- __sync_fetch_and_add(&ctx->sec->sec_dfx.gran_task_cnt, 1);
+ return ret;
- if (c_req->encrypt)
- sec_sqe->cipher = SEC_CIPHER_ENC;
- else
- sec_sqe->cipher = SEC_CIPHER_DEC;
+unmap_req_buf:
+ ctx->req_op->buf_unmap(ctx, req);
- sec_sqe->type2.c_len = c_req->c_len;
- sec_sqe->type2.tag = req->req_id;
+ return ret;
+}
- return 0;
+static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
+{
+ ctx->req_op->buf_unmap(ctx, req);
}
-static int hisi_sec_bd_send_asyn(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int req_cnt = req->req_cnt;
- int ret;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_alg_res *res = &req->qp_ctx->res[req->req_id];
- mutex_lock(&qp_ctx->req_lock);
- ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
- if (ret == 0)
- ctx->sec->sec_dfx.send_cnt++;
- mutex_unlock(&qp_ctx->req_lock);
+ if (ctx->use_pbuf) {
+ c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
+ c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
+ } else {
+ c_req->c_ivin = res->c_ivin;
+ c_req->c_ivin_dma = res->c_ivin_dma;
+ }
- return hisi_sec_get_async_ret(ret, req_cnt, ctx->req_fake_limit);
+ memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
-static void hisi_sec_skcipher_complete(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req, int err_code)
+static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
- struct skcipher_request **sk_reqs =
- (struct skcipher_request **)req->priv;
- int i, req_fusion_num;
-
- if (ctx->is_fusion == SEC_NO_FUSION)
- req_fusion_num = 1;
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ u8 scene, sa_type, da_type;
+ u8 bd_type, cipher;
+ u8 de = 0;
+
+ memset(sec_sqe, 0, sizeof(struct sec_sqe));
+
+ sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
+ sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
+ SEC_CMODE_OFFSET);
+ sec_sqe->type2.c_alg = c_ctx->c_alg;
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
+ SEC_CKEY_OFFSET);
+
+ bd_type = SEC_BD_TYPE2;
+ if (c_req->encrypt)
+ cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
else
- req_fusion_num = req->fusion_num;
+ cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
+ sec_sqe->type_cipher_auth = bd_type | cipher;
- for (i = 0; i < req_fusion_num; i++)
- sk_reqs[i]->base.complete(&sk_reqs[i]->base, err_code);
-
- /* free sk_reqs if this request is completed */
- if (err_code != -EINPROGRESS)
- __sync_add_and_fetch(&ctx->sec->sec_dfx.put_task_cnt,
- req_fusion_num);
+ if (ctx->use_pbuf)
+ sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
else
- __sync_add_and_fetch(&ctx->sec->sec_dfx.busy_comp_cnt,
- req_fusion_num);
-}
-
-static int hisi_sec_skcipher_callback(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
-{
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int req_id = req->req_id;
+ sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+ scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
+ if (c_req->c_in_dma != c_req->c_out_dma)
+ de = 0x1 << SEC_DE_OFFSET;
- if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
- hisi_sec_skcipher_complete(ctx, req, -EINPROGRESS);
+ sec_sqe->sds_sa_type = (de | scene | sa_type);
- hisi_sec_skcipher_complete(ctx, req, req->err_type);
+ /* Just set DST address type */
+ if (ctx->use_pbuf)
+ da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
+ else
+ da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+ sec_sqe->sdm_addr_type |= da_type;
- hisi_sec_free_req_id(qp_ctx, req_id);
+ sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
+ sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
return 0;
}
-static int sec_get_issue_id_range(atomic_t *qid, int start, int end)
+static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
- int issue_id;
- int issue_len = end - start;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ u32 iv_size = req->ctx->c_ctx.ivsize;
+ struct scatterlist *sgl;
+ unsigned int cryptlen;
+ size_t sz;
+ u8 *iv;
+
+ if (req->c_req.encrypt)
+ sgl = sk_req->dst;
+ else
+ sgl = sk_req->src;
- issue_id = (atomic_inc_return(qid) - start) % issue_len + start;
- if (issue_id % issue_len == 0 && atomic_read(qid) > issue_len)
- atomic_sub(issue_len, qid);
+ iv = sk_req->iv;
+ cryptlen = sk_req->cryptlen;
- return issue_id;
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
+ cryptlen - iv_size);
+ if (unlikely(sz != iv_size))
+ dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
-static inline int sec_get_issue_id(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
+ int err)
{
- int issue_id;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- if (req->c_req.encrypt == 1)
- issue_id = sec_get_issue_id_range(&ctx->enc_qid, 0,
- ctx->enc_q_num);
- else
- issue_id = sec_get_issue_id_range(&ctx->dec_qid, ctx->enc_q_num,
- ctx->q_num);
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
- return issue_id;
-}
+ /* IV output at encryption of CBC mode */
+ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req, SEC_SKCIPHER);
-static inline void hisi_sec_inc_thread_cnt(struct hisi_sec_ctx *ctx)
-{
- int thread_cnt = atomic_inc_return(&ctx->thread_cnt);
+ if (req->fake_busy)
+ sk_req->base.complete(&sk_req->base, -EINPROGRESS);
- if (thread_cnt > ctx->sec->sec_dfx.thread_cnt)
- ctx->sec->sec_dfx.thread_cnt = thread_cnt;
+ sk_req->base.complete(&sk_req->base, err);
}
-static struct hisi_sec_req *sec_request_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *in_req, int *fusion_send, int *fake_busy)
+static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_qp_ctx *qp_ctx;
- struct hisi_sec_req *req;
- int issue_id, ret;
-
- __sync_add_and_fetch(&ctx->sec->sec_dfx.get_task_cnt, 1);
-
- issue_id = sec_get_issue_id(ctx, in_req);
- hisi_sec_inc_thread_cnt(ctx);
-
- qp_ctx = &ctx->qp_ctx[issue_id];
-
- mutex_lock(&qp_ctx->req_lock);
-
- if (in_req->c_req.sk_req->src == in_req->c_req.sk_req->dst) {
- *fusion_send = 1;
- } else if (qp_ctx->fusion_req &&
- qp_ctx->fusion_req->fusion_num < qp_ctx->fusion_limit) {
- req = qp_ctx->fusion_req;
-
- *fake_busy = req->fake_busy;
- __sync_add_and_fetch(&ctx->sec->sec_dfx.fake_busy_cnt,
- *fake_busy);
-
- req->priv[req->fusion_num] = in_req->c_req.sk_req;
- req->fusion_num++;
- in_req->fusion_num = req->fusion_num;
- if (req->fusion_num == qp_ctx->fusion_limit) {
- *fusion_send = 1;
- qp_ctx->fusion_req = NULL;
- }
- mutex_unlock(&qp_ctx->req_lock);
- return req;
- }
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- req = in_req;
-
- if (hisi_sec_alloc_req_id(req, qp_ctx)) {
- mutex_unlock(&qp_ctx->req_lock);
- return NULL;
- }
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
+ sec_free_queue_id(ctx, req);
+}
- req->fake_busy = 0;
+static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx;
+ int queue_id;
- req->req_cnt = atomic_inc_return(&qp_ctx->req_cnt);
- if (req->req_cnt >= ctx->req_fake_limit) {
- req->fake_busy = 1;
- *fake_busy = 1;
- __sync_add_and_fetch(&ctx->sec->sec_dfx.fake_busy_cnt, 1);
- }
+ /* To load balance */
+ queue_id = sec_alloc_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[queue_id];
- ret = ctx->req_op->get_res(ctx, req);
- if (ret) {
- dev_err(ctx->dev, "req_op get_res failed\n");
- mutex_unlock(&qp_ctx->req_lock);
- goto err_free_req_id;
+ req->req_id = sec_alloc_req_id(req, qp_ctx);
+ if (unlikely(req->req_id < 0)) {
+ sec_free_queue_id(ctx, req);
+ return req->req_id;
}
- if (ctx->fusion_limit <= 1 || ctx->fusion_tmout_nsec == 0)
- *fusion_send = 1;
-
- if (ctx->is_fusion && *fusion_send == 0)
- qp_ctx->fusion_req = req;
-
- req->fusion_num = 1;
-
- req->priv[0] = in_req->c_req.sk_req;
- req->st_time = ktime_get();
-
- mutex_unlock(&qp_ctx->req_lock);
-
- return req;
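+	/* At or above the soft limit the request is still queued, but the caller is told the queue is busy */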
+ if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
+ req->fake_busy = true;
+ else
+ req->fake_busy = false;
-err_free_req_id:
- hisi_sec_free_req_id(qp_ctx, req->req_id);
- return NULL;
+ return 0;
}
-static int sec_request_transfer(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
+ struct sec_cipher_req *c_req = &req->c_req;
int ret;
- ret = ctx->req_op->buf_map(ctx, req);
- if (ret)
+ ret = sec_request_init(ctx, req);
+ if (unlikely(ret))
return ret;
- ret = ctx->req_op->do_transfer(ctx, req);
- if (ret)
- goto unmap_req_buf;
+ ret = sec_request_transfer(ctx, req);
+ if (unlikely(ret))
+ goto err_uninit_req;
- memset(&req->sec_sqe, 0, sizeof(struct hisi_sec_sqe));
- ret = ctx->req_op->bd_fill(ctx, req);
- if (ret)
- goto unmap_req_buf;
+	/* Capture the output IV from the ciphertext before decryption */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ sec_update_iv(req, ctx->alg_type);
- return 0;
+ ret = ctx->req_op->bd_send(ctx, req);
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx),
+ "send sec request failed!\n");
+ goto err_send_req;
+ }
-unmap_req_buf:
- ctx->req_op->buf_unmap(ctx, req);
return ret;
-}
-
-static int sec_request_send(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req)
-{
- int ret;
- ret = ctx->req_op->bd_send(ctx, req);
+err_send_req:
+	/* On failure, restore the original user IV from the driver's copy */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
+ if (ctx->alg_type == SEC_SKCIPHER)
+ memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
+ ctx->c_ctx.ivsize);
+ }
- if (ret == 0 || ret == -EBUSY || ret == -EINPROGRESS)
- atomic_dec(&ctx->thread_cnt);
+ sec_request_untransfer(ctx, req);
+err_uninit_req:
+ sec_request_uninit(ctx, req);
return ret;
}
-static int sec_io_proc(struct hisi_sec_ctx *ctx, struct hisi_sec_req *in_req)
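+/* Per-algorithm hooks driven by the generic sec_process() flow */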
+static const struct sec_req_op sec_skcipher_req_ops = {
+ .buf_map = sec_skcipher_sgl_map,
+ .buf_unmap = sec_skcipher_sgl_unmap,
+ .do_transfer = sec_skcipher_copy_iv,
+ .bd_fill = sec_skcipher_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_skcipher_callback,
+ .process = sec_process,
+};
+
+static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
- struct hisi_sec_req *req;
- int fusion_send = 0;
- int fake_busy = 0;
- int ret;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- in_req->fusion_num = 1;
+ ctx->req_op = &sec_skcipher_req_ops;
- req = sec_request_alloc(ctx, in_req, &fusion_send, &fake_busy);
+ return sec_skcipher_init(tfm);
+}
- if (!req) {
- dev_err_ratelimited(ctx->dev, "sec_request_alloc failed\n");
- return -ENOMEM;
- }
+static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
+{
+ sec_skcipher_uninit(tfm);
+}
- if (ctx->is_fusion && fusion_send == 0)
- return fake_busy ? -EBUSY : -EINPROGRESS;
+static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ struct skcipher_request *sk_req = sreq->c_req.sk_req;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ u8 c_alg = ctx->c_ctx.c_alg;
- ret = sec_request_transfer(ctx, req);
- if (ret) {
- dev_err_ratelimited(ctx->dev, "sec_transfer ret[%d]\n", ret);
- goto err_free_req;
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
+ dev_err(dev, "skcipher input param error!\n");
+ return -EINVAL;
}
+ sreq->c_req.c_len = sk_req->cryptlen;
- ret = sec_request_send(ctx, req);
- __sync_add_and_fetch(&ctx->sec->sec_dfx.send_by_full, 1);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err_ratelimited(ctx->dev, "sec_send ret[%d]\n", ret);
- goto err_unmap_req;
- }
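+	/* Requests small enough for the pre-allocated pbuf skip SGL mapping */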
+ if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
+ ctx->use_pbuf = true;
- return ret;
+ if (c_alg == SEC_CALG_3DES) {
+ if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
+ dev_err(dev, "skcipher 3des input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
+ if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "skcipher aes input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
-err_unmap_req:
- ctx->req_op->buf_unmap(ctx, req);
-err_free_req:
- hisi_sec_free_req_id(req->qp_ctx, req->req_id);
- atomic_dec(&ctx->thread_cnt);
- return ret;
+ dev_err(dev, "skcipher algorithm error!\n");
+ return -EINVAL;
}
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
- struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(atfm);
- struct hisi_sec_req *req = skcipher_request_ctx(sk_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
+ struct sec_req *req = skcipher_request_ctx(sk_req);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- if (!sk_req->src || !sk_req->dst || !sk_req->cryptlen)
- return -EINVAL;
+ if (!sk_req->cryptlen)
+ return 0;
- req->c_req.sk_req = sk_req;
+ req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
- req->ctx = ctx;
+ req->ctx = ctx;
+
+ ret = sec_skcipher_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
- return sec_io_proc(ctx, req);
+ return ctx->req_op->process(ctx, req);
}
static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
@@ -1415,7 +998,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
}
#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
- sec_max_key_size, hisi_sec_cipher_ctx_init_func, blk_size, iv_size)\
+ sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
.base = {\
.cra_name = sec_cra_name,\
@@ -1423,12 +1006,11 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.cra_priority = SEC_PRIORITY,\
.cra_flags = CRYPTO_ALG_ASYNC,\
.cra_blocksize = blk_size,\
- .cra_ctxsize = sizeof(struct hisi_sec_ctx),\
- .cra_alignmask = 0,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
},\
- .init = hisi_sec_cipher_ctx_init_func,\
- .exit = hisi_sec_cipher_ctx_exit,\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
.setkey = sec_set_key,\
.decrypt = sec_skcipher_decrypt,\
.encrypt = sec_skcipher_encrypt,\
@@ -1437,75 +1019,55 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.ivsize = iv_size,\
},
-#define SEC_SKCIPHER_NORMAL_ALG(name, key_func, min_key_size, \
+#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
max_key_size, blk_size, iv_size) \
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
- hisi_sec_cipher_ctx_init_alg, blk_size, iv_size)
+ sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-#define SEC_SKCIPHER_FUSION_ALG(name, key_func, min_key_size, \
- max_key_size, blk_size, iv_size) \
- SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
- hisi_sec_cipher_ctx_init_multi_iv, blk_size, iv_size)
-
-static struct skcipher_alg sec_normal_algs[] = {
- SEC_SKCIPHER_NORMAL_ALG("ecb(aes)", sec_setkey_aes_ecb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(aes)", sec_setkey_aes_cbc,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("xts(aes)", sec_setkey_aes_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("ecb(des)", sec_setkey_des_ecb,
- DES_KEY_SIZE, DES_KEY_SIZE, DES_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(des)", sec_setkey_des_cbc,
- DES_KEY_SIZE, DES_KEY_SIZE, DES_BLOCK_SIZE, DES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
- DES3_EDE_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
-};
+static struct skcipher_alg sec_skciphers[] = {
+ SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-static struct skcipher_alg sec_fusion_algs[] = {
- SEC_SKCIPHER_FUSION_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_FUSION_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
};
-int hisi_sec_register_to_crypto(int fusion_limit)
+int sec_register_to_crypto(void)
{
/* To avoid repeat register */
- if (atomic_add_return(1, &sec_active_devs) == 1) {
- if (fusion_limit == 1)
- return crypto_register_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- return crypto_register_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
- }
+ if (atomic_add_return(1, &sec_active_devs) == 1)
+ return crypto_register_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
return 0;
}
-void hisi_sec_unregister_from_crypto(int fusion_limit)
+void sec_unregister_from_crypto(void)
{
- if (atomic_sub_return(1, &sec_active_devs) == 0) {
- if (fusion_limit == 1)
- crypto_unregister_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- crypto_unregister_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
- }
+ if (atomic_sub_return(1, &sec_active_devs) == 0)
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
}
-
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index bffbeba..221257e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -1,13 +1,238 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#ifndef HISI_SEC_CRYPTO_H
-#define HISI_SEC_CRYPTO_H
+#ifndef __HISI_SEC_V2_CRYPTO_H
+#define __HISI_SEC_V2_CRYPTO_H
-#define SEC_IV_SIZE 24
-#define SEC_MAX_KEY_SIZE 64
+#define SEC_IV_SIZE 24
+#define SEC_MAX_KEY_SIZE 64
+#define SEC_MAX_AUTH_KEY_SIZE 64
-int hisi_sec_register_to_crypto(int fusion_limit);
-void hisi_sec_unregister_from_crypto(int fusion_limit);
+#define SEC_COMM_SCENE 0
+enum sec_calg {
+ SEC_CALG_3DES = 0x1,
+ SEC_CALG_AES = 0x2,
+ SEC_CALG_SM4 = 0x3,
+};
+
+enum sec_hash_alg {
+ SEC_A_HMAC_SHA1 = 0x10,
+ SEC_A_HMAC_SHA256 = 0x11,
+ SEC_A_HMAC_SHA512 = 0x15,
+};
+
+enum sec_mac_len {
+ SEC_HMAC_SHA1_MAC = 20,
+ SEC_HMAC_SHA256_MAC = 32,
+ SEC_HMAC_SHA512_MAC = 64,
+};
+
+enum sec_cmode {
+ SEC_CMODE_ECB = 0x0,
+ SEC_CMODE_CBC = 0x1,
+ SEC_CMODE_CTR = 0x4,
+ SEC_CMODE_XTS = 0x7,
+};
+
+enum sec_ckey_type {
+ SEC_CKEY_128BIT = 0x0,
+ SEC_CKEY_192BIT = 0x1,
+ SEC_CKEY_256BIT = 0x2,
+ SEC_CKEY_3DES_3KEY = 0x1,
+ SEC_CKEY_3DES_2KEY = 0x3,
+};
+
+enum sec_bd_type {
+ SEC_BD_TYPE1 = 0x1,
+ SEC_BD_TYPE2 = 0x2,
+};
+
+enum sec_auth {
+ SEC_NO_AUTH = 0x0,
+ SEC_AUTH_TYPE1 = 0x1,
+ SEC_AUTH_TYPE2 = 0x2,
+};
+
+enum sec_cipher_dir {
+ SEC_CIPHER_ENC = 0x1,
+ SEC_CIPHER_DEC = 0x2,
+};
+
+enum sec_addr_type {
+ SEC_PBUF = 0x0,
+ SEC_SGL = 0x1,
+ SEC_PRP = 0x2,
+};
+
+enum sec_ci_gen {
+ SEC_CI_GEN_BY_ADDR = 0x0,
+	SEC_CI_GEN_BY_LBA = 0x3,
+};
+
+enum sec_scene {
+ SEC_SCENE_IPSEC = 0x1,
+ SEC_SCENE_STORAGE = 0x5,
+};
+
+enum sec_work_mode {
+ SEC_NO_FUSION = 0x0,
+ SEC_IV_FUSION = 0x1,
+ SEC_FUSION_BUTT
+};
+
+enum sec_req_ops_type {
+ SEC_OPS_SKCIPHER_ALG = 0x0,
+ SEC_OPS_DMCRYPT = 0x1,
+ SEC_OPS_MULTI_IV = 0x2,
+ SEC_OPS_BUTT
+};
+
+struct sec_sqe_type2 {
+ /*
+ * mac_len: 0~4 bits
+ * a_key_len: 5~10 bits
+ * a_alg: 11~16 bits
+ */
+ __le32 mac_key_alg;
+
+ /*
+ * c_icv_len: 0~5 bits
+ * c_width: 6~8 bits
+ * c_key_len: 9~11 bits
+ * c_mode: 12~15 bits
+ */
+ __le16 icvw_kmode;
+
+ /* c_alg: 0~3 bits */
+ __u8 c_alg;
+ __u8 rsvd4;
+
+ /*
+ * a_len: 0~23 bits
+ * iv_offset_l: 24~31 bits
+ */
+ __le32 alen_ivllen;
+
+ /*
+ * c_len: 0~23 bits
+ * iv_offset_h: 24~31 bits
+ */
+ __le32 clen_ivhlen;
+
+ __le16 auth_src_offset;
+ __le16 cipher_src_offset;
+ __le16 cs_ip_header_offset;
+ __le16 cs_udp_header_offset;
+ __le16 pass_word_len;
+ __le16 dk_len;
+ __u8 salt3;
+ __u8 salt2;
+ __u8 salt1;
+ __u8 salt0;
+
+ __le16 tag;
+ __le16 rsvd5;
+
+ /*
+ * c_pad_type: 0~3 bits
+ * c_pad_len: 4~11 bits
+ * c_pad_data_type: 12~15 bits
+ */
+ __le16 cph_pad;
+
+ /* c_pad_len_field: 0~1 bits */
+ __le16 c_pad_len_field;
+
+ __le64 long_a_data_len;
+ __le64 a_ivin_addr;
+ __le64 a_key_addr;
+ __le64 mac_addr;
+ __le64 c_ivin_addr;
+ __le64 c_key_addr;
+
+ __le64 data_src_addr;
+ __le64 data_dst_addr;
+
+ /*
+ * done: 0 bit
+ * icv: 1~3 bits
+ * csc: 4~6 bits
+	 * flag: 7~10 bits
+ * dif_check: 11~13 bits
+ */
+ __le16 done_flag;
+
+ __u8 error_type;
+ __u8 warning_type;
+ __u8 mac_i3;
+ __u8 mac_i2;
+ __u8 mac_i1;
+ __u8 mac_i0;
+ __le16 check_sum_i;
+ __u8 tls_pad_len_i;
+ __u8 rsvd12;
+ __le32 counter;
+};
+
+struct sec_sqe {
+ /*
+ * type: 0~3 bits
+ * cipher: 4~5 bits
+	 * auth: 6~7 bits
+ */
+ __u8 type_cipher_auth;
+
+ /*
+ * seq: 0 bit
+ * de: 1~2 bits
+ * scene: 3~6 bits
+	 * src_addr_type: 7 bit, continued in sdm_addr_type bits 0~1
+ */
+ __u8 sds_sa_type;
+
+ /*
+ * src_addr_type: 0~1 bits, not used now,
+ * if support PRP, set this field, or set zero.
+ * dst_addr_type: 2~4 bits
+ * mac_addr_type: 5~7 bits
+ */
+ __u8 sdm_addr_type;
+ __u8 rsvd0;
+
+ /*
+ * nonce_len(type2): 0~3 bits
+ * huk(type2): 4 bit
+ * key_s(type2): 5 bit
+ * ci_gen: 6~7 bits
+ */
+ __u8 huk_key_ci;
+
+ /*
+ * ai_gen: 0~1 bits
+ * a_pad(type2): 2~3 bits
+ * c_s(type2): 4~5 bits
+ */
+ __u8 ai_apd_cs;
+
+ /*
+ * rhf(type2): 0 bit
+ * c_key_type: 1~2 bits
+ * a_key_type: 3~4 bits
+ * write_frame_len(type2): 5~7 bits
+ */
+ __u8 rca_key_frm;
+
+ /*
+ * cal_iv_addr_en(type2): 0 bit
+ * tls_up(type2): 1 bit
+ * inveld: 7 bit
+ */
+ __u8 iv_tls_ld;
+
+ struct sec_sqe_type2 type2;
+};
+
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index b4e5d57f..b3340c0 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -1,33 +1,30 @@
// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 2018-2019 HiSilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
+#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
-#include <linux/uacce.h>
+
#include "sec.h"
-#include "sec_crypto.h"
#define SEC_QUEUE_NUM_V1 4096
#define SEC_QUEUE_NUM_V2 1024
#define SEC_PF_PCI_DEVICE_ID 0xa255
#define SEC_VF_PCI_DEVICE_ID 0xa256
+#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd
+#define SEC_BD_ERR_CHK_EN3 0xffffbfff
+
#define SEC_SQE_SIZE 128
#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM 64
@@ -35,8 +32,6 @@
#define SEC_CTX_Q_NUM_DEF 24
#define SEC_CTX_Q_NUM_MAX 32
-#define SEC_AM_CFG_SIG_PORT_MAX_TRANS 0x300014
-#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
#define SEC_CTRL_CNT_CLR_CE 0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
#define SEC_ENGINE_PF_CFG_OFF 0x300000
@@ -44,13 +39,13 @@
#define SEC_CORE_INT_SOURCE 0x301010
#define SEC_CORE_INT_MASK 0x301000
#define SEC_CORE_INT_STATUS 0x301008
-#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
-#define SEC_CORE_ECC_INFO 0x301C14
-#define SEC_ECC_NUM(err_val) (((err_val) >> 16) & 0xFFFF)
-#define SEC_ECC_ADDR(err_val) ((err_val) & 0xFFFF)
+#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
+#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFFFF)
+#define SEC_ECC_ADDR(err) ((err) >> 0)
#define SEC_CORE_INT_DISABLE 0x0
#define SEC_CORE_INT_ENABLE 0x1ff
#define SEC_CORE_INT_CLEAR 0x1ff
+#define SEC_SAA_ENABLE 0x17f
#define SEC_RAS_CE_REG 0x301050
#define SEC_RAS_FE_REG 0x301054
@@ -64,6 +59,7 @@
#define SEC_CONTROL_REG 0x0200
#define SEC_TRNG_EN_SHIFT 8
+#define SEC_CLK_GATE_ENABLE BIT(3)
#define SEC_CLK_GATE_DISABLE (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
@@ -71,26 +67,24 @@
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
-#define SEC_SAA_EN_REG 0x270
-#define SEC_SAA_EN 0x17F
+#define SEC_SAA_EN_REG 0x0270
#define SEC_BD_ERR_CHK_EN_REG0 0x0380
#define SEC_BD_ERR_CHK_EN_REG1 0x0384
#define SEC_BD_ERR_CHK_EN_REG3 0x038c
-#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
-#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD
-#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
+#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
#define SEC_DBGFS_VAL_MAX_LEN 20
+#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
-struct hisi_sec_hw_error {
+struct sec_hw_error {
u32 int_msk;
const char *msg;
};
@@ -98,9 +92,8 @@ struct hisi_sec_hw_error {
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static struct hisi_qm_list sec_devices;
-static struct workqueue_struct *sec_wq;
-static const struct hisi_sec_hw_error sec_hw_error[] = {
+static const struct sec_hw_error sec_hw_errors[] = {
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
{.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
@@ -113,36 +106,13 @@ struct hisi_sec_hw_error {
{ /* sentinel */ }
};
-enum ctrl_debug_file_index {
- SEC_CURRENT_QM,
- SEC_CLEAR_ENABLE,
- SEC_DEBUG_FILE_NUM,
-};
-
-static const char * const ctrl_debug_file_name[] = {
+static const char * const sec_dbg_file_name[] = {
[SEC_CURRENT_QM] = "current_qm",
[SEC_CLEAR_ENABLE] = "clear_enable",
};
-struct ctrl_debug_file {
- enum ctrl_debug_file_index index;
- spinlock_t lock;
- struct hisi_sec_ctrl *ctrl;
-};
-
-/*
- * One SEC controller has one PF and multiple VFs, some global configurations
- * which PF has need this structure.
- *
- * Just relevant for PF.
- */
-struct hisi_sec_ctrl {
- struct hisi_sec *hisi_sec;
- struct ctrl_debug_file files[SEC_DEBUG_FILE_NUM];
-};
-
static struct debugfs_reg32 sec_dfx_regs[] = {
- {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
+ {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
{"SEC_SAA_EN ", 0x301270},
{"SEC_BD_LATENCY_MIN ", 0x301600},
{"SEC_BD_LATENCY_MAX ", 0x301608},
@@ -262,71 +232,12 @@ static int vfs_num_set(const char *val, const struct kernel_param *kp)
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
-static int sec_fusion_limit_set(const char *val, const struct kernel_param *kp)
-{
- u32 fusion_limit;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &fusion_limit);
- if (ret)
- return ret;
-
- if (!fusion_limit || fusion_limit > FUSION_LIMIT_MAX) {
- pr_err("fusion_limit[%u] is't at range(0, %d)", fusion_limit,
- FUSION_LIMIT_MAX);
- return -EINVAL;
- }
-
- return param_set_int(val, kp);
-}
-
-static const struct kernel_param_ops sec_fusion_limit_ops = {
- .set = sec_fusion_limit_set,
- .get = param_get_int,
-};
-static u32 fusion_limit = FUSION_LIMIT_DEF;
-
-module_param_cb(fusion_limit, &sec_fusion_limit_ops, &fusion_limit, 0444);
-MODULE_PARM_DESC(fusion_limit, "(1, acc_sgl_sge_nr of hisilicon QM)");
-
-static int sec_fusion_tmout_ns_set(const char *val,
- const struct kernel_param *kp)
-{
- u32 fusion_tmout_nsec;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &fusion_tmout_nsec);
- if (ret)
- return ret;
-
- if (fusion_tmout_nsec > NSEC_PER_SEC) {
- pr_err("fusion_tmout_nsec[%u] is too large", fusion_tmout_nsec);
- return -EINVAL;
- }
-
- return param_set_int(val, kp);
-}
-
-static const struct kernel_param_ops sec_fusion_time_ops = {
- .set = sec_fusion_tmout_ns_set,
- .get = param_get_int,
-};
-static u32 fusion_time = FUSION_TMOUT_NSEC_DEF; /* ns */
-module_param_cb(fusion_time, &sec_fusion_time_ops, &fusion_time, 0444);
-MODULE_PARM_DESC(fusion_time, "(0, NSEC_PER_SEC)");
-
-static const struct pci_device_id hisi_sec_dev_ids[] = {
+static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
{ 0, }
};
-MODULE_DEVICE_TABLE(pci, hisi_sec_dev_ids);
+MODULE_DEVICE_TABLE(pci, sec_dev_ids);
static u8 sec_get_endian(struct hisi_qm *qm)
{
@@ -390,9 +301,9 @@ static int sec_engine_init(struct hisi_qm *qm)
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
writel(SEC_SINGLE_PORT_MAX_TRANS,
- qm->io_base + SEC_AM_CFG_SIG_PORT_MAX_TRANS);
+ qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
- writel(SEC_SAA_EN, SEC_ADDR(qm, SEC_SAA_EN_REG));
+ writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
/* Enable sm4 extra mode, as ctr/ecb */
writel_relaxed(SEC_BD_ERR_CHK_EN0,
@@ -436,6 +347,7 @@ static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
return sec_engine_init(qm);
}
+/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
/* clear current_qm */
@@ -497,23 +409,16 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
-static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
+static u32 sec_current_qm_read(struct sec_debug_file *file)
{
- struct hisi_sec *hisi_sec = file->ctrl->hisi_sec;
-
- return &hisi_sec->qm;
-}
-
-static u32 current_qm_read(struct ctrl_debug_file *file)
-{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}
-static int current_qm_write(struct ctrl_debug_file *file, u32 val)
+static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
u32 vfq_num;
u32 tmp;
@@ -521,17 +426,17 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
return -EINVAL;
/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (val == 0) {
+ if (!val) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num;
- if (val == qm->vfs_num) {
+
+ if (val == qm->vfs_num)
qm->debug.curr_qm_qp_num =
qm->ctrl_q_num - qm->qp_num -
(qm->vfs_num - 1) * vfq_num;
- } else {
+ else
qm->debug.curr_qm_qp_num = vfq_num;
- }
}
writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
@@ -548,33 +453,33 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
return 0;
}
-static u32 clear_enable_read(struct ctrl_debug_file *file)
+static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
SEC_CTRL_CNT_CLR_CE_BIT;
}
-static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
+static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
u32 tmp;
if (val != 1 && val)
return -EINVAL;
tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
- ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
+ ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
return 0;
}
-static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
+static ssize_t sec_debug_read(struct file *filp, char __user *buf,
size_t count, loff_t *pos)
{
- struct ctrl_debug_file *file = filp->private_data;
+ struct sec_debug_file *file = filp->private_data;
char tbuf[SEC_DBGFS_VAL_MAX_LEN];
u32 val;
int ret;
@@ -583,10 +488,10 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
switch (file->index) {
case SEC_CURRENT_QM:
- val = current_qm_read(file);
+ val = sec_current_qm_read(file);
break;
case SEC_CLEAR_ENABLE:
- val = clear_enable_read(file);
+ val = sec_clear_enable_read(file);
break;
default:
spin_unlock_irq(&file->lock);
@@ -599,10 +504,10 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
-static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
{
- struct ctrl_debug_file *file = filp->private_data;
+ struct sec_debug_file *file = filp->private_data;
char tbuf[SEC_DBGFS_VAL_MAX_LEN];
unsigned long val;
int len, ret;
@@ -626,12 +531,12 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
switch (file->index) {
case SEC_CURRENT_QM:
- ret = current_qm_write(file, val);
+ ret = sec_current_qm_write(file, val);
if (ret)
goto err_input;
break;
case SEC_CLEAR_ENABLE:
- ret = clear_enable_write(file, val);
+ ret = sec_clear_enable_write(file, val);
if (ret)
goto err_input;
break;
@@ -649,30 +554,30 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
return ret;
}
-static const struct file_operations ctrl_debug_fops = {
+static const struct file_operations sec_dbg_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = ctrl_debug_read,
- .write = ctrl_debug_write,
+ .read = sec_debug_read,
+ .write = sec_debug_write,
};
-static int hisi_sec_core_debug_init(struct hisi_qm *qm)
+static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
- struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
+ *val = atomic64_read((atomic64_t *)data);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
+ NULL, "%lld\n");
+
+static int sec_core_debug_init(struct hisi_qm *qm)
+{
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
struct device *dev = &qm->pdev->dev;
- struct hisi_sec_dfx *dfx = &sec->sec_dfx;
+ struct sec_dfx *dfx = &sec->debug.dfx;
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
- char buf[SEC_DBGFS_VAL_MAX_LEN];
- int ret;
+ struct dentry *tmp_d;
- ret = snprintf(buf, SEC_DBGFS_VAL_MAX_LEN, "sec_dfx");
- if (ret < 0)
- return -ENOENT;
-
- tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
- if (!tmp_d)
- return -ENOENT;
+ tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -682,123 +587,69 @@ static int hisi_sec_core_debug_init(struct hisi_qm *qm)
regset->nregs = ARRAY_SIZE(sec_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
- tmp = debugfs_create_u64("send_by_tmout", 0444, tmp_d,
- &dfx->send_by_tmout);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("send_by_full", 0444, tmp_d,
- &dfx->send_by_full);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("get_task_cnt", 0444, tmp_d,
- &dfx->get_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("put_task_cnt", 0444, tmp_d,
- &dfx->put_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("gran_task_cnt", 0444, tmp_d,
- &dfx->gran_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("thread_cnt", 0444, tmp_d, &dfx->thread_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("fake_busy_cnt", 0444,
- tmp_d, &dfx->fake_busy_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file("send_cnt", 0444, tmp_d,
+ &dfx->send_cnt, &sec_atomic64_ops);
- tmp = debugfs_create_u64("busy_comp_cnt", 0444, tmp_d,
- &dfx->busy_comp_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file("recv_cnt", 0444, tmp_d,
+ &dfx->recv_cnt, &sec_atomic64_ops);
return 0;
}
-static int hisi_sec_ctrl_debug_init(struct hisi_qm *qm)
+static int sec_debug_init(struct hisi_qm *qm)
{
- struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
- struct dentry *tmp;
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
int i;
for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&sec->ctrl->files[i].lock);
- sec->ctrl->files[i].ctrl = sec->ctrl;
- sec->ctrl->files[i].index = i;
+ spin_lock_init(&sec->debug.files[i].lock);
+ sec->debug.files[i].index = i;
+ sec->debug.files[i].qm = qm;
- tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
+ debugfs_create_file(sec_dbg_file_name[i], 0600,
qm->debug.debug_root,
- sec->ctrl->files + i,
- &ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ sec->debug.files + i,
+ &sec_dbg_fops);
}
- return hisi_sec_core_debug_init(qm);
+ return sec_core_debug_init(qm);
}
-static int hisi_sec_debugfs_init(struct hisi_qm *qm)
+static int sec_debugfs_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- struct dentry *dev_d;
int ret;
- dev_d = debugfs_create_dir(dev_name(dev), sec_debugfs_root);
- if (!dev_d)
- return -ENOENT;
-
- qm->debug.debug_root = dev_d;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ sec_debugfs_root);
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
- ret = hisi_sec_ctrl_debug_init(qm);
+ ret = sec_debug_init(qm);
if (ret)
goto failed_to_create;
}
return 0;
- failed_to_create:
+failed_to_create:
debugfs_remove_recursive(sec_debugfs_root);
+
return ret;
}
-static void hisi_sec_debugfs_exit(struct hisi_qm *qm)
+static void sec_debugfs_exit(struct hisi_qm *qm)
{
debugfs_remove_recursive(qm->debug.debug_root);
-
- if (qm->fun_type == QM_HW_PF) {
- sec_debug_regs_clear(qm);
- qm->debug.curr_qm_qp_num = 0;
- }
}
static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- const struct hisi_sec_hw_error *errs = sec_hw_error;
+ const struct sec_hw_error *errs = sec_hw_errors;
struct device *dev = &qm->pdev->dev;
u32 err_val;
@@ -809,7 +660,7 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
err_val = readl(qm->io_base +
- SEC_CORE_ECC_INFO);
+ SEC_CORE_SRAM_ECC_ERR_INFO);
dev_err(dev, "multi ecc sram num=0x%x\n",
SEC_ECC_NUM(err_val));
}
@@ -837,19 +688,10 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
}
-static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
+static int sec_pf_probe_init(struct hisi_qm *qm)
{
- struct hisi_sec *hisi_sec = container_of(qm, struct hisi_sec, qm);
- struct hisi_sec_ctrl *ctrl;
int ret;
- ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return -ENOMEM;
-
- hisi_sec->ctrl = ctrl;
- ctrl->hisi_sec = hisi_sec;
-
switch (qm->ver) {
case QM_HW_V1:
qm->ctrl_q_num = SEC_QUEUE_NUM_V1;
@@ -868,7 +710,7 @@ static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
qm->err_ini.err_info.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
qm->err_ini.err_info.ce = QM_BASE_CE;
qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
- QM_ACC_WB_NOT_READY_TIMEOUT;
+ QM_ACC_WB_NOT_READY_TIMEOUT;
qm->err_ini.err_info.fe = 0;
qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
qm->err_ini.err_info.acpi_rst = "SRST";
@@ -884,42 +726,32 @@ static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
return ret;
hisi_qm_dev_err_init(qm);
- qm->err_ini.open_axi_master_ooo(qm);
sec_debug_regs_clear(qm);
return 0;
}
-static int hisi_sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+static int sec_probe_init(struct hisi_qm *qm)
{
int ret;
-#ifdef CONFIG_CRYPTO_QM_UACCE
- qm->algs = "sec\ncipher\ndigest\n";
- qm->uacce_mode = uacce_mode;
-#endif
- qm->pdev = pdev;
- ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
- if (ret)
- return ret;
- qm->sqe_size = SEC_SQE_SIZE;
- qm->dev_name = sec_name;
- qm->qm_list = &sec_devices;
- qm->wq = sec_wq;
-
- return 0;
-}
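+	/* Per-device workqueue used by the QM for completion handling */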
+ qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE |
+ WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(),
+ pci_name(qm->pdev));
+ if (!qm->wq) {
+		pci_err(qm->pdev, "failed to alloc workqueue\n");
+ return -ENOMEM;
+ }
-static int hisi_sec_probe_init(struct hisi_qm *qm)
-{
if (qm->fun_type == QM_HW_PF) {
- return hisi_sec_pf_probe_init(qm);
+ ret = sec_pf_probe_init(qm);
+ if (ret)
+ goto err_probe_uninit;
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
* so currently force PF to uses SEC_PF_DEF_Q_NUM, and force
* to trigger only one VF in v1 hardware.
- *
* v2 hardware has no such problem.
*/
if (qm->ver == QM_HW_V1) {
@@ -927,41 +759,92 @@ static int hisi_sec_probe_init(struct hisi_qm *qm)
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
} else if (qm->ver == QM_HW_V2) {
/* v2 starts to support get vft by mailbox */
- return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_probe_uninit;
}
+ } else {
+ ret = -ENODEV;
+ goto err_probe_uninit;
}
return 0;
+
+err_probe_uninit:
+ destroy_workqueue(qm->wq);
+ return ret;
+}
+
+static void sec_probe_uninit(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_PF)
+ hisi_qm_dev_err_uninit(qm);
+ destroy_workqueue(qm->wq);
+}
+
+static int sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ int ret;
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "sec\ncipher\ndigest\n";
+ qm->uacce_mode = uacce_mode;
+#endif
+ qm->pdev = pdev;
+ ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+
+ qm->qm_list = &sec_devices;
+ qm->sqe_size = SEC_SQE_SIZE;
+ qm->dev_name = sec_name;
+
+ return 0;
+}
+
+static void sec_iommu_used_check(struct sec_dev *sec)
+{
+ struct iommu_domain *domain;
+ struct device *dev = &sec->qm.pdev->dev;
+
+ domain = iommu_get_domain_for_dev(dev);
+
+ /* Check if iommu is used */
+ sec->iommu_used = false;
+ if (domain) {
+ if (domain->type & __IOMMU_DOMAIN_PAGING)
+ sec->iommu_used = true;
+		dev_info(dev, "SMMU opened, iommu domain type = %d\n",
+ domain->type);
+ }
}
-static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct hisi_sec *hisi_sec;
+ struct sec_dev *sec;
struct hisi_qm *qm;
int ret;
- hisi_sec = devm_kzalloc(&pdev->dev, sizeof(*hisi_sec), GFP_KERNEL);
- if (!hisi_sec)
+ sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
+ if (!sec)
return -ENOMEM;
- qm = &hisi_sec->qm;
+ qm = &sec->qm;
qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
- ret = hisi_sec_qm_pre_init(qm, pdev);
+ ret = sec_qm_pre_init(qm, pdev);
if (ret)
return ret;
- hisi_sec->ctx_q_num = ctx_q_num;
- hisi_sec->fusion_limit = fusion_limit;
- hisi_sec->fusion_tmout_nsec = fusion_time;
-
+ sec->ctx_q_num = ctx_q_num;
+ sec_iommu_used_check(sec);
ret = hisi_qm_init(qm);
if (ret) {
pci_err(pdev, "Failed to init qm (%d)!\n", ret);
return ret;
}
- ret = hisi_sec_probe_init(qm);
+ ret = sec_probe_init(qm);
if (ret) {
pci_err(pdev, "Failed to probe init (%d)!\n", ret);
goto err_qm_uninit;
@@ -970,18 +853,18 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret) {
pci_err(pdev, "Failed to start qm (%d)!\n", ret);
- goto err_qm_uninit;
+ goto err_probe_uninit;
}
- ret = hisi_sec_debugfs_init(qm);
+ ret = sec_debugfs_init(qm);
if (ret)
pci_warn(pdev, "Failed to init debugfs (%d)!\n", ret);
hisi_qm_add_to_list(qm, &sec_devices);
- ret = hisi_sec_register_to_crypto(fusion_limit);
+ ret = sec_register_to_crypto();
if (ret < 0) {
- pci_err(pdev, "Failed to register driver to crypto!\n");
+ pr_err("Failed to register driver to crypto!\n");
goto err_remove_from_list;
}
@@ -994,121 +877,115 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_crypto_unregister:
- hisi_sec_unregister_from_crypto(fusion_limit);
+ sec_unregister_from_crypto();
err_remove_from_list:
hisi_qm_del_from_list(qm, &sec_devices);
- hisi_sec_debugfs_exit(qm);
+ sec_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
+err_probe_uninit:
+ sec_probe_uninit(qm);
+
err_qm_uninit:
hisi_qm_uninit(qm);
return ret;
}
-static int hisi_sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
+static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
- if (num_vfs == 0)
- return hisi_qm_sriov_disable(pdev, &sec_devices);
- else
+ if (num_vfs)
return hisi_qm_sriov_enable(pdev, num_vfs);
+ else
+ return hisi_qm_sriov_disable(pdev, &sec_devices);
}
-static void hisi_sec_remove(struct pci_dev *pdev)
+static void sec_remove(struct pci_dev *pdev)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
- if (uacce_mode != UACCE_MODE_NOUACCE)
- hisi_qm_remove_wait_delay(qm, &sec_devices);
+ hisi_qm_remove_wait_delay(qm, &sec_devices);
+
+ sec_unregister_from_crypto();
+
+ hisi_qm_del_from_list(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
(void)hisi_qm_sriov_disable(pdev, NULL);
- hisi_sec_unregister_from_crypto(fusion_limit);
+ sec_debugfs_exit(qm);
- hisi_qm_del_from_list(qm, &sec_devices);
- hisi_sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hisi_qm_dev_err_uninit(qm);
+ sec_debug_regs_clear(qm);
+
+ sec_probe_uninit(qm);
hisi_qm_uninit(qm);
}
-static const struct pci_error_handlers hisi_sec_err_handler = {
+static const struct pci_error_handlers sec_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
- .slot_reset = hisi_qm_dev_slot_reset,
- .reset_prepare = hisi_qm_reset_prepare,
- .reset_done = hisi_qm_reset_done,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
-static struct pci_driver hisi_sec_pci_driver = {
+static struct pci_driver sec_pci_driver = {
.name = "hisi_sec2",
- .id_table = hisi_sec_dev_ids,
- .probe = hisi_sec_probe,
- .remove = hisi_sec_remove,
- .sriov_configure = hisi_sec_sriov_configure,
- .err_handler = &hisi_sec_err_handler,
+ .id_table = sec_dev_ids,
+ .probe = sec_probe,
+ .remove = sec_remove,
+ .err_handler = &sec_err_handler,
+ .sriov_configure = sec_sriov_configure,
.shutdown = hisi_qm_dev_shutdown,
};
-static void hisi_sec_register_debugfs(void)
+static void sec_register_debugfs(void)
{
if (!debugfs_initialized())
return;
sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
- if (IS_ERR_OR_NULL(sec_debugfs_root))
- sec_debugfs_root = NULL;
}
-static void hisi_sec_unregister_debugfs(void)
+static void sec_unregister_debugfs(void)
{
debugfs_remove_recursive(sec_debugfs_root);
}
-static int __init hisi_sec_init(void)
+static int __init sec_init(void)
{
int ret;
- sec_wq = alloc_workqueue("hisi_sec2", WQ_HIGHPRI | WQ_CPU_INTENSIVE |
- WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
-
- if (!sec_wq) {
- pr_err("Fallied to alloc workqueue\n");
- return -ENOMEM;
- }
-
INIT_LIST_HEAD(&sec_devices.list);
mutex_init(&sec_devices.lock);
sec_devices.check = NULL;
+ sec_register_debugfs();
- hisi_sec_register_debugfs();
-
- ret = pci_register_driver(&hisi_sec_pci_driver);
+ ret = pci_register_driver(&sec_pci_driver);
if (ret < 0) {
- hisi_sec_unregister_debugfs();
- if (sec_wq)
- destroy_workqueue(sec_wq);
+ sec_unregister_debugfs();
pr_err("Failed to register pci driver.\n");
+ return ret;
}
- return ret;
+ return 0;
}
-static void __exit hisi_sec_exit(void)
+static void __exit sec_exit(void)
{
- pci_unregister_driver(&hisi_sec_pci_driver);
- hisi_sec_unregister_debugfs();
- if (sec_wq)
- destroy_workqueue(sec_wq);
+ pci_unregister_driver(&sec_pci_driver);
+ sec_unregister_debugfs();
}
-module_init(hisi_sec_init);
-module_exit(hisi_sec_exit);
+module_init(sec_init);
+module_exit(sec_exit);
MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo(a)huawei.com>");
+MODULE_AUTHOR("Longfang Liu <liulongfang(a)huawei.com>");
MODULE_AUTHOR("Zhang Wei <zhangwei375(a)huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
diff --git a/drivers/crypto/hisilicon/sec2/sec_usr_if.h b/drivers/crypto/hisilicon/sec2/sec_usr_if.h
deleted file mode 100644
index 7c76e19..00000000
--- a/drivers/crypto/hisilicon/sec2/sec_usr_if.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/* Copyright (c) 2018-2019 HiSilicon Limited. */
-
-#ifndef HISI_SEC_USR_IF_H
-#define HISI_SEC_USR_IF_H
-
-struct hisi_sec_sqe_type1 {
- __u32 rsvd2:6;
- __u32 ci_gen:2;
- __u32 ai_gen:2;
- __u32 rsvd1:7;
- __u32 c_key_type:2;
- __u32 a_key_type:2;
- __u32 rsvd0:10;
- __u32 inveld:1;
-
- __u32 mac_len:6;
- __u32 a_key_len:5;
- __u32 a_alg:6;
- __u32 rsvd3:15;
- __u32 c_icv_len:6;
- __u32 c_width:3;
- __u32 c_key_len:3;
- __u32 c_mode:4;
- __u32 c_alg:4;
- __u32 rsvd4:12;
- __u32 auth_gran_size:24;
- __u32:8;
- __u32 cipher_gran_size:24;
- __u32:8;
- __u32 auth_src_offset:16;
- __u32 cipher_src_offset:16;
- __u32 gran_num:16;
- __u32 rsvd5:16;
- __u32 src_skip_data_len:24;
- __u32 rsvd6:8;
- __u32 dst_skip_data_len:24;
- __u32 rsvd7:8;
- __u32 tag:16;
- __u32 rsvd8:16;
- __u32 gen_page_pad_ctrl:4;
- __u32 gen_grd_ctrl:4;
- __u32 gen_ver_ctrl:4;
- __u32 gen_app_ctrl:4;
- __u32 gen_ver_val:8;
- __u32 gen_app_val:8;
- __u32 private_info;
- __u32 gen_ref_ctrl:4;
- __u32 page_pad_type:2;
- __u32 rsvd9:2;
- __u32 chk_grd_ctrl:4;
- __u32 chk_ref_ctrl:4;
- __u32 block_size:16;
- __u32 lba_l;
- __u32 lba_h;
- __u32 a_key_addr_l;
- __u32 a_key_addr_h;
- __u32 mac_addr_l;
- __u32 mac_addr_h;
- __u32 c_ivin_addr_l;
- __u32 c_ivin_addr_h;
- __u32 c_key_addr_l;
- __u32 c_key_addr_h;
- __u32 data_src_addr_l;
- __u32 data_src_addr_h;
- __u32 data_dst_addr_l;
- __u32 data_dst_addr_h;
- __u32 done:1;
- __u32 icv:3;
- __u32 rsvd11:3;
- __u32 flag:4;
- __u32 dif_check:3;
- __u32 rsvd10:2;
- __u32 error_type:8;
- __u32 warning_type:8;
- __u32 dw29;
- __u32 dw30;
- __u32 dw31;
-};
-
-struct hisi_sec_sqe_type2 {
- __u32 nonce_len:4;
- __u32 huk:1;
- __u32 key_s:1;
- __u32 ci_gen:2;
- __u32 ai_gen:2;
- __u32 a_pad:2;
- __u32 c_s:2;
- __u32 rsvd1:2;
- __u32 rhf:1;
- __u32 c_key_type:2;
- __u32 a_key_type:2;
- __u32 write_frame_len:3;
- __u32 cal_iv_addr_en:1;
- __u32 tls_up:1;
- __u32 rsvd0:5;
- __u32 inveld:1;
- __u32 mac_len:5;
- __u32 a_key_len:6;
- __u32 a_alg:6;
- __u32 rsvd3:15;
- __u32 c_icv_len:6;
- __u32 c_width:3;
- __u32 c_key_len:3;
- __u32 c_mode:4;
- __u32 c_alg:4;
- __u32 rsvd4:12;
- __u32 a_len:24;
- __u32 iv_offset_l:8;
- __u32 c_len:24;
- __u32 iv_offset_h:8;
- __u32 auth_src_offset:16;
- __u32 cipher_src_offset:16;
- __u32 cs_ip_header_offset:16;
- __u32 cs_udp_header_offset:16;
- __u32 pass_word_len:16;
- __u32 dk_len:16;
- __u32 salt3:8;
- __u32 salt2:8;
- __u32 salt1:8;
- __u32 salt0:8;
- __u32 tag:16;
- __u32 rsvd5:16;
- __u32 c_pad_type:4;
- __u32 c_pad_len:8;
- __u32 c_pad_data_type:4;
- __u32 c_pad_len_field:2;
- __u32 rsvd6:14;
- __u32 long_a_data_len_l;
- __u32 long_a_data_len_h;
- __u32 a_ivin_addr_l;
- __u32 a_ivin_addr_h;
- __u32 a_key_addr_l;
- __u32 a_key_addr_h;
- __u32 mac_addr_l;
- __u32 mac_addr_h;
- __u32 c_ivin_addr_l;
- __u32 c_ivin_addr_h;
- __u32 c_key_addr_l;
- __u32 c_key_addr_h;
- __u32 data_src_addr_l;
- __u32 data_src_addr_h;
- __u32 data_dst_addr_l;
- __u32 data_dst_addr_h;
- __u32 done:1;
- __u32 icv:3;
- __u32 rsvd11:3;
- __u32 flag:4;
- __u32 rsvd10:5;
- __u32 error_type:8;
- __u32 warning_type:8;
- __u32 mac_i3:8;
- __u32 mac_i2:8;
- __u32 mac_i1:8;
- __u32 mac_i0:8;
- __u32 check_sum_i:16;
- __u32 tls_pad_len_i:8;
- __u32 rsvd12:8;
- __u32 counter;
-};
-
-struct hisi_sec_sqe {
- __u32 type:4;
- __u32 cipher:2;
- __u32 auth:2;
- __u32 seq:1;
- __u32 de:2;
- __u32 scene:4;
- __u32 src_addr_type:3;
- __u32 dst_addr_type:3;
- __u32 mac_addr_type:3;
- __u32 rsvd0:8;
- union {
- struct hisi_sec_sqe_type1 type1;
- struct hisi_sec_sqe_type2 type2;
- };
-};
-
-#endif
--
1.8.3