Kernel
April 2020
Hello, I am testing the perf spe-c2c feature on the latest openEuler-1.0, but I found there is no arm_spe_0 directory under /sys/devices.
Running perf spe-c2c record shows:
event syntax error: 'arm_spe_0/ts_enable=1,pct_enable=1,pa_enable=1,load_filter=1,jitter=1,store_filter=1,min_latency=0/'
\___ Cannot find PMU `arm_spe_0'. Missing kernel support?
May I ask why this is? Does the SPE feature need to be enabled somewhere?
subscribe linux-kernel
From: Liu Yanshi <liuyanshi(a)huawei.com>
driver inclusion
category: feature
bugzilla: NA
CVE: NA
pcie_cae: add an interface to get the chipnums of the current system.
Signed-off-by: Liu Yanshi <liuyanshi(a)huawei.com>
Reviewed-by: Zhu Xiongxiong <zhuxiongxiong(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
.../controller/hisi-pcie-customer/hisi_pcie_cae.c | 37 ++++++++++++++++++++--
1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c b/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c
index 8d0c801..6229a54 100644
--- a/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c
+++ b/drivers/pci/controller/hisi-pcie-customer/hisi_pcie_cae.c
@@ -3,6 +3,7 @@
#include <linux/mm.h>
#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -21,6 +22,7 @@
#define CHIP_INFO_REG_SIZE 4
#define TYPE_SHIFT 4
#define BIT_SHIFT_8 8
+#define PCIE_CMD_GET_CHIPNUMS 0x01
#define DEVICE_NAME "pcie_reg_dev"
@@ -37,7 +39,7 @@ enum {
MMAP_TYPE_VIRTIO
};
-static int current_chip_nums;
+static u32 current_chip_nums;
static const struct vm_operations_struct mmap_pcie_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
@@ -100,10 +102,10 @@ static int pcie_reg_mmap(struct file *filep, struct vm_area_struct *vma)
return 0;
}
-int pcie_get_chipnums(u32 cpu_info)
+u32 pcie_get_chipnums(u32 cpu_info)
{
int i;
- int chip_count = 0;
+ u32 chip_count = 0;
u32 chip_i_info;
for (i = 0; i < MAX_CHIP_NUM; i++) {
@@ -144,12 +146,41 @@ static int pcie_release(struct inode *inode, struct file *f)
return 0;
}
+static long pcie_reg_ioctl(struct file *pfile, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case PCIE_CMD_GET_CHIPNUMS:
+ if ((void *)arg == NULL) {
+ pr_info("[PCIe Base] invalid arg address\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (copy_to_user((void *)arg, (void *)&current_chip_nums,
+ sizeof(int))) {
+ pr_info("[PCIe Base] copy chip_nums to usr failed\n");
+ ret = -EINVAL;
+ }
+ break;
+
+ default:
+ pr_info("[PCIe Base] invalid pcie ioctl cmd:%u\n", cmd);
+ break;
+ }
+
+ return ret;
+}
+
static const struct file_operations pcie_dfx_fops = {
.owner = THIS_MODULE,
.open = pcie_open,
.release = pcie_release,
.llseek = noop_llseek,
.mmap = pcie_reg_mmap,
+ .unlocked_ioctl = pcie_reg_ioctl,
};
static struct miscdevice pcie_dfx_misc = {
--
1.8.3
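For illustration, here is a minimal user-space sketch of how the new ioctl might be exercised. This is not part of the patch: the /dev node name is an assumption based on DEVICE_NAME above, and error handling is kept minimal.

/* Hypothetical usage sketch -- not part of the patch. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define PCIE_CMD_GET_CHIPNUMS 0x01	/* must match the kernel definition */

int main(void)
{
	unsigned int chip_nums = 0;
	/* Assumption: the misc device shows up as /dev/pcie_reg_dev. */
	int fd = open("/dev/pcie_reg_dev", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The driver copies sizeof(int) bytes of current_chip_nums here. */
	if (ioctl(fd, PCIE_CMD_GET_CHIPNUMS, &chip_nums) < 0) {
		perror("ioctl");
		close(fd);
		return 1;
	}
	printf("chip count: %u\n", chip_nums);
	close(fd);
	return 0;
}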
category: other
bugzilla: NA
CVE: NA
----------------------------------
The ccflags removed in this patch shouldn't be in the open kernel tree.
In particular, -fstack-protector-strong causes a compile error when
CONFIG_STACKPROTECTOR_STRONG isn't enabled.
Signed-off-by: Zhengyuan Liu <liuzhengyuan(a)tj.kylinos.cn>
---
drivers/net/ethernet/hisilicon/hns3/Makefile | 9 ---------
1 file changed, 9 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 6653e81..0365f1c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -3,15 +3,6 @@
# Makefile for the HISILICON network device drivers.
#
-# Add security options
-ccflags-y += -fstack-protector-strong
-ccflags-y += -Wl,-z,relro,-z,now
-ccflags-y += -Wl,-z,noexecstack
-ccflags-y += -D_FORTIFY_SOURCE=2 -O2
-ccflags-y += -fvisibility=hidden
-ccflags-y += -Wformat=2 -Wfloat-equal
-ccflags-y += -fsigned-char
-
ccflags-y += -DCONFIG_IT_VALIDATION
ccflags-y += -DCONFIG_HNS3_TEST
--
2.7.4
cc kernel(a)openeuler.org.
Just for members who may not know, dwadm(a)pcl.ac.cn is from Pengcheng Lab (鹏程实验室) and will
help on issues related to VMs there.
On Sun, Apr 26, 2020 at 10:08 AM Xie XiuQi via Infra <infra(a)openeuler.org>
wrote:
> Thank you for your active participation. We are working with our colleagues at Pengcheng Lab to locate the issue.
> On 2020/4/25 13:30, Qichen Zhang wrote:
>
>
> Qichen Zhang
> Email: 17852657226(a)163.com
>
>
> --------- Forwarded message ---------
> From: Qichen Zhang <17852657226(a)163.com>
> Date: 2020-04-25 13:26
> To: Xiehong (Cynthia) <xiehong(a)huawei.com>
> Cc:
> Subject: Re: Issue updated on gitee, please check it promptly
> Dear engineers,
> Hello!
>
> The bug still exists; it still reports the message
> kernel watchdog Bug soft lockup
> I have posted the details on the issue. I waited from around 7 p.m. last night until 1 a.m. this morning, and the bug was still there.
>
> Could the engineers please look into how to solve it?
> This bug was found last Friday; we can't just leave it sitting there.
> If you need anything from me, feel free to tell me directly.
> My name is Qichen Zhang; my phone number (also my WeChat ID) is 17852657226.
> I hope everyone can work together to resolve this bug as soon as possible.
>
> Looking forward to your reply.
>
>
> Qichen Zhang
> Email: 17852657226(a)163.com
>
>
> On 2020-04-21 14:17, Xiehong (Cynthia) <xiehong(a)huawei.com> wrote:
>
> Hi,
>
>
>
> Thanks!
>
>
>
> We will continue to track this in the issue system on the community site.
>
>
>
>
>
> *From:* Qichen Zhang [mailto:17852657226@163.com]
> *Sent:* 2020-04-21 13:27
> *To:* Xiehong (Cynthia) <xiehong(a)huawei.com>
> *Subject:* Issue updated on gitee, please check it promptly
>
>
>
> Dear engineers,
>
> Hello!
>
>
>
> I have updated the issue on gitee; please check it promptly:
>
> https://gitee.com/openeuler/community/issues/I1F13I?from=project-issue
>
> I'd be glad to work with everyone to solve this bug.
>
> — —
>
> Qichen Zhang
>
>
>
--
Regards
Fred Li (李永乐)
[PATCH 01/25] net: hns3: merge mac state HCLGE_MAC_TO_DEL and HCLGE_MAC_DEL_FAIL
by Yang Yingliang 26 Apr '20
From: shenhao <shenhao21(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
--------------------------------------------
HCLGE_MAC_DEL_FAIL is an intermediate state for MAC address handling,
so it can be merged with HCLGE_MAC_TO_DEL.
Btw, this patch also changes the enum name from HCLGE_MAC_ADDR_STATE
to HCLGE_MAC_NODE_STATE, since it is used to indicate the state of a
MAC node.
Signed-off-by: Jian Shen <shenjian15(a)huawei.com>
Signed-off-by: shenhao <shenhao21(a)huawei.com>
Reviewed-by: Zhong Zhaohui <zhongzhaohui(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
.../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 46 ++++++++++------------
.../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 9 ++---
2 files changed, 25 insertions(+), 30 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 478a3b5..d985c68 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -7422,14 +7422,13 @@ static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
return NULL;
}
-static void hclge_mac_node_convert(struct hclge_vport_mac_addr_cfg *mac_node,
- enum HCLGE_MAC_ADDR_STATE state)
+static void hclge_update_mac_node(struct hclge_vport_mac_addr_cfg *mac_node,
+ enum HCLGE_MAC_NODE_STATE state)
{
switch (state) {
/* from set_rx_mode or tmp_add_list */
case HCLGE_MAC_TO_ADD:
- if (mac_node->state == HCLGE_MAC_TO_DEL ||
- mac_node->state == HCLGE_MAC_DEL_FAIL)
+ if (mac_node->state == HCLGE_MAC_TO_DEL)
mac_node->state = HCLGE_MAC_ACTIVE;
break;
/* only from set_rx_mode */
@@ -7442,14 +7441,7 @@ static void hclge_mac_node_convert(struct hclge_vport_mac_addr_cfg *mac_node,
}
break;
/* only from tmp_add_list, the mac_node->state won't be
- * HCLGE_MAC_ACTIVE/HCLGE_MAC_DEL_FAIL/HCLGE_MAC_ADD_FAIL
- */
- case HCLGE_MAC_DEL_FAIL:
- if (mac_node->state == HCLGE_MAC_TO_ADD)
- mac_node->state = HCLGE_MAC_ACTIVE;
- break;
- /* only from tmp_add_list, the mac_node->state won't be
- * HCLGE_MAC_ACTIVE/HCLGE_MAC_DEL_FAIL/HCLGE_MAC_ADD_FAIL
+ * ACTIVE.
*/
case HCLGE_MAC_ACTIVE:
if (mac_node->state == HCLGE_MAC_TO_ADD)
@@ -7460,7 +7452,7 @@ static void hclge_mac_node_convert(struct hclge_vport_mac_addr_cfg *mac_node,
}
int hclge_update_mac_list(struct hclge_vport *vport,
- enum HCLGE_MAC_ADDR_STATE state,
+ enum HCLGE_MAC_NODE_STATE state,
enum HCLGE_MAC_ADDR_TYPE mac_type,
const unsigned char *addr)
{
@@ -7480,7 +7472,7 @@ int hclge_update_mac_list(struct hclge_vport *vport,
*/
mac_node = hclge_find_mac_node(list, addr);
if (mac_node) {
- hclge_mac_node_convert(mac_node, state);
+ hclge_update_mac_node(mac_node, state);
spin_unlock_bh(&vport->mac_list_lock);
return 0;
}
@@ -7731,7 +7723,6 @@ static void hclge_unsync_mac_list(struct hclge_vport *vport,
list_del(&mac_node->node);
kfree(mac_node);
} else {
- mac_node->state = HCLGE_MAC_DEL_FAIL;
set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
&vport->state);
break;
@@ -7753,13 +7744,13 @@ static bool hclge_sync_from_add_list(struct list_head *add_list,
* uc/mc_mac_list, it means have received a TO_DEL request
* during the time window of adding the mac address into mac
* table. if mac_node state is ACTIVE, then change it to TO_DEL,
- * then it will be removed at next time. else it must be TO_ADD
- * or ADD_FAIL, this address hasn't been added into mac table,
+ * then it will be removed at next time. else it must be TO_ADD,
+ * this address hasn't been added into mac table,
* so just remove the mac node.
*/
new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
if (new_node) {
- hclge_mac_node_convert(new_node, mac_node->state);
+ hclge_update_mac_node(new_node, mac_node->state);
list_del(&mac_node->node);
kfree(mac_node);
} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
@@ -7782,7 +7773,16 @@ static void hclge_sync_from_del_list(struct list_head *del_list,
list_for_each_entry_safe(mac_node, tmp, del_list, node) {
new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
if (new_node) {
- hclge_mac_node_convert(new_node, mac_node->state);
+ /* If the mac addr exists in the mac list, it means
+ * received a new TO_ADD request during the time window
+ * of configuring the mac address. Since the mac node
+ * state is TO_ADD and the address is already in the
+ * hardware (due to delete fail), we just need
+ * to change the mac node state to ACTIVE.
+ */
+ new_node->state = HCLGE_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
} else {
list_del(&mac_node->node);
list_add_tail(&mac_node->node, mac_list);
@@ -7850,7 +7850,6 @@ static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
list_for_each_entry_safe(mac_node, tmp, list, node) {
switch (mac_node->state) {
case HCLGE_MAC_TO_DEL:
- case HCLGE_MAC_DEL_FAIL:
list_del(&mac_node->node);
list_add_tail(&mac_node->node, &tmp_del_list);
break;
@@ -7962,7 +7961,6 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
switch (mac_cfg->state) {
case HCLGE_MAC_TO_DEL:
- case HCLGE_MAC_DEL_FAIL:
case HCLGE_MAC_ACTIVE:
list_del(&mac_cfg->node);
list_add_tail(&mac_cfg->node, &tmp_del_list);
@@ -8021,7 +8019,6 @@ static void hclge_uninit_mac_list(struct hclge_vport *vport,
list_for_each_entry_safe(mac_node, tmp, list, node) {
switch (mac_node->state) {
case HCLGE_MAC_TO_DEL:
- case HCLGE_MAC_DEL_FAIL:
case HCLGE_MAC_ACTIVE:
list_del(&mac_node->node);
list_add_tail(&mac_node->node, &tmp_del_list);
@@ -8221,7 +8218,7 @@ void hclge_replace_mac_node(struct list_head *list, const u8 *old_addr,
}
void hclge_modify_mac_node_state(struct list_head *list, const u8 *addr,
- enum HCLGE_MAC_ADDR_STATE state)
+ enum HCLGE_MAC_NODE_STATE state)
{
struct hclge_vport_mac_addr_cfg *mac_node;
@@ -8989,8 +8986,7 @@ static void hclge_mac_node_convert_for_reset(struct list_head *list)
list_for_each_entry_safe(mac_node, tmp, list, node) {
if (mac_node->state == HCLGE_MAC_ACTIVE) {
mac_node->state = HCLGE_MAC_TO_ADD;
- } else if (mac_node->state == HCLGE_MAC_TO_DEL ||
- mac_node->state == HCLGE_MAC_DEL_FAIL) {
+ } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
list_del(&mac_node->node);
kfree(mac_node);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 5e462cf..a1fa782 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -640,16 +640,15 @@ struct hclge_fd_ad_data {
u16 rule_id;
};
-enum HCLGE_MAC_ADDR_STATE {
+enum HCLGE_MAC_NODE_STATE {
HCLGE_MAC_TO_ADD,
HCLGE_MAC_TO_DEL,
- HCLGE_MAC_DEL_FAIL,
HCLGE_MAC_ACTIVE
};
struct hclge_vport_mac_addr_cfg {
struct list_head node;
- enum HCLGE_MAC_ADDR_STATE state;
+ enum HCLGE_MAC_NODE_STATE state;
u8 mac_addr[ETH_ALEN];
};
@@ -1016,13 +1015,13 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
int hclge_update_mac_list(struct hclge_vport *vport,
- enum HCLGE_MAC_ADDR_STATE state,
+ enum HCLGE_MAC_NODE_STATE state,
enum HCLGE_MAC_ADDR_TYPE mac_type,
const unsigned char *addr);
void hclge_replace_mac_node(struct list_head *list, const u8 *old_addr,
const u8 *new_addr, bool keep_old);
void hclge_modify_mac_node_state(struct list_head *list, const u8 *addr,
- enum HCLGE_MAC_ADDR_STATE state);
+ enum HCLGE_MAC_NODE_STATE state);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
--
1.8.3
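As a reading aid, here is a sketch (not taken verbatim from the patch) of the merged state machine the commit message describes. A failed delete no longer needs its own state: the node simply stays in HCLGE_MAC_TO_DEL and the delete is retried by the periodic sync task.

enum HCLGE_MAC_NODE_STATE {
	HCLGE_MAC_TO_ADD,	/* queued to be written into the MAC table */
	HCLGE_MAC_TO_DEL,	/* queued for removal; retried if the delete fails */
	HCLGE_MAC_ACTIVE	/* present in the hardware MAC table */
};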
From: Huang jun <huangjun61(a)huawei.com>
If we want ACPI GED to support wakeup from freeze, we need to implement
the suspend/resume functions. In these two methods, the ACPI _GPO and _GPP
methods are called to set the sleep flag and to debounce (anti-shake) the
power button.
Signed-off-by: Huangjun <huangjun63(a)huawei.com>
---
drivers/acpi/evged.c | 108 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 108 insertions(+)
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index f13ba2c07667..dde8fbff8d19 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -46,11 +46,20 @@
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
#define MODULE_NAME "acpi-ged"
+struct acpi_ged_handle {
+ struct timer_list timer;/* For 4s anti-shake of power button */
+ acpi_handle gpp_handle; /* ACPI Handle: enable shutdown */
+ acpi_handle gpo_handle; /* ACPI Handle: set sleep flag */
+};
+
struct acpi_ged_device {
struct device *dev;
+ struct acpi_ged_handle *wakeup_handle;
struct list_head event_list;
};
@@ -131,6 +140,34 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
return AE_OK;
}
+#ifdef CONFIG_PM_SLEEP
+static void init_ged_handle(struct acpi_ged_device *geddev) {
+ struct acpi_ged_handle *wakeup_handle;
+ acpi_handle gpo_handle = NULL;
+ acpi_handle gpp_handle = NULL;
+ acpi_status acpi_ret;
+
+ wakeup_handle = devm_kzalloc(geddev->dev, sizeof(*wakeup_handle), GFP_KERNEL);
+ if (!wakeup_handle)
+ return;
+
+ geddev->wakeup_handle = wakeup_handle;
+
+ /* Initialize wakeup_handle, prepare for ged suspend and resume */
+ timer_setup(&wakeup_handle->timer, NULL, 0);
+
+ acpi_ret = acpi_get_handle(ACPI_HANDLE(geddev->dev), "_GPO", &gpo_handle);
+ if (ACPI_FAILURE(acpi_ret))
+ dev_info(geddev->dev, "cannot locate _GPO method\n");
+ wakeup_handle->gpo_handle = gpo_handle;
+
+ acpi_ret = acpi_get_handle(ACPI_HANDLE(geddev->dev), "_GPP", &gpp_handle);
+ if (ACPI_FAILURE(acpi_ret))
+ dev_info(geddev->dev, "cannot locate _GPP method\n");
+ wakeup_handle->gpp_handle = gpp_handle;
+}
+#endif
+
static int ged_probe(struct platform_device *pdev)
{
struct acpi_ged_device *geddev;
@@ -149,6 +186,9 @@ static int ged_probe(struct platform_device *pdev)
return -EINVAL;
}
platform_set_drvdata(pdev, geddev);
+#ifdef CONFIG_PM_SLEEP
+ init_ged_handle(geddev);
+#endif
return 0;
}
@@ -164,6 +204,10 @@ static void ged_shutdown(struct platform_device *pdev)
dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
event->gsi, event->irq);
}
+
+ if (geddev->wakeup_handle)
+ del_timer(&geddev->wakeup_handle->timer);
+
}
static int ged_remove(struct platform_device *pdev)
@@ -172,6 +216,67 @@ static int ged_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static void ged_timer_callback(struct timer_list *t)
+{
+ struct acpi_ged_handle *wakeup_handle = from_timer(wakeup_handle, t, timer);
+ acpi_status acpi_ret;
+
+ /* _GPP method enable power button */
+ if (wakeup_handle && wakeup_handle->gpp_handle) {
+ acpi_ret = acpi_execute_simple_method(wakeup_handle->gpp_handle, NULL, ACPI_IRQ_MODEL_GIC);
+ if (ACPI_FAILURE(acpi_ret))
+ pr_warn("_GPP method execution failed\n");
+ }
+}
+
+static int ged_suspend(struct device *dev)
+{
+ struct acpi_ged_device *geddev = dev_get_drvdata(dev);
+ struct acpi_ged_handle *wakeup_handle = geddev->wakeup_handle;
+ struct acpi_ged_event *event, *next;
+ acpi_status acpi_ret;
+
+ /* _GPO method set sleep flag */
+ if (wakeup_handle && wakeup_handle->gpo_handle) {
+ acpi_ret = acpi_execute_simple_method(wakeup_handle->gpo_handle, NULL, ACPI_IRQ_MODEL_GIC);
+ if (ACPI_FAILURE(acpi_ret)) {
+ pr_warn("_GPO method execution failed\n");
+ return AE_ERROR;
+ }
+ }
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node)
+ enable_irq_wake(event->irq);
+
+ return 0;
+}
+
+static int ged_resume(struct device *dev)
+{
+ struct acpi_ged_device *geddev = dev_get_drvdata(dev);
+ struct acpi_ged_handle *wakeup_handle = geddev->wakeup_handle;
+ struct acpi_ged_event *event, *next;
+
+ /* use timer to complete 4s anti-shake */
+ if (wakeup_handle && wakeup_handle->gpp_handle) {
+ wakeup_handle->timer.expires = jiffies + (4 * HZ);
+ wakeup_handle->timer.function = ged_timer_callback;
+ add_timer(&wakeup_handle->timer);
+ }
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node)
+ disable_irq_wake(event->irq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ged_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ged_suspend, ged_resume)
+};
+#endif
+
+
static const struct acpi_device_id ged_acpi_ids[] = {
{"ACPI0013"},
{},
@@ -184,6 +289,9 @@ static struct platform_driver ged_driver = {
.driver = {
.name = MODULE_NAME,
.acpi_match_table = ACPI_PTR(ged_acpi_ids),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &ged_pm_ops,
+#endif
},
};
builtin_platform_driver(ged_driver);
--
2.20.1
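For readers unfamiliar with the timer API used for the 4-second anti-shake, here is a minimal self-contained sketch of the debounce pattern. The names are hypothetical; the patch wires ged_timer_callback into wakeup_handle->timer in the same spirit, except that it assigns .function and calls add_timer() at resume time.

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static struct timer_list debounce_timer;

static void debounce_expired(struct timer_list *t)
{
	/* Runs once, 4 seconds after arming: re-enable the power button. */
	pr_info("debounce window over\n");
}

static void arm_debounce(void)
{
	/* timer_setup() binds the callback; mod_timer() (re)arms it. */
	timer_setup(&debounce_timer, debounce_expired, 0);
	mod_timer(&debounce_timer, jiffies + 4 * HZ);
}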
Alexei Avshalom Lazar (1):
wil6210: add general initialization/size checks
Amir Goldstein (1):
ovl: fix value of i_ino for lower hardlink corner case
Austin Kim (1):
mm/vmalloc.c: move 'area->pages' after if statement
Can Guo (1):
scsi: ufs: Fix ufshcd_hold() caused scheduling while atomic
Colin Ian King (2):
ASoC: Intel: mrfld: fix incorrect check on p->sink
ASoC: Intel: mrfld: return error codes when an error occurs
DENG Qingfang (1):
net: dsa: mt7530: fix tagged frames pass-through in VLAN-unaware mode
Dedy Lansky (2):
wil6210: check rx_buff_mgmt before accessing it
wil6210: make sure Rx ring sizes are correlated
Florian Fainelli (1):
net: stmmac: dwmac-sunxi: Provide TX and RX fifo sizes
Greg Kroah-Hartman (1):
Linux 4.19.117
James Morse (1):
x86/resctrl: Preserve CDP enable over CPU hotplug
Jan Kara (1):
ext4: do not zeroout extents beyond i_disksize
Jim Mattson (1):
kvm: x86: Host feature SSBD doesn't imply guest feature SPEC_CTRL_SSBD
John Allen (1):
x86/microcode/AMD: Increase microcode PATCH_MAX_SIZE
Josef Bacik (1):
btrfs: check commit root generation in should_ignore_root
Josh Triplett (2):
ext4: fix incorrect group count in ext4_fill_super error message
ext4: fix incorrect inodes per group in error message
Karthick Gopalasubramanian (1):
wil6210: remove reset file from debugfs
Konstantin Khlebnikov (1):
net: revert default NAPI poll timeout to 2 jiffies
Maurizio Lombardi (2):
scsi: target: remove boilerplate code
scsi: target: fix hang when multiple threads try to destroy the same
iscsi session
Maya Erez (1):
wil6210: ignore HALP ICR if already handled
Reinette Chatre (1):
x86/resctrl: Fix invalid attempt at removing the default resource
group
Sasha Levin (1):
usb: dwc3: gadget: don't enable interrupt when disabling endpoint
Sebastian Andrzej Siewior (1):
amd-xgbe: Use __napi_schedule() in BH context
Sergei Lopatin (1):
drm/amd/powerplay: force the trim of the mclk dpm_levels if OD is
enabled
Sven Van Asbroeck (1):
pwm: pca9685: Fix PWM/GPIO inter-operation
Taehee Yoo (1):
hsr: check protocol version in hsr_newlink()
Takashi Iwai (4):
ALSA: usb-audio: Filter error from connector kctl ops, too
ALSA: usb-audio: Don't override ignore_ctl_error value from the map
ALSA: usb-audio: Don't create jack controls for PCM terminals
ALSA: usb-audio: Check mapping at creating connector controls, too
Taras Chornyi (1):
net: ipv4: devinet: Fix crash when add/del multicast IP with autojoin
Thinh Nguyen (1):
usb: dwc3: gadget: Don't clear flags before transfer ended
Tim Stallard (1):
net: ipv6: do not consider routes via gateways for anycast address
check
Tuomas Tynkkynen (1):
mac80211_hwsim: Use kstrndup() in place of kasprintf()
Vasily Averin (1):
keys: Fix proc_keys_next to increase position index
Wang Wenhu (1):
net: qrtr: send msgs from local of same id as broadcast
Xiao Yang (1):
tracing: Fix the race between registering 'snapshot' event trigger and
triggering 'snapshot' operation
zhangyi (F) (1):
jbd2: improve comments about freeing data buffers whose page mapping
is NULL
Makefile | 2 +-
arch/x86/include/asm/microcode_amd.h | 2 +-
arch/x86/kernel/cpu/intel_rdt.c | 2 +
arch/x86/kernel/cpu/intel_rdt.h | 1 +
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 16 ++++-
arch/x86/kvm/cpuid.c | 3 +-
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 5 +-
drivers/net/dsa/mt7530.c | 18 +++--
drivers/net/dsa/mt7530.h | 7 ++
drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +-
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 2 +
drivers/net/wireless/ath/wil6210/debugfs.c | 29 +-------
drivers/net/wireless/ath/wil6210/interrupt.c | 12 ++--
drivers/net/wireless/ath/wil6210/main.c | 5 +-
drivers/net/wireless/ath/wil6210/txrx.c | 4 +-
drivers/net/wireless/ath/wil6210/txrx_edma.c | 14 +++-
drivers/net/wireless/ath/wil6210/wil6210.h | 3 +-
drivers/net/wireless/ath/wil6210/wmi.c | 2 +-
drivers/net/wireless/mac80211_hwsim.c | 12 ++--
drivers/pwm/pwm-pca9685.c | 85 +++++++++++++----------
drivers/scsi/ufs/ufshcd.c | 5 ++
drivers/target/iscsi/iscsi_target.c | 79 ++++++---------------
drivers/target/iscsi/iscsi_target.h | 1 -
drivers/target/iscsi/iscsi_target_configfs.c | 5 +-
drivers/target/iscsi/iscsi_target_login.c | 5 +-
drivers/usb/dwc3/gadget.c | 18 ++---
fs/btrfs/relocation.c | 4 +-
fs/ext4/extents.c | 8 +--
fs/ext4/super.c | 6 +-
fs/jbd2/commit.c | 7 +-
fs/overlayfs/inode.c | 4 +-
include/net/ip6_route.h | 1 +
include/target/iscsi/iscsi_target_core.h | 2 +-
kernel/trace/trace_events_trigger.c | 10 +--
mm/vmalloc.c | 8 ++-
net/core/dev.c | 3 +-
net/hsr/hsr_netlink.c | 10 ++-
net/ipv4/devinet.c | 13 ++--
net/qrtr/qrtr.c | 7 +-
security/keys/proc.c | 2 +
sound/soc/intel/atom/sst-atom-controls.c | 2 +-
sound/soc/intel/atom/sst/sst_pci.c | 2 +-
sound/usb/mixer.c | 31 +++++----
sound/usb/mixer_maps.c | 4 +-
44 files changed, 251 insertions(+), 212 deletions(-)
--
1.8.3
[PATCH] btrfs: tree-checker: Enhance chunk checker to validate chunk profile
by Yang Yingliang 22 Apr '20
From: Qu Wenruo <wqu(a)suse.com>
mainline inclusion
from mainline-v5.2-rc1
commit 80e46cf22ba0bcb57b39c7c3b52961ab3a0fd5f2
category: bugfix
bugzilla: 13690
CVE: CVE-2019-19378
-------------------------------------------------
Btrfs-progs already has a comprehensive type checker to ensure there
is only 0 (SINGLE profile) or 1 (DUP/RAID0/1/5/6/10) bit set for the
chunk profile bits.
Do the same work in the kernel.
Reported-by: Yoon Jungyeon <jungyeon(a)gatech.edu>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=202765
Reviewed-by: Nikolay Borisov <nborisov(a)suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn(a)suse.de>
Signed-off-by: Qu Wenruo <wqu(a)suse.com>
Reviewed-by: David Sterba <dsterba(a)suse.com>
Signed-off-by: David Sterba <dsterba(a)suse.com>
Conflicts:
fs/btrfs/volumes.c
[yyl: btrfs_check_chunk_valid() is defined in fs/btrfs/volumes.c]
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/btrfs/volumes.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 99260d2..110cdfd 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -6391,6 +6391,14 @@ static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
return -EIO;
}
+ if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+ (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
+ btrfs_err(fs_info,
+ "invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
+ type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+ return -EIO;
+ }
+
if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type);
return -EIO;
--
1.8.3
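The new check relies on a standard bit trick: a nonzero value x has exactly one bit set iff (x & (x - 1)) == 0, which is what is_power_of_2() tests. A standalone sketch of the combined validity test (the helper name is hypothetical):

/* A chunk profile is valid if zero bits (SINGLE) or exactly one bit
 * (DUP/RAID0/1/5/6/10) of the profile mask are set. */
static inline bool chunk_profile_bits_valid(u64 profile)
{
	return profile == 0 || (profile & (profile - 1)) == 0;
}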
From: Cheng Jian <cj.chengjian(a)huawei.com>
hulk inclusion
category: feature
bugzilla: 5391/28338/24634
CVE: NA
-----------------------------------------------
The previous patch added the field klp_rel_state to the
module structure, which caused a KABI change; fix that
problem here.
Signed-off-by: Cheng Jian <cj.chengjian(a)huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
include/linux/module.h | 33 ++++++++++++++++++++-------------
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/include/linux/module.h b/include/linux/module.h
index 4994243..e1f3418 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -469,19 +469,6 @@ struct module {
/* Elf information */
struct klp_modinfo *klp_info;
- /*
- * livepatch should relocate the key of jump_label by
- * using klp_write_module_reloc. So it's necessary to
- * do jump_label_apply_nops() and jump_label_add_module()
- * later after livepatch relocation finised.
- *
- * for normal module :
- * always MODULE_KLP_REL_DONE.
- * for livepatch module :
- * init as MODULE_KLP_REL_UNDO,
- * set to MODULE_KLP_REL_DONE when relocate completed.
- */
- enum MODULE_KLP_REL_STATE klp_rel_state;
#endif
#ifdef CONFIG_MODULE_UNLOAD
@@ -507,7 +494,27 @@ struct module {
unsigned int num_ei_funcs;
#endif
+#if defined(CONFIG_LIVEPATCH) && !defined(__GENKSYMS__)
+ union {
+ /*
+ * livepatch should relocate the key of jump_label by
+ * using klp_write_module_reloc. So it's necessary to
+ * do jump_label_apply_nops() and jump_label_add_module()
+ * later after livepatch relocation finished.
+ *
+ * for normal module :
+ * always MODULE_KLP_REL_DONE.
+ * for livepatch module :
+ * init as MODULE_KLP_REL_UNDO,
+ * set to MODULE_KLP_REL_DONE when relocate completed.
+ */
+ enum MODULE_KLP_REL_STATE klp_rel_state;
+ long klp_rel_state_KABI;
+ };
+#else
KABI_RESERVE(1)
+#endif
+
KABI_RESERVE(2)
KABI_RESERVE(3)
KABI_RESERVE(4)
--
1.8.3
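For readers unfamiliar with the trick, here is a condensed sketch of the KABI-preservation pattern the patch uses (the struct name is hypothetical; KABI_RESERVE, MODULE_KLP_REL_STATE, and __GENKSYMS__ come from the surrounding tree):

struct example {
#if defined(CONFIG_LIVEPATCH) && !defined(__GENKSYMS__)
	/* Real builds see the new field overlaid on the reserved slot;
	 * the long member keeps the union the size of that slot. */
	union {
		enum MODULE_KLP_REL_STATE klp_rel_state;
		long klp_rel_state_KABI;
	};
#else
	/* genksyms defines __GENKSYMS__, so the CRC checker still sees
	 * the original reserved slot and the KABI checksum is unchanged. */
	KABI_RESERVE(1)
#endif
};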
[PATCH v1] scsi: hisi_sas: do not reset the timer to wait for phyup when phy already up
by Luo Jiaxing 22 Apr '20
We found that after phy up, the hardware reports another OOB interrupt
that is not followed by a phy up interrupt, like:
oob ready -> phy up -> DEV found -> oob ready -> wait phy up -> timeout
We run a link reset when waiting for phy up times out, which puts a normal
disk into reset processing. So we add a workaround in the code so that this
spurious OOB interrupt will not start the timer to wait for phy up.
Signed-off-by: Luo Jiaxing <luojiaxing(a)huawei.com>
Signed-off-by: John Garry <john.garry(a)huawei.com>
---
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index acf2fc6..5b80856 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1898,8 +1898,11 @@ static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
if (irq_value0 & CHL_INT0_PHY_RDY_MSK) {
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ dev_dbg(dev, "phy%d OOB ready\n", phy_no);
+ if (phy->phy_attached)
+ return;
+
if (!timer_pending(&phy->timer)) {
- dev_dbg(dev, "phy%d OOB ready\n", phy_no);
phy->timer.function = wait_phyup_timedout_v3_hw;
phy->timer.expires = jiffies +
WAIT_PHYUP_TIMEOUT_V3_HW * HZ;
--
2.7.4
Hi all,
We are frequently asked which patches this kernel carries compared with kernel 4.19.
[1] shows how to find them; we hope it helps. If more information is
needed, please leave a message in the issue [1].
[1] https://gitee.com/openeuler/kernel/issues/I1F430
--
Regards
Fred Li (李永乐)
[PATCH hulk-4.19-next] net: hns3: add suspend/resume function for hns3 driver
by Jian Shen 21 Apr '20
From: Hao Shen <shenhao21(a)huawei.com>
The suspend and resume feature is useful when you want to
save the current state of your machine and continue working
later from the same state. This patch makes the hns3 net
device resume to the same state after S3 and S4.
Signed-off-by: Weiwei Deng <dengweiwei(a)huawei.com>
Signed-off-by: Hao Shen <shenhao21(a)huawei.com>
Signed-off-by: Jian Shen <shenjian15(a)huawei.com>
---
drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 32 ++++++++++
.../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 70 ++++++++++++++++++++++
3 files changed, 104 insertions(+)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index bb29b46..8b1e690 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -561,6 +561,8 @@ struct hnae3_ae_ops {
int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset,
u32 len, u8 *data);
+ int (*suspend)(struct hnae3_ae_dev *ae_dev);
+ int (*resume)(struct hnae3_ae_dev *ae_dev);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 4eca8cf..de3d37c3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2226,6 +2226,34 @@ static void hns3_shutdown(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D3hot);
}
+#ifdef CONFIG_PM
+static int hns3_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ if (ae_dev->ops->suspend)
+ ae_dev->ops->suspend(ae_dev);
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int hns3_resume(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (ae_dev->ops->resume)
+ return ae_dev->ops->resume(ae_dev);
+
+ return 0;
+}
+#endif
+
static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
@@ -2310,6 +2338,10 @@ struct pci_driver hns3_driver = {
.probe = hns3_probe,
.remove = hns3_remove,
.shutdown = hns3_shutdown,
+#ifdef CONFIG_PM
+ .suspend = hns3_suspend,
+ .resume = hns3_resume,
+#endif
.sriov_configure = hns3_pci_sriov_configure,
.err_handler = &hns3_err_handler,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c2ae05f..478a3b5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3820,6 +3820,72 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_reset_task_schedule(hdev);
}
+#ifdef CONFIG_PM
+static int hclge_suspend(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ return hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+
+err_reset_lock:
+ rtnl_unlock();
+ return ret;
+}
+
+static int hclge_resume(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ rtnl_lock();
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_lock();
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ return hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+
+err_reset_lock:
+ rtnl_unlock();
+ return ret;
+}
+#endif
+
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
@@ -11525,6 +11591,10 @@ struct hnae3_ae_ops hclge_ops = {
.set_vf_rate = hclge_set_vf_rate,
.set_vf_mac = hclge_set_vf_mac,
.get_module_eeprom = hclge_get_module_eeprom,
+#ifdef CONFIG_PM
+ .suspend = hclge_suspend,
+ .resume = hclge_resume,
+#endif
};
static struct hnae3_ae_algo ae_algo = {
--
1.9.1
Alain Volmat (1):
i2c: st: fix missing struct parameter description
Alex Vesker (1):
IB/mlx5: Replace tunnel mpls capability bits for tunnel_offloads
Alexander Duyck (1):
mm: Use fixed constant in page_frag_alloc instead of size + 1
Alexander Sverdlin (1):
genirq/irqdomain: Check pointer in irq_domain_alloc_irqs_hierarchy()
Alexey Dobriyan (1):
null_blk: fix spurious IO errors after failed past-wp access
Andrei Botila (1):
crypto: caam - update xts sector size for large input length
Andy Lutomirski (1):
selftests/x86/ptrace_syscall_32: Fix no-vDSO segfault
Andy Shevchenko (1):
mfd: dln2: Fix sanity checking for endpoints
Aneesh Kumar K.V (1):
powerpc/hash64/devmap: Use H_PAGE_THP_HUGE when setting up huge devmap
PTE entries
Anssi Hannula (1):
tools: gpio: Fix out-of-tree build regression
Ard Biesheuvel (1):
efi/x86: Ignore the memory attributes table on i386
Arvind Sankar (1):
x86/boot: Use unsigned comparison for addresses
Bart Van Assche (2):
null_blk: Fix the null_add_dev() error path
null_blk: Handle null_add_dev() failures properly
Benoit Parrot (1):
media: ti-vpe: cal: fix disable_irqs to only the intended target
Bob Liu (1):
dm zoned: remove duplicate nr_rnd_zones increase in dmz_init_zone()
Bob Peterson (1):
gfs2: Don't demote a glock until its revokes are written
Boqun Feng (1):
locking/lockdep: Avoid recursion in
lockdep_count_{for,back}ward_deps()
Changwei Ge (1):
ocfs2: no need try to truncate file beyond i_size
Chris Wilson (1):
drm: Remove PageReserved manipulation from drm_pci_alloc
Christian Gmeiner (2):
drm/etnaviv: rework perfmon query infrastructure
etnaviv: perfmon: fix total and idle HI cyleces readout
Christoph Niedermaier (1):
cpufreq: imx6q: Fixes unwanted cpu overclocking on i.MX6ULL
Christophe Leroy (1):
powerpc/kprobes: Ignore traps that happened in real mode
Clement Courbet (1):
powerpc: Make setjmp/longjmp signature standard
Cédric Le Goater (1):
powerpc/xive: Use XIVE_BAD_IRQ instead of zero to catch non configured
IPIs
David Hildenbrand (2):
KVM: s390: vsie: Fix region 1 ASCE sanity shadow address checks
KVM: s390: vsie: Fix delivery of addressing exceptions
Dongchun Zhu (1):
media: i2c: ov5695: Fix power on and off sequences
Eric Biggers (2):
fs/filesystems.c: downgrade user-reachable WARN_ONCE() to
pr_warn_once()
kmod: make request_module() return an error when autoloading is
disabled
Eric W. Biederman (1):
signal: Extend exec_id to 64bits
Filipe Manana (2):
Btrfs: fix crash during unmount due to race with delayed inode workers
btrfs: fix missing file extent item for hole after ranged fsync
Fredrik Strupe (1):
arm64: armv8_deprecated: Fix undef_hook mask for thumb setend
Frieder Schrempf (2):
mtd: spinand: Stop using spinand->oobbuf for buffering bad block
markers
mtd: spinand: Do not erase the block before writing a bad block marker
Gao Xiang (1):
erofs: correct the remaining shrink objects
Gary Lin (1):
efi/x86: Fix the deletion of variables in mixed mode
Gilad Ben-Yossef (4):
crypto: ccree - zero out internal struct before use
crypto: ccree - don't mangle the request assoclen
crypto: ccree - dec auth tag size from cryptlen map
crypto: ccree - only try to map auth tag if needed
Greg Kroah-Hartman (1):
Linux 4.19.116
Guoqing Jiang (1):
md: check arrays is suspended in mddev_detach before call quiesce
operations
Gustavo A. R. Silva (1):
MIPS: OCTEON: irq: Fix potential NULL pointer dereference
Hadar Gat (1):
crypto: ccree - improve error handling
Hans de Goede (1):
Input: i8042 - add Acer Aspire 5738z to nomux list
Huacai Chen (1):
MIPS/tlbex: Fix LDDIR usage in setup_pw() for Loongson-3
James Morse (1):
firmware: arm_sdei: fix double-lock on hibernate with shared events
James Smart (2):
nvme-fc: Revert "add module to ops template to allow module
references"
nvme: Treat discovery subsystems as unique subsystems
Jan Engelhardt (1):
acpi/x86: ignore unspecified bit positions in the ACPI global lock
field
John Garry (1):
libata: Remove extra scsi_host_put() in ata_scsi_add_hosts()
Josef Bacik (5):
btrfs: remove a BUG_ON() from merge_reloc_roots()
btrfs: track reloc roots based on their commit root bytenr
btrfs: set update the uuid generation as soon as possible
btrfs: drop block from cache on error in relocation
btrfs: use nofs allocations for running delayed items
Juergen Gross (1):
xen/blkfront: fix memory allocation flags in blkfront_setup_indirect()
Junyong Sun (1):
firmware: fix a double abort case with fw_load_sysfs_fallback
Kai-Heng Feng (1):
libata: Return correct status in sata_pmp_eh_recover_pm() when
ATA_DFLAG_DETACH is set
Kees Cook (1):
slub: improve bit diffusion for freelist ptr obfuscation
Kishon Vijay Abraham I (1):
PCI: endpoint: Fix for concurrent memory allocation in OB address
region
Konstantin Khlebnikov (1):
block: keep bdi->io_pages in sync with max_sectors_kb for stacked
devices
Laurentiu Tudor (1):
powerpc/fsl_booke: Avoid creating duplicate tlb1 entry
Libor Pechacek (1):
powerpc/pseries: Avoid NULL pointer dereference when drmem is
unavailable
Logan Gunthorpe (1):
PCI/switchtec: Fix init_completion race condition with poll_wait()
Lukas Wunner (1):
PCI: pciehp: Fix indefinite wait on sysfs requests
Lyude Paul (1):
drm/dp_mst: Fix clearing payload state on topology disable
Marc Zyngier (1):
irqchip/gic-v4: Provide irq_retrigger to avoid circular locking
dependency
Martin Blumenstingl (1):
thermal: devfreq_cooling: inline all stubs for
CONFIG_DEVFREQ_THERMAL=n
Masami Hiramatsu (1):
ftrace/kprobe: Show the maxactive number on kprobe_events
Mathias Nyman (1):
xhci: bail out early if driver can't accress host in resume
Matt Ranostay (1):
media: i2c: video-i2c: fix build errors due to 'imply hwmon'
Matthew Garrett (1):
tpm: Don't make log failures fatal
Maxime Ripard (1):
arm64: dts: allwinner: h6: Fix PMU compatible
Michael Ellerman (1):
powerpc/64/tm: Don't let userspace set regs->trap via sigreturn
Michael Mueller (1):
s390/diag: fix display of diagnose call statistics
Michael Wang (1):
sched: Avoid scale real weight down to zero
Michal Hocko (1):
selftests: vm: drop dependencies on page flags from mlock2 tests
Mikulas Patocka (1):
dm writecache: add cond_resched to avoid CPU hangs
Nathan Chancellor (2):
rtc: omap: Use define directive for PIN_CONFIG_ACTIVE_HIGH
misc: echo: Remove unnecessary parentheses and simplify check for zero
Neil Armstrong (1):
usb: dwc3: core: add support for disabling SS instances in park mode
Oliver O'Halloran (1):
cpufreq: powernv: Fix use-after-free
Ondrej Jirman (2):
ARM: dts: sun8i-a83t-tbs-a711: HM5065 doesn't like such a high voltage
bus: sunxi-rsb: Return correct data when mixing 16-bit and 8-bit reads
Paul Cercueil (1):
clk: ingenic/jz4770: Exit with error if CGU init failed
Qian Cai (1):
ext4: fix a data race at inode->i_blocks
Qu Wenruo (1):
btrfs: qgroup: ensure qgroup_rescan_running is only set when the
worker is at least queued
Raju Rangoju (1):
cxgb4/ptp: pass the sign of offset delta in FW CMD
Remi Pommarel (1):
ath9k: Handle txpower changes even when TPC is disabled
Robbie Ko (1):
btrfs: fix missing semaphore unlock in btrfs_sync_file
Rosioru Dragos (1):
crypto: mxs-dcp - fix scatterlist linearization for hash
Sahitya Tummala (1):
block: Fix use-after-free issue accessing struct io_cq
Sam Lunt (1):
perf tools: Support Python 3.8+ in Makefile
Sasha Levin (1):
Revert "drm/dp_mst: Remove VCPI while disabling topology mgr"
Sean Christopherson (4):
KVM: nVMX: Properly handle userspace interrupt window request
KVM: x86: Allocate new rmap and large page tracking when moving
memslot
KVM: VMX: Always VMCLEAR in-use VMCSes during crash with kexec support
KVM: x86: Gracefully handle __vmalloc() failure during VM allocation
Sean V Kelley (1):
PCI: Add boot interrupt quirk mechanism for Xeon chipsets
Segher Boessenkool (1):
powerpc: Add attributes for setjmp/longjmp
Shetty, Harshini X (EXT-Sony Mobile) (1):
dm verity fec: fix memory leak in verity_fec_dtr
Simon Gander (1):
hfsplus: fix crash and filesystem corruption when deleting files
Sreekanth Reddy (1):
scsi: mpt3sas: Fix kernel panic observed on soft HBA unplug
Sriharsha Allenki (1):
usb: gadget: f_fs: Fix use after free issue as part of queue failure
Steffen Maier (1):
scsi: zfcp: fix missing erp_lock in port recovery trigger for
point-to-point
Stephan Gerhold (1):
media: venus: hfi_parser: Ignore HEVC encoding for V1
Subash Abhinov Kasiviswanathan (1):
net: qualcomm: rmnet: Allow configuration updates to existing devices
Sungbo Eo (2):
irqchip/versatile-fpga: Handle chained IRQs properly
irqchip/versatile-fpga: Apply clear-mask earlier
Takashi Iwai (6):
ALSA: usb-audio: Add mixer workaround for TRX40 and co
ALSA: hda: Add driver blacklist
ALSA: hda: Fix potential access overflow in beep helper
ALSA: ice1724: Fix invalid access for enumerated ctl items
ALSA: pcm: oss: Fix regression by buffer overflow fix
ALSA: hda/realtek - Add quirk for MSI GL63
Thinh Nguyen (1):
usb: gadget: composite: Inform controller driver of self-powered
Thomas Gleixner (1):
x86/entry/32: Add missing ASM_CLAC to general_protection entry
Thomas Hebb (3):
ALSA: doc: Document PC Beep Hidden Register on Realtek ALC256
ALSA: hda/realtek - Set principled PC Beep configuration for ALC256
ALSA: hda/realtek - Remove now-unnecessary XPS 13 headphone noise
fixups
Thomas Hellstrom (1):
x86: Don't let pgprot_modify() change the page encryption bit
Trond Myklebust (1):
NFS: Fix a page leak in nfs_destroy_unlinked_subrequests()
Vasily Averin (3):
tpm: tpm1_bios_measurements_next should increase position index
tpm: tpm2_bios_measurements_next should increase position index
pstore: pstore_ftrace_seq_next should increase position index
Vitaly Kuznetsov (1):
KVM: VMX: fix crash cleanup when KVM wasn't used
Wen Yang (1):
ipmi: fix hung processes in __get_guid()
Xu Wang (1):
qlcnic: Fix bad kzalloc null test
Yang Xu (1):
KEYS: reaching the keys quotas correctly
Yicong Yang (1):
PCI/ASPM: Clear the correct bits when enabling L1 substates
YueHaibing (1):
powerpc/pseries: Drop pointless static qualifier in vpa_debugfs_init()
Yury Norov (1):
uapi: rename ext2_swab() to swab() and share globally in swab.h
Zheng Wei (1):
net: vxge: fix wrong __VA_ARGS__ usage
Zhenzhong Duan (1):
x86/speculation: Remove redundant arch_smt_update() invocation
Zhiqiang Liu (1):
block, bfq: fix use-after-free in bfq_idle_slice_timer_body
chenqiwu (1):
pstore/platform: fix potential mem leak if pstore_init_fs failed
이경택 (4):
ASoC: fix regwmask
ASoC: dapm: connect virtual mux with default value
ASoC: dpcm: allow start or stop during pause for backend
ASoC: topology: use name_prefix for new kcontrol
Documentation/sound/hd-audio/index.rst | 1 +
Documentation/sound/hd-audio/models.rst | 2 -
Documentation/sound/hd-audio/realtek-pc-beep.rst | 129 ++++++++++++
Makefile | 2 +-
arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts | 4 +-
arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi | 3 +-
arch/arm64/kernel/armv8_deprecated.c | 2 +-
arch/mips/cavium-octeon/octeon-irq.c | 3 +
arch/mips/mm/tlbex.c | 5 +-
arch/powerpc/include/asm/book3s/64/hash-4k.h | 6 +
arch/powerpc/include/asm/book3s/64/hash-64k.h | 8 +-
arch/powerpc/include/asm/book3s/64/pgtable.h | 4 +-
arch/powerpc/include/asm/book3s/64/radix.h | 5 +
arch/powerpc/include/asm/drmem.h | 4 +-
arch/powerpc/include/asm/setjmp.h | 6 +-
arch/powerpc/kernel/Makefile | 3 -
arch/powerpc/kernel/kprobes.c | 3 +
arch/powerpc/kernel/signal_64.c | 4 +-
arch/powerpc/mm/tlb_nohash_low.S | 12 +-
arch/powerpc/platforms/pseries/hotplug-memory.c | 8 +-
arch/powerpc/platforms/pseries/lpar.c | 2 +-
arch/powerpc/sysdev/xive/common.c | 12 +-
arch/powerpc/sysdev/xive/native.c | 4 +-
arch/powerpc/sysdev/xive/spapr.c | 4 +-
arch/powerpc/sysdev/xive/xive-internal.h | 7 +
arch/powerpc/xmon/Makefile | 3 -
arch/s390/kernel/diag.c | 2 +-
arch/s390/kvm/vsie.c | 1 +
arch/s390/mm/gmap.c | 6 +-
arch/x86/boot/compressed/head_32.S | 2 +-
arch/x86/boot/compressed/head_64.S | 4 +-
arch/x86/entry/entry_32.S | 1 +
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/include/asm/pgtable.h | 7 +-
arch/x86/include/asm/pgtable_types.h | 2 +-
arch/x86/kernel/acpi/boot.c | 2 +-
arch/x86/kvm/svm.c | 4 +
arch/x86/kvm/vmx.c | 110 ++++------
arch/x86/kvm/x86.c | 21 +-
arch/x86/platform/efi/efi_64.c | 4 +-
block/bfq-iosched.c | 16 +-
block/blk-ioc.c | 7 +
block/blk-settings.c | 3 +
drivers/ata/libata-pmp.c | 1 +
drivers/ata/libata-scsi.c | 9 +-
drivers/base/firmware_loader/fallback.c | 2 +-
drivers/block/null_blk_main.c | 10 +-
drivers/block/xen-blkfront.c | 17 +-
drivers/bus/sunxi-rsb.c | 2 +-
drivers/char/ipmi/ipmi_msghandler.c | 4 +-
drivers/char/tpm/eventlog/common.c | 12 +-
drivers/char/tpm/eventlog/tpm1.c | 2 +-
drivers/char/tpm/eventlog/tpm2.c | 2 +-
drivers/char/tpm/tpm-chip.c | 4 +-
drivers/char/tpm/tpm.h | 2 +-
drivers/clk/ingenic/jz4770-cgu.c | 4 +-
drivers/cpufreq/imx6q-cpufreq.c | 3 +
drivers/cpufreq/powernv-cpufreq.c | 6 +
drivers/crypto/caam/caamalg_desc.c | 16 +-
drivers/crypto/ccree/cc_aead.c | 56 +++--
drivers/crypto/ccree/cc_aead.h | 1 +
drivers/crypto/ccree/cc_buffer_mgr.c | 108 +++++-----
drivers/crypto/mxs-dcp.c | 58 +++--
drivers/firmware/arm_sdei.c | 32 ++-
drivers/firmware/efi/efi.c | 2 +-
drivers/gpu/drm/drm_dp_mst_topology.c | 19 +-
drivers/gpu/drm/drm_pci.c | 25 +--
drivers/gpu/drm/etnaviv/etnaviv_perfmon.c | 103 +++++++--
drivers/i2c/busses/i2c-st.c | 1 +
drivers/infiniband/hw/mlx5/main.c | 6 +-
drivers/input/serio/i8042-x86ia64io.h | 11 +
drivers/irqchip/irq-gic-v3-its.c | 6 +
drivers/irqchip/irq-versatile-fpga.c | 18 +-
drivers/md/dm-verity-fec.c | 1 +
drivers/md/dm-writecache.c | 6 +-
drivers/md/dm-zoned-metadata.c | 1 -
drivers/md/md.c | 2 +-
drivers/media/i2c/ov5695.c | 49 +++--
drivers/media/i2c/video-i2c.c | 2 +-
drivers/media/platform/qcom/venus/hfi_parser.c | 1 +
drivers/media/platform/ti-vpe/cal.c | 16 +-
drivers/mfd/dln2.c | 9 +-
drivers/misc/echo/echo.c | 2 +-
drivers/mtd/nand/spi/core.c | 17 +-
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c | 3 +
drivers/net/ethernet/neterion/vxge/vxge-config.h | 2 +-
drivers/net/ethernet/neterion/vxge/vxge-main.h | 14 +-
.../net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2 +-
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 31 +--
drivers/net/wireless/ath/ath9k/main.c | 3 +
drivers/nvme/host/core.c | 11 +
drivers/nvme/host/fc.c | 14 +-
drivers/nvme/target/fcloop.c | 1 -
drivers/pci/endpoint/pci-epc-mem.c | 10 +-
drivers/pci/hotplug/pciehp_hpc.c | 14 +-
drivers/pci/pcie/aspm.c | 4 +-
drivers/pci/quirks.c | 80 ++++++-
drivers/pci/switch/switchtec.c | 2 +-
drivers/rtc/rtc-omap.c | 4 +-
drivers/s390/scsi/zfcp_erp.c | 2 +-
drivers/scsi/lpfc/lpfc_nvme.c | 2 -
drivers/scsi/mpt3sas/mpt3sas_scsih.c | 8 +-
drivers/scsi/qla2xxx/qla_nvme.c | 1 -
drivers/staging/erofs/utils.c | 2 +-
drivers/usb/dwc3/core.c | 5 +
drivers/usb/dwc3/core.h | 4 +
drivers/usb/gadget/composite.c | 9 +
drivers/usb/gadget/function/f_fs.c | 1 +
drivers/usb/host/xhci.c | 4 +-
fs/btrfs/async-thread.c | 8 +
fs/btrfs/async-thread.h | 1 +
fs/btrfs/delayed-inode.c | 13 ++
fs/btrfs/disk-io.c | 27 ++-
fs/btrfs/file.c | 11 +
fs/btrfs/qgroup.c | 11 +-
fs/btrfs/relocation.c | 35 ++--
fs/exec.c | 2 +-
fs/ext4/inode.c | 2 +-
fs/filesystems.c | 4 +-
fs/gfs2/glock.c | 3 +
fs/hfsplus/attributes.c | 4 +
fs/nfs/write.c | 1 +
fs/ocfs2/alloc.c | 4 +
fs/pstore/inode.c | 5 +-
fs/pstore/platform.c | 4 +-
include/linux/devfreq_cooling.h | 2 +-
include/linux/iocontext.h | 1 +
include/linux/mlx5/mlx5_ifc.h | 9 +-
include/linux/nvme-fc-driver.h | 4 -
include/linux/pci-epc.h | 3 +
include/linux/sched.h | 4 +-
include/linux/swab.h | 1 +
include/uapi/linux/swab.h | 10 +
kernel/cpu.c | 5 +-
kernel/irq/irqdomain.c | 10 +-
kernel/kmod.c | 4 +-
kernel/locking/lockdep.c | 4 +
kernel/sched/sched.h | 8 +-
kernel/signal.c | 2 +-
kernel/trace/trace_kprobe.c | 2 +
lib/find_bit.c | 16 +-
mm/page_alloc.c | 8 +-
mm/slub.c | 2 +-
security/keys/key.c | 2 +-
security/keys/keyctl.c | 4 +-
sound/core/oss/pcm_plugin.c | 32 ++-
sound/pci/hda/hda_beep.c | 6 +-
sound/pci/hda/hda_intel.c | 16 ++
sound/pci/hda/patch_realtek.c | 50 +----
sound/pci/ice1712/prodigy_hifi.c | 4 +-
sound/soc/soc-dapm.c | 8 +-
sound/soc/soc-ops.c | 4 +-
sound/soc/soc-pcm.c | 6 +-
sound/soc/soc-topology.c | 2 +-
sound/usb/mixer_maps.c | 28 +++
tools/gpio/Makefile | 2 +-
tools/perf/Makefile.config | 11 +-
tools/testing/selftests/vm/mlock2-tests.c | 233 ++++-----------------
tools/testing/selftests/x86/ptrace_syscall.c | 8 +-
159 files changed, 1190 insertions(+), 777 deletions(-)
create mode 100644 Documentation/sound/hd-audio/realtek-pc-beep.rst
--
1.8.3
From: huangjun <huangjun63(a)huawei.com>
To let the ACPI GED device support wakeup from freeze, implement the
suspend/resume callbacks. These two callbacks invoke the ACPI _GPO and
_GPP methods to set the sleep flag and to handle debounce.
Signed-off-by: Huangjun <huangjun63(a)huawei.com>
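For context, a minimal sketch of the timer pattern the resume path below
relies on: timer_setup() registers the timer against its containing
structure, and the callback recovers that structure with from_timer(),
so no extra context pointer is needed. The struct and function names
here are illustrative, not from the patch.
#include <linux/timer.h>
struct ged_example {
	struct timer_list timer;
	/* other per-device state */
};
static void ged_example_cb(struct timer_list *t)
{
	/* recover the structure that embeds 'timer' */
	struct ged_example *g = from_timer(g, t, timer);
	/* ... act on g, e.g. run the post-resume _GPP calls ... */
}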
---
drivers/acpi/evged.c | 82 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 82 insertions(+)
diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
index f13ba2c07667..84656fc09d15 100644
--- a/drivers/acpi/evged.c
+++ b/drivers/acpi/evged.c
@@ -46,11 +46,14 @@
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/acpi.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
#define MODULE_NAME "acpi-ged"
struct acpi_ged_device {
struct device *dev;
+ struct timer_list timer;
struct list_head event_list;
};
@@ -148,6 +151,8 @@ static int ged_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to parse the _CRS record\n");
return -EINVAL;
}
+
+ timer_setup(&geddev->timer, NULL, 0);
platform_set_drvdata(pdev, geddev);
return 0;
@@ -164,6 +169,7 @@ static void ged_shutdown(struct platform_device *pdev)
dev_dbg(geddev->dev, "GED releasing GSI %u @ IRQ %u\n",
event->gsi, event->irq);
}
+ del_timer(&geddev->timer);
}
static int ged_remove(struct platform_device *pdev)
@@ -177,6 +183,78 @@ static const struct acpi_device_id ged_acpi_ids[] = {
{},
};
+#ifdef CONFIG_PM_SLEEP
+static acpi_status ged_acpi_execute(struct device *dev, char* method, u64 arg)
+{
+ acpi_status acpi_ret;
+ acpi_handle method_handle;
+
+ acpi_ret = acpi_get_handle(ACPI_HANDLE(dev), method, &method_handle);
+
+ if (ACPI_FAILURE(acpi_ret)) {
+ dev_err(dev, "cannot locate %s method\n", method);
+ return AE_NOT_FOUND;
+ }
+
+ acpi_ret = acpi_execute_simple_method(method_handle, NULL, arg);
+ if (ACPI_FAILURE(acpi_ret)) {
+ dev_err(dev, "%s method execution failed\n", method);
+ return AE_ERROR;
+ }
+
+ return AE_OK;
+}
+
+static void ged_timer_callback(struct timer_list *t)
+{
+ struct acpi_ged_device *geddev = from_timer(geddev, t, timer);
+ struct acpi_ged_event *event, *next;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ ged_acpi_execute(geddev->dev, "_GPP", event->gsi);
+ }
+}
+
+static int ged_suspend(struct device *dev)
+{
+ struct acpi_ged_device *geddev = dev_get_drvdata(dev);
+ struct acpi_ged_event *event, *next;
+ acpi_status acpi_ret;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ acpi_ret = ged_acpi_execute(dev, "_GPO", event->gsi);
+
+ if (acpi_ret == AE_ERROR)
+ return -EINVAL;
+
+ enable_irq_wake(event->irq);
+ }
+ return 0;
+}
+
+static int ged_resume(struct device *dev)
+{
+ struct acpi_ged_device *geddev = dev_get_drvdata(dev);
+ struct acpi_ged_event *event, *next;
+
+ list_for_each_entry_safe(event, next, &geddev->event_list, node) {
+ disable_irq_wake(event->irq);
+ }
+
+	/* use a timer to implement the 4s debounce */
+ geddev->timer.expires = jiffies + (4 * HZ);
+ geddev->timer.function = ged_timer_callback;
+ add_timer(&geddev->timer);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ged_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ged_suspend, ged_resume)
+};
+#endif
+
+
static struct platform_driver ged_driver = {
.probe = ged_probe,
.remove = ged_remove,
@@ -184,6 +262,10 @@ static struct platform_driver ged_driver = {
.driver = {
.name = MODULE_NAME,
.acpi_match_table = ACPI_PTR(ged_acpi_ids),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &ged_pm_ops,
+#endif
+
},
};
builtin_platform_driver(ged_driver);
--
2.20.1
From: Sunnanyong <sunnanyong(a)huawei.com>
To support S4 (Suspend-to-Disk), it is necessary to guarantee that the
ITS tables are at the same address in the booting kernel and in the
resumed kernel. That covers all the ITS tables as well as the RDs'.
To support this, allocate the ITT memory from a fixed memory pool
instead.
Signed-off-by: Sunnanyong <sunnanyong(a)huawei.com>
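For orientation, a worked example of the slot arithmetic used by
its_mem_pool_alloc() below, assuming the constants from this patch:
/* Each 4 MiB pool (ITS_MEM_POOL_SIZE) is carved into fixed 512-byte
 * ITT slots (SZ_512), indexed directly by dev_id:
 *
 *   slots per pool = 4 MiB / 512 B = 8192
 *   dev_id 10000  -> pool 10000 / 8192 = 1, slot 10000 % 8192 = 1808
 *
 * With ITS_MEM_POOL_MAX = 16 pools, dev_ids 0..131071 are covered;
 * anything beyond that (or a failed pool allocation) falls back to
 * kzalloc_node() in its_create_device().
 */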
---
drivers/irqchip/irq-gic-v3-its.c | 72 ++++++++++++++++++++++++++++++--
1 file changed, 69 insertions(+), 3 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 860f3ef2969e..9de585fe74fb 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -189,6 +189,14 @@ static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
+static void *its_mem_pool_alloc(int dev_id);
+#define ITS_MEM_POOL_SIZE (SZ_4M)
+#define ITS_MEM_POOL_MAX (16)
+#define GITS_OTHER_OFFSET 0x20000
+#define GITS_OTHER_REG_SIZE 0x100
+#define GITS_FUNC_REG_OFFSET 0x80
+static void *its_mem_pool[ITS_MEM_POOL_MAX] = {0};
+
static u16 get_its_list(struct its_vm *vm)
{
struct its_node *its;
@@ -2436,7 +2444,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
nr_ites = max(2, nvecs);
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
- itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+
+ itt = its_mem_pool_alloc(dev_id);
+ if (!itt)
+ itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
+
if (alloc_lpis) {
lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
if (lpi_map)
@@ -2450,7 +2462,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
kfree(dev);
- kfree(itt);
kfree(lpi_map);
kfree(col_map);
return NULL;
@@ -2486,7 +2497,6 @@ static void its_free_device(struct its_device *its_dev)
raw_spin_lock_irqsave(&its_dev->its->lock, flags);
list_del(&its_dev->entry);
raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
- kfree(its_dev->itt);
kfree(its_dev);
}
@@ -3798,11 +3808,41 @@ static int redist_disable_lpis(void)
return 0;
}
+static void its_cpu_clear_cache(void)
+{
+ struct its_node *its;
+ u64 val = 0;
+ void __iomem *func_base;
+
+ raw_spin_lock(&its_lock);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ func_base = ioremap(its->phys_base + GITS_OTHER_OFFSET,
+ GITS_OTHER_REG_SIZE);
+ if (!func_base) {
+ pr_err("ITS@%p : Unable to map ITS OTHER registers\n",
+ (void *)(its->phys_base + GITS_OTHER_OFFSET));
+ raw_spin_unlock(&its_lock);
+ return;
+ }
+
+ val = readl_relaxed(func_base + GITS_FUNC_REG_OFFSET);
+ val = val | (0x7 << 16);
+ writel_relaxed(val, func_base + GITS_FUNC_REG_OFFSET);
+ dsb(sy);
+ iounmap(func_base);
+ }
+
+ raw_spin_unlock(&its_lock);
+}
+
+
int its_cpu_init(void)
{
if (!list_empty(&its_nodes)) {
int ret;
+ its_cpu_clear_cache();
ret = redist_disable_lpis();
if (ret)
return ret;
@@ -4001,6 +4041,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct its_node *its;
bool has_v4 = false;
int err;
+ int i;
its_parent = parent_domain;
of_node = to_of_node(handle);
@@ -4014,6 +4055,16 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return -ENXIO;
}
+ for (i = 0; i < ITS_MEM_POOL_MAX; i++) {
+ if (!its_mem_pool[i]) {
+ its_mem_pool[i] = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_MEM_POOL_SIZE));
+ if (!its_mem_pool[i])
+ pr_err("err:[its mem[%d]] has no memory\n", i);
+ }
+ }
+
+
gic_rdists = rdists;
err = allocate_lpi_tables();
@@ -4035,3 +4086,18 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
return 0;
}
+
+void *its_mem_pool_alloc(int dev_id)
+{
+ int pool_num = dev_id / (ITS_MEM_POOL_SIZE / SZ_512);
+ int idx = dev_id % (ITS_MEM_POOL_SIZE / SZ_512);
+ void *addr = NULL;
+
+ if (pool_num >= ITS_MEM_POOL_MAX || !its_mem_pool[pool_num]) {
+ pr_err("[its mem[%d]] alloc error\n", pool_num);
+ return NULL;
+ }
+
+ addr = its_mem_pool[pool_num] + idx * SZ_512;
+ return addr;
+}
--
2.20.1
As a new x86 CPU vendor, Chengdu Haiguang IC Design Co., Ltd (Hygon)
is a joint venture between AMD and Haiguang Information Technology Co.,
Ltd., and aims to provide high-performance x86 processors for the China
server market.
Hygon's first-generation processor (Dhyana) originates from AMD
technology and shares most of its architecture with AMD's family 17h,
but has a different CPU vendor ID ("HygonGenuine"), PCIe device vendor
ID (0x1D94), and family series number (family 18h).
To enable Linux kernel support for Hygon's CPU, we added a new vendor
type (X86_VENDOR_HYGON, with a value of 9) in
arch/x86/include/asm/processor.h, and shared most of the kernel support
code with AMD family 17h.
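As an illustration only (not part of this series), the new vendor can
be observed from user space by reading the CPUID leaf-0 vendor string,
which is "HygonGenuine" on Dhyana parts:
#include <stdio.h>
#include <string.h>
#include <cpuid.h>
int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	char vendor[13] = { 0 };
	/* CPUID leaf 0: the vendor string is returned in EBX, EDX, ECX */
	if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx))
		return 1;
	memcpy(vendor, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	printf("vendor: %s\n", vendor);	/* "HygonGenuine" on Dhyana */
	return 0;
}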
Since Hygon will negotiate with AMD to make sure that only Hygon uses
family 18h, we try to minimize code modification and share most code
with AMD under this consideration.
This patch series has been applied and tested successfully on Hygon
Dhyana SoC silicon. It was also tested on an AMD EPYC (family 17h)
processor, where it works fine and does no harm to the existing code.
This patch series is created for the current branch openEuler-1.0-LTS.
References:
[1] Linux kernel patches for Hygon Dhyana, merged in 4.20:
https://git.kernel.org/tip/c9661c1e80b609cd038db7c908e061f0535804ef
[2] MSR and CPUID definition:
https://www.amd.com/system/files/TechDocs/54945_PPR_Family_17h_Models_00h-0…
Pu Wen (22):
x86/cpu: Create Hygon Dhyana architecture support file
x86/cpu: Get cache info and setup cache cpumap for Hygon Dhyana
x86/cpu/mtrr: Support TOP_MEM2 and get MTRR number
x86/smpboot: Do not use BSP INIT delay and MWAIT to idle on Dhyana
x86/events: Add Hygon Dhyana support to PMU infrastructure
x86/alternative: Init ideal_nops for Hygon Dhyana
x86/amd_nb: Check vendor in AMD-only functions
x86/pci, x86/amd_nb: Add Hygon Dhyana support to PCI and northbridge
x86/apic: Add Hygon Dhyana support
x86/bugs: Add Hygon Dhyana to the respective mitigation machinery
x86/mce: Add Hygon Dhyana support to the MCA infrastructure
x86/kvm: Add Hygon Dhyana support to KVM
x86/xen: Add Hygon Dhyana support to Xen
ACPI: Add Hygon Dhyana support
cpufreq: Add Hygon Dhyana support
EDAC, amd64: Add Hygon Dhyana support
tools/cpupower: Add Hygon Dhyana support
hwmon: (k10temp) Add Hygon Dhyana support
x86/CPU/hygon: Fix phys_proc_id calculation logic for multi-die
processors
i2c-piix4: Add Hygon Dhyana SMBus support
x86/amd_nb: Make hygon_nb_misc_ids static
NTB: Add Hygon Device ID
Documentation/i2c/busses/i2c-piix4 | 2 +
MAINTAINERS | 6 +
arch/x86/Kconfig.cpu | 14 +
arch/x86/events/amd/core.c | 4 +
arch/x86/events/amd/uncore.c | 20 +-
arch/x86/events/core.c | 4 +
arch/x86/include/asm/amd_nb.h | 3 +
arch/x86/include/asm/cacheinfo.h | 1 +
arch/x86/include/asm/kvm_emulate.h | 4 +
arch/x86/include/asm/mce.h | 2 +
arch/x86/include/asm/processor.h | 3 +-
arch/x86/include/asm/virtext.h | 5 +-
arch/x86/kernel/alternative.c | 4 +
arch/x86/kernel/amd_nb.c | 49 ++-
arch/x86/kernel/apic/apic.c | 7 +
arch/x86/kernel/apic/probe_32.c | 1 +
arch/x86/kernel/cpu/Makefile | 1 +
arch/x86/kernel/cpu/bugs.c | 4 +-
arch/x86/kernel/cpu/cacheinfo.c | 31 +-
arch/x86/kernel/cpu/common.c | 4 +
arch/x86/kernel/cpu/cpu.h | 1 +
arch/x86/kernel/cpu/hygon.c | 413 ++++++++++++++++++
arch/x86/kernel/cpu/mce/core.c | 20 +-
arch/x86/kernel/cpu/mce/severity.c | 3 +-
arch/x86/kernel/cpu/mtrr/cleanup.c | 3 +-
arch/x86/kernel/cpu/mtrr/mtrr.c | 2 +-
arch/x86/kernel/cpu/perfctr-watchdog.c | 2 +
arch/x86/kernel/smpboot.c | 4 +-
arch/x86/kvm/emulate.c | 11 +-
arch/x86/pci/amd_bus.c | 6 +-
arch/x86/xen/pmu.c | 12 +-
drivers/acpi/acpi_pad.c | 1 +
drivers/acpi/processor_idle.c | 1 +
drivers/cpufreq/acpi-cpufreq.c | 5 +
drivers/cpufreq/amd_freq_sensitivity.c | 9 +-
drivers/edac/amd64_edac.c | 10 +-
drivers/edac/mce_amd.c | 4 +-
drivers/hwmon/k10temp.c | 3 +-
drivers/i2c/busses/Kconfig | 1 +
drivers/i2c/busses/i2c-piix4.c | 15 +-
drivers/ntb/hw/amd/ntb_hw_amd.c | 1 +
include/linux/pci_ids.h | 2 +
tools/power/cpupower/utils/cpufreq-info.c | 6 +-
tools/power/cpupower/utils/helpers/amd.c | 4 +-
tools/power/cpupower/utils/helpers/cpuid.c | 8 +-
tools/power/cpupower/utils/helpers/helpers.h | 2 +-
tools/power/cpupower/utils/helpers/misc.c | 2 +-
.../utils/idle_monitor/mperf_monitor.c | 3 +-
48 files changed, 668 insertions(+), 55 deletions(-)
create mode 100644 arch/x86/kernel/cpu/hygon.c
--
2.23.0
Alexander Usyskin (1):
mei: me: add cedar fork device ids
Amritha Nambiar (1):
net: Fix Tx hash bound checking
Arun KS (1):
arm64: Fix size of __early_cpu_boot_status
Avihai Horon (1):
RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow
Chris Lew (1):
rpmsg: glink: Remove chunk size word align warning
Daniel Jordan (1):
padata: always acquire cpu_hotplug_lock before pinst->lock
David Ahern (1):
tools/accounting/getdelays.c: fix netlink attribute length
David Howells (1):
rxrpc: Fix sendmsg(MSG_WAITALL) handling
Eugene Syromiatnikov (1):
coresight: do not use the BIT() macro in the UAPI header
Eugeniy Paltsev (1):
initramfs: restore default compression behavior
Florian Fainelli (2):
net: dsa: bcm_sf2: Do not register slave MDIO bus with OF
net: dsa: bcm_sf2: Ensure correct sub-node is parsed
Geoffrey Allott (1):
ALSA: hda/ca0132 - Add Recon3Di quirk to handle integrated sound on
EVGA X99 Classified motherboard
Gerd Hoffmann (1):
drm/bochs: downgrade pci_request_region failure from error to warning
Greg Kroah-Hartman (1):
Linux 4.19.115
Hans Verkuil (1):
drm_dp_mst_topology: fix broken
drm_dp_sideband_parse_remote_dpcd_read()
Hans de Goede (2):
extcon: axp288: Add wakeup support
power: supply: axp288_charger: Add special handling for HP Pavilion x2
10
Ilya Dryomov (1):
ceph: canonicalize server path in place
James Zhu (1):
drm/amdgpu: fix typo for vcn1 idle check
Jarod Wilson (1):
ipv6: don't auto-add link-local address to lag ports
Jason A. Donenfeld (1):
random: always use batched entropy for get_random_u{32, 64}
Jason Gunthorpe (2):
RDMA/ucma: Put a lock around every call to the rdma_cm layer
RDMA/cma: Teach lockdep about the order of rtnl and lock
Jisheng Zhang (1):
net: stmmac: dwmac1000: fix out-of-bounds mac address reg setting
Kaike Wan (2):
IB/hfi1: Call kobject_put() when kobject_init_and_add() fails
IB/hfi1: Fix memory leaks in sysfs registration and unregistration
Kishon Vijay Abraham I (2):
misc: pci_endpoint_test: Fix to support > 10 pci-endpoint-test devices
misc: pci_endpoint_test: Avoid using module parameter to determine
irqtype
Len Brown (2):
tools/power turbostat: Fix gcc build warnings
tools/power turbostat: Fix missing SYS_LPI counter on some Chromebooks
Lucas Stach (1):
drm/etnaviv: replace MMU flush marker with flush sequence
Marcelo Ricardo Leitner (1):
sctp: fix possibly using a bad saddr with a given dst
Mario Kleiner (1):
drm/amd/display: Add link_rate quirk for Apple 15" MBP 2017
Martin Kaiser (1):
hwrng: imx-rngc - fix an error path
Oleksij Rempel (1):
net: phy: micrel: kszphy_resume(): add delay after genphy_resume()
before accessing PHY registers
Paul Cercueil (1):
ASoC: jz4740-i2s: Fix divider written at incorrect offset in register
Petr Machata (1):
mlxsw: spectrum_flower: Do not stop at FLOW_ACTION_VLAN_MANGLE
Prabhath Sajeepa (1):
nvme-rdma: Avoid double freeing of async event data
Qian Cai (1):
ipv4: fix a RCU-list lock in fib_triestat_seq_show
Qiujun Huang (3):
sctp: fix refcount bug in sctp_wfree
Bluetooth: RFCOMM: fix ODEBUG bug in rfcomm_dev_ioctl
fbcon: fix null-ptr-deref in fbcon_switch
Rob Clark (2):
drm/msm: stop abusing dma_map/unmap for cache
drm/msm: Use the correct dma_sync calls in msm_gem
Roger Quadros (1):
usb: dwc3: don't set gadget->is_otg flag
Sean Young (1):
media: rc: IR signal for Panasonic air conditioner too long
Taniya Das (1):
clk: qcom: rcg: Return failure for RCG update
Thinh Nguyen (1):
usb: dwc3: gadget: Wrap around when skip TRBs
William Dauchy (1):
net, ip_tunnel: fix interface lookup with no key
Xiubo Li (1):
ceph: remove the extra slashes in the server path
YueHaibing (1):
misc: rtsx: set correct pcr_ops for rts522A
Makefile | 2 +-
arch/arm64/kernel/head.S | 2 +-
drivers/char/hw_random/imx-rngc.c | 4 +-
drivers/char/random.c | 20 ++------
drivers/clk/qcom/clk-rcg2.c | 2 +-
drivers/extcon/extcon-axp288.c | 32 ++++++++++++
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2 +-
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 11 +++++
drivers/gpu/drm/bochs/bochs_hw.c | 6 +--
drivers/gpu/drm/drm_dp_mst_topology.c | 1 +
drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 10 ++--
drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 1 +
drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 6 +--
drivers/gpu/drm/etnaviv/etnaviv_mmu.h | 2 +-
drivers/gpu/drm/msm/msm_gem.c | 47 ++++++++++++++++--
drivers/infiniband/core/cma.c | 14 ++++++
drivers/infiniband/core/ucma.c | 49 ++++++++++++++++++-
drivers/infiniband/hw/hfi1/sysfs.c | 26 +++++++---
drivers/media/rc/lirc_dev.c | 2 +-
drivers/misc/cardreader/rts5227.c | 1 +
drivers/misc/mei/hw-me-regs.h | 2 +
drivers/misc/mei/pci-me.c | 2 +
drivers/misc/pci_endpoint_test.c | 14 ++++--
drivers/net/dsa/bcm_sf2.c | 9 +++-
.../net/ethernet/mellanox/mlxsw/spectrum_flower.c | 8 +--
.../net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2 +-
drivers/net/phy/micrel.c | 7 +++
drivers/nvme/host/rdma.c | 8 +--
drivers/power/supply/axp288_charger.c | 57 +++++++++++++++++++++-
drivers/rpmsg/qcom_glink_native.c | 3 --
drivers/usb/dwc3/gadget.c | 3 +-
drivers/video/fbdev/core/fbcon.c | 3 ++
fs/ceph/super.c | 56 +++++++++++++--------
fs/ceph/super.h | 2 +-
include/uapi/linux/coresight-stm.h | 6 ++-
kernel/padata.c | 4 +-
net/bluetooth/rfcomm/tty.c | 4 +-
net/core/dev.c | 2 +
net/ipv4/fib_trie.c | 3 ++
net/ipv4/ip_tunnel.c | 6 +--
net/ipv6/addrconf.c | 4 ++
net/rxrpc/sendmsg.c | 4 +-
net/sctp/ipv6.c | 20 +++++---
net/sctp/protocol.c | 28 +++++++----
net/sctp/socket.c | 31 +++++++++---
sound/pci/hda/patch_ca0132.c | 1 +
sound/soc/jz4740/jz4740-i2s.c | 2 +-
tools/accounting/getdelays.c | 2 +-
tools/power/x86/turbostat/turbostat.c | 27 +++++-----
usr/Kconfig | 22 ++++-----
50 files changed, 434 insertions(+), 148 deletions(-)
--
1.8.3
From: "Paul E. McKenney" <paulmck(a)kernel.org>
mainline inclusion
from mainline-v5.6-rc1
commit 844a378de3372c923909681706d62336d702531e
category: bugfix
bugzilla: 28851
CVE: NA
-------------------------------------------------------------------------
The ->srcu_last_gp_end field is accessed from any CPU at any time
by synchronize_srcu(), so non-initialization references need to use
READ_ONCE() and WRITE_ONCE(). This commit therefore makes that change.
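A minimal sketch of the pattern being applied (illustrative names, not
from the patch): a field written on one CPU and read concurrently on
others needs WRITE_ONCE()/READ_ONCE() so the compiler cannot tear,
fuse, or reorder the plain accesses.
#include <linux/compiler.h>
static unsigned long shared_stamp;	/* stands in for srcu_last_gp_end */
static void writer_side(unsigned long now)
{
	WRITE_ONCE(shared_stamp, now);		/* srcu_gp_end() side */
}
static unsigned long reader_side(void)
{
	return READ_ONCE(shared_stamp);		/* srcu_might_be_idle() side */
}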
Reported-by: syzbot+08f3e9d26e5541e1ecf2(a)syzkaller.appspotmail.com
Acked-by: Marco Elver <elver(a)google.com>
Signed-off-by: Paul E. McKenney <paulmck(a)kernel.org>
Conflicts:
kernel/rcu/srcutree.c
Signed-off-by: Zhen Lei <thunder.leizhen(a)huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
kernel/rcu/srcutree.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 4b0a6e3..7bd0204 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -552,7 +552,7 @@ static void srcu_gp_end(struct srcu_struct *sp)
idx = rcu_seq_state(sp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
cbdelay = srcu_get_delay(sp);
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+ WRITE_ONCE(sp->srcu_last_gp_end, ktime_get_mono_fast_ns());
rcu_seq_end(&sp->srcu_gp_seq);
gpseq = rcu_seq_current(&sp->srcu_gp_seq);
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
@@ -780,6 +780,7 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
unsigned long flags;
struct srcu_data *sdp;
unsigned long t;
+ unsigned long tlast;
/* If the local srcu_data structure has callbacks, not idle. */
local_irq_save(flags);
@@ -798,9 +799,9 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
/* First, see if enough time has passed since the last GP. */
t = ktime_get_mono_fast_ns();
+ tlast = READ_ONCE(sp->srcu_last_gp_end);
if (exp_holdoff == 0 ||
- time_in_range_open(t, sp->srcu_last_gp_end,
- sp->srcu_last_gp_end + exp_holdoff))
+ time_in_range_open(t, tlast, tlast + exp_holdoff))
return false; /* Too soon after last GP. */
/* Next, check for probable idleness. */
--
1.8.3
From: Yunsheng Lin <linyunsheng(a)huawei.com>
mainline inclusion
from mainline-v5.4-rc1
commit 6b0c54e7f2715997c366e8374209bc74259b0a59
category: bugfix
bugzilla: 21318
CVE: NA
-------------------------------------------------------------------------
The cookie is dereferenced before null checking in the function
iommu_dma_init_domain.
This patch moves the dereferencing after the null checking.
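The bug class, in miniature (illustrative names only): member accesses
must come after the pointer has been validated.
#include <linux/errno.h>
struct cookie_example {
	int iovad;
};
static int init_example(struct cookie_example *cookie)
{
	int *iovad;
	if (!cookie)			/* validate first ... */
		return -EINVAL;
	iovad = &cookie->iovad;		/* ... then touch members */
	return *iovad;
}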
Fixes: fdbe574eb693 ("iommu/dma: Allow MSI-only cookies")
Signed-off-by: Yunsheng Lin <linyunsheng(a)huawei.com>
Signed-off-by: Joerg Roedel <jroedel(a)suse.de>
Conflicts:
drivers/iommu/dma-iommu.c
Signed-off-by: Zhen Lei <thunder.leizhen(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/iommu/dma-iommu.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 64ae17e8b..b68d9fd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -290,13 +290,15 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
- struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
+ struct iova_domain *iovad;
int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
+ iovad = &cookie->iovad;
+
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
base_pfn = max_t(unsigned long, 1, base >> order);
--
1.8.3
From: Shaozhengchao <shaozhengchao(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: 4472
-----------------------------------------------------------------------
Fix an out-of-bounds access caused by unchecked user input.
To solve the problem, the kernel driver now imposes a restriction on
each user-supplied input.
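A generic sketch of the rule applied here (illustrative helper, not
from the driver): validate a user-controlled length against a fixed
bound before it is used for any copy or index.
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
#define EXAMPLE_MAX_LEN 512		/* mirrors TOOL_COUNTER_MAX_LEN */
static int read_user_buf(void __user *ubuf, u32 len, u8 *dst)
{
	if (len > EXAMPLE_MAX_LEN)	/* reject before touching dst */
		return -EINVAL;
	if (copy_from_user(dst, ubuf, len))
		return -EFAULT;
	return 0;
}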
Signed-off-by: Shaozhengchao <shaozhengchao(a)huawei.com>
Reviewed-by: Luoshaokai <luoshaokai(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/huawei/hinic/hinic_nictool.c | 18 ++++++++++++++++++
drivers/net/ethernet/huawei/hinic/hinic_nictool.h | 2 ++
drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c | 16 +++++++++++++---
3 files changed, 33 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
index 46dd9ec..df01088 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.c
@@ -1712,6 +1712,19 @@ static u32 get_up_timeout_val(enum hinic_mod_type mod, u8 cmd)
return UP_COMP_TIME_OUT_VAL;
}
+static int check_useparam_valid(struct msg_module *nt_msg, void *buf_in)
+{
+ struct csr_write_st *csr_write_msg = (struct csr_write_st *)buf_in;
+ u32 rd_len = csr_write_msg->rd_len;
+
+ if (rd_len > TOOL_COUNTER_MAX_LEN) {
+ pr_err("Csr read or write len is invalid!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int send_to_up(void *hwdev, struct msg_module *nt_msg,
void *buf_in, u32 in_size, void *buf_out, u32 *out_size)
{
@@ -1744,6 +1757,9 @@ static int send_to_up(void *hwdev, struct msg_module *nt_msg,
}
} else if (nt_msg->up_cmd.up_db.up_api_type == API_CHAIN) {
+ if (check_useparam_valid(nt_msg, buf_in))
+ return -EINVAL;
+
if (nt_msg->up_cmd.up_db.chipif_cmd == API_CSR_WRITE) {
ret = api_csr_write(hwdev, nt_msg, buf_in,
in_size, buf_out, out_size);
@@ -1994,6 +2010,8 @@ static int get_all_chip_id_cmd(struct msg_module *nt_msg)
{
struct nic_card_id card_id;
+ memset(&card_id, 0, sizeof(card_id));
+
hinic_get_all_chip_id((void *)&card_id);
if (copy_to_user(nt_msg->out_buf, &card_id, sizeof(card_id))) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
index cfbe435..e8eccaf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_nictool.h
@@ -285,4 +285,6 @@ struct hinic_pf_info {
extern void hinic_get_io_stats(struct hinic_nic_dev *nic_dev,
struct hinic_show_item *items);
+#define TOOL_COUNTER_MAX_LEN 512
+
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
index 9536adf..eb35df6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sml_counter.c
@@ -253,9 +253,19 @@ int hinic_sm_ctr_rd64_pair(void *hwdev, u8 node, u8 instance,
ctr_rd_rsp_u rsp;
int ret;
- if (!hwdev || (0 != (ctr_id & 0x1)) || !value1 || !value2) {
- pr_err("Hwdev(0x%p) or value1(0x%p) or value2(0x%p) is NULL or ctr_id(%d) is odd number\n",
- hwdev, value1, value2, ctr_id);
+ if (!value1) {
+ pr_err("value1 is NULL for read 64 bit pair\n");
+ return -EFAULT;
+ }
+
+ if (!value2) {
+ pr_err("value2 is NULL for read 64 bit pair\n");
+ return -EFAULT;
+ }
+
+ if (!hwdev || (0 != (ctr_id & 0x1))) {
+ pr_err("Hwdev is NULL or ctr_id(%d) is odd number for read 64 bit pair\n",
+ ctr_id);
return -EFAULT;
}
--
1.8.3
From: Li Bin <huawei.libin(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 34234
CVE: NA
--------------------------------
If dxfer_len is greater than 256M, the request is invalid;
sg_common_write() should call sg_remove_request() before returning.
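The rule behind the fix, annotated (this is the patch's own hunk with
explanatory comments, not new logic): the request slot reserved earlier
by sg_add_request() must be released on this early-exit path, or it
leaks until the fd is closed.
	if (hp->dxfer_len >= SZ_256M) {
		sg_remove_request(sfp, srp);	/* undo the earlier sg_add_request() */
		return -EINVAL;			/* transfers >= 256M are invalid */
	}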
Fixes: f930c7043663 ("scsi: sg: only check for dxfer_len greater than 256M")
Signed-off-by: Li Bin <huawei.libin(a)huawei.com>
Acked-by: Douglas Gilbert <dgilbert(a)interlog.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/scsi/sg.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index c75324a..9c4b71e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -808,8 +808,10 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
(int) cmnd[0], (int) hp->cmd_len));
- if (hp->dxfer_len >= SZ_256M)
+ if (hp->dxfer_len >= SZ_256M) {
+ sg_remove_request(sfp, srp);
return -EINVAL;
+ }
k = sg_start_req(srp, cmnd);
if (k) {
--
1.8.3
[PATCH 1/2] btrfs: extent_io: Handle errors better in extent_write_full_page()
by Yang Yingliang 18 Apr '20
From: Qu Wenruo <wqu(a)suse.com>
mainline inclusion
from mainline-v5.2-rc2
commit 3065976b045f77a910809fa7699f99a1e7c0dbbb
category: bugfix
bugzilla: 13690
CVE: CVE-2019-19377
Introduce end_write_bio() for CVE-2019-19377.
-------------------------------------------------
Now that flush_write_bio() can return an error, kill the BUG_ON() first.
Then don't call flush_write_bio() unconditionally; instead, check the
return value from __extent_writepage() first.
If __extent_writepage() fails, do the cleanup and return the error
without submitting the possibly corrupted or half-baked bio.
If __extent_writepage() succeeds, call flush_write_bio() and return
the result.
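The shape of the fixed path, annotated (this is the hunk from the patch
below, with comments added for clarity):
	ret = __extent_writepage(page, wbc, &epd);
	ASSERT(ret <= 0);
	if (ret < 0) {
		end_write_bio(&epd, ret);	/* complete the unsubmitted bio as failed */
		return ret;
	}
	flush_write_bio(&epd);			/* submit only a known-good bio */
	return ret;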
Signed-off-by: Qu Wenruo <wqu(a)suse.com>
Reviewed-by: David Sterba <dsterba(a)suse.com>
Signed-off-by: David Sterba <dsterba(a)suse.com>
Conflicts:
fs/btrfs/extent_io.c
[yyl: adjust context]
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/btrfs/extent_io.c | 19 +++++++++++++++++++
1 file changed, 19 insertions(+)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 11efb4f..7f2990f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2731,6 +2731,16 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
return blk_status_to_errno(ret);
}
+/* Cleanup unsubmitted bios */
+static void end_write_bio(struct extent_page_data *epd, int ret)
+{
+ if (epd->bio) {
+ epd->bio->bi_status = errno_to_blk_status(ret);
+ bio_endio(epd->bio);
+ epd->bio = NULL;
+ }
+}
+
/*
* @opf: bio REQ_OP_* and REQ_* flags as one value
* @tree: tree so we can call our merge_bio hook
@@ -3438,6 +3448,9 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
* records are inserted to lock ranges in the tree, and as dirty areas
* are found, they are marked writeback. Then the lock bits are removed
* and the end_io handler clears the writeback ranges
+ *
+ * Return 0 if everything goes well.
+ * Return <0 for error.
*/
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
struct extent_page_data *epd)
@@ -3505,6 +3518,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
end_extent_writepage(page, ret, start, page_end);
}
unlock_page(page);
+ ASSERT(ret <= 0);
return ret;
done_unlocked:
@@ -4054,6 +4068,11 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
};
ret = __extent_writepage(page, wbc, &epd);
+ ASSERT(ret <= 0);
+ if (ret < 0) {
+ end_write_bio(&epd, ret);
+ return ret;
+ }
flush_write_bio(&epd);
return ret;
--
1.8.3
From: Shaozhengchao <shaozhengchao(a)huawei.com>
driver inclusion
category: cleanup
bugzilla: 4472
-----------------------------------------------------------------------
Delete unused source and header files
Signed-off-by: Shaozhengchao <shaozhengchao(a)huawei.com>
Reviewed-by: Luoshaokai <luoshaokai(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/huawei/hinic/hinic_common.c | 80 -
drivers/net/ethernet/huawei/hinic/hinic_common.h | 38 -
drivers/net/ethernet/huawei/hinic/hinic_dev.h | 64 -
.../net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 978 -------
.../net/ethernet/huawei/hinic/hinic_hw_api_cmd.h | 208 --
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 947 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 187 --
drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 149 --
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 1010 --------
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 239 --
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 886 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h | 265 --
drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 351 ---
drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 272 --
drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 533 ----
drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 97 -
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 597 -----
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 907 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 205 --
.../net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h | 214 --
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 878 -------
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h | 117 -
drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h | 368 ---
drivers/net/ethernet/huawei/hinic/hinic_port.c | 379 ---
drivers/net/ethernet/huawei/hinic/hinic_port.h | 198 --
.../net/ethernet/huawei/hinic/hinic_sml_table.h | 2728 --------------------
.../ethernet/huawei/hinic/hinic_sml_table_pub.h | 277 --
27 files changed, 13172 deletions(-)
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_common.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_common.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dev.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port.c
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
delete mode 100644 drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.c b/drivers/net/ethernet/huawei/hinic/hinic_common.c
deleted file mode 100644
index 02c74fd..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_common.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#include "hinic_common.h"
-
-/**
- * hinic_cpu_to_be32 - convert data to big endian 32 bit format
- * @data: the data to convert
- * @len: length of data to convert
- **/
-void hinic_cpu_to_be32(void *data, int len)
-{
- u32 *mem = data;
- int i;
-
- len = len / sizeof(u32);
-
- for (i = 0; i < len; i++) {
- *mem = cpu_to_be32(*mem);
- mem++;
- }
-}
-
-/**
- * hinic_be32_to_cpu - convert data from big endian 32 bit format
- * @data: the data to convert
- * @len: length of data to convert
- **/
-void hinic_be32_to_cpu(void *data, int len)
-{
- u32 *mem = data;
- int i;
-
- len = len / sizeof(u32);
-
- for (i = 0; i < len; i++) {
- *mem = be32_to_cpu(*mem);
- mem++;
- }
-}
-
-/**
- * hinic_set_sge - set dma area in scatter gather entry
- * @sge: scatter gather entry
- * @addr: dma address
- * @len: length of relevant data in the dma address
- **/
-void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len)
-{
- sge->hi_addr = upper_32_bits(addr);
- sge->lo_addr = lower_32_bits(addr);
- sge->len = len;
-}
-
-/**
- * hinic_sge_to_dma - get dma address from scatter gather entry
- * @sge: scatter gather entry
- *
- * Return dma address of sg entry
- **/
-dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge)
-{
- return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.h b/drivers/net/ethernet/huawei/hinic/hinic_common.h
deleted file mode 100644
index 2c06b76..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_common.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_COMMON_H
-#define HINIC_COMMON_H
-
-#include <linux/types.h>
-
-#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF)
-#define LOWER_8_BITS(data) ((data) & 0xFF)
-
-struct hinic_sge {
- u32 hi_addr;
- u32 lo_addr;
- u32 len;
-};
-
-void hinic_cpu_to_be32(void *data, int len);
-
-void hinic_be32_to_cpu(void *data, int len);
-
-void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len);
-
-dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
deleted file mode 100644
index 5186cc9..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_DEV_H
-#define HINIC_DEV_H
-
-#include <linux/netdevice.h>
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
-#include <linux/bitops.h>
-
-#include "hinic_hw_dev.h"
-#include "hinic_tx.h"
-#include "hinic_rx.h"
-
-#define HINIC_DRV_NAME "hinic"
-
-enum hinic_flags {
- HINIC_LINK_UP = BIT(0),
- HINIC_INTF_UP = BIT(1),
-};
-
-struct hinic_rx_mode_work {
- struct work_struct work;
- u32 rx_mode;
-};
-
-struct hinic_dev {
- struct net_device *netdev;
- struct hinic_hwdev *hwdev;
-
- u32 msg_enable;
- unsigned int tx_weight;
- unsigned int rx_weight;
-
- unsigned int flags;
-
- struct semaphore mgmt_lock;
- unsigned long *vlan_bitmap;
-
- struct hinic_rx_mode_work rx_mode_work;
- struct workqueue_struct *workq;
-
- struct hinic_txq *txqs;
- struct hinic_rxq *rxqs;
-
- struct hinic_txq_stats tx_stats;
- struct hinic_rxq_stats rx_stats;
-};
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
deleted file mode 100644
index c40603a..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ /dev/null
@@ -1,978 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
-#include <linux/err.h>
-#include <linux/jiffies.h>
-#include <linux/delay.h>
-#include <linux/log2.h>
-#include <linux/semaphore.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_api_cmd.h"
-
-#define API_CHAIN_NUM_CELLS 32
-
-#define API_CMD_CELL_SIZE_SHIFT 6
-#define API_CMD_CELL_SIZE_MIN (BIT(API_CMD_CELL_SIZE_SHIFT))
-
-#define API_CMD_CELL_SIZE(cell_size) \
- (((cell_size) >= API_CMD_CELL_SIZE_MIN) ? \
- (1 << (fls(cell_size - 1))) : API_CMD_CELL_SIZE_MIN)
-
-#define API_CMD_CELL_SIZE_VAL(size) \
- ilog2((size) >> API_CMD_CELL_SIZE_SHIFT)
-
-#define API_CMD_BUF_SIZE 2048
-
-/* Sizes of the members in hinic_api_cmd_cell */
-#define API_CMD_CELL_DESC_SIZE 8
-#define API_CMD_CELL_DATA_ADDR_SIZE 8
-
-#define API_CMD_CELL_ALIGNMENT 8
-
-#define API_CMD_TIMEOUT 1000
-
-#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1))
-
-#define SIZE_8BYTES(size) (ALIGN((size), 8) >> 3)
-#define SIZE_4BYTES(size) (ALIGN((size), 4) >> 2)
-
-#define RD_DMA_ATTR_DEFAULT 0
-#define WR_DMA_ATTR_DEFAULT 0
-
-enum api_cmd_data_format {
- SGE_DATA = 1, /* cell data is passed by hw address */
-};
-
-enum api_cmd_type {
- API_CMD_WRITE = 0,
-};
-
-enum api_cmd_bypass {
- NO_BYPASS = 0,
- BYPASS = 1,
-};
-
-enum api_cmd_xor_chk_level {
- XOR_CHK_DIS = 0,
-
- XOR_CHK_ALL = 3,
-};
-
-static u8 xor_chksum_set(void *data)
-{
- int idx;
- u8 *val, checksum = 0;
-
- val = data;
-
- for (idx = 0; idx < 7; idx++)
- checksum ^= val[idx];
-
- return checksum;
-}
-
-static void set_prod_idx(struct hinic_api_cmd_chain *chain)
-{
- enum hinic_api_cmd_chain_type chain_type = chain->chain_type;
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, prod_idx;
-
- addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type);
- prod_idx = hinic_hwif_read_reg(hwif, addr);
-
- prod_idx = HINIC_API_CMD_PI_CLEAR(prod_idx, IDX);
-
- prod_idx |= HINIC_API_CMD_PI_SET(chain->prod_idx, IDX);
-
- hinic_hwif_write_reg(hwif, addr, prod_idx);
-}
-
-static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain)
-{
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type);
- val = hinic_hwif_read_reg(chain->hwif, addr);
-
- return HINIC_API_CMD_STATUS_GET(val, CONS_IDX);
-}
-
-/**
- * chain_busy - check if the chain is still processing last requests
- * @chain: chain to check
- *
- * Return 0 - Success, negative - Failure
- **/
-static int chain_busy(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u32 prod_idx;
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- chain->cons_idx = get_hw_cons_idx(chain);
- prod_idx = chain->prod_idx;
-
- /* check for a space for a new command */
- if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) {
- dev_err(&pdev->dev, "API CMD chain %d is busy\n",
- chain->chain_type);
- return -EBUSY;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "Unknown API CMD Chain type\n");
- break;
- }
-
- return 0;
-}
-
-/**
- * get_cell_data_size - get the data size of a specific cell type
- * @type: chain type
- *
- * Return the data(Desc + Address) size in the cell
- **/
-static u8 get_cell_data_size(enum hinic_api_cmd_chain_type type)
-{
- u8 cell_data_size = 0;
-
- switch (type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE +
- API_CMD_CELL_DATA_ADDR_SIZE,
- API_CMD_CELL_ALIGNMENT);
- break;
- default:
- break;
- }
-
- return cell_data_size;
-}
-
-/**
- * prepare_cell_ctrl - prepare the ctrl of the cell for the command
- * @cell_ctrl: the control of the cell to set the control value into it
- * @data_size: the size of the data in the cell
- **/
-static void prepare_cell_ctrl(u64 *cell_ctrl, u16 data_size)
-{
- u8 chksum;
- u64 ctrl;
-
- ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(data_size), DATA_SZ) |
- HINIC_API_CMD_CELL_CTRL_SET(RD_DMA_ATTR_DEFAULT, RD_DMA_ATTR) |
- HINIC_API_CMD_CELL_CTRL_SET(WR_DMA_ATTR_DEFAULT, WR_DMA_ATTR);
-
- chksum = xor_chksum_set(&ctrl);
-
- ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM);
-
- /* The data in the HW should be in Big Endian Format */
- *cell_ctrl = cpu_to_be64(ctrl);
-}
-
-/**
- * prepare_api_cmd - prepare API CMD command
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @cmd_size: the command size
- **/
-static void prepare_api_cmd(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest,
- void *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell *cell = chain->curr_node;
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cell_ctxt = &chain->cell_ctxt[chain->prod_idx];
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- cell->desc = HINIC_API_CMD_DESC_SET(SGE_DATA, API_TYPE) |
- HINIC_API_CMD_DESC_SET(API_CMD_WRITE, RD_WR) |
- HINIC_API_CMD_DESC_SET(NO_BYPASS, MGMT_BYPASS);
- break;
-
- default:
- dev_err(&pdev->dev, "unknown Chain type\n");
- return;
- }
-
- cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) |
- HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE);
-
- cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc),
- XOR_CHKSUM);
-
- /* The data in the HW should be in Big Endian Format */
- cell->desc = cpu_to_be64(cell->desc);
-
- memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size);
-}
-
-/**
- * prepare_cell - prepare cell ctrl and cmd in the current cell
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @cmd_size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-static void prepare_cell(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest,
- void *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell *curr_node = chain->curr_node;
- u16 data_size = get_cell_data_size(chain->chain_type);
-
- prepare_cell_ctrl(&curr_node->ctrl, data_size);
- prepare_api_cmd(chain, dest, cmd, cmd_size);
-}
-
-static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain)
-{
- chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1);
-}
-
-/**
- * api_cmd_status_update - update the status in the chain struct
- * @chain: chain to update
- **/
-static void api_cmd_status_update(struct hinic_api_cmd_chain *chain)
-{
- enum hinic_api_cmd_chain_type chain_type;
- struct hinic_api_cmd_status *wb_status;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u64 status_header;
- u32 status;
-
- wb_status = chain->wb_status;
- status_header = be64_to_cpu(wb_status->header);
-
- status = be32_to_cpu(wb_status->status);
- if (HINIC_API_CMD_STATUS_GET(status, CHKSUM_ERR)) {
- dev_err(&pdev->dev, "API CMD status: Xor check error\n");
- return;
- }
-
- chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID);
- if (chain_type >= HINIC_API_CMD_MAX) {
- dev_err(&pdev->dev, "unknown API CMD Chain %d\n", chain_type);
- return;
- }
-
- chain->cons_idx = HINIC_API_CMD_STATUS_GET(status, CONS_IDX);
-}
-
-/**
- * wait_for_status_poll - wait for write to api cmd command to complete
- * @chain: the chain of the command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wait_for_status_poll(struct hinic_api_cmd_chain *chain)
-{
- int err = -ETIMEDOUT;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
- do {
- api_cmd_status_update(chain);
-
- /* wait for CI to be updated - sign for completion */
- if (chain->cons_idx == chain->prod_idx) {
- err = 0;
- break;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- return err;
-}
-
-/**
- * wait_for_api_cmd_completion - wait for command to complete
- * @chain: chain for the command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- err = wait_for_status_poll(chain);
- if (err) {
- dev_err(&pdev->dev, "API CMD Poll status timeout\n");
- break;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "unknown API CMD Chain type\n");
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-/**
- * api_cmd - API CMD command
- * @chain: chain for the command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 cmd_size)
-{
- struct hinic_api_cmd_cell_ctxt *ctxt;
- int err;
-
- down(&chain->sem);
- if (chain_busy(chain)) {
- up(&chain->sem);
- return -EBUSY;
- }
-
- prepare_cell(chain, dest, cmd, cmd_size);
- cmd_chain_prod_idx_inc(chain);
-
- wmb(); /* inc pi before issue the command */
-
- set_prod_idx(chain); /* issue the command */
-
- ctxt = &chain->cell_ctxt[chain->prod_idx];
-
- chain->curr_node = ctxt->cell_vaddr;
-
- err = wait_for_api_cmd_completion(chain);
-
- up(&chain->sem);
- return err;
-}
-
-/**
- * hinic_api_cmd_write - Write API CMD command
- * @chain: chain for write command
- * @dest: destination node on the card that will receive the command
- * @cmd: command data
- * @size: the command size
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 size)
-{
- /* Verify the chain type */
- if (chain->chain_type == HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- return api_cmd(chain, dest, cmd, size);
-
- return -EINVAL;
-}
-
-/**
- * api_cmd_hw_restart - restart the chain in the HW
- * @chain: the API CMD specific chain to restart
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- int err = -ETIMEDOUT;
- unsigned long end;
- u32 reg_addr, val;
-
- /* Read Modify Write */
- reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type);
- val = hinic_hwif_read_reg(hwif, reg_addr);
-
- val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART);
- val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART);
-
- hinic_hwif_write_reg(hwif, reg_addr, val);
-
- end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT);
- do {
- val = hinic_hwif_read_reg(hwif, reg_addr);
-
- if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) {
- err = 0;
- break;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- return err;
-}
-
-/**
- * api_cmd_ctrl_init - set the control register of a chain
- * @chain: the API CMD specific chain to set control register for
- **/
-static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, ctrl;
- u16 cell_size;
-
- /* Read Modify Write */
- addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
-
- cell_size = API_CMD_CELL_SIZE_VAL(chain->cell_size);
-
- ctrl = hinic_hwif_read_reg(hwif, addr);
-
- ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
-
- ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(1, XOR_ERR) |
- HINIC_API_CMD_CHAIN_CTRL_SET(XOR_CHK_ALL, XOR_CHK_EN) |
- HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE);
-
- hinic_hwif_write_reg(hwif, addr, ctrl);
-}
-
-/**
- * api_cmd_set_status_addr - set the status address of a chain in the HW
- * @chain: the API CMD specific chain to set in HW status address for
- **/
-static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type);
- val = upper_32_bits(chain->wb_status_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type);
- val = lower_32_bits(chain->wb_status_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_set_num_cells - set the number cells of a chain in the HW
- * @chain: the API CMD specific chain to set in HW the number of cells for
- **/
-static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type);
- val = chain->num_cells;
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_head_init - set the head of a chain in the HW
- * @chain: the API CMD specific chain to set in HW the head for
- **/
-static void api_cmd_head_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, val;
-
- addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type);
- val = upper_32_bits(chain->head_cell_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type);
- val = lower_32_bits(chain->head_cell_paddr);
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * api_cmd_chain_hw_clean - clean the HW
- * @chain: the API CMD specific chain
- **/
-static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- u32 addr, ctrl;
-
- addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type);
-
- ctrl = hinic_hwif_read_reg(hwif, addr);
- ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) &
- HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE);
-
- hinic_hwif_write_reg(hwif, addr, ctrl);
-}
-
-/**
- * api_cmd_chain_hw_init - initialize the chain in the HW
- * @chain: the API CMD specific chain to initialize in HW
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- api_cmd_chain_hw_clean(chain);
-
- api_cmd_set_status_addr(chain);
-
- err = api_cmd_hw_restart(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to restart API CMD HW\n");
- return err;
- }
-
- api_cmd_ctrl_init(chain);
- api_cmd_set_num_cells(chain);
- api_cmd_head_init(chain);
- return 0;
-}
-
-/**
- * free_cmd_buf - free the dma buffer of API CMD command
- * @chain: the API CMD specific chain of the cmd
- * @cell_idx: the cell index of the cmd
- **/
-static void free_cmd_buf(struct hinic_api_cmd_chain *chain, int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- dma_free_coherent(&pdev->dev, API_CMD_BUF_SIZE,
- cell_ctxt->api_cmd_vaddr,
- cell_ctxt->api_cmd_paddr);
-}
-
-/**
- * alloc_cmd_buf - allocate a dma buffer for API CMD command
- * @chain: the API CMD specific chain for the cmd
- * @cell: the cell in the HW for the cmd
- * @cell_idx: the index of the cell
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain,
- struct hinic_api_cmd_cell *cell, int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- dma_addr_t cmd_paddr;
- u8 *cmd_vaddr;
- int err = 0;
-
- cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE,
- &cmd_paddr, GFP_KERNEL);
- if (!cmd_vaddr) {
- dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n");
- return -ENOMEM;
- }
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- cell_ctxt->api_cmd_vaddr = cmd_vaddr;
- cell_ctxt->api_cmd_paddr = cmd_paddr;
-
- /* set the cmd DMA address in the cell */
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- /* The data in the HW should be in Big Endian Format */
- cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr);
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- free_cmd_buf(chain, cell_idx);
- err = -EINVAL;
- break;
- }
-
- return err;
-}
-
-/**
- * api_cmd_create_cell - create API CMD cell for specific chain
- * @chain: the API CMD specific chain to create its cell
- * @cell_idx: the index of the cell to create
- * @pre_node: previous cell
- * @node_vaddr: the returned virt addr of the cell
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain,
- int cell_idx,
- struct hinic_api_cmd_cell *pre_node,
- struct hinic_api_cmd_cell **node_vaddr)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_cell *node;
- dma_addr_t node_paddr;
- int err;
-
- node = dma_zalloc_coherent(&pdev->dev, chain->cell_size,
- &node_paddr, GFP_KERNEL);
- if (!node) {
- dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n");
- return -ENOMEM;
- }
-
- node->read.hw_wb_resp_paddr = 0;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
- cell_ctxt->cell_vaddr = node;
- cell_ctxt->cell_paddr = node_paddr;
-
- if (!pre_node) {
- chain->head_cell_paddr = node_paddr;
- chain->head_node = node;
- } else {
- /* The data in the HW should be in Big Endian Format */
- pre_node->next_cell_paddr = cpu_to_be64(node_paddr);
- }
-
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- err = alloc_cmd_buf(chain, node, cell_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmd buffer\n");
- goto err_alloc_cmd_buf;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- err = -EINVAL;
- goto err_alloc_cmd_buf;
- }
-
- *node_vaddr = node;
- return 0;
-
-err_alloc_cmd_buf:
- dma_free_coherent(&pdev->dev, chain->cell_size, node, node_paddr);
- return err;
-}
-
-/**
- * api_cmd_destroy_cell - destroy API CMD cell of specific chain
- * @chain: the API CMD specific chain to destroy its cell
- * @cell_idx: the cell to destroy
- **/
-static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain,
- int cell_idx)
-{
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_cell *node;
- dma_addr_t node_paddr;
- size_t node_size;
-
- cell_ctxt = &chain->cell_ctxt[cell_idx];
-
- node = cell_ctxt->cell_vaddr;
- node_paddr = cell_ctxt->cell_paddr;
- node_size = chain->cell_size;
-
- if (cell_ctxt->api_cmd_vaddr) {
- switch (chain->chain_type) {
- case HINIC_API_CMD_WRITE_TO_MGMT_CPU:
- free_cmd_buf(chain, cell_idx);
- break;
- default:
- dev_err(&pdev->dev, "Unsupported API CMD chain type\n");
- break;
- }
-
- dma_free_coherent(&pdev->dev, node_size, node,
- node_paddr);
- }
-}
-
-/**
- * api_cmd_destroy_cells - destroy API CMD cells of specific chain
- * @chain: the API CMD specific chain to destroy its cells
- * @num_cells: number of cells to destroy
- **/
-static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain,
- int num_cells)
-{
- int cell_idx;
-
- for (cell_idx = 0; cell_idx < num_cells; cell_idx++)
- api_cmd_destroy_cell(chain, cell_idx);
-}
-
-/**
- * api_cmd_create_cells - create API CMD cells for specific chain
- * @chain: the API CMD specific chain
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL;
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err, cell_idx;
-
- for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) {
- err = api_cmd_create_cell(chain, cell_idx, pre_node, &node);
- if (err) {
- dev_err(&pdev->dev, "Failed to create API CMD cell\n");
- goto err_create_cell;
- }
-
- pre_node = node;
- }
-
-	/* set the final node to point to the start */
- node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr);
-
- /* set the current node to be the head */
- chain->curr_node = chain->head_node;
- return 0;
-
-err_create_cell:
- api_cmd_destroy_cells(chain, cell_idx);
- return err;
-}
-
-/**
- * api_chain_init - initialize API CMD specific chain
- * @chain: the API CMD specific chain to initialize
- * @attr: attributes to set in the chain
- *
- * Return 0 - Success, negative - Failure
- **/
-static int api_chain_init(struct hinic_api_cmd_chain *chain,
- struct hinic_api_cmd_chain_attr *attr)
-{
- struct hinic_hwif *hwif = attr->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t cell_ctxt_size;
-
- chain->hwif = hwif;
- chain->chain_type = attr->chain_type;
- chain->num_cells = attr->num_cells;
- chain->cell_size = attr->cell_size;
-
- chain->prod_idx = 0;
- chain->cons_idx = 0;
-
- sema_init(&chain->sem, 1);
-
- cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
- chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL);
- if (!chain->cell_ctxt)
- return -ENOMEM;
-
- chain->wb_status = dma_zalloc_coherent(&pdev->dev,
- sizeof(*chain->wb_status),
- &chain->wb_status_paddr,
- GFP_KERNEL);
- if (!chain->wb_status) {
- dev_err(&pdev->dev, "Failed to allocate DMA wb status\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * api_chain_free - free API CMD specific chain
- * @chain: the API CMD specific chain to free
- **/
-static void api_chain_free(struct hinic_api_cmd_chain *chain)
-{
- struct hinic_hwif *hwif = chain->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, sizeof(*chain->wb_status),
- chain->wb_status, chain->wb_status_paddr);
-}
-
-/**
- * api_cmd_create_chain - create API CMD specific chain
- * @attr: attributes to set the chain
- *
- * Return the created chain
- **/
-static struct hinic_api_cmd_chain *
- api_cmd_create_chain(struct hinic_api_cmd_chain_attr *attr)
-{
- struct hinic_hwif *hwif = attr->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_api_cmd_chain *chain;
- int err;
-
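-	/* n & (n - 1) clears the lowest set bit; zero means n is a power of 2 */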
- if (attr->num_cells & (attr->num_cells - 1)) {
- dev_err(&pdev->dev, "Invalid number of cells, must be power of 2\n");
- return ERR_PTR(-EINVAL);
- }
-
- chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
- if (!chain)
- return ERR_PTR(-ENOMEM);
-
- err = api_chain_init(chain, attr);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize chain\n");
- return ERR_PTR(err);
- }
-
- err = api_cmd_create_cells(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to create cells for API CMD chain\n");
- goto err_create_cells;
- }
-
- err = api_cmd_chain_hw_init(chain);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize chain HW\n");
- goto err_chain_hw_init;
- }
-
- return chain;
-
-err_chain_hw_init:
- api_cmd_destroy_cells(chain, chain->num_cells);
-
-err_create_cells:
- api_chain_free(chain);
- return ERR_PTR(err);
-}
-
-/**
- * api_cmd_destroy_chain - destroy API CMD specific chain
- * @chain: the API CMD specific chain to destroy
- **/
-static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain)
-{
- api_cmd_chain_hw_clean(chain);
- api_cmd_destroy_cells(chain, chain->num_cells);
- api_chain_free(chain);
-}
-
-/**
- * hinic_api_cmd_init - Initialize all the API CMD chains
- * @chain: the API CMD chains that are initialized
- * @hwif: the hardware interface of a pci function device
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain,
- struct hinic_hwif *hwif)
-{
- enum hinic_api_cmd_chain_type type, chain_type;
- struct hinic_api_cmd_chain_attr attr;
- struct pci_dev *pdev = hwif->pdev;
- size_t hw_cell_sz;
- int err;
-
- hw_cell_sz = sizeof(struct hinic_api_cmd_cell);
-
- attr.hwif = hwif;
- attr.num_cells = API_CHAIN_NUM_CELLS;
- attr.cell_size = API_CMD_CELL_SIZE(hw_cell_sz);
-
- chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) {
- attr.chain_type = chain_type;
-
- if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- chain[chain_type] = api_cmd_create_chain(&attr);
- if (IS_ERR(chain[chain_type])) {
- dev_err(&pdev->dev, "Failed to create chain %d\n",
- chain_type);
- err = PTR_ERR(chain[chain_type]);
- goto err_create_chain;
- }
- }
-
- return 0;
-
-err_create_chain:
- type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; type < chain_type; type++) {
- if (type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- api_cmd_destroy_chain(chain[type]);
- }
-
- return err;
-}
-
-/**
- * hinic_api_cmd_free - free the API CMD chains
- * @chain: the API CMD chains that are freed
- **/
-void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain)
-{
- enum hinic_api_cmd_chain_type chain_type;
-
- chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU;
- for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) {
- if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU)
- continue;
-
- api_cmd_destroy_chain(chain[chain_type]);
- }
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
deleted file mode 100644
index 31b94d5..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_API_CMD_H
-#define HINIC_HW_API_CMD_H
-
-#include <linux/types.h>
-#include <linux/semaphore.h>
-
-#include "hinic_hw_if.h"
-
-#define HINIC_API_CMD_PI_IDX_SHIFT 0
-
-#define HINIC_API_CMD_PI_IDX_MASK 0xFFFFFF
-
-#define HINIC_API_CMD_PI_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_PI_##member##_MASK) << \
- HINIC_API_CMD_PI_##member##_SHIFT)
-
-#define HINIC_API_CMD_PI_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_PI_##member##_MASK \
- << HINIC_API_CMD_PI_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1
-
-#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1
-
-#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \
- HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)
-
-#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \
- (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \
- HINIC_API_CMD_CHAIN_REQ_##member##_MASK)
-
-#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \
- << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_SHIFT 1
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2
-#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4
-#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28
-#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30
-
-#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1
-#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3
-#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3
-#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3
-
-#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \
- HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)
-
-#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \
- ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \
- << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT)))
-
-#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_SHIFT 0
-#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_SHIFT 16
-#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_SHIFT 24
-#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56
-
-#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_MASK 0x3F
-#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFF
-
-#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \
- ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \
- HINIC_API_CMD_CELL_CTRL_##member##_SHIFT)
-
-#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0
-#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1
-#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2
-#define HINIC_API_CMD_DESC_DEST_SHIFT 32
-#define HINIC_API_CMD_DESC_SIZE_SHIFT 40
-#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56
-
-#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1
-#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1
-#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1
-#define HINIC_API_CMD_DESC_DEST_MASK 0x1F
-#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FF
-#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFF
-
-#define HINIC_API_CMD_DESC_SET(val, member) \
- ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \
- HINIC_API_CMD_DESC_##member##_SHIFT)
-
-#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16
-
-#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFF
-
-#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \
- (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \
- HINIC_API_CMD_STATUS_HEADER_##member##_MASK)
-
-#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0
-#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28
-
-#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFF
-#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3
-
-#define HINIC_API_CMD_STATUS_GET(val, member) \
- (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \
- HINIC_API_CMD_STATUS_##member##_MASK)
-
-enum hinic_api_cmd_chain_type {
- HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2,
-
- HINIC_API_CMD_MAX,
-};
-
-struct hinic_api_cmd_chain_attr {
- struct hinic_hwif *hwif;
- enum hinic_api_cmd_chain_type chain_type;
-
- u32 num_cells;
- u16 cell_size;
-};
-
-struct hinic_api_cmd_status {
- u64 header;
- u32 status;
- u32 rsvd0;
- u32 rsvd1;
- u32 rsvd2;
- u64 rsvd3;
-};
-
-/* HW struct */
-struct hinic_api_cmd_cell {
- u64 ctrl;
-
- /* address is 64 bit in HW struct */
- u64 next_cell_paddr;
-
- u64 desc;
-
- /* HW struct */
- union {
- struct {
- u64 hw_cmd_paddr;
- } write;
-
- struct {
- u64 hw_wb_resp_paddr;
- u64 hw_cmd_paddr;
- } read;
- };
-};
-
-struct hinic_api_cmd_cell_ctxt {
- dma_addr_t cell_paddr;
- struct hinic_api_cmd_cell *cell_vaddr;
-
- dma_addr_t api_cmd_paddr;
- u8 *api_cmd_vaddr;
-};
-
-struct hinic_api_cmd_chain {
- struct hinic_hwif *hwif;
- enum hinic_api_cmd_chain_type chain_type;
-
- u32 num_cells;
- u16 cell_size;
-
- /* HW members in 24 bit format */
- u32 prod_idx;
- u32 cons_idx;
-
- struct semaphore sem;
-
- struct hinic_api_cmd_cell_ctxt *cell_ctxt;
-
- dma_addr_t wb_status_paddr;
- struct hinic_api_cmd_status *wb_status;
-
- dma_addr_t head_cell_paddr;
- struct hinic_api_cmd_cell *head_node;
- struct hinic_api_cmd_cell *curr_node;
-};
-
-int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain,
- enum hinic_node_id dest, u8 *cmd, u16 size);
-
-int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain,
- struct hinic_hwif *hwif);
-
-void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
deleted file mode 100644
index 4d09ea7..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ /dev/null
@@ -1,947 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/spinlock.h>
-#include <linux/sizes.h>
-#include <linux/atomic.h>
-#include <linux/log2.h>
-#include <linux/io.h>
-#include <linux/completion.h>
-#include <linux/err.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_io.h"
-#include "hinic_hw_dev.h"
-
-#define CMDQ_CEQE_TYPE_SHIFT 0
-
-#define CMDQ_CEQE_TYPE_MASK 0x7
-
-#define CMDQ_CEQE_GET(val, member) \
- (((val) >> CMDQ_CEQE_##member##_SHIFT) \
- & CMDQ_CEQE_##member##_MASK)
-
-#define CMDQ_WQE_ERRCODE_VAL_SHIFT 20
-
-#define CMDQ_WQE_ERRCODE_VAL_MASK 0xF
-
-#define CMDQ_WQE_ERRCODE_GET(val, member) \
- (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
- & CMDQ_WQE_ERRCODE_##member##_MASK)
-
-#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
-
-#define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi))
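-
-/*
- * Only the low 8 bits of the producer index select the doorbell offset
- * (in 8-byte steps); the upper 8 bits travel in the doorbell data
- * itself (HI_PROD_IDX, see cmdq_fill_db()).
- */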
-
-#define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
-
-#define CMDQ_WQE_COMPLETED(ctrl_info) \
- HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
-
-#define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
-
-#define CMDQ_DB_OFF SZ_2K
-
-#define CMDQ_WQEBB_SIZE 64
-#define CMDQ_WQE_SIZE 64
-#define CMDQ_DEPTH SZ_4K
-
-#define CMDQ_WQ_PAGE_SIZE SZ_4K
-
-#define WQE_LCMD_SIZE 64
-#define WQE_SCMD_SIZE 64
-
-#define COMPLETE_LEN 3
-
-#define CMDQ_TIMEOUT 1000
-
-#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
-
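-/*
- * (cmdq) - (cmdq)->cmdq_type steps back from &cmdq[type] to &cmdq[0],
- * from which container_of() recovers the enclosing hinic_cmdqs.
- */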
-#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
- struct hinic_cmdqs, cmdq[0])
-
-#define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \
- struct hinic_func_to_io, \
- cmdqs)
-
-enum cmdq_wqe_type {
- WQE_LCMD_TYPE = 0,
- WQE_SCMD_TYPE = 1,
-};
-
-enum completion_format {
- COMPLETE_DIRECT = 0,
- COMPLETE_SGE = 1,
-};
-
-enum data_format {
- DATA_SGE = 0,
- DATA_DIRECT = 1,
-};
-
-enum bufdesc_len {
- BUFDESC_LCMD_LEN = 2, /* 16 bytes - 2(8 byte unit) */
- BUFDESC_SCMD_LEN = 3, /* 24 bytes - 3(8 byte unit) */
-};
-
-enum ctrl_sect_len {
- CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */
- CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */
-};
-
-enum cmdq_scmd_type {
- CMDQ_SET_ARM_CMD = 2,
-};
-
-enum cmdq_cmd_type {
- CMDQ_CMD_SYNC_DIRECT_RESP = 0,
- CMDQ_CMD_SYNC_SGE_RESP = 1,
-};
-
-enum completion_request {
- NO_CEQ = 0,
- CEQ_SET = 1,
-};
-
-/**
- * hinic_alloc_cmdq_buf - alloc buffer for sending command
- * @cmdqs: the cmdqs
- * @cmdq_buf: the buffer returned in this struct
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf)
-{
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- cmdq_buf->buf = dma_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
- &cmdq_buf->dma_addr);
- if (!cmdq_buf->buf) {
- dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * hinic_free_cmdq_buf - free buffer
- * @cmdqs: the cmdqs
- * @cmdq_buf: the buffer in this struct to free
- **/
-void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf)
-{
- dma_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
-}
-
-static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)
-{
- unsigned int wqe_size = 0;
-
- switch (len) {
- case BUFDESC_LCMD_LEN:
- wqe_size = WQE_LCMD_SIZE;
- break;
- case BUFDESC_SCMD_LEN:
- wqe_size = WQE_SCMD_SIZE;
- break;
- }
-
- return wqe_size;
-}
-
-static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
- struct hinic_cmdq_buf *buf_out)
-{
- struct hinic_sge_resp *sge_resp = &completion->sge_resp;
-
- hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size);
-}
-
-static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
- enum completion_format complete_format,
- enum data_format data_format,
- enum bufdesc_len buf_len)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
- enum ctrl_sect_len ctrl_len;
- struct hinic_ctrl *ctrl;
- u32 saved_data;
-
- if (data_format == DATA_SGE) {
- wqe_lcmd = &wqe->wqe_lcmd;
-
- wqe_lcmd->status.status_info = 0;
- ctrl = &wqe_lcmd->ctrl;
- ctrl_len = CTRL_SECT_LEN;
- } else {
- wqe_scmd = &wqe->direct_wqe.wqe_scmd;
-
- wqe_scmd->status.status_info = 0;
- ctrl = &wqe_scmd->ctrl;
- ctrl_len = CTRL_DIRECT_SECT_LEN;
- }
-
- ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) |
- HINIC_CMDQ_CTRL_SET(cmd, CMD) |
- HINIC_CMDQ_CTRL_SET(mod, MOD) |
- HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE);
-
- CMDQ_WQE_HEADER(wqe)->header_info =
- HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
- HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) |
- HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
- HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
- HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED);
-
- saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
- saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM);
-
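-	/*
-	 * The ARM flag kept in saved_data is what cmdq_ceq_handler() later
-	 * checks to recognize a completed set-arm command, so clear it here
-	 * and re-set it only for CMDQ_SET_ARM_CMD.
-	 */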
- if ((cmd == CMDQ_SET_ARM_CMD) && (mod == HINIC_MOD_COMM))
- CMDQ_WQE_HEADER(wqe)->saved_data |=
- HINIC_SAVED_DATA_SET(1, ARM);
- else
- CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;
-}
-
-static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
- struct hinic_cmdq_buf *buf_in)
-{
- hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
-}
-
-static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,
- void *buf_in, u32 in_size)
-{
- struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;
-
- wqe_scmd->buf_desc.buf_len = in_size;
- memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
-}
-
-static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
- enum cmdq_cmd_type cmd_type,
- struct hinic_cmdq_buf *buf_in,
- struct hinic_cmdq_buf *buf_out, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
- enum completion_format complete_format;
-
- switch (cmd_type) {
- case CMDQ_CMD_SYNC_SGE_RESP:
- complete_format = COMPLETE_SGE;
- cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out);
- break;
- case CMDQ_CMD_SYNC_DIRECT_RESP:
- complete_format = COMPLETE_DIRECT;
- wqe_lcmd->completion.direct_resp = 0;
- break;
- }
-
- cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
- prod_idx, complete_format, DATA_SGE,
- BUFDESC_LCMD_LEN);
-
- cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
-}
-
-static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe,
- enum cmdq_cmd_type cmd_type,
- void *buf_in, u16 in_size,
- struct hinic_cmdq_buf *buf_out, int wrapped,
- enum hinic_cmd_ack_type ack_type,
- enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
-{
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- enum completion_format complete_format;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
-
- switch (cmd_type) {
- case CMDQ_CMD_SYNC_SGE_RESP:
- complete_format = COMPLETE_SGE;
- cmdq_set_sge_completion(&wqe_scmd->completion, buf_out);
- break;
- case CMDQ_CMD_SYNC_DIRECT_RESP:
- complete_format = COMPLETE_DIRECT;
- wqe_scmd->completion.direct_resp = 0;
- break;
- }
-
- cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
- complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);
-
- cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size);
-}
-
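-/*
- * Copy everything except the first 8 bytes, then write those 8 bytes
- * (the WQE header) last, so HW never sees a complete-looking header
- * ahead of the rest of the WQE.
- */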
-static void cmdq_wqe_fill(void *dst, void *src)
-{
- memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
- CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
-
- wmb(); /* The first 8 bytes should be written last */
-
- *(u64 *)dst = *(u64 *)src;
-}
-
-static void cmdq_fill_db(u32 *db_info,
- enum hinic_cmdq_type cmdq_type, u16 prod_idx)
-{
- *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
- HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) |
- HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
- HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE);
-}
-
-static void cmdq_set_db(struct hinic_cmdq *cmdq,
- enum hinic_cmdq_type cmdq_type, u16 prod_idx)
-{
- u32 db_info;
-
- cmdq_fill_db(&db_info, cmdq_type, prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- db_info = cpu_to_be32(db_info);
-
- wmb(); /* write all before the doorbell */
-
- writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
-}
-
-static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in,
- u64 *resp)
-{
- struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
- u16 curr_prod_idx, next_prod_idx;
- int errcode, wrapped, num_wqebbs;
- struct hinic_wq *wq = cmdq->wq;
- struct hinic_hw_wqe *hw_wqe;
- struct completion done;
-
- /* Keep doorbell index correct. bh - for tasklet(ceq). */
- spin_lock_bh(&cmdq->cmdq_lock);
-
-	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
- hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
- if (IS_ERR(hw_wqe)) {
- spin_unlock_bh(&cmdq->cmdq_lock);
- return -EBUSY;
- }
-
- curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
-
- wrapped = cmdq->wrapped;
-
- num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
- next_prod_idx = curr_prod_idx + num_wqebbs;
- if (next_prod_idx >= wq->q_depth) {
- cmdq->wrapped = !cmdq->wrapped;
- next_prod_idx -= wq->q_depth;
- }
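-	/*
-	 * cmdq->wrapped toggles on every producer-index wrap and is stamped
-	 * into the WQE header (TOGGLED_WRAPPED), marking which pass of the
-	 * ring the WQE belongs to.
-	 */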
-
- cmdq->errcode[curr_prod_idx] = &errcode;
-
- init_completion(&done);
- cmdq->done[curr_prod_idx] = &done;
-
- cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL,
- wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd,
- curr_prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE);
-
- /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
- cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
-
- cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
-
- spin_unlock_bh(&cmdq->cmdq_lock);
-
- if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) {
- spin_lock_bh(&cmdq->cmdq_lock);
-
- if (cmdq->errcode[curr_prod_idx] == &errcode)
- cmdq->errcode[curr_prod_idx] = NULL;
-
- if (cmdq->done[curr_prod_idx] == &done)
- cmdq->done[curr_prod_idx] = NULL;
-
- spin_unlock_bh(&cmdq->cmdq_lock);
-
- return -ETIMEDOUT;
- }
-
- smp_rmb(); /* read error code after completion */
-
- if (resp) {
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd;
-
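-		/* the completion data was written by HW in big-endian; convert it */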
- *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp);
- }
-
- if (errcode != 0)
- return -EFAULT;
-
- return 0;
-}
-
-static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
- u16 in_size)
-{
- struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
- u16 curr_prod_idx, next_prod_idx;
- struct hinic_wq *wq = cmdq->wq;
- struct hinic_hw_wqe *hw_wqe;
- int wrapped, num_wqebbs;
-
- /* Keep doorbell index correct */
- spin_lock(&cmdq->cmdq_lock);
-
-	/* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow */
- hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
- if (IS_ERR(hw_wqe)) {
- spin_unlock(&cmdq->cmdq_lock);
- return -EBUSY;
- }
-
- curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
-
- wrapped = cmdq->wrapped;
-
- num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
- next_prod_idx = curr_prod_idx + num_wqebbs;
- if (next_prod_idx >= wq->q_depth) {
- cmdq->wrapped = !cmdq->wrapped;
- next_prod_idx -= wq->q_depth;
- }
-
- cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
- in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
- HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);
-
- /* The data that is written to HW should be in Big Endian Format */
- hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);
-
- /* cmdq wqe is not shadow, therefore wqe will be written to wq */
- cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
-
- cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
-
- spin_unlock(&cmdq->cmdq_lock);
- return 0;
-}
-
-static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in)
-{
- if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE)
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * hinic_cmdq_direct_resp - send command with direct data as resp
- * @cmdqs: the cmdqs
- * @mod: module on the card that will handle the command
- * @cmd: the command
- * @buf_in: the buffer for the command
- * @resp: the response to return
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in, u64 *resp)
-{
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = cmdq_params_valid(buf_in);
- if (err) {
- dev_err(&pdev->dev, "Invalid CMDQ parameters\n");
- return err;
- }
-
- return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
- mod, cmd, buf_in, resp);
-}
-
-/**
- * hinic_set_arm_bit - set the arm bit to enable the interrupt again
- * @cmdqs: the cmdqs
- * @q_type: type of queue to set the arm bit for
- * @q_id: the queue number
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id)
-{
- struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_arm_bit arm_bit;
- int err;
-
- arm_bit.q_type = q_type;
- arm_bit.q_id = q_id;
-
- err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit));
- if (err) {
- dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id);
- return err;
- }
-
- return 0;
-}
-
-static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
- struct hinic_cmdq_wqe *wqe)
-{
- u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info);
- unsigned int bufdesc_len, wqe_size;
- struct hinic_ctrl *ctrl;
-
- bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
- wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len);
- if (wqe_size == WQE_LCMD_SIZE) {
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
-
- ctrl = &wqe_lcmd->ctrl;
- } else {
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
- ctrl = &wqe_scmd->ctrl;
- }
-
- /* clear HW busy bit */
- ctrl->ctrl_info = 0;
-
- wmb(); /* verify wqe is clear */
-}
-
-/**
- * cmdq_arm_ceq_handler - cmdq completion event handler for arm command
- * @cmdq: the cmdq of the arm command
- * @wqe: the wqe of the arm command
- *
- * Return 0 - Success, negative - Failure
- **/
-static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
- struct hinic_cmdq_wqe *wqe)
-{
- struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
- struct hinic_cmdq_wqe_scmd *wqe_scmd;
- struct hinic_ctrl *ctrl;
- u32 ctrl_info;
-
- wqe_scmd = &direct_wqe->wqe_scmd;
- ctrl = &wqe_scmd->ctrl;
- ctrl_info = be32_to_cpu(ctrl->ctrl_info);
-
- /* HW should toggle the HW BUSY BIT */
- if (!CMDQ_WQE_COMPLETED(ctrl_info))
- return -EBUSY;
-
- clear_wqe_complete_bit(cmdq, wqe);
-
- hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
- return 0;
-}
-
-static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
- int errcode)
-{
- if (cmdq->errcode[prod_idx])
- *cmdq->errcode[prod_idx] = errcode;
-}
-
-/**
- * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
- * @cmdq: the cmdq of the command
- * @cons_idx: the consumer index to update the error code for
- * @errcode: the error code
- **/
-static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
- int errcode)
-{
- u16 prod_idx = cons_idx;
-
- spin_lock(&cmdq->cmdq_lock);
- cmdq_update_errcode(cmdq, prod_idx, errcode);
-
- wmb(); /* write all before update for the command request */
-
- if (cmdq->done[prod_idx])
- complete(cmdq->done[prod_idx]);
- spin_unlock(&cmdq->cmdq_lock);
-}
-
-static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
- struct hinic_cmdq_wqe *cmdq_wqe)
-{
- struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd;
- struct hinic_status *status = &wqe_lcmd->status;
- struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl;
- int errcode;
-
- if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
- return -EBUSY;
-
- errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);
-
- cmdq_sync_cmd_handler(cmdq, ci, errcode);
-
- clear_wqe_complete_bit(cmdq, cmdq_wqe);
- hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
- return 0;
-}
-
-/**
- * cmdq_ceq_handler - cmdq completion event handler
- * @handle: private data for the handler(cmdqs)
- * @ceqe_data: ceq element data
- **/
-static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
-{
- enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
- struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
- struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
- struct hinic_cmdq_header *header;
- struct hinic_hw_wqe *hw_wqe;
- int err, set_arm = 0;
- u32 saved_data;
- u16 ci;
-
-	/* Read using the smallest wqe size first to determine the actual wqe size */
- while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
- if (IS_ERR(hw_wqe))
- break;
-
- header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
- saved_data = be32_to_cpu(header->saved_data);
-
- if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
- /* arm_bit was set until here */
- set_arm = 0;
-
- if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
- break;
- } else {
- set_arm = 1;
-
- hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
- if (IS_ERR(hw_wqe))
- break;
-
- if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
- break;
- }
- }
-
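-	/*
-	 * A regular command completed: post a set-arm WQE so that the
-	 * interrupt is enabled again for the next completions (see
-	 * hinic_set_arm_bit()).
-	 */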
- if (set_arm) {
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
- if (err)
- dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
- }
-}
-
-/**
- * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
- * @cmdq_ctxt: cmdq ctxt to initialize
- * @cmdq: the cmdq
- * @cmdq_pages: the memory of the queue
- **/
-static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
- struct hinic_cmdq *cmdq,
- struct hinic_cmdq_pages *cmdq_pages)
-{
- struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
- u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
- struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
- struct hinic_wq *wq = cmdq->wq;
-
- /* The data in the HW is in Big Endian Format */
- wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
-
- pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size);
-
- ctxt_info->curr_wqe_page_pfn =
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
- HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
-
- /* block PFN - Read Modify Write */
- cmdq_first_block_paddr = cmdq_pages->page_paddr;
-
- pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
-
- ctxt_info->wq_block_pfn =
- HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
- HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);
-
- cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
- cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
-}
-
-/**
- * init_cmdq - initialize cmdq
- * @cmdq: the cmdq
- * @wq: the wq attached to the cmdq
- * @q_type: the cmdq type of the cmdq
- * @db_area: doorbell area for the cmdq
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
- enum hinic_cmdq_type q_type, void __iomem *db_area)
-{
- int err;
-
- cmdq->wq = wq;
- cmdq->cmdq_type = q_type;
- cmdq->wrapped = 1;
-
- spin_lock_init(&cmdq->cmdq_lock);
-
- cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
- if (!cmdq->done)
- return -ENOMEM;
-
- cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),
- wq->q_depth));
- if (!cmdq->errcode) {
- err = -ENOMEM;
- goto err_errcode;
- }
-
- cmdq->db_base = db_area + CMDQ_DB_OFF;
- return 0;
-
-err_errcode:
- vfree(cmdq->done);
- return err;
-}
-
-/**
- * free_cmdq - Free cmdq
- * @cmdq: the cmdq to free
- **/
-static void free_cmdq(struct hinic_cmdq *cmdq)
-{
- vfree(cmdq->errcode);
- vfree(cmdq->done);
-}
-
-/**
- * init_cmdqs_ctxt - write the cmdq ctxts to HW after initializing all cmdqs
- * @hwdev: the NIC HW device
- * @cmdqs: cmdqs to write the ctxts for
- * @db_area: doorbell areas for all the cmdqs
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
- struct hinic_cmdqs *cmdqs, void __iomem **db_area)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- enum hinic_cmdq_type type, cmdq_type;
- struct hinic_cmdq_ctxt *cmdq_ctxts;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- size_t cmdq_ctxts_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI function type\n");
- return -EINVAL;
- }
-
- cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
- cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
- if (!cmdq_ctxts)
- return -ENOMEM;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
- err = init_cmdq(&cmdqs->cmdq[cmdq_type],
- &cmdqs->saved_wqs[cmdq_type], cmdq_type,
- db_area[cmdq_type]);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmdq\n");
- goto err_init_cmdq;
- }
-
- cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
- &cmdqs->cmdq[cmdq_type],
- &cmdqs->cmdq_pages);
- }
-
- /* Write the CMDQ ctxts */
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_CMDQ_CTXT_SET,
- &cmdq_ctxts[cmdq_type],
- sizeof(cmdq_ctxts[cmdq_type]),
- NULL, NULL, HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
- cmdq_type);
- goto err_write_cmdq_ctxt;
- }
- }
-
- devm_kfree(&pdev->dev, cmdq_ctxts);
- return 0;
-
-err_write_cmdq_ctxt:
- cmdq_type = HINIC_MAX_CMDQ_TYPES;
-
-err_init_cmdq:
- for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
- free_cmdq(&cmdqs->cmdq[type]);
-
- devm_kfree(&pdev->dev, cmdq_ctxts);
- return err;
-}
-
-/**
- * hinic_init_cmdqs - init all cmdqs
- * @cmdqs: cmdqs to init
- * @hwif: HW interface for accessing cmdqs
- * @db_area: doorbell areas for all the cmdqs
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
- void __iomem **db_area)
-{
- struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_hwdev *hwdev;
- size_t saved_wqs_size;
- u16 max_wqe_size;
- int err;
-
- cmdqs->hwif = hwif;
- cmdqs->cmdq_buf_pool = dma_pool_create("hinic_cmdq", &pdev->dev,
- HINIC_CMDQ_BUF_SIZE,
- HINIC_CMDQ_BUF_SIZE, 0);
- if (!cmdqs->cmdq_buf_pool)
- return -ENOMEM;
-
- saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
- cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
- if (!cmdqs->saved_wqs) {
- err = -ENOMEM;
- goto err_saved_wqs;
- }
-
- max_wqe_size = WQE_LCMD_SIZE;
- err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
- HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
- CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
- goto err_cmdq_wqs;
- }
-
- hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
- err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
- if (err) {
- dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
- goto err_cmdq_ctxt;
- }
-
- hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
- cmdq_ceq_handler);
- return 0;
-
-err_cmdq_ctxt:
- hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
- HINIC_MAX_CMDQ_TYPES);
-
-err_cmdq_wqs:
- devm_kfree(&pdev->dev, cmdqs->saved_wqs);
-
-err_saved_wqs:
- dma_pool_destroy(cmdqs->cmdq_buf_pool);
- return err;
-}
-
-/**
- * hinic_free_cmdqs - free all cmdqs
- * @cmdqs: cmdqs to free
- **/
-void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
-{
- struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
- struct hinic_hwif *hwif = cmdqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_cmdq_type cmdq_type;
-
- hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
-
- cmdq_type = HINIC_CMDQ_SYNC;
- for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
- free_cmdq(&cmdqs->cmdq[cmdq_type]);
-
- hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
- HINIC_MAX_CMDQ_TYPES);
-
- devm_kfree(&pdev->dev, cmdqs->saved_wqs);
-
- dma_pool_destroy(cmdqs->cmdq_buf_pool);
-}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
deleted file mode 100644
index 23f8d39..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_CMDQ_H
-#define HINIC_CMDQ_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/completion.h>
-#include <linux/pci.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wq.h"
-
-#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0
-#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT 56
-#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT 61
-#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT 62
-#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT 63
-
-#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF
-#define HINIC_CMDQ_CTXT_EQ_ID_MASK 0x1F
-#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK 0x1
-#define HINIC_CMDQ_CTXT_CEQ_EN_MASK 0x1
-#define HINIC_CMDQ_CTXT_WRAPPED_MASK 0x1
-
-#define HINIC_CMDQ_CTXT_PAGE_INFO_SET(val, member) \
- (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \
- ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)))
-
-#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0
-#define HINIC_CMDQ_CTXT_CI_SHIFT 52
-
-#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF
-#define HINIC_CMDQ_CTXT_CI_MASK 0xFFF
-
-#define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member) \
- (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \
- ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \
- << HINIC_CMDQ_CTXT_##member##_SHIFT)))
-
-#define HINIC_SAVED_DATA_ARM_SHIFT 31
-
-#define HINIC_SAVED_DATA_ARM_MASK 0x1
-
-#define HINIC_SAVED_DATA_SET(val, member) \
- (((u32)(val) & HINIC_SAVED_DATA_##member##_MASK) \
- << HINIC_SAVED_DATA_##member##_SHIFT)
-
-#define HINIC_SAVED_DATA_GET(val, member) \
- (((val) >> HINIC_SAVED_DATA_##member##_SHIFT) \
- & HINIC_SAVED_DATA_##member##_MASK)
-
-#define HINIC_SAVED_DATA_CLEAR(val, member) \
- ((val) & (~(HINIC_SAVED_DATA_##member##_MASK \
- << HINIC_SAVED_DATA_##member##_SHIFT)))
-
-#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0
-#define HINIC_CMDQ_DB_INFO_PATH_SHIFT 23
-#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24
-#define HINIC_CMDQ_DB_INFO_DB_TYPE_SHIFT 27
-
-#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFF
-#define HINIC_CMDQ_DB_INFO_PATH_MASK 0x1
-#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7
-#define HINIC_CMDQ_DB_INFO_DB_TYPE_MASK 0x1F
-
-#define HINIC_CMDQ_DB_INFO_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_DB_INFO_##member##_MASK) \
- << HINIC_CMDQ_DB_INFO_##member##_SHIFT)
-
-#define HINIC_CMDQ_BUF_SIZE 2048
-
-#define HINIC_CMDQ_BUF_HW_RSVD 8
-#define HINIC_CMDQ_MAX_DATA_SIZE (HINIC_CMDQ_BUF_SIZE - \
- HINIC_CMDQ_BUF_HW_RSVD)
-
-enum hinic_cmdq_type {
- HINIC_CMDQ_SYNC,
-
- HINIC_MAX_CMDQ_TYPES,
-};
-
-enum hinic_set_arm_qtype {
- HINIC_SET_ARM_CMDQ,
-};
-
-enum hinic_cmd_ack_type {
- HINIC_CMD_ACK_TYPE_CMDQ,
-};
-
-struct hinic_cmdq_buf {
- void *buf;
- dma_addr_t dma_addr;
- size_t size;
-};
-
-struct hinic_cmdq_arm_bit {
- u32 q_type;
- u32 q_id;
-};
-
-struct hinic_cmdq_ctxt_info {
- u64 curr_wqe_page_pfn;
- u64 wq_block_pfn;
-};
-
-struct hinic_cmdq_ctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 cmdq_type;
- u8 rsvd1[1];
-
- u8 rsvd2[4];
-
- struct hinic_cmdq_ctxt_info ctxt_info;
-};
-
-struct hinic_cmdq {
- struct hinic_wq *wq;
-
- enum hinic_cmdq_type cmdq_type;
- int wrapped;
-
- /* Lock for keeping the doorbell order */
- spinlock_t cmdq_lock;
-
- struct completion **done;
- int **errcode;
-
- /* doorbell area */
- void __iomem *db_base;
-};
-
-struct hinic_cmdqs {
- struct hinic_hwif *hwif;
-
- struct dma_pool *cmdq_buf_pool;
-
- struct hinic_wq *saved_wqs;
-
- struct hinic_cmdq_pages cmdq_pages;
-
- struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES];
-};
-
-int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf);
-
-void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
- struct hinic_cmdq_buf *cmdq_buf);
-
-int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
- enum hinic_mod_type mod, u8 cmd,
- struct hinic_cmdq_buf *buf_in, u64 *out_param);
-
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id);
-
-int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
- void __iomem **db_area);
-
-void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs);
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
deleted file mode 100644
index f39b184..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_CSR_H
-#define HINIC_HW_CSR_H
-
-/* HW interface registers */
-#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0
-#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4
-
-#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10
-#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14
-
-#define HINIC_DMA_ATTR_BASE 0xC80
-#define HINIC_ELECTION_BASE 0x4200
-
-#define HINIC_DMA_ATTR_STRIDE 0x4
-#define HINIC_CSR_DMA_ATTR_ADDR(idx) \
- (HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE)
-
-#define HINIC_PPF_ELECTION_STRIDE 0x4
-#define HINIC_CSR_MAX_PORTS 4
-
-#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \
- (HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE)
-
-/* API CMD registers */
-#define HINIC_CSR_API_CMD_BASE 0xF000
-
-#define HINIC_CSR_API_CMD_STRIDE 0x100
-
-#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-#define HINIC_CSR_API_CMD_STATUS_ADDR(idx) \
- (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE)
-
-/* MSI-X registers */
-#define HINIC_CSR_MSIX_CTRL_BASE 0x2000
-#define HINIC_CSR_MSIX_CNT_BASE 0x2004
-
-#define HINIC_CSR_MSIX_STRIDE 0x8
-
-#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \
- (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
-
-#define HINIC_CSR_MSIX_CNT_ADDR(idx) \
- (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE)
-
-/* EQ registers */
-#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200
-#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400
-
-#define HINIC_EQ_MTT_OFF_STRIDE 0x40
-
-#define HINIC_CSR_AEQ_MTT_OFF(id) \
- (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_MTT_OFF(id) \
- (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE)
-
-#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8
-
-#define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
-
-#define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
- (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
- (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4)
-
-#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00
-#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04
-#define HINIC_AEQ_CONS_IDX_ADDR_BASE 0xE08
-#define HINIC_AEQ_PROD_IDX_ADDR_BASE 0xE0C
-
-#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000
-#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004
-#define HINIC_CEQ_CONS_IDX_ADDR_BASE 0x1008
-#define HINIC_CEQ_PROD_IDX_ADDR_BASE 0x100C
-
-#define HINIC_EQ_OFF_STRIDE 0x80
-
-#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \
- (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \
- (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \
- (HINIC_AEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \
- (HINIC_AEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \
- (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \
- (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \
- (HINIC_CEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \
- (HINIC_CEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE)
-
-#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
deleted file mode 100644
index 6b19607..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ /dev/null
@@ -1,1010 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/log2.h>
-#include <linux/err.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-#include "hinic_hw_dev.h"
-
-#define IO_STATUS_TIMEOUT 100
-#define OUTBOUND_STATE_TIMEOUT 100
-#define DB_STATE_TIMEOUT 100
-
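-/* two IRQs per QP (SQ + RQ), plus one per AEQ and one per CEQ */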
-#define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \
- (2 * (max_qps) + (num_aeqs) + (num_ceqs))
-
-#define ADDR_IN_4BYTES(addr) ((addr) >> 2)
-
-enum intr_type {
- INTR_MSIX_TYPE,
-};
-
-enum io_status {
- IO_STOPPED = 0,
- IO_RUNNING = 1,
-};
-
-enum hw_ioctxt_set_cmdq_depth {
- HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT,
-};
-
-/* HW struct */
-struct hinic_dev_cap {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 rsvd1[5];
- u8 intr_type;
- u8 rsvd2[66];
- u16 max_sqs;
- u16 max_rqs;
- u8 rsvd3[208];
-};
-
-/**
- * get_capability - convert device capabilities to NIC capabilities
- * @hwdev: the HW device to set and convert device capabilities for
- * @dev_cap: device capabilities from FW
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_capability(struct hinic_hwdev *hwdev,
- struct hinic_dev_cap *dev_cap)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
- int num_aeqs, num_ceqs, num_irqs;
-
- if (!HINIC_IS_PF(hwdev->hwif) && !HINIC_IS_PPF(hwdev->hwif))
- return -EINVAL;
-
- if (dev_cap->intr_type != INTR_MSIX_TYPE)
- return -EFAULT;
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif);
- num_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif);
-
- /* Each QP has its own (SQ + RQ) interrupts */
- nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2;
-
- if (nic_cap->num_qps > HINIC_Q_CTXT_MAX)
- nic_cap->num_qps = HINIC_Q_CTXT_MAX;
-
-	/* num_qps must be a power of 2 */
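-	/* BIT(fls(n) - 1) rounds n down to the nearest power of 2 */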
- nic_cap->num_qps = BIT(fls(nic_cap->num_qps) - 1);
-
- nic_cap->max_qps = dev_cap->max_sqs + 1;
- if (nic_cap->max_qps != (dev_cap->max_rqs + 1))
- return -EFAULT;
-
- if (nic_cap->num_qps > nic_cap->max_qps)
- nic_cap->num_qps = nic_cap->max_qps;
-
- return 0;
-}
-
-/**
- * get_cap_from_fw - get device capabilities from FW
- * @pfhwdev: the PF HW device to get capabilities for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_cap_from_fw(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_dev_cap dev_cap;
- u16 in_len, out_len;
- int err;
-
- in_len = 0;
- out_len = sizeof(dev_cap);
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM,
- HINIC_CFG_NIC_CAP, &dev_cap, in_len, &dev_cap,
- &out_len, HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to get capability from FW\n");
- return err;
- }
-
- return get_capability(hwdev, &dev_cap);
-}
-
-/**
- * get_dev_cap - get device capabilities
- * @hwdev: the NIC HW device to get capabilities for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_dev_cap(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- switch (HINIC_FUNC_TYPE(hwif)) {
- case HINIC_PPF:
- case HINIC_PF:
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- err = get_cap_from_fw(pfhwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to get capability from FW\n");
- return err;
- }
- break;
-
- default:
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * init_msix - enable the msix and save the entries
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_msix(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int nr_irqs, num_aeqs, num_ceqs;
- size_t msix_entries_size;
- int i, err;
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
- nr_irqs = MAX_IRQS(HINIC_MAX_QPS, num_aeqs, num_ceqs);
- if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif))
- nr_irqs = HINIC_HWIF_NUM_IRQS(hwif);
-
- msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries);
- hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size,
- GFP_KERNEL);
- if (!hwdev->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < nr_irqs; i++)
- hwdev->msix_entries[i].entry = i;
-
- err = pci_enable_msix_exact(pdev, hwdev->msix_entries, nr_irqs);
- if (err) {
- dev_err(&pdev->dev, "Failed to enable pci msix\n");
- return err;
- }
-
- return 0;
-}
-
-/**
- * disable_msix - disable the msix
- * @hwdev: the NIC HW device
- **/
-static void disable_msix(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- pci_disable_msix(pdev);
-}
-
-/**
- * hinic_port_msg_cmd - send port msg to mgmt
- * @hwdev: the NIC HW device
- * @cmd: the port command
- * @buf_in: input buffer
- * @in_size: input size
- * @buf_out: output buffer
- * @out_size: returned output size
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
- void *buf_in, u16 in_size, void *buf_out, u16 *out_size)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd,
- buf_in, in_size, buf_out, out_size,
- HINIC_MGMT_MSG_SYNC);
-}
-
-/**
- * init_fw_ctxt - Init Firmware tables before network mgmt and io operations
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_fw_ctxt(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmd_fw_ctxt fw_ctxt;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT,
- &fw_ctxt, sizeof(fw_ctxt),
- &fw_ctxt, &out_size);
- if (err || (out_size != sizeof(fw_ctxt)) || fw_ctxt.status) {
- dev_err(&pdev->dev, "Failed to init FW ctxt, ret = %d\n",
- fw_ctxt.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * set_hw_ioctxt - set the shape of the IO queues in FW
- * @hwdev: the NIC HW device
- * @rq_depth: rq depth
- * @sq_depth: sq depth
- *
- * Return 0 - Success, negative - Failure
- **/
-static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
- unsigned int sq_depth)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_cmd_hw_ioctxt hw_ioctxt;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
- hw_ioctxt.cmdq_depth = 0;
-
- hw_ioctxt.rq_depth = ilog2(rq_depth);
-
- hw_ioctxt.rx_buf_sz_idx = HINIC_RX_BUF_SZ_IDX;
-
- hw_ioctxt.sq_depth = ilog2(sq_depth);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_HWCTXT_SET,
- &hw_ioctxt, sizeof(hw_ioctxt), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
-
-static int wait_for_outbound_state(struct hinic_hwdev *hwdev)
-{
- enum hinic_outbound_state outbound_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(OUTBOUND_STATE_TIMEOUT);
- do {
- outbound_state = hinic_outbound_state_get(hwif);
-
- if (outbound_state == HINIC_OUTBOUND_ENABLE)
- return 0;
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for OUTBOUND - Timeout\n");
- return -EFAULT;
-}
-
-static int wait_for_db_state(struct hinic_hwdev *hwdev)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_db_state db_state;
- unsigned long end;
-
- end = jiffies + msecs_to_jiffies(DB_STATE_TIMEOUT);
- do {
- db_state = hinic_db_state_get(hwif);
-
- if (db_state == HINIC_DB_ENABLE)
- return 0;
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for DB - Timeout\n");
- return -EFAULT;
-}
-
-static int wait_for_io_stopped(struct hinic_hwdev *hwdev)
-{
- struct hinic_cmd_io_status cmd_io_status;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- unsigned long end;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT);
- do {
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_IO_STATUS_GET,
- &cmd_io_status, sizeof(cmd_io_status),
- &cmd_io_status, &out_size,
- HINIC_MGMT_MSG_SYNC);
- if ((err) || (out_size != sizeof(cmd_io_status))) {
- dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n",
- err);
- return err;
- }
-
- if (cmd_io_status.status == IO_STOPPED) {
- dev_info(&pdev->dev, "IO stopped\n");
- return 0;
- }
-
- msleep(20);
- } while (time_before(jiffies, end));
-
- dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n");
- return -ETIMEDOUT;
-}
-
-/**
- * clear_io_resources - set the IO resources as not active in the NIC
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int clear_io_resources(struct hinic_hwdev *hwdev)
-{
- struct hinic_cmd_clear_io_res cmd_clear_io_res;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- err = wait_for_io_stopped(hwdev);
- if (err) {
- dev_err(&pdev->dev, "IO has not stopped yet\n");
- return err;
- }
-
- cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
- HINIC_COMM_CMD_IO_RES_CLEAR, &cmd_clear_io_res,
- sizeof(cmd_clear_io_res), NULL, NULL,
- HINIC_MGMT_MSG_SYNC);
- if (err) {
- dev_err(&pdev->dev, "Failed to clear IO resources\n");
- return err;
- }
-
- return 0;
-}
-
-/**
- * set_resources_state - set the state of the resources in the NIC
- * @hwdev: the NIC HW device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-static int set_resources_state(struct hinic_hwdev *hwdev,
- enum hinic_res_state state)
-{
- struct hinic_cmd_set_res_state res_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- res_state.state = state;
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt,
- HINIC_MOD_COMM,
- HINIC_COMM_CMD_RES_STATE_SET,
- &res_state, sizeof(res_state), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
-
-/**
- * get_base_qpn - get the first qp number
- * @hwdev: the NIC HW device
- * @base_qpn: returned qp number
- *
- * Return 0 - Success, negative - Failure
- **/
-static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
-{
- struct hinic_cmd_base_qpn cmd_base_qpn;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_GLOBAL_QPN,
- &cmd_base_qpn, sizeof(cmd_base_qpn),
- &cmd_base_qpn, &out_size);
- if (err || (out_size != sizeof(cmd_base_qpn)) || cmd_base_qpn.status) {
- dev_err(&pdev->dev, "Failed to get base qpn, status = %d\n",
- cmd_base_qpn.status);
- return -EFAULT;
- }
-
- *base_qpn = cmd_base_qpn.qpn;
- return 0;
-}
-
-/**
- * hinic_hwdev_ifup - Preparing the HW for passing IO
- * @hwdev: the NIC HW device
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
- struct hinic_hwif *hwif = hwdev->hwif;
- int err, num_aeqs, num_ceqs, num_qps;
- struct msix_entry *ceq_msix_entries;
- struct msix_entry *sq_msix_entries;
- struct msix_entry *rq_msix_entries;
- struct pci_dev *pdev = hwif->pdev;
- u16 base_qpn;
-
- err = get_base_qpn(hwdev, &base_qpn);
- if (err) {
- dev_err(&pdev->dev, "Failed to get global base qp number\n");
- return err;
- }
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
- num_ceqs = HINIC_HWIF_NUM_CEQS(hwif);
-
- ceq_msix_entries = &hwdev->msix_entries[num_aeqs];
-
- err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
- ceq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init IO channel\n");
- return err;
- }
-
- num_qps = nic_cap->num_qps;
- sq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs];
- rq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs + num_qps];
-
- err = hinic_io_create_qps(func_to_io, base_qpn, num_qps,
- sq_msix_entries, rq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to create QPs\n");
- goto err_create_qps;
- }
-
- err = wait_for_db_state(hwdev);
- if (err) {
- dev_warn(&pdev->dev, "db - disabled, try again\n");
- hinic_db_state_set(hwif, HINIC_DB_ENABLE);
- }
-
- err = set_hw_ioctxt(hwdev, HINIC_SQ_DEPTH, HINIC_RQ_DEPTH);
- if (err) {
- dev_err(&pdev->dev, "Failed to set HW IO ctxt\n");
- goto err_hw_ioctxt;
- }
-
- return 0;
-
-err_hw_ioctxt:
- hinic_io_destroy_qps(func_to_io, num_qps);
-
-err_create_qps:
- hinic_io_free(func_to_io);
- return err;
-}
-
-/**
- * hinic_hwdev_ifdown - Closing the HW for passing IO
- * @hwdev: the NIC HW device
- *
- **/
-void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- clear_io_resources(hwdev);
-
- hinic_io_destroy_qps(func_to_io, nic_cap->num_qps);
- hinic_io_free(func_to_io);
-}
-
-/**
- * hinic_hwdev_cb_register - register callback handler for MGMT events
- * @hwdev: the NIC HW device
- * @cmd: the mgmt event
- * @handle: private data for the handler
- * @handler: event handler
- **/
-void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd, void *handle,
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size))
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_nic_cb *nic_cb;
- u8 cmd_cb;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- nic_cb->handler = handler;
- nic_cb->handle = handle;
- nic_cb->cb_state = HINIC_CB_ENABLED;
-}
-
-/**
- * hinic_hwdev_cb_unregister - unregister callback handler for MGMT events
- * @hwdev: the NIC HW device
- * @cmd: the mgmt event
- **/
-void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd)
-{
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_nic_cb *nic_cb;
- u8 cmd_cb;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return;
- }
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- nic_cb->cb_state &= ~HINIC_CB_ENABLED;
-
- while (nic_cb->cb_state & HINIC_CB_RUNNING)
- schedule();
-
- nic_cb->handler = NULL;
-}
-
-/**
- * nic_mgmt_msg_handler - nic mgmt event handler
- * @handle: private data for the handler
- * @buf_in: input buffer
- * @in_size: input size
- * @buf_out: output buffer
- * @out_size: returned output size
- **/
-static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in,
- u16 in_size, void *buf_out, u16 *out_size)
-{
- struct hinic_pfhwdev *pfhwdev = handle;
- enum hinic_cb_state cb_state;
- struct hinic_nic_cb *nic_cb;
- struct hinic_hwdev *hwdev;
- struct hinic_hwif *hwif;
- struct pci_dev *pdev;
- u8 cmd_cb;
-
- hwdev = &pfhwdev->hwdev;
- hwif = hwdev->hwif;
- pdev = hwif->pdev;
-
- if ((cmd < HINIC_MGMT_MSG_CMD_BASE) ||
- (cmd >= HINIC_MGMT_MSG_CMD_MAX)) {
- dev_err(&pdev->dev, "unknown L2NIC event, cmd = %d\n", cmd);
- return;
- }
-
- cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE;
-
- nic_cb = &pfhwdev->nic_cb[cmd_cb];
-
- cb_state = cmpxchg(&nic_cb->cb_state,
- HINIC_CB_ENABLED,
- HINIC_CB_ENABLED | HINIC_CB_RUNNING);
-
- if ((cb_state == HINIC_CB_ENABLED) && (nic_cb->handler))
- nic_cb->handler(nic_cb->handle, buf_in,
- in_size, buf_out, out_size);
- else
- dev_err(&pdev->dev, "Unhandled NIC Event %d\n", cmd);
-
- nic_cb->cb_state &= ~HINIC_CB_RUNNING;
-}
-
-/**
- * init_pfhwdev - Initialize the extended components of PF
- * @pfhwdev: the HW device for PF
- *
- * Return 0 - success, negative - failure
- **/
-static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = hinic_pf_to_mgmt_init(&pfhwdev->pf_to_mgmt, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize PF to MGMT channel\n");
- return err;
- }
-
- hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC,
- pfhwdev, nic_mgmt_msg_handler);
-
- hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
- return 0;
-}
-
-/**
- * free_pfhwdev - Free the extended components of PF
- * @pfhwdev: the HW device for PF
- **/
-static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
-{
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
-
- hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
-
- hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC);
-
- hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
-}
-
-/**
- * hinic_init_hwdev - Initialize the NIC HW
- * @pdev: the NIC pci device
- *
- * Return initialized NIC HW device
- *
- * Initialize the NIC HW device and return a pointer to it
- **/
-struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
-{
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_hwdev *hwdev;
- struct hinic_hwif *hwif;
- int err, num_aeqs;
-
- hwif = devm_kzalloc(&pdev->dev, sizeof(*hwif), GFP_KERNEL);
- if (!hwif)
- return ERR_PTR(-ENOMEM);
-
- err = hinic_init_hwif(hwif, pdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init HW interface\n");
- return ERR_PTR(err);
- }
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- err = -EFAULT;
- goto err_func_type;
- }
-
- pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL);
- if (!pfhwdev) {
- err = -ENOMEM;
- goto err_pfhwdev_alloc;
- }
-
- hwdev = &pfhwdev->hwdev;
- hwdev->hwif = hwif;
-
- err = init_msix(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init msix\n");
- goto err_init_msix;
- }
-
- err = wait_for_outbound_state(hwdev);
- if (err) {
- dev_warn(&pdev->dev, "outbound - disabled, try again\n");
- hinic_outbound_state_set(hwif, HINIC_OUTBOUND_ENABLE);
- }
-
- num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
-
- err = hinic_aeqs_init(&hwdev->aeqs, hwif, num_aeqs,
- HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE,
- hwdev->msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init async event queues\n");
- goto err_aeqs_init;
- }
-
- err = init_pfhwdev(pfhwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init PF HW device\n");
- goto err_init_pfhwdev;
- }
-
- err = get_dev_cap(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to get device capabilities\n");
- goto err_dev_cap;
- }
-
- err = init_fw_ctxt(hwdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to init function table\n");
- goto err_init_fw_ctxt;
- }
-
- err = set_resources_state(hwdev, HINIC_RES_ACTIVE);
- if (err) {
- dev_err(&pdev->dev, "Failed to set resources state\n");
- goto err_resources_state;
- }
-
- return hwdev;
-
-err_resources_state:
-err_init_fw_ctxt:
-err_dev_cap:
- free_pfhwdev(pfhwdev);
-
-err_init_pfhwdev:
- hinic_aeqs_free(&hwdev->aeqs);
-
-err_aeqs_init:
- disable_msix(hwdev);
-
-err_init_msix:
-err_pfhwdev_alloc:
-err_func_type:
- hinic_free_hwif(hwif);
- return ERR_PTR(err);
-}
-
-/**
- * hinic_free_hwdev - Free the NIC HW device
- * @hwdev: the NIC HW device
- **/
-void hinic_free_hwdev(struct hinic_hwdev *hwdev)
-{
- struct hinic_pfhwdev *pfhwdev = container_of(hwdev,
- struct hinic_pfhwdev,
- hwdev);
-
- set_resources_state(hwdev, HINIC_RES_CLEAN);
-
- free_pfhwdev(pfhwdev);
-
- hinic_aeqs_free(&hwdev->aeqs);
-
- disable_msix(hwdev);
-
- hinic_free_hwif(hwdev->hwif);
-}
-
-/**
- * hinic_hwdev_num_qps - return the number of QPs available for use
- * @hwdev: the NIC HW device
- *
- * Return number of QPs available for use
- **/
-int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- return nic_cap->num_qps;
-}
-
-/**
- * hinic_hwdev_get_sq - get SQ
- * @hwdev: the NIC HW device
- * @i: the position of the SQ
- *
- * Return: the SQ in the i position
- **/
-struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_qp *qp = &func_to_io->qps[i];
-
- if (i >= hinic_hwdev_num_qps(hwdev))
- return NULL;
-
- return &qp->sq;
-}
-
-/**
- * hinic_hwdev_get_rq - get RQ
- * @hwdev: the NIC HW device
- * @i: the position of the RQ
- *
- * Return: the RQ in the i position
- **/
-struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i)
-{
- struct hinic_func_to_io *func_to_io = &hwdev->func_to_io;
- struct hinic_qp *qp = &func_to_io->qps[i];
-
- if (i >= hinic_hwdev_num_qps(hwdev))
- return NULL;
-
- return &qp->rq;
-}
-
-/**
- * hinic_hwdev_msix_cnt_set - clear message attribute counters for msix entry
- * @hwdev: the NIC HW device
- * @msix_index: msix_index
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index)
-{
- return hinic_msix_attr_cnt_clear(hwdev->hwif, msix_index);
-}
-
-/**
- * hinic_hwdev_msix_set - set message attribute for msix entry
- * @hwdev: the NIC HW device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer_cfg, u8 lli_credit_limit,
- u8 resend_timer)
-{
- return hinic_msix_attr_set(hwdev->hwif, msix_index,
- pending_limit, coalesc_timer,
- lli_timer_cfg, lli_credit_limit,
- resend_timer);
-}
-
-/**
- * hinic_hwdev_hw_ci_addr_set - set cons idx addr and attributes in HW for sq
- * @hwdev: the NIC HW device
- * @sq: send queue
- * @pending_limit: the maximum pending update ci events (unit 8)
- * @coalesc_timer: coalesc period for update ci (unit 8 us)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
- u8 pending_limit, u8 coalesc_timer)
-{
- struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_pfhwdev *pfhwdev;
- struct hinic_cmd_hw_ci hw_ci;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "Unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- hw_ci.dma_attr_off = 0;
- hw_ci.pending_limit = pending_limit;
- hw_ci.coalesc_timer = coalesc_timer;
-
- hw_ci.msix_en = 1;
- hw_ci.msix_entry_idx = sq->msix_entry;
-
- hw_ci.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- hw_ci.sq_id = qp->q_id;
-
- hw_ci.ci_addr = ADDR_IN_4BYTES(sq->hw_ci_dma_addr);
-
- pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
- return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt,
- HINIC_MOD_COMM,
- HINIC_COMM_CMD_SQ_HI_CI_SET,
- &hw_ci, sizeof(hw_ci), NULL,
- NULL, HINIC_MGMT_MSG_SYNC);
-}
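
A note for readers skimming this removal: the callback dispatch in nic_mgmt_msg_handler() above pairs with hinic_hwdev_cb_unregister() through a two-bit ENABLED/RUNNING state word flipped with cmpxchg(), so that unregistration can wait until any in-flight handler drains before clearing the function pointer. Below is a minimal user-space sketch of that handshake; the names and the GCC __atomic builtins standing in for the kernel's cmpxchg() are illustrative assumptions, not driver code.

/* Illustrative user-space sketch; not part of the driver. */
#include <stdio.h>

#define CB_ENABLED (1UL << 0)
#define CB_RUNNING (1UL << 1)

static unsigned long cb_state = CB_ENABLED;

/* Mirror of nic_mgmt_msg_handler(): run the handler only if the state
 * transitions ENABLED -> ENABLED | RUNNING in one atomic step. */
static void dispatch(void (*handler)(void))
{
	unsigned long expected = CB_ENABLED;

	if (__atomic_compare_exchange_n(&cb_state, &expected,
					CB_ENABLED | CB_RUNNING, 0,
					__ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)) {
		handler();
		__atomic_and_fetch(&cb_state, ~CB_RUNNING, __ATOMIC_RELEASE);
	} else {
		printf("event dropped: callback disabled or busy\n");
	}
}

static void on_link_status(void)
{
	printf("link status event handled\n");
}

int main(void)
{
	dispatch(on_link_status);	/* handled */

	/* Mirror of hinic_hwdev_cb_unregister(): clear ENABLED first,
	 * then (not shown here) spin until RUNNING drops. */
	__atomic_and_fetch(&cb_state, ~CB_ENABLED, __ATOMIC_RELEASE);
	dispatch(on_link_status);	/* dropped */
	return 0;
}

The point of the compare-exchange is that the enabled check and the busy mark happen as a single atomic step, so unregister can never observe ENABLED clear while a handler is about to start.
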
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
deleted file mode 100644
index 0f5563f..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_DEV_H
-#define HINIC_HW_DEV_H
-
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-
-#define HINIC_MAX_QPS 32
-
-#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \
- HINIC_MGMT_MSG_CMD_BASE)
-
-struct hinic_cap {
- u16 max_qps;
- u16 num_qps;
-};
-
-enum hinic_port_cmd {
- HINIC_PORT_CMD_CHANGE_MTU = 2,
-
- HINIC_PORT_CMD_ADD_VLAN = 3,
- HINIC_PORT_CMD_DEL_VLAN = 4,
-
- HINIC_PORT_CMD_SET_MAC = 9,
- HINIC_PORT_CMD_GET_MAC = 10,
- HINIC_PORT_CMD_DEL_MAC = 11,
-
- HINIC_PORT_CMD_SET_RX_MODE = 12,
-
- HINIC_PORT_CMD_GET_LINK_STATE = 24,
-
- HINIC_PORT_CMD_SET_PORT_STATE = 41,
-
- HINIC_PORT_CMD_FWCTXT_INIT = 69,
-
- HINIC_PORT_CMD_SET_FUNC_STATE = 93,
-
- HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
-
- HINIC_PORT_CMD_GET_CAP = 170,
-};
-
-enum hinic_mgmt_msg_cmd {
- HINIC_MGMT_MSG_CMD_BASE = 160,
-
- HINIC_MGMT_MSG_CMD_LINK_STATUS = 160,
-
- HINIC_MGMT_MSG_CMD_MAX,
-};
-
-enum hinic_cb_state {
- HINIC_CB_ENABLED = BIT(0),
- HINIC_CB_RUNNING = BIT(1),
-};
-
-enum hinic_res_state {
- HINIC_RES_CLEAN = 0,
- HINIC_RES_ACTIVE = 1,
-};
-
-struct hinic_cmd_fw_ctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rx_buf_sz;
-
- u32 rsvd1;
-};
-
-struct hinic_cmd_hw_ioctxt {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
-
- u16 rsvd1;
-
- u8 set_cmdq_depth;
- u8 cmdq_depth;
-
- u8 rsvd2;
- u8 rsvd3;
- u8 rsvd4;
- u8 rsvd5;
-
- u16 rq_depth;
- u16 rx_buf_sz_idx;
- u16 sq_depth;
-};
-
-struct hinic_cmd_io_status {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 rsvd1;
- u8 rsvd2;
- u32 io_status;
-};
-
-struct hinic_cmd_clear_io_res {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 rsvd1;
- u8 rsvd2;
-};
-
-struct hinic_cmd_set_res_state {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 state;
- u8 rsvd1;
- u32 rsvd2;
-};
-
-struct hinic_cmd_base_qpn {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 qpn;
-};
-
-struct hinic_cmd_hw_ci {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
-
- u8 dma_attr_off;
- u8 pending_limit;
- u8 coalesc_timer;
-
- u8 msix_en;
- u16 msix_entry_idx;
-
- u32 sq_id;
- u32 rsvd1;
- u64 ci_addr;
-};
-
-struct hinic_hwdev {
- struct hinic_hwif *hwif;
- struct msix_entry *msix_entries;
-
- struct hinic_aeqs aeqs;
- struct hinic_func_to_io func_to_io;
-
- struct hinic_cap nic_cap;
-};
-
-struct hinic_nic_cb {
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size);
-
- void *handle;
- unsigned long cb_state;
-};
-
-struct hinic_pfhwdev {
- struct hinic_hwdev hwdev;
-
- struct hinic_pf_to_mgmt pf_to_mgmt;
-
- struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD];
-};
-
-void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd, void *handle,
- void (*handler)(void *handle, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size));
-
-void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
- enum hinic_mgmt_msg_cmd cmd);
-
-int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd,
- void *buf_in, u16 in_size, void *buf_out,
- u16 *out_size);
-
-int hinic_hwdev_ifup(struct hinic_hwdev *hwdev);
-
-void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev);
-
-struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev);
-
-void hinic_free_hwdev(struct hinic_hwdev *hwdev);
-
-int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev);
-
-struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i);
-
-struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i);
-
-int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index);
-
-int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer_cfg, u8 lli_credit_limit,
- u8 resend_timer);
-
-int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
- u8 pending_limit, u8 coalesc_timer);
-
-#endif
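
For context on struct hinic_cap above: max_qps comes back in the FW capability reply, while num_qps is derived in get_capability() from the IRQ budget (two IRQs per QP after the AEQ/CEQ vectors) and rounded down to a power of two. A standalone sketch of that derivation, with made-up numbers and __builtin_clz() standing in for the kernel's fls():

#include <stdio.h>

/* BIT(fls(n) - 1) in kernel terms: round down to a power of two. */
static unsigned int round_down_pow2(unsigned int n)
{
	return n ? 1U << (31 - __builtin_clz(n)) : 0;
}

int main(void)
{
	unsigned int num_irqs = 70, num_aeqs = 4, num_ceqs = 2;
	unsigned int max_qps = 30;	/* dev_cap->max_sqs + 1 */
	unsigned int num_qps;

	/* Each QP needs its own SQ + RQ interrupts. */
	num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2;	/* 32 */
	num_qps = round_down_pow2(num_qps);			/* 32 */
	if (num_qps > max_qps)
		num_qps = max_qps;				/* 30 */

	printf("num_qps = %u\n", num_qps);
	return 0;
}
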
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
deleted file mode 100644
index 7cb8b9b9..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ /dev/null
@@ -1,886 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/log2.h>
-#include <asm/byteorder.h>
-#include <asm/barrier.h>
-
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-
-#define HINIC_EQS_WQ_NAME "hinic_eqs"
-
-#define GET_EQ_NUM_PAGES(eq, pg_size) \
- (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))
-
-#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size)
-
-#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
- HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
-
-#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
- HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
-
-#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
- HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
-
-#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
- HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
- HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
-
-#define GET_EQ_ELEMENT(eq, idx) \
- ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \
- (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size))
-
-#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \
- GET_EQ_ELEMENT(eq, idx))
-
-#define GET_CEQ_ELEM(eq, idx) ((u32 *) \
- GET_EQ_ELEMENT(eq, idx))
-
-#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx)
-
-#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx)
-
-#define PAGE_IN_4K(page_size) ((page_size) >> 12)
-#define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size)))
-
-#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5)
-#define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq)))
-
-#define EQ_MAX_PAGES 8
-
-#define CEQE_TYPE_SHIFT 23
-#define CEQE_TYPE_MASK 0x7
-
-#define CEQE_TYPE(ceqe) (((ceqe) >> CEQE_TYPE_SHIFT) & \
- CEQE_TYPE_MASK)
-
-#define CEQE_DATA_MASK 0x3FFFFFF
-#define CEQE_DATA(ceqe) ((ceqe) & CEQE_DATA_MASK)
-
-#define aeq_to_aeqs(eq) \
- container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
-
-#define ceq_to_ceqs(eq) \
- container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])
-
-#define work_to_aeq_work(work) \
- container_of(work, struct hinic_eq_work, work)
-
-#define DMA_ATTR_AEQ_DEFAULT 0
-#define DMA_ATTR_CEQ_DEFAULT 0
-
-/* No coalescence */
-#define THRESH_CEQ_DEFAULT 0
-
-enum eq_int_mode {
- EQ_INT_MODE_ARMED,
- EQ_INT_MODE_ALWAYS
-};
-
-enum eq_arm_state {
- EQ_NOT_ARMED,
- EQ_ARMED
-};
-
-/**
- * hinic_aeq_register_hw_cb - register AEQ callback for specific event
- * @aeqs: pointer to Async eqs of the chip
- * @event: aeq event to register the callback for
- * @handle: private data to be passed to the callback
- * @hwe_handler: callback function
- **/
-void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event, void *handle,
- void (*hwe_handler)(void *handle, void *data,
- u8 size))
-{
- struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
-
- hwe_cb->hwe_handler = hwe_handler;
- hwe_cb->handle = handle;
- hwe_cb->hwe_state = HINIC_EQE_ENABLED;
-}
-
-/**
- * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for specific event
- * @aeqs: pointer to Async eqs of the chip
- * @event: aeq event to unregister callback for it
- **/
-void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event)
-{
- struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event];
-
- hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED;
-
- while (hwe_cb->hwe_state & HINIC_EQE_RUNNING)
- schedule();
-
- hwe_cb->hwe_handler = NULL;
-}
-
-/**
- * hinic_ceq_register_cb - register CEQ callback for specific event
- * @ceqs: pointer to Completion eqs part of the chip
- * @event: ceq event to register the callback for
- * @handle: private data to be passed to the callback
- * @handler: callback function
- **/
-void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event, void *handle,
- void (*handler)(void *handle, u32 ceqe_data))
-{
- struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
-
- ceq_cb->handler = handler;
- ceq_cb->handle = handle;
- ceq_cb->ceqe_state = HINIC_EQE_ENABLED;
-}
-
-/**
- * hinic_ceq_unregister_cb - unregister the CEQ callback for specific event
- * @ceqs: pointer to Completion eqs part of the chip
- * @event: ceq event to unregister the callback for
- **/
-void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event)
-{
- struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event];
-
- ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED;
-
- while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING)
- schedule();
-
- ceq_cb->handler = NULL;
-}
-
-static u8 eq_cons_idx_checksum_set(u32 val)
-{
- u8 checksum = 0;
- int idx;
-
- for (idx = 0; idx < 32; idx += 4)
- checksum ^= ((val >> idx) & 0xF);
-
- return (checksum & 0xF);
-}
-
-/**
- * eq_update_ci - update the HW cons idx of event queue
- * @eq: the event queue to update the cons idx for
- **/
-static void eq_update_ci(struct hinic_eq *eq)
-{
- u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq);
-
- /* Read Modify Write */
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_EQ_CI_CLEAR(val, IDX) &
- HINIC_EQ_CI_CLEAR(val, WRAPPED) &
- HINIC_EQ_CI_CLEAR(val, INT_ARMED) &
- HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM);
-
- val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) |
- HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) |
- HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED);
-
- val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM);
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
-}
-
-/**
- * aeq_irq_handler - handler for the AEQ event
- * @eq: the Async Event Queue that received the event
- **/
-static void aeq_irq_handler(struct hinic_eq *eq)
-{
- struct hinic_aeqs *aeqs = aeq_to_aeqs(eq);
- struct hinic_hwif *hwif = aeqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_aeq_elem *aeqe_curr;
- struct hinic_hw_event_cb *hwe_cb;
- enum hinic_aeq_type event;
- unsigned long eqe_state;
- u32 aeqe_desc;
- int i, size;
-
- for (i = 0; i < eq->q_len; i++) {
- aeqe_curr = GET_CURR_AEQ_ELEM(eq);
-
- /* Data in HW is in Big endian Format */
- aeqe_desc = be32_to_cpu(aeqe_curr->desc);
-
- /* HW toggles the wrapped bit, when it adds eq element */
- if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped)
- break;
-
- event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE);
- if (event >= HINIC_MAX_AEQ_EVENTS) {
- dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event);
- return;
- }
-
- if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) {
- hwe_cb = &aeqs->hwe_cb[event];
-
- size = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE);
-
- eqe_state = cmpxchg(&hwe_cb->hwe_state,
- HINIC_EQE_ENABLED,
- HINIC_EQE_ENABLED |
- HINIC_EQE_RUNNING);
- if ((eqe_state == HINIC_EQE_ENABLED) &&
- (hwe_cb->hwe_handler))
- hwe_cb->hwe_handler(hwe_cb->handle,
- aeqe_curr->data, size);
- else
- dev_err(&pdev->dev, "Unhandled AEQ Event %d\n",
- event);
-
- hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING;
- }
-
- eq->cons_idx++;
-
- if (eq->cons_idx == eq->q_len) {
- eq->cons_idx = 0;
- eq->wrapped = !eq->wrapped;
- }
- }
-}
-
-/**
- * ceq_event_handler - handler for the ceq events
- * @ceqs: ceqs part of the chip
- * @ceqe: ceq element that describes the event
- **/
-static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe)
-{
- struct hinic_hwif *hwif = ceqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_ceq_cb *ceq_cb;
- enum hinic_ceq_type event;
- unsigned long eqe_state;
-
- event = CEQE_TYPE(ceqe);
- if (event >= HINIC_MAX_CEQ_EVENTS) {
- dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event);
- return;
- }
-
- ceq_cb = &ceqs->ceq_cb[event];
-
- eqe_state = cmpxchg(&ceq_cb->ceqe_state,
- HINIC_EQE_ENABLED,
- HINIC_EQE_ENABLED | HINIC_EQE_RUNNING);
-
- if ((eqe_state == HINIC_EQE_ENABLED) && (ceq_cb->handler))
- ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe));
- else
- dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event);
-
- ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING;
-}
-
-/**
- * ceq_irq_handler - handler for the CEQ event
- * @eq: the Completion Event Queue that received the event
- **/
-static void ceq_irq_handler(struct hinic_eq *eq)
-{
- struct hinic_ceqs *ceqs = ceq_to_ceqs(eq);
- u32 ceqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- ceqe = *(GET_CURR_CEQ_ELEM(eq));
-
- /* Data in HW is in Big endian Format */
- ceqe = be32_to_cpu(ceqe);
-
- /* HW toggles the wrapped bit, when it adds eq element event */
- if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped)
- break;
-
- ceq_event_handler(ceqs, ceqe);
-
- eq->cons_idx++;
-
- if (eq->cons_idx == eq->q_len) {
- eq->cons_idx = 0;
- eq->wrapped = !eq->wrapped;
- }
- }
-}
-
-/**
- * eq_irq_handler - handler for the EQ event
- * @data: the Event Queue that received the event
- **/
-static void eq_irq_handler(void *data)
-{
- struct hinic_eq *eq = data;
-
- if (eq->type == HINIC_AEQ)
- aeq_irq_handler(eq);
- else if (eq->type == HINIC_CEQ)
- ceq_irq_handler(eq);
-
- eq_update_ci(eq);
-}
-
-/**
- * eq_irq_work - the work of the EQ that received the event
- * @work: the work struct that is associated with the EQ
- **/
-static void eq_irq_work(struct work_struct *work)
-{
- struct hinic_eq_work *aeq_work = work_to_aeq_work(work);
- struct hinic_eq *aeq;
-
- aeq = aeq_work->data;
- eq_irq_handler(aeq);
-}
-
-/**
- * ceq_tasklet - the tasklet of the EQ that received the event
- * @ceq_data: the eq
- **/
-static void ceq_tasklet(unsigned long ceq_data)
-{
- struct hinic_eq *ceq = (struct hinic_eq *)ceq_data;
-
- eq_irq_handler(ceq);
-}
-
-/**
- * aeq_interrupt - aeq interrupt handler
- * @irq: irq number
- * @data: the Async Event Queue that collected the event
- **/
-static irqreturn_t aeq_interrupt(int irq, void *data)
-{
- struct hinic_eq_work *aeq_work;
- struct hinic_eq *aeq = data;
- struct hinic_aeqs *aeqs;
-
- /* clear resend timer cnt register */
- hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry);
-
- aeq_work = &aeq->aeq_work;
- aeq_work->data = aeq;
-
- aeqs = aeq_to_aeqs(aeq);
- queue_work(aeqs->workq, &aeq_work->work);
-
- return IRQ_HANDLED;
-}
-
-/**
- * ceq_interrupt - ceq interrupt handler
- * @irq: irq number
- * @data: the Completion Event Queue that collected the event
- **/
-static irqreturn_t ceq_interrupt(int irq, void *data)
-{
- struct hinic_eq *ceq = data;
-
- /* clear resend timer cnt register */
- hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry);
-
- tasklet_schedule(&ceq->ceq_tasklet);
-
- return IRQ_HANDLED;
-}
-
-static void set_ctrl0(struct hinic_eq *eq)
-{
- struct msix_entry *msix_entry = &eq->msix_entry;
- enum hinic_eq_type type = eq->type;
- u32 addr, val, ctrl0;
-
- if (type == HINIC_AEQ) {
- /* RMW Ctrl0 */
- addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) &
- HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
- HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
- HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE);
-
- ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) |
- HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) |
- HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
- PCI_INTF_IDX) |
- HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE);
-
- val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
- /* RMW Ctrl0 */
- addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) &
- HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) &
- HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) &
- HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) &
- HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE);
-
- ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) |
- HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) |
- HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) |
- HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif),
- PCI_INTF_IDX) |
- HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE);
-
- val |= ctrl0;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- }
-}
-
-static void set_ctrl1(struct hinic_eq *eq)
-{
- enum hinic_eq_type type = eq->type;
- u32 page_size_val, elem_size;
- u32 addr, val, ctrl1;
-
- if (type == HINIC_AEQ) {
- /* RMW Ctrl1 */
- addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id);
-
- page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
- elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) &
- HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) &
- HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
-
- ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) |
- HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) |
- HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
-
- val |= ctrl1;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- } else if (type == HINIC_CEQ) {
- /* RMW Ctrl1 */
- addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id);
-
- page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq);
-
- val = hinic_hwif_read_reg(eq->hwif, addr);
-
- val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) &
- HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE);
-
- ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) |
- HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE);
-
- val |= ctrl1;
-
- hinic_hwif_write_reg(eq->hwif, addr, val);
- }
-}
-
-/**
- * set_eq_ctrls - setting eq's ctrl registers
- * @eq: the Event Queue for setting
- **/
-static void set_eq_ctrls(struct hinic_eq *eq)
-{
- set_ctrl0(eq);
- set_ctrl1(eq);
-}
-
-/**
- * aeq_elements_init - initialize all the elements in the aeq
- * @eq: the Async Event Queue
- * @init_val: value to initialize the elements with
- **/
-static void aeq_elements_init(struct hinic_eq *eq, u32 init_val)
-{
- struct hinic_aeq_elem *aeqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- aeqe = GET_AEQ_ELEM(eq, i);
- aeqe->desc = cpu_to_be32(init_val);
- }
-
-	wmb();	/* Write the initialization values */
-}
-
-/**
- * ceq_elements_init - Initialize all the elements in the ceq
- * @eq: the event queue
- * @init_val: value to initialize the elements with
- **/
-static void ceq_elements_init(struct hinic_eq *eq, u32 init_val)
-{
- u32 *ceqe;
- int i;
-
- for (i = 0; i < eq->q_len; i++) {
- ceqe = GET_CEQ_ELEM(eq, i);
- *(ceqe) = cpu_to_be32(init_val);
- }
-
-	wmb();	/* Write the initialization values */
-}
-
-/**
- * alloc_eq_pages - allocate the pages for the queue
- * @eq: the event queue
- *
- * Return 0 - Success, Negative - Failure
- **/
-static int alloc_eq_pages(struct hinic_eq *eq)
-{
- struct hinic_hwif *hwif = eq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u32 init_val, addr, val;
- size_t addr_size;
- int err, pg;
-
- addr_size = eq->num_pages * sizeof(*eq->dma_addr);
- eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
- if (!eq->dma_addr)
- return -ENOMEM;
-
- addr_size = eq->num_pages * sizeof(*eq->virt_addr);
- eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
- if (!eq->virt_addr) {
- err = -ENOMEM;
- goto err_virt_addr_alloc;
- }
-
- for (pg = 0; pg < eq->num_pages; pg++) {
- eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev,
- eq->page_size,
- &eq->dma_addr[pg],
- GFP_KERNEL);
- if (!eq->virt_addr[pg]) {
- err = -ENOMEM;
- goto err_dma_alloc;
- }
-
- addr = EQ_HI_PHYS_ADDR_REG(eq, pg);
- val = upper_32_bits(eq->dma_addr[pg]);
-
- hinic_hwif_write_reg(hwif, addr, val);
-
- addr = EQ_LO_PHYS_ADDR_REG(eq, pg);
- val = lower_32_bits(eq->dma_addr[pg]);
-
- hinic_hwif_write_reg(hwif, addr, val);
- }
-
- init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED);
-
- if (eq->type == HINIC_AEQ)
- aeq_elements_init(eq, init_val);
- else if (eq->type == HINIC_CEQ)
- ceq_elements_init(eq, init_val);
-
- return 0;
-
-err_dma_alloc:
- while (--pg >= 0)
- dma_free_coherent(&pdev->dev, eq->page_size,
- eq->virt_addr[pg],
- eq->dma_addr[pg]);
-
- devm_kfree(&pdev->dev, eq->virt_addr);
-
-err_virt_addr_alloc:
- devm_kfree(&pdev->dev, eq->dma_addr);
- return err;
-}
-
-/**
- * free_eq_pages - free the pages of the queue
- * @eq: the Event Queue
- **/
-static void free_eq_pages(struct hinic_eq *eq)
-{
- struct hinic_hwif *hwif = eq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int pg;
-
- for (pg = 0; pg < eq->num_pages; pg++)
- dma_free_coherent(&pdev->dev, eq->page_size,
- eq->virt_addr[pg],
- eq->dma_addr[pg]);
-
- devm_kfree(&pdev->dev, eq->virt_addr);
- devm_kfree(&pdev->dev, eq->dma_addr);
-}
-
-/**
- * init_eq - initialize Event Queue
- * @eq: the event queue
- * @hwif: the HW interface of a PCI function device
- * @type: the type of the event queue, aeq or ceq
- * @q_id: Queue id number
- * @q_len: the number of EQ elements
- * @page_size: the page size of the pages in the event queue
- * @entry: msix entry associated with the event queue
- *
- * Return 0 - Success, Negative - Failure
- **/
-static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
- enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size,
- struct msix_entry entry)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- eq->hwif = hwif;
- eq->type = type;
- eq->q_id = q_id;
- eq->q_len = q_len;
- eq->page_size = page_size;
-
- /* Clear PI and CI, also clear the ARM bit */
- hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0);
- hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0);
-
- eq->cons_idx = 0;
- eq->wrapped = 0;
-
- if (type == HINIC_AEQ) {
- eq->elem_size = HINIC_AEQE_SIZE;
- } else if (type == HINIC_CEQ) {
- eq->elem_size = HINIC_CEQE_SIZE;
- } else {
- dev_err(&pdev->dev, "Invalid EQ type\n");
- return -EINVAL;
- }
-
- eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size);
- eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size);
-
- eq->msix_entry = entry;
-
- if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) {
- dev_err(&pdev->dev, "num elements in eq page != power of 2\n");
- return -EINVAL;
- }
-
- if (eq->num_pages > EQ_MAX_PAGES) {
- dev_err(&pdev->dev, "too many pages for eq\n");
- return -EINVAL;
- }
-
- set_eq_ctrls(eq);
- eq_update_ci(eq);
-
- err = alloc_eq_pages(eq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate pages for eq\n");
- return err;
- }
-
- if (type == HINIC_AEQ) {
- struct hinic_eq_work *aeq_work = &eq->aeq_work;
-
- INIT_WORK(&aeq_work->work, eq_irq_work);
- } else if (type == HINIC_CEQ) {
- tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
- (unsigned long)eq);
- }
-
- /* set the attributes of the msix entry */
- hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry,
- HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT,
- HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT,
- HINIC_EQ_MSIX_LLI_TIMER_DEFAULT,
- HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
- HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);
-
- if (type == HINIC_AEQ)
- err = request_irq(entry.vector, aeq_interrupt, 0,
- "hinic_aeq", eq);
- else if (type == HINIC_CEQ)
- err = request_irq(entry.vector, ceq_interrupt, 0,
- "hinic_ceq", eq);
-
- if (err) {
- dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
- goto err_req_irq;
- }
-
- return 0;
-
-err_req_irq:
- free_eq_pages(eq);
- return err;
-}
-
-/**
- * remove_eq - remove Event Queue
- * @eq: the event queue
- **/
-static void remove_eq(struct hinic_eq *eq)
-{
- struct msix_entry *entry = &eq->msix_entry;
-
- free_irq(entry->vector, eq);
-
- if (eq->type == HINIC_AEQ) {
- struct hinic_eq_work *aeq_work = &eq->aeq_work;
-
- cancel_work_sync(&aeq_work->work);
- } else if (eq->type == HINIC_CEQ) {
- tasklet_kill(&eq->ceq_tasklet);
- }
-
- free_eq_pages(eq);
-}
-
-/**
- * hinic_aeqs_init - initialize all the aeqs
- * @aeqs: pointer to Async eqs of the chip
- * @hwif: the HW interface of a PCI function device
- * @num_aeqs: number of AEQs
- * @q_len: number of EQ elements
- * @page_size: the page size of the pages in the event queue
- * @msix_entries: msix entries associated with the event queues
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
- int num_aeqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err, i, q_id;
-
- aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME);
- if (!aeqs->workq)
- return -ENOMEM;
-
- aeqs->hwif = hwif;
- aeqs->num_aeqs = num_aeqs;
-
- for (q_id = 0; q_id < num_aeqs; q_id++) {
- err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len,
- page_size, msix_entries[q_id]);
- if (err) {
- dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id);
- goto err_init_aeq;
- }
- }
-
- return 0;
-
-err_init_aeq:
- for (i = 0; i < q_id; i++)
- remove_eq(&aeqs->aeq[i]);
-
- destroy_workqueue(aeqs->workq);
- return err;
-}
-
-/**
- * hinic_aeqs_free - free all the aeqs
- * @aeqs: pointer to Async eqs of the chip
- **/
-void hinic_aeqs_free(struct hinic_aeqs *aeqs)
-{
- int q_id;
-
-	for (q_id = 0; q_id < aeqs->num_aeqs; q_id++)
- remove_eq(&aeqs->aeq[q_id]);
-
- destroy_workqueue(aeqs->workq);
-}
-
-/**
- * hinic_ceqs_init - init all the ceqs
- * @ceqs: ceqs part of the chip
- * @hwif: the hardware interface of a pci function device
- * @num_ceqs: number of CEQs
- * @q_len: number of EQ elements
- * @page_size: the page size of the event queue
- * @msix_entries: msix entries associated with the event queues
- *
- * Return 0 - Success, Negative - Failure
- **/
-int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
- int num_ceqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i, q_id, err;
-
- ceqs->hwif = hwif;
- ceqs->num_ceqs = num_ceqs;
-
- for (q_id = 0; q_id < num_ceqs; q_id++) {
- err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len,
- page_size, msix_entries[q_id]);
- if (err) {
- dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id);
- goto err_init_ceq;
- }
- }
-
- return 0;
-
-err_init_ceq:
- for (i = 0; i < q_id; i++)
- remove_eq(&ceqs->ceq[i]);
-
- return err;
-}
-
-/**
- * hinic_ceqs_free - free all the ceqs
- * @ceqs: ceqs part of the chip
- **/
-void hinic_ceqs_free(struct hinic_ceqs *ceqs)
-{
- int q_id;
-
- for (q_id = 0; q_id < ceqs->num_ceqs; q_id++)
- remove_eq(&ceqs->ceq[q_id]);
-}
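
One detail worth calling out from eq_update_ci() above: the consumer-index register carries a 4-bit XOR checksum over its other fields, computed by eq_cons_idx_checksum_set() as the XOR of the eight nibbles of the register value. A standalone sketch of that nibble checksum, with the field layout simplified from hinic_hw_eqs.h:

#include <stdio.h>
#include <stdint.h>

/* XOR of the eight 4-bit nibbles, as in eq_cons_idx_checksum_set(). */
static uint8_t ci_checksum(uint32_t val)
{
	uint8_t checksum = 0;
	int idx;

	for (idx = 0; idx < 32; idx += 4)
		checksum ^= (val >> idx) & 0xF;

	return checksum & 0xF;
}

int main(void)
{
	/* IDX in bits 0..19, WRAPPED at bit 20, INT_ARMED at bit 31. */
	uint32_t ci_val = (123u & 0xFFFFF) | (1u << 20) | (1u << 31);

	/* The checksum nibble lands in bits 24..27 of the register. */
	ci_val |= (uint32_t)ci_checksum(ci_val) << 24;

	printf("ci register value: 0x%08x\n", ci_val);
	return 0;
}
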
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
deleted file mode 100644
index ecb9c2b..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_EQS_H
-#define HINIC_HW_EQS_H
-
-#include <linux/types.h>
-#include <linux/workqueue.h>
-#include <linux/pci.h>
-#include <linux/sizes.h>
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-
-#include "hinic_hw_if.h"
-
-#define HINIC_AEQ_CTRL_0_INT_IDX_SHIFT 0
-#define HINIC_AEQ_CTRL_0_DMA_ATTR_SHIFT 12
-#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20
-#define HINIC_AEQ_CTRL_0_INT_MODE_SHIFT 31
-
-#define HINIC_AEQ_CTRL_0_INT_IDX_MASK 0x3FF
-#define HINIC_AEQ_CTRL_0_DMA_ATTR_MASK 0x3F
-#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_AEQ_CTRL_0_INT_MODE_MASK 0x1
-
-#define HINIC_AEQ_CTRL_0_SET(val, member) \
- (((u32)(val) & HINIC_AEQ_CTRL_0_##member##_MASK) << \
- HINIC_AEQ_CTRL_0_##member##_SHIFT)
-
-#define HINIC_AEQ_CTRL_0_CLEAR(val, member) \
- ((val) & (~(HINIC_AEQ_CTRL_0_##member##_MASK \
- << HINIC_AEQ_CTRL_0_##member##_SHIFT)))
-
-#define HINIC_AEQ_CTRL_1_LEN_SHIFT 0
-#define HINIC_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24
-#define HINIC_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28
-
-#define HINIC_AEQ_CTRL_1_LEN_MASK 0x1FFFFF
-#define HINIC_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3
-#define HINIC_AEQ_CTRL_1_PAGE_SIZE_MASK 0xF
-
-#define HINIC_AEQ_CTRL_1_SET(val, member) \
- (((u32)(val) & HINIC_AEQ_CTRL_1_##member##_MASK) << \
- HINIC_AEQ_CTRL_1_##member##_SHIFT)
-
-#define HINIC_AEQ_CTRL_1_CLEAR(val, member) \
- ((val) & (~(HINIC_AEQ_CTRL_1_##member##_MASK \
- << HINIC_AEQ_CTRL_1_##member##_SHIFT)))
-
-#define HINIC_CEQ_CTRL_0_INTR_IDX_SHIFT 0
-#define HINIC_CEQ_CTRL_0_DMA_ATTR_SHIFT 12
-#define HINIC_CEQ_CTRL_0_KICK_THRESH_SHIFT 20
-#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24
-#define HINIC_CEQ_CTRL_0_INTR_MODE_SHIFT 31
-
-#define HINIC_CEQ_CTRL_0_INTR_IDX_MASK 0x3FF
-#define HINIC_CEQ_CTRL_0_DMA_ATTR_MASK 0x3F
-#define HINIC_CEQ_CTRL_0_KICK_THRESH_MASK 0xF
-#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_CEQ_CTRL_0_INTR_MODE_MASK 0x1
-
-#define HINIC_CEQ_CTRL_0_SET(val, member) \
- (((u32)(val) & HINIC_CEQ_CTRL_0_##member##_MASK) << \
- HINIC_CEQ_CTRL_0_##member##_SHIFT)
-
-#define HINIC_CEQ_CTRL_0_CLEAR(val, member) \
- ((val) & (~(HINIC_CEQ_CTRL_0_##member##_MASK \
- << HINIC_CEQ_CTRL_0_##member##_SHIFT)))
-
-#define HINIC_CEQ_CTRL_1_LEN_SHIFT 0
-#define HINIC_CEQ_CTRL_1_PAGE_SIZE_SHIFT 28
-
-#define HINIC_CEQ_CTRL_1_LEN_MASK 0x1FFFFF
-#define HINIC_CEQ_CTRL_1_PAGE_SIZE_MASK 0xF
-
-#define HINIC_CEQ_CTRL_1_SET(val, member) \
- (((u32)(val) & HINIC_CEQ_CTRL_1_##member##_MASK) << \
- HINIC_CEQ_CTRL_1_##member##_SHIFT)
-
-#define HINIC_CEQ_CTRL_1_CLEAR(val, member) \
- ((val) & (~(HINIC_CEQ_CTRL_1_##member##_MASK \
- << HINIC_CEQ_CTRL_1_##member##_SHIFT)))
-
-#define HINIC_EQ_ELEM_DESC_TYPE_SHIFT 0
-#define HINIC_EQ_ELEM_DESC_SRC_SHIFT 7
-#define HINIC_EQ_ELEM_DESC_SIZE_SHIFT 8
-#define HINIC_EQ_ELEM_DESC_WRAPPED_SHIFT 31
-
-#define HINIC_EQ_ELEM_DESC_TYPE_MASK 0x7F
-#define HINIC_EQ_ELEM_DESC_SRC_MASK 0x1
-#define HINIC_EQ_ELEM_DESC_SIZE_MASK 0xFF
-#define HINIC_EQ_ELEM_DESC_WRAPPED_MASK 0x1
-
-#define HINIC_EQ_ELEM_DESC_SET(val, member) \
- (((u32)(val) & HINIC_EQ_ELEM_DESC_##member##_MASK) << \
- HINIC_EQ_ELEM_DESC_##member##_SHIFT)
-
-#define HINIC_EQ_ELEM_DESC_GET(val, member) \
- (((val) >> HINIC_EQ_ELEM_DESC_##member##_SHIFT) & \
- HINIC_EQ_ELEM_DESC_##member##_MASK)
-
-#define HINIC_EQ_CI_IDX_SHIFT 0
-#define HINIC_EQ_CI_WRAPPED_SHIFT 20
-#define HINIC_EQ_CI_XOR_CHKSUM_SHIFT 24
-#define HINIC_EQ_CI_INT_ARMED_SHIFT 31
-
-#define HINIC_EQ_CI_IDX_MASK 0xFFFFF
-#define HINIC_EQ_CI_WRAPPED_MASK 0x1
-#define HINIC_EQ_CI_XOR_CHKSUM_MASK 0xF
-#define HINIC_EQ_CI_INT_ARMED_MASK 0x1
-
-#define HINIC_EQ_CI_SET(val, member) \
- (((u32)(val) & HINIC_EQ_CI_##member##_MASK) << \
- HINIC_EQ_CI_##member##_SHIFT)
-
-#define HINIC_EQ_CI_CLEAR(val, member) \
- ((val) & (~(HINIC_EQ_CI_##member##_MASK \
- << HINIC_EQ_CI_##member##_SHIFT)))
-
-#define HINIC_MAX_AEQS 4
-#define HINIC_MAX_CEQS 32
-
-#define HINIC_AEQE_SIZE 64
-#define HINIC_CEQE_SIZE 4
-
-#define HINIC_AEQE_DESC_SIZE 4
-#define HINIC_AEQE_DATA_SIZE \
- (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE)
-
-#define HINIC_DEFAULT_AEQ_LEN 64
-#define HINIC_DEFAULT_CEQ_LEN 1024
-
-#define HINIC_EQ_PAGE_SIZE SZ_4K
-
-#define HINIC_CEQ_ID_CMDQ 0
-
-enum hinic_eq_type {
- HINIC_AEQ,
- HINIC_CEQ,
-};
-
-enum hinic_aeq_type {
- HINIC_MSG_FROM_MGMT_CPU = 2,
-
- HINIC_MAX_AEQ_EVENTS,
-};
-
-enum hinic_ceq_type {
- HINIC_CEQ_CMDQ = 3,
-
- HINIC_MAX_CEQ_EVENTS,
-};
-
-enum hinic_eqe_state {
- HINIC_EQE_ENABLED = BIT(0),
- HINIC_EQE_RUNNING = BIT(1),
-};
-
-struct hinic_aeq_elem {
- u8 data[HINIC_AEQE_DATA_SIZE];
- u32 desc;
-};
-
-struct hinic_eq_work {
- struct work_struct work;
- void *data;
-};
-
-struct hinic_eq {
- struct hinic_hwif *hwif;
-
- enum hinic_eq_type type;
- int q_id;
- u32 q_len;
- u32 page_size;
-
- u32 cons_idx;
- int wrapped;
-
- size_t elem_size;
- int num_pages;
- int num_elem_in_pg;
-
- struct msix_entry msix_entry;
-
- dma_addr_t *dma_addr;
- void **virt_addr;
-
- struct hinic_eq_work aeq_work;
-
- struct tasklet_struct ceq_tasklet;
-};
-
-struct hinic_hw_event_cb {
- void (*hwe_handler)(void *handle, void *data, u8 size);
- void *handle;
- unsigned long hwe_state;
-};
-
-struct hinic_aeqs {
- struct hinic_hwif *hwif;
-
- struct hinic_eq aeq[HINIC_MAX_AEQS];
- int num_aeqs;
-
- struct hinic_hw_event_cb hwe_cb[HINIC_MAX_AEQ_EVENTS];
-
- struct workqueue_struct *workq;
-};
-
-struct hinic_ceq_cb {
- void (*handler)(void *handle, u32 ceqe_data);
- void *handle;
- enum hinic_eqe_state ceqe_state;
-};
-
-struct hinic_ceqs {
- struct hinic_hwif *hwif;
-
- struct hinic_eq ceq[HINIC_MAX_CEQS];
- int num_ceqs;
-
- struct hinic_ceq_cb ceq_cb[HINIC_MAX_CEQ_EVENTS];
-};
-
-void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event, void *handle,
- void (*hwe_handler)(void *handle, void *data,
- u8 size));
-
-void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs,
- enum hinic_aeq_type event);
-
-void hinic_ceq_register_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event, void *handle,
- void (*ceq_cb)(void *handle, u32 ceqe_data));
-
-void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs,
- enum hinic_ceq_type event);
-
-int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif,
- int num_aeqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries);
-
-void hinic_aeqs_free(struct hinic_aeqs *aeqs);
-
-int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif,
- int num_ceqs, u32 q_len, u32 page_size,
- struct msix_entry *msix_entries);
-
-void hinic_ceqs_free(struct hinic_ceqs *ceqs);
-
-#endif
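Aside for readers skimming the removal: the SHIFT/MASK pairs above pack several fields into one 32-bit EQ consumer-index register. A minimal stand-alone sketch of the SET() composition, reusing the real shifts and masks from this header but with made-up runtime values (SET here is an illustrative stand-in for HINIC_EQ_CI_SET, not a driver symbol):

#include <stdio.h>
#include <stdint.h>

#define IDX_SHIFT		0
#define WRAPPED_SHIFT		20
#define XOR_CHKSUM_SHIFT	24
#define INT_ARMED_SHIFT		31

#define IDX_MASK		0xFFFFF
#define WRAPPED_MASK		0x1
#define XOR_CHKSUM_MASK		0xF
#define INT_ARMED_MASK		0x1

#define SET(val, field) \
	(((uint32_t)(val) & (field##_MASK)) << (field##_SHIFT))

int main(void)
{
	/* ci = 100, wrapped, checksum nibble 0xA, interrupts armed */
	uint32_t ci = SET(100, IDX) | SET(1, WRAPPED) |
		      SET(0xA, XOR_CHKSUM) | SET(1, INT_ARMED);

	printf("ci reg = 0x%08x\n", ci);	/* 0x8a100064 */
	return 0;
}

Because each field is masked before shifting, an out-of-range value cannot corrupt its neighbours, which is why the driver can OR the pieces together in one expression.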
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
deleted file mode 100644
index 823a170..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-#include "hinic_hw_csr.h"
-#include "hinic_hw_if.h"
-
-#define PCIE_ATTR_ENTRY 0
-
-#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs)
-
-/**
- * hinic_msix_attr_set - set message attribute for msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer, u8 lli_credit_limit,
- u8 resend_timer)
-{
- u32 msix_ctrl, addr;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) |
- HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) |
- HINIC_MSIX_ATTR_SET(lli_timer, LLI_TIMER) |
- HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) |
- HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER);
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
-
- hinic_hwif_write_reg(hwif, addr, msix_ctrl);
- return 0;
-}
-
-/**
- * hinic_msix_attr_get - get message attribute of msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer)
-{
- u32 addr, val;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
- val = hinic_hwif_read_reg(hwif, addr);
-
- *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT);
- *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER);
- *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER);
- *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT);
- *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER);
- return 0;
-}
-
-/**
- * hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
-{
- u32 msix_ctrl, addr;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- msix_ctrl = HINIC_MSIX_CNT_SET(1, RESEND_TIMER);
- addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index);
-
- hinic_hwif_write_reg(hwif, addr, msix_ctrl);
- return 0;
-}
-
-/**
- * hinic_set_pf_action - set action on pf channel
- * @hwif: the HW interface of a pci function device
- * @action: action on pf channel
- *
- * The action is written into function attribute register 5; no return value.
- **/
-void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
-{
- u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR);
-
- attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION);
- attr5 |= HINIC_FA5_SET(action, PF_ACTION);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR, attr5);
-}
-
-enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- return HINIC_FA4_GET(attr4, OUTBOUND_STATE);
-}
-
-void hinic_outbound_state_set(struct hinic_hwif *hwif,
- enum hinic_outbound_state outbound_state)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- attr4 = HINIC_FA4_CLEAR(attr4, OUTBOUND_STATE);
- attr4 |= HINIC_FA4_SET(outbound_state, OUTBOUND_STATE);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
-}
-
-enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- return HINIC_FA4_GET(attr4, DB_STATE);
-}
-
-void hinic_db_state_set(struct hinic_hwif *hwif,
- enum hinic_db_state db_state)
-{
- u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR);
-
- attr4 = HINIC_FA4_CLEAR(attr4, DB_STATE);
- attr4 |= HINIC_FA4_SET(db_state, DB_STATE);
-
- hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4);
-}
-
-/**
- * hwif_ready - test if the HW is ready for use
- * @hwif: the HW interface of a pci function device
- *
- * Return 0 - Success, negative - Failure
- **/
-static int hwif_ready(struct hinic_hwif *hwif)
-{
- struct pci_dev *pdev = hwif->pdev;
- u32 addr, attr1;
-
- addr = HINIC_CSR_FUNC_ATTR1_ADDR;
- attr1 = hinic_hwif_read_reg(hwif, addr);
-
- if (!HINIC_FA1_GET(attr1, INIT_STATUS)) {
- dev_err(&pdev->dev, "hwif status is not ready\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * set_hwif_attr - set the relevant attribute members in hwif
- * @hwif: the HW interface of a pci function device
- * @attr0: the first attribute that was read from the hw
- * @attr1: the second attribute that was read from the hw
- **/
-static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1)
-{
- hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX);
- hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX);
- hwif->attr.pci_intf_idx = HINIC_FA0_GET(attr0, PCI_INTF_IDX);
- hwif->attr.func_type = HINIC_FA0_GET(attr0, FUNC_TYPE);
-
- hwif->attr.num_aeqs = BIT(HINIC_FA1_GET(attr1, AEQS_PER_FUNC));
- hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC));
- hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC));
- hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC));
-}
-
-/**
- * read_hwif_attr - read the attributes and set members in hwif
- * @hwif: the HW interface of a pci function device
- **/
-static void read_hwif_attr(struct hinic_hwif *hwif)
-{
- u32 addr, attr0, attr1;
-
- addr = HINIC_CSR_FUNC_ATTR0_ADDR;
- attr0 = hinic_hwif_read_reg(hwif, addr);
-
- addr = HINIC_CSR_FUNC_ATTR1_ADDR;
- attr1 = hinic_hwif_read_reg(hwif, addr);
-
- set_hwif_attr(hwif, attr0, attr1);
-}
-
-/**
- * set_ppf - try to elect this hwif as the PPF and update its type on success
- * @hwif: the HW interface of a pci function device
- **/
-static void set_ppf(struct hinic_hwif *hwif)
-{
- struct hinic_func_attr *attr = &hwif->attr;
- u32 addr, val, ppf_election;
-
- /* Read Modify Write */
- addr = HINIC_CSR_PPF_ELECTION_ADDR(HINIC_HWIF_PCI_INTF(hwif));
-
- val = hinic_hwif_read_reg(hwif, addr);
- val = HINIC_PPF_ELECTION_CLEAR(val, IDX);
-
- ppf_election = HINIC_PPF_ELECTION_SET(HINIC_HWIF_FUNC_IDX(hwif), IDX);
-
- val |= ppf_election;
- hinic_hwif_write_reg(hwif, addr, val);
-
- /* check PPF */
- val = hinic_hwif_read_reg(hwif, addr);
-
- attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX);
- if (attr->ppf_idx == HINIC_HWIF_FUNC_IDX(hwif))
- attr->func_type = HINIC_PPF;
-}
-
-/**
- * set_dma_attr - set the dma attributes in the HW
- * @hwif: the HW interface of a pci function device
- * @entry_idx: the entry index in the dma table
- * @st: PCIE TLP steering tag
- * @at: PCIE TLP AT field
- * @ph: PCIE TLP Processing Hint field
- * @no_snooping: PCIE TLP No snooping
- * @tph_en: PCIE TLP Processing Hint Enable
- **/
-static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx,
- u8 st, u8 at, u8 ph,
- enum hinic_pcie_nosnoop no_snooping,
- enum hinic_pcie_tph tph_en)
-{
- u32 addr, val, dma_attr_entry;
-
- /* Read Modify Write */
- addr = HINIC_CSR_DMA_ATTR_ADDR(entry_idx);
-
- val = hinic_hwif_read_reg(hwif, addr);
- val = HINIC_DMA_ATTR_CLEAR(val, ST) &
- HINIC_DMA_ATTR_CLEAR(val, AT) &
- HINIC_DMA_ATTR_CLEAR(val, PH) &
- HINIC_DMA_ATTR_CLEAR(val, NO_SNOOPING) &
- HINIC_DMA_ATTR_CLEAR(val, TPH_EN);
-
- dma_attr_entry = HINIC_DMA_ATTR_SET(st, ST) |
- HINIC_DMA_ATTR_SET(at, AT) |
- HINIC_DMA_ATTR_SET(ph, PH) |
- HINIC_DMA_ATTR_SET(no_snooping, NO_SNOOPING) |
- HINIC_DMA_ATTR_SET(tph_en, TPH_EN);
-
- val |= dma_attr_entry;
- hinic_hwif_write_reg(hwif, addr, val);
-}
-
-/**
- * dma_attr_init - initialize the default dma attributes
- * @hwif: the HW interface of a pci function device
- **/
-static void dma_attr_init(struct hinic_hwif *hwif)
-{
- set_dma_attr(hwif, PCIE_ATTR_ENTRY, HINIC_PCIE_ST_DISABLE,
- HINIC_PCIE_AT_DISABLE, HINIC_PCIE_PH_DISABLE,
- HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE);
-}
-
-/**
- * hinic_init_hwif - initialize the hw interface
- * @hwif: the HW interface of a pci function device
- * @pdev: the pci device for accessing PCI resources
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev)
-{
- int err;
-
- hwif->pdev = pdev;
-
- hwif->cfg_regs_bar = pci_ioremap_bar(pdev, HINIC_PCI_CFG_REGS_BAR);
- if (!hwif->cfg_regs_bar) {
- dev_err(&pdev->dev, "Failed to map configuration regs\n");
- return -ENOMEM;
- }
-
- err = hwif_ready(hwif);
- if (err) {
- dev_err(&pdev->dev, "HW interface is not ready\n");
- goto err_hwif_ready;
- }
-
- read_hwif_attr(hwif);
-
- if (HINIC_IS_PF(hwif))
- set_ppf(hwif);
-
- /* No transactions before DMA is initialized */
- dma_attr_init(hwif);
- return 0;
-
-err_hwif_ready:
- iounmap(hwif->cfg_regs_bar);
- return err;
-}
-
-/**
- * hinic_free_hwif - free the HW interface
- * @hwif: the HW interface of a pci function device
- **/
-void hinic_free_hwif(struct hinic_hwif *hwif)
-{
- iounmap(hwif->cfg_regs_bar);
-}
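The attribute-register updates this file performed (hinic_set_pf_action(), hinic_outbound_state_set(), set_dma_attr()) all share the same clear-then-set read-modify-write shape. A minimal sketch of that pattern under illustrative shift/mask values (the FIELD_* names and field_update() are stand-ins, not driver symbols):

#include <stdio.h>
#include <stdint.h>

#define FIELD_SHIFT	4
#define FIELD_MASK	0xFu

/* Clear the target field, then OR in the new value; all other fields
 * in the register are preserved, mirroring the CLEAR/SET macro pairs. */
static uint32_t field_update(uint32_t reg, uint32_t val)
{
	reg &= ~(FIELD_MASK << FIELD_SHIFT);
	reg |= (val & FIELD_MASK) << FIELD_SHIFT;
	return reg;
}

int main(void)
{
	uint32_t reg = 0xFFFFFFFFu;

	printf("0x%08x\n", field_update(reg, 0x3));	/* 0xffffff3f */
	return 0;
}

In the driver the read and write ends of this sequence go through hinic_hwif_read_reg()/hinic_hwif_write_reg(), so the pattern is a full MMIO read-modify-write rather than a cached update.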
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
deleted file mode 100644
index 5b4760c..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_IF_H
-#define HINIC_HW_IF_H
-
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#define HINIC_DMA_ATTR_ST_SHIFT 0
-#define HINIC_DMA_ATTR_AT_SHIFT 8
-#define HINIC_DMA_ATTR_PH_SHIFT 10
-#define HINIC_DMA_ATTR_NO_SNOOPING_SHIFT 12
-#define HINIC_DMA_ATTR_TPH_EN_SHIFT 13
-
-#define HINIC_DMA_ATTR_ST_MASK 0xFF
-#define HINIC_DMA_ATTR_AT_MASK 0x3
-#define HINIC_DMA_ATTR_PH_MASK 0x3
-#define HINIC_DMA_ATTR_NO_SNOOPING_MASK 0x1
-#define HINIC_DMA_ATTR_TPH_EN_MASK 0x1
-
-#define HINIC_DMA_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_DMA_ATTR_##member##_MASK) << \
- HINIC_DMA_ATTR_##member##_SHIFT)
-
-#define HINIC_DMA_ATTR_CLEAR(val, member) \
- ((val) & (~(HINIC_DMA_ATTR_##member##_MASK \
- << HINIC_DMA_ATTR_##member##_SHIFT)))
-
-#define HINIC_FA0_FUNC_IDX_SHIFT 0
-#define HINIC_FA0_PF_IDX_SHIFT 10
-#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14
-/* reserved members - off 16 */
-#define HINIC_FA0_FUNC_TYPE_SHIFT 24
-
-#define HINIC_FA0_FUNC_IDX_MASK 0x3FF
-#define HINIC_FA0_PF_IDX_MASK 0xF
-#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3
-#define HINIC_FA0_FUNC_TYPE_MASK 0x1
-
-#define HINIC_FA0_GET(val, member) \
- (((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK)
-
-#define HINIC_FA1_AEQS_PER_FUNC_SHIFT 8
-/* reserved members - off 10 */
-#define HINIC_FA1_CEQS_PER_FUNC_SHIFT 12
-/* reserved members - off 15 */
-#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20
-#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24
-/* reserved members - off 27 */
-#define HINIC_FA1_INIT_STATUS_SHIFT 30
-
-#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3
-#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7
-#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF
-#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7
-#define HINIC_FA1_INIT_STATUS_MASK 0x1
-
-#define HINIC_FA1_GET(val, member) \
- (((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK)
-
-#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0
-#define HINIC_FA4_DB_STATE_SHIFT 1
-
-#define HINIC_FA4_OUTBOUND_STATE_MASK 0x1
-#define HINIC_FA4_DB_STATE_MASK 0x1
-
-#define HINIC_FA4_GET(val, member) \
- (((val) >> HINIC_FA4_##member##_SHIFT) & HINIC_FA4_##member##_MASK)
-
-#define HINIC_FA4_SET(val, member) \
- ((((u32)val) & HINIC_FA4_##member##_MASK) << HINIC_FA4_##member##_SHIFT)
-
-#define HINIC_FA4_CLEAR(val, member) \
- ((val) & (~(HINIC_FA4_##member##_MASK << HINIC_FA4_##member##_SHIFT)))
-
-#define HINIC_FA5_PF_ACTION_SHIFT 0
-#define HINIC_FA5_PF_ACTION_MASK 0xFFFF
-
-#define HINIC_FA5_SET(val, member) \
- (((u32)(val) & HINIC_FA5_##member##_MASK) << HINIC_FA5_##member##_SHIFT)
-
-#define HINIC_FA5_CLEAR(val, member) \
- ((val) & (~(HINIC_FA5_##member##_MASK << HINIC_FA5_##member##_SHIFT)))
-
-#define HINIC_PPF_ELECTION_IDX_SHIFT 0
-#define HINIC_PPF_ELECTION_IDX_MASK 0x1F
-
-#define HINIC_PPF_ELECTION_SET(val, member) \
- (((u32)(val) & HINIC_PPF_ELECTION_##member##_MASK) << \
- HINIC_PPF_ELECTION_##member##_SHIFT)
-
-#define HINIC_PPF_ELECTION_GET(val, member) \
- (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \
- HINIC_PPF_ELECTION_##member##_MASK)
-
-#define HINIC_PPF_ELECTION_CLEAR(val, member) \
- ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \
- << HINIC_PPF_ELECTION_##member##_SHIFT)))
-
-#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0
-#define HINIC_MSIX_COALESC_TIMER_SHIFT 8
-#define HINIC_MSIX_LLI_TIMER_SHIFT 16
-#define HINIC_MSIX_LLI_CREDIT_SHIFT 24
-#define HINIC_MSIX_RESEND_TIMER_SHIFT 29
-
-#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFF
-#define HINIC_MSIX_COALESC_TIMER_MASK 0xFF
-#define HINIC_MSIX_LLI_TIMER_MASK 0xFF
-#define HINIC_MSIX_LLI_CREDIT_MASK 0x1F
-#define HINIC_MSIX_RESEND_TIMER_MASK 0x7
-
-#define HINIC_MSIX_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_MSIX_##member##_MASK) << \
- HINIC_MSIX_##member##_SHIFT)
-
-#define HINIC_MSIX_ATTR_GET(val, member) \
- (((val) >> HINIC_MSIX_##member##_SHIFT) & \
- HINIC_MSIX_##member##_MASK)
-
-#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
-
-#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1
-
-#define HINIC_MSIX_CNT_SET(val, member) \
- (((u32)(val) & HINIC_MSIX_CNT_##member##_MASK) << \
- HINIC_MSIX_CNT_##member##_SHIFT)
-
-#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs)
-#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs)
-#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs)
-#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx)
-#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx)
-#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx)
-
-#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
-#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
-#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF)
-
-#define HINIC_PCI_CFG_REGS_BAR 0
-#define HINIC_PCI_DB_BAR 4
-
-#define HINIC_PCIE_ST_DISABLE 0
-#define HINIC_PCIE_AT_DISABLE 0
-#define HINIC_PCIE_PH_DISABLE 0
-
-#define HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT 0xFF /* max */
-#define HINIC_EQ_MSIX_LLI_TIMER_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */
-#define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */
-
-enum hinic_pcie_nosnoop {
- HINIC_PCIE_SNOOP = 0,
- HINIC_PCIE_NO_SNOOP = 1,
-};
-
-enum hinic_pcie_tph {
- HINIC_PCIE_TPH_DISABLE = 0,
- HINIC_PCIE_TPH_ENABLE = 1,
-};
-
-enum hinic_func_type {
- HINIC_PF = 0,
- HINIC_PPF = 2,
-};
-
-enum hinic_mod_type {
- HINIC_MOD_COMM = 0, /* HW communication module */
- HINIC_MOD_L2NIC = 1, /* L2NIC module */
- HINIC_MOD_CFGM = 7, /* Configuration module */
-
- HINIC_MOD_MAX = 15
-};
-
-enum hinic_node_id {
- HINIC_NODE_ID_MGMT = 21,
-};
-
-enum hinic_pf_action {
- HINIC_PF_MGMT_INIT = 0x0,
-
- HINIC_PF_MGMT_ACTIVE = 0x11,
-};
-
-enum hinic_outbound_state {
- HINIC_OUTBOUND_ENABLE = 0,
- HINIC_OUTBOUND_DISABLE = 1,
-};
-
-enum hinic_db_state {
- HINIC_DB_ENABLE = 0,
- HINIC_DB_DISABLE = 1,
-};
-
-struct hinic_func_attr {
- u16 func_idx;
- u8 pf_idx;
- u8 pci_intf_idx;
-
- enum hinic_func_type func_type;
-
- u8 ppf_idx;
-
- u16 num_irqs;
- u8 num_aeqs;
- u8 num_ceqs;
-
- u8 num_dma_attr;
-};
-
-struct hinic_hwif {
- struct pci_dev *pdev;
- void __iomem *cfg_regs_bar;
-
- struct hinic_func_attr attr;
-};
-
-static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg)
-{
- return be32_to_cpu(readl(hwif->cfg_regs_bar + reg));
-}
-
-static inline void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg,
- u32 val)
-{
- writel(cpu_to_be32(val), hwif->cfg_regs_bar + reg);
-}
-
-int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
- u8 pending_limit, u8 coalesc_timer,
- u8 lli_timer, u8 lli_credit_limit,
- u8 resend_timer);
-
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer);
-
-int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index);
-
-void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action);
-
-enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif);
-
-void hinic_outbound_state_set(struct hinic_hwif *hwif,
- enum hinic_outbound_state outbound_state);
-
-enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif);
-
-void hinic_db_state_set(struct hinic_hwif *hwif,
- enum hinic_db_state db_state);
-
-int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev);
-
-void hinic_free_hwif(struct hinic_hwif *hwif);
-
-#endif
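One subtlety when reading the FA1 fields above together with set_hwif_attr() in the removed hinic_hw_if.c: the AEQS/CEQS/IRQS/DMA_ATTR counts are stored log2-encoded, so the driver applies BIT() to the extracted field to recover the actual count. A small stand-alone sketch with a made-up attr1 value (GET is an illustrative stand-in for HINIC_FA1_GET):

#include <stdio.h>
#include <stdint.h>

#define AEQS_PER_FUNC_SHIFT	8
#define AEQS_PER_FUNC_MASK	0x3

#define GET(val, shift, mask)	(((val) >> (shift)) & (mask))

int main(void)
{
	uint32_t attr1 = 0x2u << AEQS_PER_FUNC_SHIFT;	/* field holds log2(4) */
	uint32_t num_aeqs = 1u << GET(attr1, AEQS_PER_FUNC_SHIFT,
				      AEQS_PER_FUNC_MASK);

	printf("num_aeqs = %u\n", num_aeqs);	/* prints 4 */
	return 0;
}

The log2 encoding is also why the 2-bit AEQS field can still express up to 8 queues even though HINIC_MAX_AEQS is only 4.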
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
deleted file mode 100644
index 8e58976..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ /dev/null
@@ -1,533 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/semaphore.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/err.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-
-#define CI_Q_ADDR_SIZE sizeof(u32)
-
-#define CI_ADDR(base_addr, q_id) ((base_addr) + \
- (q_id) * CI_Q_ADDR_SIZE)
-
-#define CI_TABLE_SIZE(num_qps) ((num_qps) * CI_Q_ADDR_SIZE)
-
-#define DB_IDX(db, db_base) \
- (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)
-
-enum io_cmd {
- IO_CMD_MODIFY_QUEUE_CTXT = 0,
-};
-
-static void init_db_area_idx(struct hinic_free_db_area *free_db_area)
-{
- int i;
-
- for (i = 0; i < HINIC_DB_MAX_AREAS; i++)
- free_db_area->db_idx[i] = i;
-
- free_db_area->alloc_pos = 0;
- free_db_area->return_pos = HINIC_DB_MAX_AREAS;
-
- free_db_area->num_free = HINIC_DB_MAX_AREAS;
-
- sema_init(&free_db_area->idx_lock, 1);
-}
-
-static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io)
-{
- struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
- int pos, idx;
-
- down(&free_db_area->idx_lock);
-
- free_db_area->num_free--;
-
- if (free_db_area->num_free < 0) {
- free_db_area->num_free++;
- up(&free_db_area->idx_lock);
- return ERR_PTR(-ENOMEM);
- }
-
- pos = free_db_area->alloc_pos++;
- pos &= HINIC_DB_MAX_AREAS - 1;
-
- idx = free_db_area->db_idx[pos];
-
- free_db_area->db_idx[pos] = -1;
-
- up(&free_db_area->idx_lock);
-
- return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE;
-}
-
-static void return_db_area(struct hinic_func_to_io *func_to_io,
- void __iomem *db_base)
-{
- struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area;
- int pos, idx = DB_IDX(db_base, func_to_io->db_base);
-
- down(&free_db_area->idx_lock);
-
- pos = free_db_area->return_pos++;
- pos &= HINIC_DB_MAX_AREAS - 1;
-
- free_db_area->db_idx[pos] = idx;
-
- free_db_area->num_free++;
-
- up(&free_db_area->idx_lock);
-}
-
-static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_sqs)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct hinic_sq_ctxt_block *sq_ctxt_block;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_buf cmdq_buf;
- struct hinic_sq_ctxt *sq_ctxt;
- struct hinic_qp *qp;
- u64 out_param;
- int err, i;
-
- err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
- return err;
- }
-
- sq_ctxt_block = cmdq_buf.buf;
- sq_ctxt = sq_ctxt_block->sq_ctxt;
-
- hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ,
- num_sqs, func_to_io->max_qps);
- for (i = 0; i < num_sqs; i++) {
- qp = &func_to_io->qps[i];
-
- hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
- base_qpn + qp->q_id);
- }
-
- cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs);
-
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
- &out_param);
- if ((err) || (out_param != 0)) {
- dev_err(&pdev->dev, "Failed to set SQ ctxts\n");
- err = -EFAULT;
- }
-
- hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- return err;
-}
-
-static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_rqs)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct hinic_rq_ctxt_block *rq_ctxt_block;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_cmdq_buf cmdq_buf;
- struct hinic_rq_ctxt *rq_ctxt;
- struct hinic_qp *qp;
- u64 out_param;
- int err, i;
-
- err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
- return err;
- }
-
- rq_ctxt_block = cmdq_buf.buf;
- rq_ctxt = rq_ctxt_block->rq_ctxt;
-
- hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ,
- num_rqs, func_to_io->max_qps);
- for (i = 0; i < num_rqs; i++) {
- qp = &func_to_io->qps[i];
-
- hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq,
- base_qpn + qp->q_id);
- }
-
- cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs);
-
- err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC,
- IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf,
- &out_param);
- if ((err) || (out_param != 0)) {
- dev_err(&pdev->dev, "Failed to set RQ ctxts\n");
- err = -EFAULT;
- }
-
- hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf);
- return err;
-}
-
-/**
- * write_qp_ctxts - write the qp ctxt to HW
- * @func_to_io: func to io channel that holds the IO components
- * @base_qpn: first qp number
- * @num_qps: number of qps to write
- *
- * Return 0 - Success, negative - Failure
- **/
-static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn,
- u16 num_qps)
-{
- return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||
- write_rq_ctxts(func_to_io, base_qpn, num_qps));
-}
-
-/**
- * init_qp - Initialize a Queue Pair
- * @func_to_io: func to io channel that holds the IO components
- * @qp: pointer to the qp to initialize
- * @q_id: the id of the qp
- * @sq_msix_entry: msix entry for sq
- * @rq_msix_entry: msix entry for rq
- *
- * Return 0 - Success, negative - Failure
- **/
-static int init_qp(struct hinic_func_to_io *func_to_io,
- struct hinic_qp *qp, int q_id,
- struct msix_entry *sq_msix_entry,
- struct msix_entry *rq_msix_entry)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- void __iomem *db_base;
- int err;
-
- qp->q_id = q_id;
-
- err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],
- HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE,
- HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n");
- return err;
- }
-
- err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],
- HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE,
- HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n");
- goto err_rq_alloc;
- }
-
- db_base = get_db_area(func_to_io);
- if (IS_ERR(db_base)) {
- dev_err(&pdev->dev, "Failed to get DB area for SQ\n");
- err = PTR_ERR(db_base);
- goto err_get_db;
- }
-
- func_to_io->sq_db[q_id] = db_base;
-
- err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
- sq_msix_entry,
- CI_ADDR(func_to_io->ci_addr_base, q_id),
- CI_ADDR(func_to_io->ci_dma_base, q_id), db_base);
- if (err) {
- dev_err(&pdev->dev, "Failed to init SQ\n");
- goto err_sq_init;
- }
-
- err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
- rq_msix_entry);
- if (err) {
- dev_err(&pdev->dev, "Failed to init RQ\n");
- goto err_rq_init;
- }
-
- return 0;
-
-err_rq_init:
- hinic_clean_sq(&qp->sq);
-
-err_sq_init:
- return_db_area(func_to_io, db_base);
-
-err_get_db:
- hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
-
-err_rq_alloc:
- hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
- return err;
-}
-
-/**
- * destroy_qp - Clean the resources of a Queue Pair
- * @func_to_io: func to io channel that holds the IO components
- * @qp: pointer to the qp to clean
- **/
-static void destroy_qp(struct hinic_func_to_io *func_to_io,
- struct hinic_qp *qp)
-{
- int q_id = qp->q_id;
-
- hinic_clean_rq(&qp->rq);
- hinic_clean_sq(&qp->sq);
-
- return_db_area(func_to_io, func_to_io->sq_db[q_id]);
-
- hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]);
- hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]);
-}
-
-/**
- * hinic_io_create_qps - Create Queue Pairs
- * @func_to_io: func to io channel that holds the IO components
- * @base_qpn: base qp number
- * @num_qps: number of queue pairs to create
- * @sq_msix_entries: msix entries for sqs
- * @rq_msix_entries: msix entries for rqs
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
- u16 base_qpn, int num_qps,
- struct msix_entry *sq_msix_entries,
- struct msix_entry *rq_msix_entries)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t qps_size, wq_size, db_size;
- void *ci_addr_base;
- int i, j, err;
-
- qps_size = num_qps * sizeof(*func_to_io->qps);
- func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
- if (!func_to_io->qps)
- return -ENOMEM;
-
- wq_size = num_qps * sizeof(*func_to_io->sq_wq);
- func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
- if (!func_to_io->sq_wq) {
- err = -ENOMEM;
- goto err_sq_wq;
- }
-
- wq_size = num_qps * sizeof(*func_to_io->rq_wq);
- func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
- if (!func_to_io->rq_wq) {
- err = -ENOMEM;
- goto err_rq_wq;
- }
-
- db_size = num_qps * sizeof(*func_to_io->sq_db);
- func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
- if (!func_to_io->sq_db) {
- err = -ENOMEM;
- goto err_sq_db;
- }
-
- ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
- &func_to_io->ci_dma_base,
- GFP_KERNEL);
- if (!ci_addr_base) {
- dev_err(&pdev->dev, "Failed to allocate CI area\n");
- err = -ENOMEM;
- goto err_ci_base;
- }
-
- func_to_io->ci_addr_base = ci_addr_base;
-
- for (i = 0; i < num_qps; i++) {
- err = init_qp(func_to_io, &func_to_io->qps[i], i,
- &sq_msix_entries[i], &rq_msix_entries[i]);
- if (err) {
- dev_err(&pdev->dev, "Failed to create QP %d\n", i);
- goto err_init_qp;
- }
- }
-
- err = write_qp_ctxts(func_to_io, base_qpn, num_qps);
- if (err) {
- dev_err(&pdev->dev, "Failed to init QP ctxts\n");
- goto err_write_qp_ctxts;
- }
-
- return 0;
-
-err_write_qp_ctxts:
-err_init_qp:
- for (j = 0; j < i; j++)
- destroy_qp(func_to_io, &func_to_io->qps[j]);
-
- dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps),
- func_to_io->ci_addr_base, func_to_io->ci_dma_base);
-
-err_ci_base:
- devm_kfree(&pdev->dev, func_to_io->sq_db);
-
-err_sq_db:
- devm_kfree(&pdev->dev, func_to_io->rq_wq);
-
-err_rq_wq:
- devm_kfree(&pdev->dev, func_to_io->sq_wq);
-
-err_sq_wq:
- devm_kfree(&pdev->dev, func_to_io->qps);
- return err;
-}
-
-/**
- * hinic_io_destroy_qps - Destroy the IO Queue Pairs
- * @func_to_io: func to io channel that holds the IO components
- * @num_qps: number of queue pairs to destroy
- **/
-void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)
-{
- struct hinic_hwif *hwif = func_to_io->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t ci_table_size;
- int i;
-
- ci_table_size = CI_TABLE_SIZE(num_qps);
-
- for (i = 0; i < num_qps; i++)
- destroy_qp(func_to_io, &func_to_io->qps[i]);
-
- dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base,
- func_to_io->ci_dma_base);
-
- devm_kfree(&pdev->dev, func_to_io->sq_db);
-
- devm_kfree(&pdev->dev, func_to_io->rq_wq);
- devm_kfree(&pdev->dev, func_to_io->sq_wq);
-
- devm_kfree(&pdev->dev, func_to_io->qps);
-}
-
-/**
- * hinic_io_init - Initialize the IO components
- * @func_to_io: func to io channel that holds the IO components
- * @hwif: HW interface for accessing IO
- * @max_qps: maximum QPs in HW
- * @num_ceqs: number of completion event queues
- * @ceq_msix_entries: msix entries for ceqs
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_io_init(struct hinic_func_to_io *func_to_io,
- struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
- struct msix_entry *ceq_msix_entries)
-{
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_cmdq_type cmdq, type;
- void __iomem *db_area;
- int err;
-
- func_to_io->hwif = hwif;
- func_to_io->qps = NULL;
- func_to_io->max_qps = max_qps;
-
- err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs,
- HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE,
- ceq_msix_entries);
- if (err) {
- dev_err(&pdev->dev, "Failed to init CEQs\n");
- return err;
- }
-
- err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate WQS for IO\n");
- goto err_wqs_alloc;
- }
-
- func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR);
- if (!func_to_io->db_base) {
- dev_err(&pdev->dev, "Failed to remap IO DB area\n");
- err = -ENOMEM;
- goto err_db_ioremap;
- }
-
- init_db_area_idx(&func_to_io->free_db_area);
-
- for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
- db_area = get_db_area(func_to_io);
- if (IS_ERR(db_area)) {
- dev_err(&pdev->dev, "Failed to get cmdq db area\n");
- err = PTR_ERR(db_area);
- goto err_db_area;
- }
-
- func_to_io->cmdq_db_area[cmdq] = db_area;
- }
-
- err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif,
- func_to_io->cmdq_db_area);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmdqs\n");
- goto err_init_cmdqs;
- }
-
- return 0;
-
-err_init_cmdqs:
-err_db_area:
- for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
- return_db_area(func_to_io, func_to_io->cmdq_db_area[type]);
-
- iounmap(func_to_io->db_base);
-
-err_db_ioremap:
- hinic_wqs_free(&func_to_io->wqs);
-
-err_wqs_alloc:
- hinic_ceqs_free(&func_to_io->ceqs);
- return err;
-}
-
-/**
- * hinic_io_free - Free the IO components
- * @func_to_io: func to io channel that holds the IO components
- **/
-void hinic_io_free(struct hinic_func_to_io *func_to_io)
-{
- enum hinic_cmdq_type cmdq;
-
- hinic_free_cmdqs(&func_to_io->cmdqs);
-
- for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
- return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
-
- iounmap(func_to_io->db_base);
- hinic_wqs_free(&func_to_io->wqs);
- hinic_ceqs_free(&func_to_io->ceqs);
-}
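The free-doorbell bookkeeping removed here (init_db_area_idx(), get_db_area(), return_db_area()) is a fixed-size FIFO over a power-of-two index array: alloc_pos and return_pos only ever grow and are masked back into range, so HINIC_DB_MAX_AREAS being a power of two is load-bearing. A user-space sketch of the same ring, with MAX_AREAS standing in for HINIC_DB_MAX_AREAS and the idx_lock semaphore omitted:

#include <stdio.h>

#define MAX_AREAS 8	/* must stay a power of two for the masking */

static int db_idx[MAX_AREAS];
static int alloc_pos, return_pos, num_free;

static void ring_init(void)
{
	int i;

	for (i = 0; i < MAX_AREAS; i++)
		db_idx[i] = i;

	alloc_pos = 0;
	return_pos = MAX_AREAS;	/* masks to 0, like the driver's init */
	num_free = MAX_AREAS;
}

static int ring_get(void)
{
	int pos, idx;

	num_free--;
	if (num_free < 0) {
		num_free++;
		return -1;	/* the driver returns ERR_PTR(-ENOMEM) */
	}

	pos = alloc_pos++ & (MAX_AREAS - 1);
	idx = db_idx[pos];
	db_idx[pos] = -1;	/* mark the slot consumed */
	return idx;
}

static void ring_put(int idx)
{
	int pos = return_pos++ & (MAX_AREAS - 1);

	db_idx[pos] = idx;
	num_free++;
}

int main(void)
{
	int a, b;

	ring_init();
	a = ring_get();		/* 0 */
	b = ring_get();		/* 1 */
	ring_put(a);
	printf("%d %d %d\n", a, b, ring_get());	/* 0 1 2 */
	return 0;
}

Because the positions wrap monotonically, no separate head/tail emptiness test is needed; num_free alone carries that state under the semaphore.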
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
deleted file mode 100644
index adb6417..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_IO_H
-#define HINIC_HW_IO_H
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/semaphore.h>
-#include <linux/sizes.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-#include "hinic_hw_qp.h"
-
-#define HINIC_DB_PAGE_SIZE SZ_4K
-#define HINIC_DB_SIZE SZ_4M
-
-#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE)
-
-enum hinic_db_type {
- HINIC_DB_CMDQ_TYPE,
- HINIC_DB_SQ_TYPE,
-};
-
-enum hinic_io_path {
- HINIC_CTRL_PATH,
- HINIC_DATA_PATH,
-};
-
-struct hinic_free_db_area {
- int db_idx[HINIC_DB_MAX_AREAS];
-
- int alloc_pos;
- int return_pos;
-
- int num_free;
-
- /* Lock for getting db area */
- struct semaphore idx_lock;
-};
-
-struct hinic_func_to_io {
- struct hinic_hwif *hwif;
-
- struct hinic_ceqs ceqs;
-
- struct hinic_wqs wqs;
-
- struct hinic_wq *sq_wq;
- struct hinic_wq *rq_wq;
-
- struct hinic_qp *qps;
- u16 max_qps;
-
- void __iomem **sq_db;
- void __iomem *db_base;
-
- void *ci_addr_base;
- dma_addr_t ci_dma_base;
-
- struct hinic_free_db_area free_db_area;
-
- void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES];
-
- struct hinic_cmdqs cmdqs;
-};
-
-int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
- u16 base_qpn, int num_qps,
- struct msix_entry *sq_msix_entries,
- struct msix_entry *rq_msix_entries);
-
-void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io,
- int num_qps);
-
-int hinic_io_init(struct hinic_func_to_io *func_to_io,
- struct hinic_hwif *hwif, u16 max_qps, int num_ceqs,
- struct msix_entry *ceq_msix_entries);
-
-void hinic_io_free(struct hinic_func_to_io *func_to_io);
-
-#endif
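HINIC_DB_MAX_AREAS above fixes the doorbell geometry: the 4 MB doorbell BAR divided into 4 KB pages yields 1024 areas, and the DB_IDX() helper in the removed hinic_hw_io.c inverts the base + idx * page mapping when an area is returned. A quick sketch of that round trip (db_bar is a pretend BAR mapping, not real MMIO):

#include <stdio.h>
#include <stdint.h>

#define DB_PAGE_SIZE	4096u

#define DB_IDX(db, base) \
	(((uintptr_t)(db) - (uintptr_t)(base)) / DB_PAGE_SIZE)

int main(void)
{
	static char db_bar[8 * DB_PAGE_SIZE];	/* pretend mapping */
	char *area = db_bar + 5 * DB_PAGE_SIZE;	/* as if idx 5 was handed out */

	printf("returned idx = %u\n", (unsigned int)DB_IDX(area, db_bar));
	return 0;
}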
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
deleted file mode 100644
index 278dc13..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ /dev/null
@@ -1,597 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/semaphore.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <asm/barrier.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_eqs.h"
-#include "hinic_hw_api_cmd.h"
-#include "hinic_hw_mgmt.h"
-#include "hinic_hw_dev.h"
-
-#define SYNC_MSG_ID_MASK 0x1FF
-
-#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id)
-
-#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \
- ((SYNC_MSG_ID(pf_to_mgmt) + 1) & \
- SYNC_MSG_ID_MASK))
-
-#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_LEN)
-
-#define MGMT_MSG_LEN_MIN 20
-#define MGMT_MSG_LEN_STEP 16
-#define MGMT_MSG_RSVD_FOR_DEV 8
-
-#define SEGMENT_LEN 48
-
-#define MAX_PF_MGMT_BUF_SIZE 2048
-
-/* Data should be SEG LEN size aligned */
-#define MAX_MSG_LEN 2016
-
-#define MSG_NOT_RESP 0xFFFF
-
-#define MGMT_MSG_TIMEOUT 1000
-
-#define mgmt_to_pfhwdev(pf_mgmt) \
- container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt)
-
-enum msg_segment_type {
- NOT_LAST_SEGMENT = 0,
- LAST_SEGMENT = 1,
-};
-
-enum mgmt_direction_type {
- MGMT_DIRECT_SEND = 0,
- MGMT_RESP = 1,
-};
-
-enum msg_ack_type {
- MSG_ACK = 0,
- MSG_NO_ACK = 1,
-};
-
-/**
- * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that this handler will handle its messages
- * @handle: private data for the callback
- * @callback: the handler that will handle messages
- **/
-void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod,
- void *handle,
- void (*callback)(void *handle,
- u8 cmd, void *buf_in,
- u16 in_size, void *buf_out,
- u16 *out_size))
-{
- struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];
-
- mgmt_cb->cb = callback;
- mgmt_cb->handle = handle;
- mgmt_cb->state = HINIC_MGMT_CB_ENABLED;
-}
-
-/**
- * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that this handler handles its messages
- **/
-void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod)
-{
- struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod];
-
- mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED;
-
- while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING)
- schedule();
-
- mgmt_cb->cb = NULL;
-}
-
-/**
- * prepare_header - prepare the header of the message
- * @pf_to_mgmt: PF to MGMT channel
- * @msg_len: the length of the message
- * @mod: module in the chip that will get the message
- * @ack_type: ask for response
- * @direction: the direction of the message
- * @cmd: command of the message
- * @msg_id: message id
- *
- * Return the prepared header value
- **/
-static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt,
- u16 msg_len, enum hinic_mod_type mod,
- enum msg_ack_type ack_type,
- enum mgmt_direction_type direction,
- u16 cmd, u16 msg_id)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
-
- return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) |
- HINIC_MSG_HEADER_SET(mod, MODULE) |
- HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) |
- HINIC_MSG_HEADER_SET(ack_type, NO_ACK) |
- HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) |
- HINIC_MSG_HEADER_SET(0, SEQID) |
- HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) |
- HINIC_MSG_HEADER_SET(direction, DIRECTION) |
- HINIC_MSG_HEADER_SET(cmd, CMD) |
- HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) |
- HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) |
- HINIC_MSG_HEADER_SET(msg_id, MSG_ID);
-}
-
-/**
- * prepare_mgmt_cmd - prepare the mgmt command
- * @mgmt_cmd: pointer to the command to prepare
- * @header: pointer of the header for the message
- * @msg: the data of the message
- * @msg_len: the length of the message
- **/
-static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len)
-{
- memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV);
-
- mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV;
- memcpy(mgmt_cmd, header, sizeof(*header));
-
- mgmt_cmd += sizeof(*header);
- memcpy(mgmt_cmd, msg, msg_len);
-}
-
-/**
- * mgmt_msg_len - calculate the total message length
- * @msg_data_len: the length of the message data
- *
- * Return the total message length
- **/
-static u16 mgmt_msg_len(u16 msg_data_len)
-{
- /* RSVD + HEADER_SIZE + DATA_LEN */
- u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len;
-
- if (msg_len > MGMT_MSG_LEN_MIN)
- msg_len = MGMT_MSG_LEN_MIN +
- ALIGN((msg_len - MGMT_MSG_LEN_MIN),
- MGMT_MSG_LEN_STEP);
- else
- msg_len = MGMT_MSG_LEN_MIN;
-
- return msg_len;
-}
-
-/**
- * send_msg_to_mgmt - send message to mgmt by API CMD
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @data: the msg data
- * @data_len: the msg data length
- * @ack_type: ask for response
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to respond to
- *
- * Return 0 - Success, negative - Failure
- **/
-static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *data, u16 data_len,
- enum msg_ack_type ack_type,
- enum mgmt_direction_type direction,
- u16 resp_msg_id)
-{
- struct hinic_api_cmd_chain *chain;
- u64 header;
- u16 msg_id;
-
- msg_id = SYNC_MSG_ID(pf_to_mgmt);
-
- if (direction == MGMT_RESP) {
- header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
- direction, cmd, resp_msg_id);
- } else {
- SYNC_MSG_ID_INC(pf_to_mgmt);
- header = prepare_header(pf_to_mgmt, data_len, mod, ack_type,
- direction, cmd, msg_id);
- }
-
- prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len);
-
- chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU];
- return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT,
- pf_to_mgmt->sync_msg_buf,
- mgmt_msg_len(data_len));
-}
-
-/**
- * msg_to_mgmt_sync - send sync message to mgmt
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @buf_out: response
- * @out_size: response length
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to respond to
- *
- * Return 0 - Success, negative - Failure
- **/
-static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *buf_in, u16 in_size,
- u8 *buf_out, u16 *out_size,
- enum mgmt_direction_type direction,
- u16 resp_msg_id)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_recv_msg *recv_msg;
- struct completion *recv_done;
- u16 msg_id;
- int err;
-
- /* Lock the sync_msg_buf */
- down(&pf_to_mgmt->sync_msg_lock);
-
- recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt;
- recv_done = &recv_msg->recv_done;
-
- if (resp_msg_id == MSG_NOT_RESP)
- msg_id = SYNC_MSG_ID(pf_to_mgmt);
- else
- msg_id = resp_msg_id;
-
- init_completion(recv_done);
-
- err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
- MSG_ACK, direction, resp_msg_id);
- if (err) {
- dev_err(&pdev->dev, "Failed to send sync msg to mgmt\n");
- goto unlock_sync_msg;
- }
-
- if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) {
- dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id);
- err = -ETIMEDOUT;
- goto unlock_sync_msg;
- }
-
- smp_rmb(); /* verify reading after completion */
-
- if (recv_msg->msg_id != msg_id) {
- dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id);
- err = -EFAULT;
- goto unlock_sync_msg;
- }
-
- if ((buf_out) && (recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE)) {
- memcpy(buf_out, recv_msg->msg, recv_msg->msg_len);
- *out_size = recv_msg->msg_len;
- }
-
-unlock_sync_msg:
- up(&pf_to_mgmt->sync_msg_lock);
- return err;
-}
-
-/**
- * msg_to_mgmt_async - send message to mgmt without response
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @direction: the direction of the original message
- * @resp_msg_id: msg id to respond to
- *
- * Return 0 - Success, negative - Failure
- **/
-static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- u8 *buf_in, u16 in_size,
- enum mgmt_direction_type direction,
- u16 resp_msg_id)
-{
- int err;
-
- /* Lock the sync_msg_buf */
- down(&pf_to_mgmt->sync_msg_lock);
-
- err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size,
- MSG_NO_ACK, direction, resp_msg_id);
-
- up(&pf_to_mgmt->sync_msg_lock);
- return err;
-}
-
-/**
- * hinic_msg_to_mgmt - send message to mgmt
- * @pf_to_mgmt: PF to MGMT channel
- * @mod: module in the chip that will get the message
- * @cmd: command of the message
- * @buf_in: the msg data
- * @in_size: the msg data length
- * @buf_out: response
- * @out_size: returned response length
- * @sync: sync msg or async msg
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
- enum hinic_mod_type mod, u8 cmd,
- void *buf_in, u16 in_size, void *buf_out, u16 *out_size,
- enum hinic_mgmt_msg_type sync)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- if (sync != HINIC_MGMT_MSG_SYNC) {
- dev_err(&pdev->dev, "Invalid MGMT msg type\n");
- return -EINVAL;
- }
-
- if (!MSG_SZ_IS_VALID(in_size)) {
- dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n");
- return -EINVAL;
- }
-
- return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size,
- buf_out, out_size, MGMT_DIRECT_SEND,
- MSG_NOT_RESP);
-}
-
-/**
- * mgmt_recv_msg_handler - handler for message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u8 *buf_out = recv_msg->buf_out;
- struct hinic_mgmt_cb *mgmt_cb;
- unsigned long cb_state;
- u16 out_size = 0;
-
- if (recv_msg->mod >= HINIC_MOD_MAX) {
- dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
- recv_msg->mod);
- return;
- }
-
- mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod];
-
- cb_state = cmpxchg(&mgmt_cb->state,
- HINIC_MGMT_CB_ENABLED,
- HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);
-
- if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb))
- mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd,
- recv_msg->msg, recv_msg->msg_len,
- buf_out, &out_size);
- else
- dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n",
- recv_msg->mod);
-
- mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
-
- if (!recv_msg->async_mgmt_to_pf)
- /* MGMT sent sync msg, send the response */
- msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd,
- buf_out, out_size, MGMT_RESP,
- recv_msg->msg_id);
-}
-
-/**
- * mgmt_resp_msg_handler - handler for a response message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- wmb(); /* verify writing all, before reading */
-
- complete(&recv_msg->recv_done);
-}
-
-/**
- * recv_mgmt_msg_handler - handler for a message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @header: the header of the message
- * @recv_msg: received message details
- **/
-static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- u64 *header, struct hinic_recv_msg *recv_msg)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int seq_id, seg_len;
- u8 *msg_body;
-
- seq_id = HINIC_MSG_HEADER_GET(*header, SEQID);
- seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN);
-
- if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) {
- dev_err(&pdev->dev, "recv big mgmt msg\n");
- return;
- }
-
- msg_body = (u8 *)header + sizeof(*header);
- memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len);
-
- if (!HINIC_MSG_HEADER_GET(*header, LAST))
- return;
-
- recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD);
- recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE);
- recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header,
- ASYNC_MGMT_TO_PF);
- recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN);
- recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID);
-
- if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP)
- mgmt_resp_msg_handler(pf_to_mgmt, recv_msg);
- else
- mgmt_recv_msg_handler(pf_to_mgmt, recv_msg);
-}
-
-/**
- * mgmt_msg_aeqe_handler - handler for a mgmt message event
- * @handle: PF to MGMT channel
- * @data: the header of the message
- * @size: unused
- **/
-static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size)
-{
- struct hinic_pf_to_mgmt *pf_to_mgmt = handle;
- struct hinic_recv_msg *recv_msg;
- u64 *header = (u64 *)data;
-
- recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) ==
- MGMT_DIRECT_SEND ?
- &pf_to_mgmt->recv_msg_from_mgmt :
- &pf_to_mgmt->recv_resp_msg_from_mgmt;
-
- recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg);
-}
-
-/**
- * alloc_recv_msg - allocate receive message memory
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: pointer that will hold the allocated data
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!recv_msg->msg)
- return -ENOMEM;
-
- recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!recv_msg->buf_out)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt)
-{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- err = alloc_recv_msg(pf_to_mgmt,
- &pf_to_mgmt->recv_msg_from_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate recv msg\n");
- return err;
- }
-
- err = alloc_recv_msg(pf_to_mgmt,
- &pf_to_mgmt->recv_resp_msg_from_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate resp recv msg\n");
- return err;
- }
-
- pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev,
- MAX_PF_MGMT_BUF_SIZE,
- GFP_KERNEL);
- if (!pf_to_mgmt->sync_msg_buf)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * hinic_pf_to_mgmt_init - initialize PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- * @hwif: HW interface the PF to MGMT will use for accessing HW
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_hwif *hwif)
-{
- struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
- struct pci_dev *pdev = hwif->pdev;
- int err;
-
- pf_to_mgmt->hwif = hwif;
-
- sema_init(&pf_to_mgmt->sync_msg_lock, 1);
- pf_to_mgmt->sync_msg_id = 0;
-
- err = alloc_msg_buf(pf_to_mgmt);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
- return err;
- }
-
- err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
- return err;
- }
-
- hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU,
- pf_to_mgmt,
- mgmt_msg_aeqe_handler);
- return 0;
-}
-
-/**
- * hinic_pf_to_mgmt_free - free PF to MGMT channel
- * @pf_to_mgmt: PF to MGMT channel
- **/
-void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
-{
- struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt);
- struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
-
- hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
- hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
-}
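A note on the mgmt_msg_len() arithmetic removed above, since the constants interact: the total (8 reserved bytes + the 8-byte header + data) is padded up to MGMT_MSG_LEN_MIN, and anything past the minimum is rounded up in MGMT_MSG_LEN_STEP increments. A stand-alone sketch of just that rule (ALIGN_UP is a local helper here, not the kernel's ALIGN):

#include <stdio.h>

#define RSVD		8	/* MGMT_MSG_RSVD_FOR_DEV */
#define HDR		8	/* sizeof(u64) message header */
#define LEN_MIN		20	/* MGMT_MSG_LEN_MIN */
#define LEN_STEP	16	/* MGMT_MSG_LEN_STEP */

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

static unsigned int mgmt_msg_len(unsigned int data_len)
{
	unsigned int len = RSVD + HDR + data_len;

	if (len <= LEN_MIN)
		return LEN_MIN;

	return LEN_MIN + ALIGN_UP(len - LEN_MIN, LEN_STEP);
}

int main(void)
{
	/* 2 data bytes -> 18 total -> padded to 20;
	 * 10 data bytes -> 26 total -> 20 + ALIGN_UP(6, 16) = 36
	 */
	printf("%u %u\n", mgmt_msg_len(2), mgmt_msg_len(10));
	return 0;
}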
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
deleted file mode 100644
index cb23962..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ /dev/null
@@ -1,907 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-#include <linux/errno.h>
-#include <linux/sizes.h>
-#include <linux/atomic.h>
-#include <linux/skbuff.h>
-#include <linux/io.h>
-#include <asm/barrier.h>
-#include <asm/byteorder.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp_ctxt.h"
-#include "hinic_hw_qp.h"
-#include "hinic_hw_io.h"
-
-#define SQ_DB_OFF SZ_2K
-
-/* The number of cache lines to prefetch until the threshold state */
-#define WQ_PREFETCH_MAX 2
-/* The number of cache lines to prefetch after the threshold state */
-#define WQ_PREFETCH_MIN 1
-/* Threshold state */
-#define WQ_PREFETCH_THRESHOLD 256
-
-/* sizes of the SQ/RQ ctxt */
-#define Q_CTXT_SIZE 48
-#define CTXT_RSVD 240
-
-#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
- (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)
-
-#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
- (((max_rqs) + (max_sqs)) * CTXT_RSVD + \
- (max_sqs + (q_id)) * Q_CTXT_SIZE)
-
-#define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4)
-#define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3)
-#define SECT_SIZE_FROM_8BYTES(size) ((size) << 3)
-
-#define SQ_DB_PI_HI_SHIFT 8
-#define SQ_DB_PI_HI(prod_idx) ((prod_idx) >> SQ_DB_PI_HI_SHIFT)
-
-#define SQ_DB_PI_LOW_MASK 0xFF
-#define SQ_DB_PI_LOW(prod_idx) ((prod_idx) & SQ_DB_PI_LOW_MASK)
-
-#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))
-
-#define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
-#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
-
-#define TX_MAX_MSS_DEFAULT 0x3E00
-
-enum sq_wqe_type {
- SQ_NORMAL_WQE = 0,
-};
-
-enum rq_completion_fmt {
- RQ_COMPLETE_SGE = 1
-};
-
-void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
- enum hinic_qp_ctxt_type ctxt_type,
- u16 num_queues, u16 max_queues)
-{
- u16 max_sqs = max_queues;
- u16 max_rqs = max_queues;
-
- qp_ctxt_hdr->num_queues = num_queues;
- qp_ctxt_hdr->queue_type = ctxt_type;
-
- if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ)
- qp_ctxt_hdr->addr_offset = SQ_CTXT_OFFSET(max_sqs, max_rqs, 0);
- else
- qp_ctxt_hdr->addr_offset = RQ_CTXT_OFFSET(max_sqs, max_rqs, 0);
-
- qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset);
-
- hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr));
-}
-
-void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
- struct hinic_sq *sq, u16 global_qid)
-{
- u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
- u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
- u16 pi_start, ci_start;
- struct hinic_wq *wq;
-
- wq = sq->wq;
- ci_start = atomic_read(&wq->cons_idx);
- pi_start = atomic_read(&wq->prod_idx);
-
- /* Read the first page paddr from the WQ page paddr ptrs */
- wq_page_addr = be64_to_cpu(*wq->block_vaddr);
-
- wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
- wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
- wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
-
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
- wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
- wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
-
- sq_ctxt->ceq_attr = HINIC_SQ_CTXT_CEQ_ATTR_SET(global_qid,
- GLOBAL_SQ_ID) |
- HINIC_SQ_CTXT_CEQ_ATTR_SET(0, EN);
-
- sq_ctxt->ci_wrapped = HINIC_SQ_CTXT_CI_SET(ci_start, IDX) |
- HINIC_SQ_CTXT_CI_SET(1, WRAPPED);
-
- sq_ctxt->wq_hi_pfn_pi =
- HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
- HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI);
-
- sq_ctxt->wq_lo_pfn = wq_page_pfn_lo;
-
- sq_ctxt->pref_cache =
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
- HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
-
- sq_ctxt->pref_wrapped = 1;
-
- sq_ctxt->pref_wq_hi_pfn_ci =
- HINIC_SQ_CTXT_PREF_SET(ci_start, CI) |
- HINIC_SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN);
-
- sq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;
-
- sq_ctxt->wq_block_hi_pfn =
- HINIC_SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);
-
- sq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;
-
- hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt));
-}
-
-void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt,
- struct hinic_rq *rq, u16 global_qid)
-{
- u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo;
- u64 wq_page_addr, wq_page_pfn, wq_block_pfn;
- u16 pi_start, ci_start;
- struct hinic_wq *wq;
-
- wq = rq->wq;
- ci_start = atomic_read(&wq->cons_idx);
- pi_start = atomic_read(&wq->prod_idx);
-
- /* Read the first page paddr from the WQ page paddr ptrs */
- wq_page_addr = be64_to_cpu(*wq->block_vaddr);
-
- wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);
- wq_page_pfn_hi = upper_32_bits(wq_page_pfn);
- wq_page_pfn_lo = lower_32_bits(wq_page_pfn);
-
- wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
- wq_block_pfn_hi = upper_32_bits(wq_block_pfn);
- wq_block_pfn_lo = lower_32_bits(wq_block_pfn);
-
- rq_ctxt->ceq_attr = HINIC_RQ_CTXT_CEQ_ATTR_SET(0, EN) |
- HINIC_RQ_CTXT_CEQ_ATTR_SET(1, WRAPPED);
-
- rq_ctxt->pi_intr_attr = HINIC_RQ_CTXT_PI_SET(pi_start, IDX) |
- HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR);
-
- rq_ctxt->wq_hi_pfn_ci = HINIC_RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi,
- HI_PFN) |
- HINIC_RQ_CTXT_WQ_PAGE_SET(ci_start, CI);
-
- rq_ctxt->wq_lo_pfn = wq_page_pfn_lo;
-
- rq_ctxt->pref_cache =
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) |
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) |
- HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD);
-
- rq_ctxt->pref_wrapped = 1;
-
- rq_ctxt->pref_wq_hi_pfn_ci =
- HINIC_RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN) |
- HINIC_RQ_CTXT_PREF_SET(ci_start, CI);
-
- rq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo;
-
- rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr);
- rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr);
-
- rq_ctxt->wq_block_hi_pfn =
- HINIC_RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN);
-
- rq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo;
-
- hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt));
-}
-
-/**
- * alloc_sq_skb_arr - allocate sq array for saved skb
- * @sq: HW Send Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_sq_skb_arr(struct hinic_sq *sq)
-{
- struct hinic_wq *wq = sq->wq;
- size_t skb_arr_size;
-
- skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);
- sq->saved_skb = vzalloc(skb_arr_size);
- if (!sq->saved_skb)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * free_sq_skb_arr - free sq array for saved skb
- * @sq: HW Send Queue
- **/
-static void free_sq_skb_arr(struct hinic_sq *sq)
-{
- vfree(sq->saved_skb);
-}
-
-/**
- * alloc_rq_skb_arr - allocate rq array for saved skb
- * @rq: HW Receive Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_rq_skb_arr(struct hinic_rq *rq)
-{
- struct hinic_wq *wq = rq->wq;
- size_t skb_arr_size;
-
- skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
- rq->saved_skb = vzalloc(skb_arr_size);
- if (!rq->saved_skb)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * free_rq_skb_arr - free rq array for saved skb
- * @rq: HW Receive Queue
- **/
-static void free_rq_skb_arr(struct hinic_rq *rq)
-{
- vfree(rq->saved_skb);
-}
-
-/**
- * hinic_init_sq - Initialize HW Send Queue
- * @sq: HW Send Queue
- * @hwif: HW Interface for accessing HW
- * @wq: Work Queue for the data of the SQ
- * @entry: msix entry for sq
- * @ci_addr: address for reading the current HW consumer index
- * @ci_dma_addr: dma address for reading the current HW consumer index
- * @db_base: doorbell base address
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry,
- void *ci_addr, dma_addr_t ci_dma_addr,
- void __iomem *db_base)
-{
- sq->hwif = hwif;
-
- sq->wq = wq;
-
- sq->irq = entry->vector;
- sq->msix_entry = entry->entry;
-
- sq->hw_ci_addr = ci_addr;
- sq->hw_ci_dma_addr = ci_dma_addr;
-
- sq->db_base = db_base + SQ_DB_OFF;
-
- return alloc_sq_skb_arr(sq);
-}
-
-/**
- * hinic_clean_sq - Clean HW Send Queue's Resources
- * @sq: Send Queue
- **/
-void hinic_clean_sq(struct hinic_sq *sq)
-{
- free_sq_skb_arr(sq);
-}
-
-/**
- * alloc_rq_cqe - allocate rq completion queue elements
- * @rq: HW Receive Queue
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_rq_cqe(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t cqe_dma_size, cqe_size;
- struct hinic_wq *wq = rq->wq;
- int j, i;
-
- cqe_size = wq->q_depth * sizeof(*rq->cqe);
- rq->cqe = vzalloc(cqe_size);
- if (!rq->cqe)
- return -ENOMEM;
-
- cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
- rq->cqe_dma = vzalloc(cqe_dma_size);
- if (!rq->cqe_dma)
- goto err_cqe_dma_arr_alloc;
-
- for (i = 0; i < wq->q_depth; i++) {
- rq->cqe[i] = dma_zalloc_coherent(&pdev->dev,
- sizeof(*rq->cqe[i]),
- &rq->cqe_dma[i], GFP_KERNEL);
- if (!rq->cqe[i])
- goto err_cqe_alloc;
- }
-
- return 0;
-
-err_cqe_alloc:
- for (j = 0; j < i; j++)
- dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],
- rq->cqe_dma[j]);
-
- vfree(rq->cqe_dma);
-
-err_cqe_dma_arr_alloc:
- vfree(rq->cqe);
- return -ENOMEM;
-}
-
-/**
- * free_rq_cqe - free rq completion queue elements
- * @rq: HW Receive Queue
- **/
-static void free_rq_cqe(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- struct hinic_wq *wq = rq->wq;
- int i;
-
- for (i = 0; i < wq->q_depth; i++)
- dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],
- rq->cqe_dma[i]);
-
- vfree(rq->cqe_dma);
- vfree(rq->cqe);
-}
-
-/**
- * hinic_init_rq - Initialize HW Receive Queue
- * @rq: HW Receive Queue
- * @hwif: HW Interface for accessing HW
- * @wq: Work Queue for the data of the RQ
- * @entry: msix entry for rq
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry)
-{
- struct pci_dev *pdev = hwif->pdev;
- size_t pi_size;
- int err;
-
- rq->hwif = hwif;
-
- rq->wq = wq;
-
- rq->irq = entry->vector;
- rq->msix_entry = entry->entry;
-
- rq->buf_sz = HINIC_RX_BUF_SZ;
-
- err = alloc_rq_skb_arr(rq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate rq priv data\n");
- return err;
- }
-
- err = alloc_rq_cqe(rq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate rq cqe\n");
- goto err_alloc_rq_cqe;
- }
-
- /* HW requirements: Must be at least 32 bit */
- pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
- rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size,
- &rq->pi_dma_addr, GFP_KERNEL);
- if (!rq->pi_virt_addr) {
- dev_err(&pdev->dev, "Failed to allocate PI address\n");
- err = -ENOMEM;
- goto err_pi_virt;
- }
-
- return 0;
-
-err_pi_virt:
- free_rq_cqe(rq);
-
-err_alloc_rq_cqe:
- free_rq_skb_arr(rq);
- return err;
-}
-
-/**
- * hinic_clean_rq - Clean HW Receive Queue's Resources
- * @rq: HW Receive Queue
- **/
-void hinic_clean_rq(struct hinic_rq *rq)
-{
- struct hinic_hwif *hwif = rq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t pi_size;
-
- pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32));
- dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr,
- rq->pi_dma_addr);
-
- free_rq_cqe(rq);
- free_rq_skb_arr(rq);
-}
-
-/**
- * hinic_get_sq_free_wqebbs - return number of free wqebbs for use
- * @sq: send queue
- *
- * Return number of free wqebbs
- **/
-int hinic_get_sq_free_wqebbs(struct hinic_sq *sq)
-{
- struct hinic_wq *wq = sq->wq;
-
- return atomic_read(&wq->delta) - 1;
-}
-
-/**
- * hinic_get_rq_free_wqebbs - return number of free wqebbs for use
- * @rq: recv queue
- *
- * Return number of free wqebbs
- **/
-int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
-{
- struct hinic_wq *wq = rq->wq;
-
- return atomic_read(&wq->delta) - 1;
-}
-
-static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
- int nr_descs)
-{
- u32 ctrl_size, task_size, bufdesc_size;
-
- ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl));
- task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task));
- bufdesc_size = nr_descs * sizeof(struct hinic_sq_bufdesc);
- bufdesc_size = SIZE_8BYTES(bufdesc_size);
-
- ctrl->ctrl_info = HINIC_SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) |
- HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN) |
- HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
- HINIC_SQ_CTRL_SET(ctrl_size, LEN);
-
- ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT,
- QUEUE_INFO_MSS);
-}
-
-static void sq_prepare_task(struct hinic_sq_task *task)
-{
- task->pkt_info0 =
- HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
- INNER_L3TYPE) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE,
- VLAN_OFFLOAD) |
- HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG);
-
- task->pkt_info1 =
- HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) |
- HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) |
- HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN);
-
- task->pkt_info2 =
- HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) |
- HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) |
- HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN,
- TUNNEL_L4TYPE) |
- HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
- OUTER_L3TYPE);
-
- task->ufo_v6_identify = 0;
-
- task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE);
-
- task->zero_pad = 0;
-}
-
-/**
- * hinic_sq_prepare_wqe - prepare wqe before inserting it into the queue
- * @sq: send queue
- * @prod_idx: pi value
- * @sq_wqe: wqe to prepare
- * @sges: sges holding the send buf addresses for use by the wqe
- * @nr_sges: number of sges
- **/
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
-{
- int i;
-
- sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges);
-
- sq_prepare_task(&sq_wqe->task);
-
- for (i = 0; i < nr_sges; i++)
- sq_wqe->buf_descs[i].sge = sges[i];
-}
-
-/**
- * sq_prepare_db - prepare doorbell to write
- * @sq: send queue
- * @prod_idx: pi value for the doorbell
- * @cos: cos of the doorbell
- *
- * Return db value
- **/
-static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos)
-{
- struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
- u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx));
-
- /* Data should be written to HW in Big Endian Format */
- return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) |
- HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) |
- HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) |
- HINIC_SQ_DB_INFO_SET(cos, COS) |
- HINIC_SQ_DB_INFO_SET(qp->q_id, QID));
-}
-
-/**
- * hinic_sq_write_db - write doorbell
- * @sq: send queue
- * @prod_idx: pi value for the doorbell
- * @wqe_size: wqe size
- * @cos: cos of the wqe
- **/
-void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
- unsigned int cos)
-{
- struct hinic_wq *wq = sq->wq;
-
- /* increment prod_idx to the next */
- prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
-
- wmb(); /* Write all before the doorbell */
-
- writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx));
-}
-
-/**
- * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi
- * @sq: sq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
- unsigned int wqe_size, u16 *prod_idx)
-{
- struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size,
- prod_idx);
-
- if (IS_ERR(hw_wqe))
- return NULL;
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_write_wqe - write the wqe to the sq
- * @sq: send queue
- * @prod_idx: pi of the wqe
- * @sq_wqe: the wqe to write
- * @skb: skb to save
- * @wqe_size: the size of the wqe
- **/
-void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe,
- struct sk_buff *skb, unsigned int wqe_size)
-{
- struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe;
-
- sq->saved_skb[prod_idx] = skb;
-
- /* The data in the HW should be in Big Endian Format */
- hinic_cpu_to_be32(sq_wqe, wqe_size);
-
- hinic_write_wqe(sq->wq, hw_wqe, wqe_size);
-}
-
-/**
- * hinic_sq_read_wqebb - read wqe ptr in the current ci and update the ci; the
- * wqe only has one wqebb
- * @sq: send queue
- * @skb: return skb that was saved
- * @wqe_size: the wqe size ptr
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
- struct hinic_sq_wqe *sq_wqe;
- struct hinic_sq_ctrl *ctrl;
- unsigned int buf_sect_len;
- u32 ctrl_info;
-
- /* read the ctrl section for getting wqe size */
- hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx);
- if (IS_ERR(hw_wqe))
- return NULL;
-
- *skb = sq->saved_skb[*cons_idx];
-
- sq_wqe = &hw_wqe->sq_wqe;
- ctrl = &sq_wqe->ctrl;
- ctrl_info = be32_to_cpu(ctrl->ctrl_info);
- buf_sect_len = HINIC_SQ_CTRL_GET(ctrl_info, BUFDESC_SECT_LEN);
-
- *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task);
- *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len);
- *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size);
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci
- * @sq: send queue
- * @skb: return skb that was saved
- * @wqe_size: the size of the wqe
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int wqe_size, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
-
- hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx);
- *skb = sq->saved_skb[*cons_idx];
-
- return &hw_wqe->sq_wqe;
-}
-
-/**
- * hinic_sq_put_wqe - release the ci for new wqes
- * @sq: send queue
- * @wqe_size: the size of the wqe
- **/
-void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size)
-{
- hinic_put_wqe(sq->wq, wqe_size);
-}
-
-/**
- * hinic_sq_get_sges - get sges from the wqe
- * @sq_wqe: wqe whose buffer addresses supply the sges
- * @sges: returned sges
- * @nr_sges: number of sges to return
- **/
-void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
-{
- int i;
-
- for (i = 0; i < nr_sges && i < HINIC_MAX_SQ_BUFDESCS; i++) {
- sges[i] = sq_wqe->buf_descs[i].sge;
- hinic_be32_to_cpu(&sges[i], sizeof(sges[i]));
- }
-}
-
-/**
- * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi
- * @rq: rq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
- unsigned int wqe_size, u16 *prod_idx)
-{
- struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size,
- prod_idx);
-
- if (IS_ERR(hw_wqe))
- return NULL;
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_rq_write_wqe - write the wqe to the rq
- * @rq: recv queue
- * @prod_idx: pi of the wqe
- * @rq_wqe: the wqe to write
- * @skb: skb to save
- **/
-void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb)
-{
- struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe;
-
- rq->saved_skb[prod_idx] = skb;
-
- /* The data in the HW should be in Big Endian Format */
- hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe));
-
- hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe));
-}
-
-/**
- * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci
- * @rq: recv queue
- * @wqe_size: the size of the wqe
- * @skb: return saved skb
- * @cons_idx: consumer index of the wqe
- *
- * Return wqe in ci position
- **/
-struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb, u16 *cons_idx)
-{
- struct hinic_hw_wqe *hw_wqe;
- struct hinic_rq_cqe *cqe;
- int rx_done;
- u32 status;
-
- hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx);
- if (IS_ERR(hw_wqe))
- return NULL;
-
- cqe = rq->cqe[*cons_idx];
-
- status = be32_to_cpu(cqe->status);
-
- rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE);
- if (!rx_done)
- return NULL;
-
- *skb = rq->saved_skb[*cons_idx];
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position
- * @rq: recv queue
- * @wqe_size: the size of the wqe
- * @skb: return saved skb
- * @cons_idx: consumer index in the wq
- *
- * Return wqe in incremented ci position
- **/
-struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb,
- u16 *cons_idx)
-{
- struct hinic_wq *wq = rq->wq;
- struct hinic_hw_wqe *hw_wqe;
- unsigned int num_wqebbs;
-
- wqe_size = ALIGN(wqe_size, wq->wqebb_size);
- num_wqebbs = wqe_size / wq->wqebb_size;
-
- *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs);
-
- *skb = rq->saved_skb[*cons_idx];
-
- hw_wqe = hinic_read_wqe_direct(wq, *cons_idx);
-
- return &hw_wqe->rq_wqe;
-}
-
-/**
- * hinic_rq_put_wqe - release the ci for new wqes
- * @rq: recv queue
- * @cons_idx: consumer index of the wqe
- * @wqe_size: the size of the wqe
- **/
-void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
- unsigned int wqe_size)
-{
- struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
- u32 status = be32_to_cpu(cqe->status);
-
- status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE);
-
-	/* Rx WQE size is 1 WQEBB, no wq shadow */
- cqe->status = cpu_to_be32(status);
-
- wmb(); /* clear done flag */
-
- hinic_put_wqe(rq->wq, wqe_size);
-}
-
-/**
- * hinic_rq_get_sge - get sge from the wqe
- * @rq: recv queue
- * @rq_wqe: wqe whose buf address supplies the sge
- * @cons_idx: consumer index
- * @sge: returned sge
- **/
-void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe,
- u16 cons_idx, struct hinic_sge *sge)
-{
- struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
- u32 len = be32_to_cpu(cqe->len);
-
- sge->hi_addr = be32_to_cpu(rq_wqe->buf_desc.hi_addr);
- sge->lo_addr = be32_to_cpu(rq_wqe->buf_desc.lo_addr);
- sge->len = HINIC_RQ_CQE_SGE_GET(len, LEN);
-}
-
-/**
- * hinic_rq_prepare_wqe - prepare wqe before inserting it into the queue
- * @rq: recv queue
- * @prod_idx: pi value
- * @rq_wqe: the wqe
- * @sge: sge holding the recv buf address for use by the wqe
- **/
-void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge)
-{
- struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect;
- struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc;
- struct hinic_rq_cqe *cqe = rq->cqe[prod_idx];
- struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl;
- dma_addr_t cqe_dma = rq->cqe_dma[prod_idx];
-
- ctrl->ctrl_info =
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) |
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)),
- COMPLETE_LEN) |
- HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)),
- BUFDESC_SECT_LEN) |
- HINIC_RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT);
-
- hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe));
-
- buf_desc->hi_addr = sge->hi_addr;
- buf_desc->lo_addr = sge->lo_addr;
-}
-
-/**
- * hinic_rq_update - update pi of the rq
- * @rq: recv queue
- * @prod_idx: pi value
- **/
-void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx)
-{
- *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1));
-}
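
A note on the doorbell path removed above: hinic_sq_write_db() splits the
16-bit masked producer index in two, with the low byte (SQ_DB_PI_LOW)
selecting the doorbell address and the high byte (SQ_DB_PI_HI) packed into
the doorbell payload by sq_prepare_db(). A minimal standalone C sketch of
that split, using a hypothetical PI value purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define SQ_DB_PI_HI_SHIFT	8
#define SQ_DB_PI_LOW_MASK	0xFF

int main(void)
{
	uint16_t prod_idx = 0x1234;	/* hypothetical masked PI */

	/* low byte selects the doorbell address offset */
	uint8_t low = prod_idx & SQ_DB_PI_LOW_MASK;	/* 0x34 */
	/* high byte travels inside the doorbell payload */
	uint8_t hi = prod_idx >> SQ_DB_PI_HI_SHIFT;	/* 0x12 */

	printf("pi=%#x -> hi=%#x low=%#x\n", prod_idx, hi, low);
	return 0;
}
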
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
deleted file mode 100644
index 6c84f83..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_QP_H
-#define HINIC_HW_QP_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/sizes.h>
-#include <linux/pci.h>
-#include <linux/skbuff.h>
-
-#include "hinic_common.h"
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_qp_ctxt.h"
-
-#define HINIC_SQ_DB_INFO_PI_HI_SHIFT 0
-#define HINIC_SQ_DB_INFO_QID_SHIFT 8
-#define HINIC_SQ_DB_INFO_PATH_SHIFT 23
-#define HINIC_SQ_DB_INFO_COS_SHIFT 24
-#define HINIC_SQ_DB_INFO_TYPE_SHIFT 27
-
-#define HINIC_SQ_DB_INFO_PI_HI_MASK 0xFF
-#define HINIC_SQ_DB_INFO_QID_MASK 0x3FF
-#define HINIC_SQ_DB_INFO_PATH_MASK 0x1
-#define HINIC_SQ_DB_INFO_COS_MASK 0x7
-#define HINIC_SQ_DB_INFO_TYPE_MASK 0x1F
-
-#define HINIC_SQ_DB_INFO_SET(val, member) \
- (((u32)(val) & HINIC_SQ_DB_INFO_##member##_MASK) \
- << HINIC_SQ_DB_INFO_##member##_SHIFT)
-
-#define HINIC_SQ_WQEBB_SIZE 64
-#define HINIC_RQ_WQEBB_SIZE 32
-
-#define HINIC_SQ_PAGE_SIZE SZ_4K
-#define HINIC_RQ_PAGE_SIZE SZ_4K
-
-#define HINIC_SQ_DEPTH SZ_4K
-#define HINIC_RQ_DEPTH SZ_4K
-
-/* In any change to HINIC_RX_BUF_SZ, HINIC_RX_BUF_SZ_IDX must be changed */
-#define HINIC_RX_BUF_SZ 2048
-#define HINIC_RX_BUF_SZ_IDX HINIC_RX_BUF_SZ_2048_IDX
-
-#define HINIC_MIN_TX_WQE_SIZE(wq) \
- ALIGN(HINIC_SQ_WQE_SIZE(1), (wq)->wqebb_size)
-
-#define HINIC_MIN_TX_NUM_WQEBBS(sq) \
- (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
-
-enum hinic_rx_buf_sz_idx {
- HINIC_RX_BUF_SZ_32_IDX,
- HINIC_RX_BUF_SZ_64_IDX,
- HINIC_RX_BUF_SZ_96_IDX,
- HINIC_RX_BUF_SZ_128_IDX,
- HINIC_RX_BUF_SZ_192_IDX,
- HINIC_RX_BUF_SZ_256_IDX,
- HINIC_RX_BUF_SZ_384_IDX,
- HINIC_RX_BUF_SZ_512_IDX,
- HINIC_RX_BUF_SZ_768_IDX,
- HINIC_RX_BUF_SZ_1024_IDX,
- HINIC_RX_BUF_SZ_1536_IDX,
- HINIC_RX_BUF_SZ_2048_IDX,
- HINIC_RX_BUF_SZ_3072_IDX,
- HINIC_RX_BUF_SZ_4096_IDX,
- HINIC_RX_BUF_SZ_8192_IDX,
- HINIC_RX_BUF_SZ_16384_IDX,
-};
-
-struct hinic_sq {
- struct hinic_hwif *hwif;
-
- struct hinic_wq *wq;
-
- u32 irq;
- u16 msix_entry;
-
- void *hw_ci_addr;
- dma_addr_t hw_ci_dma_addr;
-
- void __iomem *db_base;
-
- struct sk_buff **saved_skb;
-};
-
-struct hinic_rq {
- struct hinic_hwif *hwif;
-
- struct hinic_wq *wq;
-
- u32 irq;
- u16 msix_entry;
-
- size_t buf_sz;
-
- struct sk_buff **saved_skb;
-
- struct hinic_rq_cqe **cqe;
- dma_addr_t *cqe_dma;
-
- u16 *pi_virt_addr;
- dma_addr_t pi_dma_addr;
-};
-
-struct hinic_qp {
- struct hinic_sq sq;
- struct hinic_rq rq;
-
- u16 q_id;
-};
-
-void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr,
- enum hinic_qp_ctxt_type ctxt_type,
- u16 num_queues, u16 max_queues);
-
-void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt,
- struct hinic_sq *sq, u16 global_qid);
-
-void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt,
- struct hinic_rq *rq, u16 global_qid);
-
-int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr,
- dma_addr_t ci_dma_addr, void __iomem *db_base);
-
-void hinic_clean_sq(struct hinic_sq *sq);
-
-int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif,
- struct hinic_wq *wq, struct msix_entry *entry);
-
-void hinic_clean_rq(struct hinic_rq *rq);
-
-int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
-
-int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
-
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
-
-void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
- unsigned int cos);
-
-struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
- unsigned int wqe_size, u16 *prod_idx);
-
-void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct sk_buff *skb,
- unsigned int wqe_size);
-
-struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int wqe_size, u16 *cons_idx);
-
-struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
- struct sk_buff **skb,
- unsigned int *wqe_size, u16 *cons_idx);
-
-void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size);
-
-void hinic_sq_get_sges(struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
-
-struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq,
- unsigned int wqe_size, u16 *prod_idx);
-
-void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *wqe, struct sk_buff *skb);
-
-struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb, u16 *cons_idx);
-
-struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq,
- unsigned int wqe_size,
- struct sk_buff **skb,
- u16 *cons_idx);
-
-void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx,
- unsigned int wqe_size);
-
-void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe,
- u16 cons_idx, struct hinic_sge *sge);
-
-void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx,
- struct hinic_rq_wqe *wqe, struct hinic_sge *sge);
-
-void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx);
-
-#endif
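
The HINIC_SQ_DB_INFO_* shift/mask pairs in the header above follow the
token-pasting SET pattern used throughout the driver: mask the value, then
shift it into its field. A small self-contained sketch of the same pattern,
with made-up field names and positions:

#include <stdint.h>
#include <stdio.h>

#define DB_INFO_QID_SHIFT	8
#define DB_INFO_QID_MASK	0x3FF
#define DB_INFO_COS_SHIFT	24
#define DB_INFO_COS_MASK	0x7

/* same shape as HINIC_SQ_DB_INFO_SET(val, member) */
#define DB_INFO_SET(val, member) \
	(((uint32_t)(val) & DB_INFO_##member##_MASK) << \
	 DB_INFO_##member##_SHIFT)

int main(void)
{
	/* queue id 5 into bits 8..17, cos 3 into bits 24..26 */
	uint32_t db = DB_INFO_SET(5, QID) | DB_INFO_SET(3, COS);

	printf("db=%#010x\n", db);	/* prints db=0x03000500 */
	return 0;
}
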
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
deleted file mode 100644
index 376abf0..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_QP_CTXT_H
-#define HINIC_HW_QP_CTXT_H
-
-#include <linux/types.h>
-
-#include "hinic_hw_cmdq.h"
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13
-#define HINIC_SQ_CTXT_CEQ_ATTR_EN_SHIFT 23
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FF
-#define HINIC_SQ_CTXT_CEQ_ATTR_EN_MASK 0x1
-
-#define HINIC_SQ_CTXT_CEQ_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_CEQ_ATTR_##member##_MASK) \
- << HINIC_SQ_CTXT_CEQ_ATTR_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_CI_IDX_SHIFT 11
-#define HINIC_SQ_CTXT_CI_WRAPPED_SHIFT 23
-
-#define HINIC_SQ_CTXT_CI_IDX_MASK 0xFFF
-#define HINIC_SQ_CTXT_CI_WRAPPED_MASK 0x1
-
-#define HINIC_SQ_CTXT_CI_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_CI_##member##_MASK) \
- << HINIC_SQ_CTXT_CI_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
-#define HINIC_SQ_CTXT_WQ_PAGE_PI_SHIFT 20
-
-#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF
-#define HINIC_SQ_CTXT_WQ_PAGE_PI_MASK 0xFFF
-
-#define HINIC_SQ_CTXT_WQ_PAGE_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_WQ_PAGE_##member##_MASK) \
- << HINIC_SQ_CTXT_WQ_PAGE_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
-#define HINIC_SQ_CTXT_PREF_CACHE_MAX_SHIFT 14
-#define HINIC_SQ_CTXT_PREF_CACHE_MIN_SHIFT 25
-
-#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF
-#define HINIC_SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF
-#define HINIC_SQ_CTXT_PREF_CACHE_MIN_MASK 0x7F
-
-#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0
-#define HINIC_SQ_CTXT_PREF_CI_SHIFT 20
-
-#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF
-#define HINIC_SQ_CTXT_PREF_CI_MASK 0xFFF
-
-#define HINIC_SQ_CTXT_PREF_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_PREF_##member##_MASK) \
- << HINIC_SQ_CTXT_PREF_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF
-
-#define HINIC_SQ_CTXT_WQ_BLOCK_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTXT_WQ_BLOCK_##member##_MASK) \
- << HINIC_SQ_CTXT_WQ_BLOCK_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_EN_SHIFT 0
-#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_SHIFT 1
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_EN_MASK 0x1
-#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_MASK 0x1
-
-#define HINIC_RQ_CTXT_CEQ_ATTR_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_CEQ_ATTR_##member##_MASK) \
- << HINIC_RQ_CTXT_CEQ_ATTR_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_PI_IDX_SHIFT 0
-#define HINIC_RQ_CTXT_PI_INTR_SHIFT 22
-
-#define HINIC_RQ_CTXT_PI_IDX_MASK 0xFFF
-#define HINIC_RQ_CTXT_PI_INTR_MASK 0x3FF
-
-#define HINIC_RQ_CTXT_PI_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_PI_##member##_MASK) << \
- HINIC_RQ_CTXT_PI_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0
-#define HINIC_RQ_CTXT_WQ_PAGE_CI_SHIFT 20
-
-#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF
-#define HINIC_RQ_CTXT_WQ_PAGE_CI_MASK 0xFFF
-
-#define HINIC_RQ_CTXT_WQ_PAGE_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_WQ_PAGE_##member##_MASK) << \
- HINIC_RQ_CTXT_WQ_PAGE_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0
-#define HINIC_RQ_CTXT_PREF_CACHE_MAX_SHIFT 14
-#define HINIC_RQ_CTXT_PREF_CACHE_MIN_SHIFT 25
-
-#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF
-#define HINIC_RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF
-#define HINIC_RQ_CTXT_PREF_CACHE_MIN_MASK 0x7F
-
-#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0
-#define HINIC_RQ_CTXT_PREF_CI_SHIFT 20
-
-#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF
-#define HINIC_RQ_CTXT_PREF_CI_MASK 0xFFF
-
-#define HINIC_RQ_CTXT_PREF_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_PREF_##member##_MASK) << \
- HINIC_RQ_CTXT_PREF_##member##_SHIFT)
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF
-
-#define HINIC_RQ_CTXT_WQ_BLOCK_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTXT_WQ_BLOCK_##member##_MASK) << \
- HINIC_RQ_CTXT_WQ_BLOCK_##member##_SHIFT)
-
-#define HINIC_SQ_CTXT_SIZE(num_sqs) (sizeof(struct hinic_qp_ctxt_header) \
- + (num_sqs) * sizeof(struct hinic_sq_ctxt))
-
-#define HINIC_RQ_CTXT_SIZE(num_rqs) (sizeof(struct hinic_qp_ctxt_header) \
- + (num_rqs) * sizeof(struct hinic_rq_ctxt))
-
-#define HINIC_WQ_PAGE_PFN_SHIFT 12
-#define HINIC_WQ_BLOCK_PFN_SHIFT 9
-
-#define HINIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> HINIC_WQ_PAGE_PFN_SHIFT)
-#define HINIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> \
- HINIC_WQ_BLOCK_PFN_SHIFT)
-
-#define HINIC_Q_CTXT_MAX \
- ((HINIC_CMDQ_BUF_SIZE - sizeof(struct hinic_qp_ctxt_header)) \
- / sizeof(struct hinic_sq_ctxt))
-
-enum hinic_qp_ctxt_type {
- HINIC_QP_CTXT_TYPE_SQ,
- HINIC_QP_CTXT_TYPE_RQ
-};
-
-struct hinic_qp_ctxt_header {
- u16 num_queues;
- u16 queue_type;
- u32 addr_offset;
-};
-
-struct hinic_sq_ctxt {
- u32 ceq_attr;
-
- u32 ci_wrapped;
-
- u32 wq_hi_pfn_pi;
- u32 wq_lo_pfn;
-
- u32 pref_cache;
- u32 pref_wrapped;
- u32 pref_wq_hi_pfn_ci;
- u32 pref_wq_lo_pfn;
-
- u32 rsvd0;
- u32 rsvd1;
-
- u32 wq_block_hi_pfn;
- u32 wq_block_lo_pfn;
-};
-
-struct hinic_rq_ctxt {
- u32 ceq_attr;
-
- u32 pi_intr_attr;
-
- u32 wq_hi_pfn_ci;
- u32 wq_lo_pfn;
-
- u32 pref_cache;
- u32 pref_wrapped;
-
- u32 pref_wq_hi_pfn_ci;
- u32 pref_wq_lo_pfn;
-
- u32 pi_paddr_hi;
- u32 pi_paddr_lo;
-
- u32 wq_block_hi_pfn;
- u32 wq_block_lo_pfn;
-};
-
-struct hinic_sq_ctxt_block {
- struct hinic_qp_ctxt_header hdr;
- struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX];
-};
-
-struct hinic_rq_ctxt_block {
- struct hinic_qp_ctxt_header hdr;
- struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX];
-};
-
-#endif
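
The two PFN shifts in the header above encode different granularities:
HINIC_WQ_PAGE_PFN drops the low 12 bits (4K WQ pages), while
HINIC_WQ_BLOCK_PFN drops only 9. A sketch of turning a DMA address into
those PFN forms and splitting it for the 32-bit hi/lo context fields (the
address value is invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define WQ_PAGE_PFN_SHIFT	12	/* 4K pages */
#define WQ_BLOCK_PFN_SHIFT	9

int main(void)
{
	uint64_t page_addr = 0x123456789000ULL;	/* hypothetical DMA addr */
	uint64_t page_pfn = page_addr >> WQ_PAGE_PFN_SHIFT;
	uint64_t block_pfn = page_addr >> WQ_BLOCK_PFN_SHIFT;

	/* the context fields are 32 bits wide, so the pfn is split */
	uint32_t pfn_hi = (uint32_t)(page_pfn >> 32);
	uint32_t pfn_lo = (uint32_t)page_pfn;

	printf("page_pfn=%#llx block_pfn=%#llx hi=%#x lo=%#x\n",
	       (unsigned long long)page_pfn, (unsigned long long)block_pfn,
	       pfn_hi, pfn_lo);
	return 0;
}
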
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
deleted file mode 100644
index 3e3181c08..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ /dev/null
@@ -1,878 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/atomic.h>
-#include <linux/semaphore.h>
-#include <linux/errno.h>
-#include <linux/vmalloc.h>
-#include <linux/err.h>
-#include <asm/byteorder.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-#include "hinic_hw_wq.h"
-#include "hinic_hw_cmdq.h"
-
-#define WQS_BLOCKS_PER_PAGE 4
-
-#define WQ_BLOCK_SIZE 4096
-#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)
-
-#define WQS_MAX_NUM_BLOCKS 128
-#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \
- sizeof((wqs)->free_blocks[0]))
-
-#define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
-
-#define WQ_PAGE_ADDR_SIZE sizeof(u64)
-#define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
-
-#define CMDQ_BLOCK_SIZE 512
-#define CMDQ_PAGE_SIZE 4096
-
-#define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)
-
-#define WQ_BASE_VADDR(wqs, wq) \
- ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define WQ_BASE_PADDR(wqs, wq) \
- ((wqs)->page_paddr[(wq)->page_idx] \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define WQ_BASE_ADDR(wqs, wq) \
- ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
- + (wq)->block_idx * WQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
- ((void *)((cmdq_pages)->page_vaddr) \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
- ((cmdq_pages)->page_paddr \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define CMDQ_BASE_ADDR(cmdq_pages, wq) \
- ((void *)((cmdq_pages)->shadow_page_vaddr) \
- + (wq)->block_idx * CMDQ_BLOCK_SIZE)
-
-#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
- (wq)->wqebb_size)
-
-#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \
- & ((wq)->num_q_pages - 1))
-
-#define WQ_PAGE_ADDR(wq, idx) \
- ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
-
-#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
-
-#define WQE_IN_RANGE(wqe, start, end) \
- (((unsigned long)(wqe) >= (unsigned long)(start)) && \
- ((unsigned long)(wqe) < (unsigned long)(end)))
-
-#define WQE_SHADOW_PAGE(wq, wqe) \
- (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
- / (wq)->max_wqe_size)
-
-/**
- * queue_alloc_page - allocate page for Queue
- * @hwif: HW interface for allocating DMA
- * @vaddr: the page's virtual address is returned here
- * @paddr: the page's physical address is returned here
- * @shadow_vaddr: a VM area for holding WQ page addresses is returned here
- * @page_sz: page size of each WQ page
- *
- * Return 0 - Success, negative - Failure
- **/
-static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
- void ***shadow_vaddr, size_t page_sz)
-{
- struct pci_dev *pdev = hwif->pdev;
- dma_addr_t dma_addr;
-
- *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr,
- GFP_KERNEL);
- if (!*vaddr) {
- dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
- return -ENOMEM;
- }
-
- *paddr = (u64)dma_addr;
-
- /* use vzalloc for big mem */
- *shadow_vaddr = vzalloc(page_sz);
- if (!*shadow_vaddr)
- goto err_shadow_vaddr;
-
- return 0;
-
-err_shadow_vaddr:
- dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr);
- return -ENOMEM;
-}
-
-/**
- * wqs_allocate_page - allocate page for WQ set
- * @wqs: Work Queue Set
- * @page_idx: the index of the page to be allocated
- *
- * Return 0 - Success, negative - Failure
- **/
-static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx)
-{
- return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx],
- &wqs->page_paddr[page_idx],
- &wqs->shadow_page_vaddr[page_idx],
- WQS_PAGE_SIZE);
-}
-
-/**
- * wqs_free_page - free page of WQ set
- * @wqs: Work Queue Set
- * @page_idx: the index of the page to be freed
- **/
-static void wqs_free_page(struct hinic_wqs *wqs, int page_idx)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE,
- wqs->page_vaddr[page_idx],
- (dma_addr_t)wqs->page_paddr[page_idx]);
- vfree(wqs->shadow_page_vaddr[page_idx]);
-}
-
-/**
- * cmdq_allocate_page - allocate page for cmdq
- * @cmdq_pages: the pages of the cmdq queue struct to hold the page
- *
- * Return 0 - Success, negative - Failure
- **/
-static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
-{
- return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr,
- &cmdq_pages->page_paddr,
- &cmdq_pages->shadow_page_vaddr,
- CMDQ_PAGE_SIZE);
-}
-
-/**
- * cmdq_free_page - free page from cmdq
- * @cmdq_pages: the pages of the cmdq queue struct that hold the page
- **/
-static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
-{
- struct hinic_hwif *hwif = cmdq_pages->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE,
- cmdq_pages->page_vaddr,
- (dma_addr_t)cmdq_pages->page_paddr);
- vfree(cmdq_pages->shadow_page_vaddr);
-}
-
-static int alloc_page_arrays(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t size;
-
- size = wqs->num_pages * sizeof(*wqs->page_paddr);
- wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wqs->page_paddr)
- return -ENOMEM;
-
- size = wqs->num_pages * sizeof(*wqs->page_vaddr);
- wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wqs->page_vaddr)
- goto err_page_vaddr;
-
- size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
- wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wqs->shadow_page_vaddr)
- goto err_page_shadow_vaddr;
-
- return 0;
-
-err_page_shadow_vaddr:
- devm_kfree(&pdev->dev, wqs->page_vaddr);
-
-err_page_vaddr:
- devm_kfree(&pdev->dev, wqs->page_paddr);
- return -ENOMEM;
-}
-
-static void free_page_arrays(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- devm_kfree(&pdev->dev, wqs->shadow_page_vaddr);
- devm_kfree(&pdev->dev, wqs->page_vaddr);
- devm_kfree(&pdev->dev, wqs->page_paddr);
-}
-
-static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx,
- int *block_idx)
-{
- int pos;
-
- down(&wqs->alloc_blocks_lock);
-
- wqs->num_free_blks--;
-
- if (wqs->num_free_blks < 0) {
- wqs->num_free_blks++;
- up(&wqs->alloc_blocks_lock);
- return -ENOMEM;
- }
-
- pos = wqs->alloc_blk_pos++;
- pos &= WQS_MAX_NUM_BLOCKS - 1;
-
- *page_idx = wqs->free_blocks[pos].page_idx;
- *block_idx = wqs->free_blocks[pos].block_idx;
-
- wqs->free_blocks[pos].page_idx = -1;
- wqs->free_blocks[pos].block_idx = -1;
-
- up(&wqs->alloc_blocks_lock);
- return 0;
-}
-
-static void wqs_return_block(struct hinic_wqs *wqs, int page_idx,
- int block_idx)
-{
- int pos;
-
- down(&wqs->alloc_blocks_lock);
-
- pos = wqs->return_blk_pos++;
- pos &= WQS_MAX_NUM_BLOCKS - 1;
-
- wqs->free_blocks[pos].page_idx = page_idx;
- wqs->free_blocks[pos].block_idx = block_idx;
-
- wqs->num_free_blks++;
-
- up(&wqs->alloc_blocks_lock);
-}
-
-static void init_wqs_blocks_arr(struct hinic_wqs *wqs)
-{
- int page_idx, blk_idx, pos = 0;
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
- for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
- wqs->free_blocks[pos].page_idx = page_idx;
- wqs->free_blocks[pos].block_idx = blk_idx;
- pos++;
- }
- }
-
- wqs->alloc_blk_pos = 0;
- wqs->return_blk_pos = pos;
- wqs->num_free_blks = pos;
-
- sema_init(&wqs->alloc_blocks_lock, 1);
-}
-
-/**
- * hinic_wqs_alloc - allocate Work Queues set
- * @wqs: Work Queue Set
- * @max_wqs: maximum wqs to allocate
- * @hwif: HW interface for use for the allocation
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs,
- struct hinic_hwif *hwif)
-{
- struct pci_dev *pdev = hwif->pdev;
- int err, i, page_idx;
-
- max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE);
- if (max_wqs > WQS_MAX_NUM_BLOCKS) {
- dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs);
- return -EINVAL;
- }
-
- wqs->hwif = hwif;
- wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE;
-
- if (alloc_page_arrays(wqs)) {
- dev_err(&pdev->dev,
- "Failed to allocate mem for page addresses\n");
- return -ENOMEM;
- }
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
- err = wqs_allocate_page(wqs, page_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed wq page allocation\n");
- goto err_wq_allocate_page;
- }
- }
-
- wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs),
- GFP_KERNEL);
- if (!wqs->free_blocks) {
- err = -ENOMEM;
- goto err_alloc_blocks;
- }
-
- init_wqs_blocks_arr(wqs);
- return 0;
-
-err_alloc_blocks:
-err_wq_allocate_page:
- for (i = 0; i < page_idx; i++)
- wqs_free_page(wqs, i);
-
- free_page_arrays(wqs);
- return err;
-}
-
-/**
- * hinic_wqs_free - free Work Queues set
- * @wqs: Work Queue Set
- **/
-void hinic_wqs_free(struct hinic_wqs *wqs)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int page_idx;
-
- devm_kfree(&pdev->dev, wqs->free_blocks);
-
- for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
- wqs_free_page(wqs, page_idx);
-
- free_page_arrays(wqs);
-}
-
-/**
- * alloc_wqes_shadow - allocate WQE shadows for WQ
- * @wq: WQ to allocate shadows for
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_wqes_shadow(struct hinic_wq *wq)
-{
- struct hinic_hwif *hwif = wq->hwif;
- struct pci_dev *pdev = hwif->pdev;
- size_t size;
-
- size = wq->num_q_pages * wq->max_wqe_size;
- wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wq->shadow_wqe)
- return -ENOMEM;
-
- size = wq->num_q_pages * sizeof(wq->prod_idx);
- wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!wq->shadow_idx)
- goto err_shadow_idx;
-
- return 0;
-
-err_shadow_idx:
- devm_kfree(&pdev->dev, wq->shadow_wqe);
- return -ENOMEM;
-}
-
-/**
- * free_wqes_shadow - free WQE shadows of WQ
- * @wq: WQ to free shadows from
- **/
-static void free_wqes_shadow(struct hinic_wq *wq)
-{
- struct hinic_hwif *hwif = wq->hwif;
- struct pci_dev *pdev = hwif->pdev;
-
- devm_kfree(&pdev->dev, wq->shadow_idx);
- devm_kfree(&pdev->dev, wq->shadow_wqe);
-}
-
-/**
- * free_wq_pages - free pages of WQ
- * @hwif: HW interface for releasing dma addresses
- * @wq: WQ to free pages from
- * @num_q_pages: number of pages to free
- **/
-static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
- int num_q_pages)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i;
-
- for (i = 0; i < num_q_pages; i++) {
- void **vaddr = &wq->shadow_block_vaddr[i];
- u64 *paddr = &wq->block_vaddr[i];
- dma_addr_t dma_addr;
-
- dma_addr = (dma_addr_t)be64_to_cpu(*paddr);
- dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr,
- dma_addr);
- }
-
- free_wqes_shadow(wq);
-}
-
-/**
- * alloc_wq_pages - alloc pages for WQ
- * @hwif: HW interface for allocating dma addresses
- * @wq: WQ to allocate pages for
- * @max_pages: maximum pages allowed
- *
- * Return 0 - Success, negative - Failure
- **/
-static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
- int max_pages)
-{
- struct pci_dev *pdev = hwif->pdev;
- int i, err, num_q_pages;
-
- num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
- if (num_q_pages > max_pages) {
- dev_err(&pdev->dev, "Number wq pages exceeds the limit\n");
- return -EINVAL;
- }
-
- if (num_q_pages & (num_q_pages - 1)) {
- dev_err(&pdev->dev, "Number wq pages must be power of 2\n");
- return -EINVAL;
- }
-
- wq->num_q_pages = num_q_pages;
-
- err = alloc_wqes_shadow(wq);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate wqe shadow\n");
- return err;
- }
-
- for (i = 0; i < num_q_pages; i++) {
- void **vaddr = &wq->shadow_block_vaddr[i];
- u64 *paddr = &wq->block_vaddr[i];
- dma_addr_t dma_addr;
-
- *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size,
- &dma_addr, GFP_KERNEL);
- if (!*vaddr) {
- dev_err(&pdev->dev, "Failed to allocate wq page\n");
- goto err_alloc_wq_pages;
- }
-
- /* HW uses Big Endian Format */
- *paddr = cpu_to_be64(dma_addr);
- }
-
- return 0;
-
-err_alloc_wq_pages:
- free_wq_pages(wq, hwif, i);
- return -ENOMEM;
-}
-
-/**
- * hinic_wq_allocate - Allocate the WQ resources from the WQS
- * @wqs: WQ set from which to allocate the WQ resources
- * @wq: WQ to allocate resources for from the WQ set
- * @wqebb_size: Work Queue Block Byte Size
- * @wq_page_size: the page size in the Work Queue
- * @q_depth: number of wqebbs in WQ
- * @max_wqe_size: maximum WQE size that will be used in the WQ
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
- u16 max_wqe_size)
-{
- struct hinic_hwif *hwif = wqs->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 num_wqebbs_per_page;
- int err;
-
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
- return -EINVAL;
- }
-
- if (wq_page_size == 0) {
- dev_err(&pdev->dev, "wq_page_size must be > 0\n");
- return -EINVAL;
- }
-
- if (q_depth & (q_depth - 1)) {
- dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
- return -EINVAL;
- }
-
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
-
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
- dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
- return -EINVAL;
- }
-
- wq->hwif = hwif;
-
- err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
- if (err) {
- dev_err(&pdev->dev, "Failed to get free wqs next block\n");
- return err;
- }
-
- wq->wqebb_size = wqebb_size;
- wq->wq_page_size = wq_page_size;
- wq->q_depth = q_depth;
- wq->max_wqe_size = max_wqe_size;
- wq->num_wqebbs_per_page = num_wqebbs_per_page;
-
- wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
- wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
- wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
-
- err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate wq pages\n");
- goto err_alloc_wq_pages;
- }
-
- atomic_set(&wq->cons_idx, 0);
- atomic_set(&wq->prod_idx, 0);
- atomic_set(&wq->delta, q_depth);
- wq->mask = q_depth - 1;
-
- return 0;
-
-err_alloc_wq_pages:
- wqs_return_block(wqs, wq->page_idx, wq->block_idx);
- return err;
-}
-
-/**
- * hinic_wq_free - Free the WQ resources to the WQS
- * @wqs: WQ set to return the WQ resources to
- * @wq: WQ whose resources are returned to the WQ set
- **/
-void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
-{
- free_wq_pages(wq, wqs->hwif, wq->num_q_pages);
-
- wqs_return_block(wqs, wq->page_idx, wq->block_idx);
-}
-
-/**
- * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs
- * @cmdq_pages: will hold the pages of the cmdq
- * @wq: returned wqs
- * @hwif: HW interface
- * @cmdq_blocks: number of cmdq blocks/wq to allocate
- * @wqebb_size: Work Queue Block Byte Size
- * @wq_page_size: the page size in the Work Queue
- * @q_depth: number of wqebbs in WQ
- * @max_wqe_size: maximum WQE size that will be used in the WQ
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
- u16 q_depth, u16 max_wqe_size)
-{
- struct pci_dev *pdev = hwif->pdev;
- u16 num_wqebbs_per_page;
- int i, j, err = -ENOMEM;
-
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
- return -EINVAL;
- }
-
- if (wq_page_size == 0) {
- dev_err(&pdev->dev, "wq_page_size must be > 0\n");
- return -EINVAL;
- }
-
- if (q_depth & (q_depth - 1)) {
- dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
- return -EINVAL;
- }
-
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
-
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
- dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
- return -EINVAL;
- }
-
- cmdq_pages->hwif = hwif;
-
- err = cmdq_allocate_page(cmdq_pages);
- if (err) {
- dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
- return err;
- }
-
- for (i = 0; i < cmdq_blocks; i++) {
- wq[i].hwif = hwif;
- wq[i].page_idx = 0;
- wq[i].block_idx = i;
-
- wq[i].wqebb_size = wqebb_size;
- wq[i].wq_page_size = wq_page_size;
- wq[i].q_depth = q_depth;
- wq[i].max_wqe_size = max_wqe_size;
- wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
-
- wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
- wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
- wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
-
- err = alloc_wq_pages(&wq[i], cmdq_pages->hwif,
- CMDQ_WQ_MAX_PAGES);
- if (err) {
- dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");
- goto err_cmdq_block;
- }
-
- atomic_set(&wq[i].cons_idx, 0);
- atomic_set(&wq[i].prod_idx, 0);
- atomic_set(&wq[i].delta, q_depth);
- wq[i].mask = q_depth - 1;
- }
-
- return 0;
-
-err_cmdq_block:
- for (j = 0; j < i; j++)
- free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages);
-
- cmdq_free_page(cmdq_pages);
- return err;
-}
-
-/**
- * hinic_wqs_cmdq_free - Free wqs from cmdqs
- * @cmdq_pages: hold the pages of the cmdq
- * @wq: wqs to free
- * @cmdq_blocks: number of wqs to free
- **/
-void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, int cmdq_blocks)
-{
- int i;
-
- for (i = 0; i < cmdq_blocks; i++)
- free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages);
-
- cmdq_free_page(cmdq_pages);
-}
-
-static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
- int num_wqebbs, u16 idx)
-{
- void *wqebb_addr;
- int i;
-
- for (i = 0; i < num_wqebbs; i++, idx++) {
- idx = MASKED_WQE_IDX(wq, idx);
- wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
- WQE_PAGE_OFF(wq, idx);
-
- memcpy(shadow_addr, wqebb_addr, wq->wqebb_size);
-
- shadow_addr += wq->wqebb_size;
- }
-}
-
-static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
- int num_wqebbs, u16 idx)
-{
- void *wqebb_addr;
- int i;
-
- for (i = 0; i < num_wqebbs; i++, idx++) {
- idx = MASKED_WQE_IDX(wq, idx);
- wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
- WQE_PAGE_OFF(wq, idx);
-
- memcpy(wqebb_addr, shadow_addr, wq->wqebb_size);
- shadow_addr += wq->wqebb_size;
- }
-}
-
-/**
- * hinic_get_wqe - get wqe ptr in the current pi and update the pi
- * @wq: wq to get wqe from
- * @wqe_size: wqe size
- * @prod_idx: returned pi
- *
- * Return wqe pointer
- **/
-struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *prod_idx)
-{
- int curr_pg, end_pg, num_wqebbs;
- u16 curr_prod_idx, end_prod_idx;
-
- *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
-
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
-
- if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
- atomic_add(num_wqebbs, &wq->delta);
- return ERR_PTR(-EBUSY);
- }
-
- end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx);
-
- end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx);
- curr_prod_idx = end_prod_idx - num_wqebbs;
- curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);
-
- /* end prod index points to the next wqebb, therefore minus 1 */
- end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1);
-
- curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
- end_pg = WQE_PAGE_NUM(wq, end_prod_idx);
-
- *prod_idx = curr_prod_idx;
-
- if (curr_pg != end_pg) {
- void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
-
- wq->shadow_idx[curr_pg] = *prod_idx;
- return shadow_addr;
- }
-
- return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
-}
-
-/**
- * hinic_put_wqe - release the wqe area for use by a new wqe
- * @wq: wq to return wqe
- * @wqe_size: wqe size
- **/
-void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
-{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
-
- atomic_add(num_wqebbs, &wq->cons_idx);
-
- atomic_add(num_wqebbs, &wq->delta);
-}
-
-/**
- * hinic_read_wqe - read wqe ptr in the current ci
- * @wq: wq to get read from
- * @wqe_size: wqe size
- * @cons_idx: returned ci
- *
- * Return wqe pointer
- **/
-struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *cons_idx)
-{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
- u16 curr_cons_idx, end_cons_idx;
- int curr_pg, end_pg;
-
- if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
- return ERR_PTR(-EBUSY);
-
- curr_cons_idx = atomic_read(&wq->cons_idx);
-
- curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
- end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);
-
- curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
- end_pg = WQE_PAGE_NUM(wq, end_cons_idx);
-
- *cons_idx = curr_cons_idx;
-
- if (curr_pg != end_pg) {
- void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
- return shadow_addr;
- }
-
- return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
-}
-
-/**
- * hinic_read_wqe_direct - read wqe directly from ci position
- * @wq: wq
- * @cons_idx: ci position
- *
- * Return wqe
- **/
-struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx)
-{
- return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx);
-}
-
-/**
- * wqe_shadow - check if a wqe is a shadow wqe
- * @wq: wq of the wqe
- * @wqe: the wqe for shadow checking
- *
- * Return true - shadow, false - not shadow
- **/
-static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe)
-{
- size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size;
-
- return WQE_IN_RANGE(wqe, wq->shadow_wqe,
- &wq->shadow_wqe[wqe_shadow_size]);
-}
-
-/**
- * hinic_write_wqe - write the wqe to the wq
- * @wq: wq to write wqe to
- * @wqe: wqe to write
- * @wqe_size: wqe size
- **/
-void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
- unsigned int wqe_size)
-{
- int curr_pg, num_wqebbs;
- void *shadow_addr;
- u16 prod_idx;
-
- if (wqe_shadow(wq, wqe)) {
- curr_pg = WQE_SHADOW_PAGE(wq, wqe);
-
- prod_idx = wq->shadow_idx[curr_pg];
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
- shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
-
- copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx);
- }
-}
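
The allocator logic in hinic_get_wqe() above depends on q_depth being a
power of two, so MASKED_WQE_IDX() wraps with a single AND, and on the
atomic delta counter reserving wqebbs before the producer index moves. A
single-threaded sketch of that arithmetic with toy sizes (the real driver
uses atomics and the shadow-copy path for page-crossing wqes):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t q_depth = 16;		/* must be a power of 2 */
	const uint16_t mask = q_depth - 1;
	int delta = q_depth;			/* free wqebbs */
	uint16_t prod_idx = 14;
	int num_wqebbs = 3;			/* wqe spanning 3 wqebbs */

	/* reserve before advancing, as atomic_sub_return() does */
	if (delta - num_wqebbs <= 0) {
		puts("ring full");
		return 1;
	}
	delta -= num_wqebbs;

	uint16_t start = prod_idx & mask;	/* 14 */
	prod_idx += num_wqebbs;			/* wraps via masking */
	printf("wqe at %u, next pi %u, free %d\n",
	       start, prod_idx & mask, delta);	/* 14, 1, 13 */
	return 0;
}
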
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
deleted file mode 100644
index 9c030a0..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_WQ_H
-#define HINIC_HW_WQ_H
-
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include <linux/atomic.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_wqe.h"
-
-struct hinic_free_block {
- int page_idx;
- int block_idx;
-};
-
-struct hinic_wq {
- struct hinic_hwif *hwif;
-
- int page_idx;
- int block_idx;
-
- u16 wqebb_size;
- u16 wq_page_size;
- u16 q_depth;
- u16 max_wqe_size;
- u16 num_wqebbs_per_page;
-
- /* The addresses are 64 bit in the HW */
- u64 block_paddr;
- void **shadow_block_vaddr;
- u64 *block_vaddr;
-
- int num_q_pages;
- u8 *shadow_wqe;
- u16 *shadow_idx;
-
- atomic_t cons_idx;
- atomic_t prod_idx;
- atomic_t delta;
- u16 mask;
-};
-
-struct hinic_wqs {
- struct hinic_hwif *hwif;
- int num_pages;
-
- /* The addresses are 64 bit in the HW */
- u64 *page_paddr;
- u64 **page_vaddr;
- void ***shadow_page_vaddr;
-
- struct hinic_free_block *free_blocks;
- int alloc_blk_pos;
- int return_blk_pos;
- int num_free_blks;
-
- /* Lock for getting a free block from the WQ set */
- struct semaphore alloc_blocks_lock;
-};
-
-struct hinic_cmdq_pages {
- /* The addresses are 64 bit in the HW */
- u64 page_paddr;
- u64 *page_vaddr;
- void **shadow_page_vaddr;
-
- struct hinic_hwif *hwif;
-};
-
-int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, struct hinic_hwif *hwif,
- int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
- u16 q_depth, u16 max_wqe_size);
-
-void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
- struct hinic_wq *wq, int cmdq_blocks);
-
-int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs,
- struct hinic_hwif *hwif);
-
-void hinic_wqs_free(struct hinic_wqs *wqs);
-
-int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
- u16 wqebb_size, u16 wq_page_size, u16 q_depth,
- u16 max_wqe_size);
-
-void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
-
-struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *prod_idx);
-
-void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size);
-
-struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
- u16 *cons_idx);
-
-struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx);
-
-void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
- unsigned int wqe_size);
-
-#endif
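
One detail of struct hinic_wq worth spelling out: prod_idx, cons_idx and delta implement the usual power-of-two ring bookkeeping, where the indices grow monotonically, the mask (q_depth - 1) wraps them on use, and delta tracks the WQEBBs still free. A single-threaded sketch of that scheme (the driver keeps these fields atomic; names below are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned int prod_idx;
	unsigned int cons_idx;
	unsigned int delta;   /* free slots remaining */
	unsigned int mask;    /* q_depth - 1, q_depth a power of two */
};

static bool ring_produce(struct ring *r, unsigned int n, unsigned int *pi)
{
	if (r->delta < n)
		return false;            /* not enough room left */
	*pi = r->prod_idx & r->mask;     /* masked slot to write at */
	r->prod_idx += n;
	r->delta -= n;
	return true;
}

static void ring_consume(struct ring *r, unsigned int n, unsigned int *ci)
{
	*ci = r->cons_idx & r->mask;     /* masked slot to read from */
	r->cons_idx += n;
	r->delta += n;
}

int main(void)
{
	struct ring r = { .delta = 8, .mask = 7 };  /* q_depth = 8 */
	unsigned int pi, ci;

	if (ring_produce(&r, 3, &pi))
		printf("wrote 3 slots at %u, %u free\n", pi, r.delta);
	if (!ring_produce(&r, 6, &pi))
		printf("refused: only %u free\n", r.delta);
	ring_consume(&r, 3, &ci);
	printf("read 3 slots at %u, %u free\n", ci, r.delta);
	return 0;
}
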
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
deleted file mode 100644
index bc73485..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_HW_WQE_H
-#define HINIC_HW_WQE_H
-
-#include "hinic_common.h"
-
-#define HINIC_CMDQ_CTRL_PI_SHIFT 0
-#define HINIC_CMDQ_CTRL_CMD_SHIFT 16
-#define HINIC_CMDQ_CTRL_MOD_SHIFT 24
-#define HINIC_CMDQ_CTRL_ACK_TYPE_SHIFT 29
-#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31
-
-#define HINIC_CMDQ_CTRL_PI_MASK 0xFFFF
-#define HINIC_CMDQ_CTRL_CMD_MASK 0xFF
-#define HINIC_CMDQ_CTRL_MOD_MASK 0x1F
-#define HINIC_CMDQ_CTRL_ACK_TYPE_MASK 0x3
-#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1
-
-#define HINIC_CMDQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_CTRL_##member##_MASK) \
- << HINIC_CMDQ_CTRL_##member##_SHIFT)
-
-#define HINIC_CMDQ_CTRL_GET(val, member) \
- (((val) >> HINIC_CMDQ_CTRL_##member##_SHIFT) \
- & HINIC_CMDQ_CTRL_##member##_MASK)
-
-#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15
-#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27
-#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29
-#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_SHIFT 31
-
-#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFF
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1
-#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3
-#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3
-#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_MASK 0x1
-
-#define HINIC_CMDQ_WQE_HEADER_SET(val, member) \
- (((u32)(val) & HINIC_CMDQ_WQE_HEADER_##member##_MASK) \
- << HINIC_CMDQ_WQE_HEADER_##member##_SHIFT)
-
-#define HINIC_CMDQ_WQE_HEADER_GET(val, member) \
- (((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \
- & HINIC_CMDQ_WQE_HEADER_##member##_MASK)
-
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
-#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16
-#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22
-#define HINIC_SQ_CTRL_LEN_SHIFT 29
-
-#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
-#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F
-#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1
-#define HINIC_SQ_CTRL_LEN_MASK 0x3
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF
-
-#define HINIC_SQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \
- << HINIC_SQ_CTRL_##member##_SHIFT)
-
-#define HINIC_SQ_CTRL_GET(val, member) \
- (((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \
- & HINIC_SQ_CTRL_##member##_MASK)
-
-#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0
-#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8
-#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10
-#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12
-#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO0_TSO_FLAG_SHIFT 15
-#define HINIC_SQ_TASK_INFO0_VLAN_TAG_SHIFT 16
-
-#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_MASK 0x3
-#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3
-#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1
-#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO0_TSO_FLAG_MASK 0x1
-#define HINIC_SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFF
-
-#define HINIC_SQ_TASK_INFO0_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO0_##member##_MASK) << \
- HINIC_SQ_TASK_INFO0_##member##_SHIFT)
-
-/* 8 bits reserved */
-#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_SHIFT 16
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_SHIFT 24
-
-/* 8 bits reserved */
-#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_MASK 0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_MASK 0xFF
-
-#define HINIC_SQ_TASK_INFO1_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \
- HINIC_SQ_TASK_INFO1_##member##_SHIFT)
-
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_SHIFT 0
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_SHIFT 12
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 19
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 22
-/* 8 bits reserved */
-
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_MASK 0xFFF
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_MASK 0x7F
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x3
-/* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3
-/* 8 bits reserved */
-
-#define HINIC_SQ_TASK_INFO2_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO2_##member##_MASK) << \
- HINIC_SQ_TASK_INFO2_##member##_SHIFT)
-
-/* 31 bits reserved */
-#define HINIC_SQ_TASK_INFO4_L2TYPE_SHIFT 31
-
-/* 31 bits reserved */
-#define HINIC_SQ_TASK_INFO4_L2TYPE_MASK 0x1
-
-#define HINIC_SQ_TASK_INFO4_SET(val, member) \
- (((u32)(val) & HINIC_SQ_TASK_INFO4_##member##_MASK) << \
- HINIC_SQ_TASK_INFO4_##member##_SHIFT)
-
-#define HINIC_RQ_CQE_STATUS_RXDONE_SHIFT 31
-
-#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1
-
-#define HINIC_RQ_CQE_STATUS_GET(val, member) \
- (((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \
- HINIC_RQ_CQE_STATUS_##member##_MASK)
-
-#define HINIC_RQ_CQE_STATUS_CLEAR(val, member) \
- ((val) & (~(HINIC_RQ_CQE_STATUS_##member##_MASK << \
- HINIC_RQ_CQE_STATUS_##member##_SHIFT)))
-
-#define HINIC_RQ_CQE_SGE_LEN_SHIFT 16
-
-#define HINIC_RQ_CQE_SGE_LEN_MASK 0xFFFF
-
-#define HINIC_RQ_CQE_SGE_GET(val, member) \
- (((val) >> HINIC_RQ_CQE_SGE_##member##_SHIFT) & \
- HINIC_RQ_CQE_SGE_##member##_MASK)
-
-#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0
-#define HINIC_RQ_CTRL_COMPLETE_FORMAT_SHIFT 15
-#define HINIC_RQ_CTRL_COMPLETE_LEN_SHIFT 27
-#define HINIC_RQ_CTRL_LEN_SHIFT 29
-
-#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF
-#define HINIC_RQ_CTRL_COMPLETE_FORMAT_MASK 0x1
-#define HINIC_RQ_CTRL_COMPLETE_LEN_MASK 0x3
-#define HINIC_RQ_CTRL_LEN_MASK 0x3
-
-#define HINIC_RQ_CTRL_SET(val, member) \
- (((u32)(val) & HINIC_RQ_CTRL_##member##_MASK) << \
- HINIC_RQ_CTRL_##member##_SHIFT)
-
-#define HINIC_SQ_WQE_SIZE(nr_sges) \
- (sizeof(struct hinic_sq_ctrl) + \
- sizeof(struct hinic_sq_task) + \
- (nr_sges) * sizeof(struct hinic_sq_bufdesc))
-
-#define HINIC_SCMD_DATA_LEN 16
-
-#define HINIC_MAX_SQ_BUFDESCS 17
-
-#define HINIC_SQ_WQE_MAX_SIZE 320
-#define HINIC_RQ_WQE_SIZE 32
-
-enum hinic_l4offload_type {
- HINIC_L4_OFF_DISABLE = 0,
- HINIC_TCP_OFFLOAD_ENABLE = 1,
- HINIC_SCTP_OFFLOAD_ENABLE = 2,
- HINIC_UDP_OFFLOAD_ENABLE = 3,
-};
-
-enum hinic_vlan_offload {
- HINIC_VLAN_OFF_DISABLE = 0,
- HINIC_VLAN_OFF_ENABLE = 1,
-};
-
-enum hinic_pkt_parsed {
- HINIC_PKT_NOT_PARSED = 0,
- HINIC_PKT_PARSED = 1,
-};
-
-enum hinic_outer_l3type {
- HINIC_OUTER_L3TYPE_UNKNOWN = 0,
- HINIC_OUTER_L3TYPE_IPV6 = 1,
- HINIC_OUTER_L3TYPE_IPV4_NO_CHKSUM = 2,
- HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3,
-};
-
-enum hinic_media_type {
- HINIC_MEDIA_UNKNOWN = 0,
-};
-
-enum hinic_l2type {
- HINIC_L2TYPE_ETH = 0,
-};
-
-enum hinc_tunnel_l4type {
- HINIC_TUNNEL_L4TYPE_UNKNOWN = 0,
-};
-
-struct hinic_cmdq_header {
- u32 header_info;
- u32 saved_data;
-};
-
-struct hinic_status {
- u32 status_info;
-};
-
-struct hinic_ctrl {
- u32 ctrl_info;
-};
-
-struct hinic_sge_resp {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_cmdq_completion {
- /* HW Format */
- union {
- struct hinic_sge_resp sge_resp;
- u64 direct_resp;
- };
-};
-
-struct hinic_scmd_bufdesc {
- u32 buf_len;
- u32 rsvd;
- u8 data[HINIC_SCMD_DATA_LEN];
-};
-
-struct hinic_lcmd_bufdesc {
- struct hinic_sge sge;
- u32 rsvd1;
- u64 rsvd2;
- u64 rsvd3;
-};
-
-struct hinic_cmdq_wqe_scmd {
- struct hinic_cmdq_header header;
- u64 rsvd;
- struct hinic_status status;
- struct hinic_ctrl ctrl;
- struct hinic_cmdq_completion completion;
- struct hinic_scmd_bufdesc buf_desc;
-};
-
-struct hinic_cmdq_wqe_lcmd {
- struct hinic_cmdq_header header;
- struct hinic_status status;
- struct hinic_ctrl ctrl;
- struct hinic_cmdq_completion completion;
- struct hinic_lcmd_bufdesc buf_desc;
-};
-
-struct hinic_cmdq_direct_wqe {
- struct hinic_cmdq_wqe_scmd wqe_scmd;
-};
-
-struct hinic_cmdq_wqe {
- /* HW Format */
- union {
- struct hinic_cmdq_direct_wqe direct_wqe;
- struct hinic_cmdq_wqe_lcmd wqe_lcmd;
- };
-};
-
-struct hinic_sq_ctrl {
- u32 ctrl_info;
- u32 queue_info;
-};
-
-struct hinic_sq_task {
- u32 pkt_info0;
- u32 pkt_info1;
- u32 pkt_info2;
- u32 ufo_v6_identify;
- u32 pkt_info4;
- u32 zero_pad;
-};
-
-struct hinic_sq_bufdesc {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_sq_wqe {
- struct hinic_sq_ctrl ctrl;
- struct hinic_sq_task task;
- struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS];
-};
-
-struct hinic_rq_cqe {
- u32 status;
- u32 len;
-
- u32 rsvd2;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
- u32 rsvd6;
- u32 rsvd7;
-};
-
-struct hinic_rq_ctrl {
- u32 ctrl_info;
-};
-
-struct hinic_rq_cqe_sect {
- struct hinic_sge sge;
- u32 rsvd;
-};
-
-struct hinic_rq_bufdesc {
- u32 hi_addr;
- u32 lo_addr;
-};
-
-struct hinic_rq_wqe {
- struct hinic_rq_ctrl ctrl;
- u32 rsvd;
- struct hinic_rq_cqe_sect cqe_sect;
- struct hinic_rq_bufdesc buf_desc;
-};
-
-struct hinic_hw_wqe {
- /* HW Format */
- union {
- struct hinic_cmdq_wqe cmdq_wqe;
- struct hinic_sq_wqe sq_wqe;
- struct hinic_rq_wqe rq_wqe;
- };
-};
-
-#endif
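
All of the *_SHIFT/*_MASK pairs in the header above feed one idiom: mask each value, shift it into position, and OR the fields into a single descriptor dword, with GET reversing the steps. A stand-alone sketch of that composition, reusing the CMDQ ctrl field layout from this header (the example values are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Same shift/mask idiom as HINIC_CMDQ_CTRL_SET/GET above. */
#define CTRL_PI_SHIFT          0
#define CTRL_CMD_SHIFT         16
#define CTRL_MOD_SHIFT         24

#define CTRL_PI_MASK           0xFFFFU
#define CTRL_CMD_MASK          0xFFU
#define CTRL_MOD_MASK          0x1FU

#define CTRL_SET(val, member) \
	(((uint32_t)(val) & CTRL_##member##_MASK) << CTRL_##member##_SHIFT)

#define CTRL_GET(val, member) \
	(((val) >> CTRL_##member##_SHIFT) & CTRL_##member##_MASK)

int main(void)
{
	/* Compose a ctrl dword from independent fields, then pick one out. */
	uint32_t ctrl = CTRL_SET(0x1234, PI) |
			CTRL_SET(0x2a, CMD) |
			CTRL_SET(0x05, MOD);

	printf("ctrl=0x%08x cmd=0x%02x\n", ctrl, CTRL_GET(ctrl, CMD));
	return 0;
}
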
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
deleted file mode 100644
index 4d4e3f0..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ /dev/null
@@ -1,379 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if_vlan.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-
-#include "hinic_hw_if.h"
-#include "hinic_hw_dev.h"
-#include "hinic_port.h"
-#include "hinic_dev.h"
-
-#define HINIC_MIN_MTU_SIZE 256
-#define HINIC_MAX_JUMBO_FRAME_SIZE 15872
-
-enum mac_op {
- MAC_DEL,
- MAC_SET,
-};
-
-/**
- * change_mac - change (add or delete) mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number to set with the mac
- * @op: add or delete the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-static int change_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id, enum mac_op op)
-{
- struct net_device *netdev = nic_dev->netdev;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mac_cmd port_mac_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- enum hinic_port_cmd cmd;
- u16 out_size;
- int err;
-
- if (vlan_id >= VLAN_N_VID) {
- netif_err(nic_dev, drv, netdev, "Invalid VLAN number\n");
- return -EINVAL;
- }
-
- if (op == MAC_SET)
- cmd = HINIC_PORT_CMD_SET_MAC;
- else
- cmd = HINIC_PORT_CMD_DEL_MAC;
-
- port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- port_mac_cmd.vlan_id = vlan_id;
- memcpy(port_mac_cmd.mac, addr, ETH_ALEN);
-
- err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd,
- sizeof(port_mac_cmd),
- &port_mac_cmd, &out_size);
- if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) {
- dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n",
- port_mac_cmd.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_add_mac - add mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number to set with the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_add_mac(struct hinic_dev *nic_dev,
- const u8 *addr, u16 vlan_id)
-{
- return change_mac(nic_dev, addr, vlan_id, MAC_SET);
-}
-
-/**
- * hinic_port_del_mac - remove mac address
- * @nic_dev: nic device
- * @addr: mac address
- * @vlan_id: vlan number that is connected to the mac
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id)
-{
- return change_mac(nic_dev, addr, vlan_id, MAC_DEL);
-}
-
-/**
- * hinic_port_get_mac - get the mac address of the nic device
- * @nic_dev: nic device
- * @addr: returned mac address
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mac_cmd port_mac_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MAC,
- &port_mac_cmd, sizeof(port_mac_cmd),
- &port_mac_cmd, &out_size);
- if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) {
- dev_err(&pdev->dev, "Failed to get mac, ret = %d\n",
- port_mac_cmd.status);
- return -EFAULT;
- }
-
- memcpy(addr, port_mac_cmd.mac, ETH_ALEN);
- return 0;
-}
-
-/**
- * hinic_port_set_mtu - set mtu
- * @nic_dev: nic device
- * @new_mtu: new mtu
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu)
-{
- struct net_device *netdev = nic_dev->netdev;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_mtu_cmd port_mtu_cmd;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- int err, max_frame;
- u16 out_size;
-
- if (new_mtu < HINIC_MIN_MTU_SIZE) {
- netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size");
- return -EINVAL;
- }
-
- max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if (max_frame > HINIC_MAX_JUMBO_FRAME_SIZE) {
- netif_err(nic_dev, drv, netdev, "mtu > MAX MTU size");
- return -EINVAL;
- }
-
- port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- port_mtu_cmd.mtu = new_mtu;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU,
- &port_mtu_cmd, sizeof(port_mtu_cmd),
- &port_mtu_cmd, &out_size);
- if (err || (out_size != sizeof(port_mtu_cmd)) || port_mtu_cmd.status) {
- dev_err(&pdev->dev, "Failed to set mtu, ret = %d\n",
- port_mtu_cmd.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_add_vlan - add vlan to the nic device
- * @nic_dev: nic device
- * @vlan_id: the vlan number to add
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_vlan_cmd port_vlan_cmd;
-
- port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- port_vlan_cmd.vlan_id = vlan_id;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ADD_VLAN,
- &port_vlan_cmd, sizeof(port_vlan_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_del_vlan - delete vlan from the nic device
- * @nic_dev: nic device
- * @vlan_id: the vlan number to delete
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_vlan_cmd port_vlan_cmd;
-
- port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- port_vlan_cmd.vlan_id = vlan_id;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_DEL_VLAN,
- &port_vlan_cmd, sizeof(port_vlan_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_set_rx_mode - set rx mode in the nic device
- * @nic_dev: nic device
- * @rx_mode: the rx mode to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_rx_mode_cmd rx_mode_cmd;
-
- rx_mode_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
- rx_mode_cmd.rx_mode = rx_mode;
-
- return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_MODE,
- &rx_mode_cmd, sizeof(rx_mode_cmd),
- NULL, NULL);
-}
-
-/**
- * hinic_port_link_state - get the link state
- * @nic_dev: nic device
- * @link_state: the returned link state
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_link_state(struct hinic_dev *nic_dev,
- enum hinic_port_link_state *link_state)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct hinic_port_link_cmd link_cmd;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE,
- &link_cmd, sizeof(link_cmd),
- &link_cmd, &out_size);
- if (err || (out_size != sizeof(link_cmd)) || link_cmd.status) {
- dev_err(&pdev->dev, "Failed to get link state, ret = %d\n",
- link_cmd.status);
- return -EINVAL;
- }
-
- *link_state = link_cmd.state;
- return 0;
-}
-
-/**
- * hinic_port_set_state - set port state
- * @nic_dev: nic device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_port_state_cmd port_state;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
- dev_err(&pdev->dev, "unsupported PCI Function type\n");
- return -EINVAL;
- }
-
- port_state.state = state;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PORT_STATE,
- &port_state, sizeof(port_state),
- &port_state, &out_size);
- if (err || (out_size != sizeof(port_state)) || port_state.status) {
- dev_err(&pdev->dev, "Failed to set port state, ret = %d\n",
- port_state.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_set_func_state - set func device state
- * @nic_dev: nic device
- * @state: the state to set
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_set_func_state(struct hinic_dev *nic_dev,
- enum hinic_func_port_state state)
-{
- struct hinic_port_func_state_cmd func_state;
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
- func_state.state = state;
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_FUNC_STATE,
- &func_state, sizeof(func_state),
- &func_state, &out_size);
- if (err || (out_size != sizeof(func_state)) || func_state.status) {
- dev_err(&pdev->dev, "Failed to set port func state, ret = %d\n",
- func_state.status);
- return -EFAULT;
- }
-
- return 0;
-}
-
-/**
- * hinic_port_get_cap - get port capabilities
- * @nic_dev: nic device
- * @port_cap: returned port capabilities
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_port_get_cap(struct hinic_dev *nic_dev,
- struct hinic_port_cap *port_cap)
-{
- struct hinic_hwdev *hwdev = nic_dev->hwdev;
- struct hinic_hwif *hwif = hwdev->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u16 out_size;
- int err;
-
- port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif);
-
- err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_CAP,
- port_cap, sizeof(*port_cap),
- port_cap, &out_size);
- if (err || (out_size != sizeof(*port_cap)) || port_cap->status) {
- dev_err(&pdev->dev,
- "Failed to get port capabilities, ret = %d\n",
- port_cap->status);
- return -EINVAL;
- }
-
- return 0;
-}
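
Every wrapper in the removed hinic_port.c follows the same management-command shape: fill a request struct whose leading byte doubles as the firmware status on return, issue it with hinic_port_msg_cmd() reusing the buffer for the reply, and treat any of (nonzero error, short reply, nonzero embedded status) as failure. A condensed sketch of that triple check, with a stubbed transport standing in for hinic_port_msg_cmd() so it compiles on its own (the stub is illustrative, not the driver's implementation):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct port_cmd_hdr {
	uint8_t status;    /* written back by firmware */
	uint8_t version;
	uint8_t rsvd0[6];
};

/* Stub transport: pretend the firmware answered with a full, clean reply. */
static int msg_cmd(void *buf, size_t in_size, uint16_t *out_size)
{
	((struct port_cmd_hdr *)buf)->status = 0;
	*out_size = (uint16_t)in_size;
	return 0;
}

static int issue_cmd(void *cmd, size_t size)
{
	uint16_t out_size = 0;
	int err = msg_cmd(cmd, size, &out_size);
	struct port_cmd_hdr *hdr = cmd;

	/* The triple check used by every hinic_port_* wrapper. */
	if (err || out_size != size || hdr->status) {
		fprintf(stderr, "cmd failed: err=%d out=%u status=%u\n",
			err, out_size, hdr->status);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct { struct port_cmd_hdr hdr; uint16_t func_idx; uint16_t rsvd; } cmd;

	memset(&cmd, 0, sizeof(cmd));
	return issue_cmd(&cmd, sizeof(cmd));
}
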
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
deleted file mode 100644
index 9404365..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef HINIC_PORT_H
-#define HINIC_PORT_H
-
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/bitops.h>
-
-#include "hinic_dev.h"
-
-enum hinic_rx_mode {
- HINIC_RX_MODE_UC = BIT(0),
- HINIC_RX_MODE_MC = BIT(1),
- HINIC_RX_MODE_BC = BIT(2),
- HINIC_RX_MODE_MC_ALL = BIT(3),
- HINIC_RX_MODE_PROMISC = BIT(4),
-};
-
-enum hinic_port_link_state {
- HINIC_LINK_STATE_DOWN,
- HINIC_LINK_STATE_UP,
-};
-
-enum hinic_port_state {
- HINIC_PORT_DISABLE = 0,
- HINIC_PORT_ENABLE = 3,
-};
-
-enum hinic_func_port_state {
- HINIC_FUNC_PORT_DISABLE = 0,
- HINIC_FUNC_PORT_ENABLE = 2,
-};
-
-enum hinic_autoneg_cap {
- HINIC_AUTONEG_UNSUPPORTED,
- HINIC_AUTONEG_SUPPORTED,
-};
-
-enum hinic_autoneg_state {
- HINIC_AUTONEG_DISABLED,
- HINIC_AUTONEG_ACTIVE,
-};
-
-enum hinic_duplex {
- HINIC_DUPLEX_HALF,
- HINIC_DUPLEX_FULL,
-};
-
-enum hinic_speed {
- HINIC_SPEED_10MB_LINK = 0,
- HINIC_SPEED_100MB_LINK,
- HINIC_SPEED_1000MB_LINK,
- HINIC_SPEED_10GB_LINK,
- HINIC_SPEED_25GB_LINK,
- HINIC_SPEED_40GB_LINK,
- HINIC_SPEED_100GB_LINK,
-
- HINIC_SPEED_UNKNOWN = 0xFF,
-};
-
-struct hinic_port_mac_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 vlan_id;
- u16 rsvd1;
- unsigned char mac[ETH_ALEN];
-};
-
-struct hinic_port_mtu_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u32 mtu;
-};
-
-struct hinic_port_vlan_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 vlan_id;
-};
-
-struct hinic_port_rx_mode_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd;
- u32 rx_mode;
-};
-
-struct hinic_port_link_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u8 state;
- u8 rsvd1;
-};
-
-struct hinic_port_state_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u8 state;
- u8 rsvd1[3];
-};
-
-struct hinic_port_link_status {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 rsvd1;
- u8 link;
- u8 rsvd2;
-};
-
-struct hinic_port_func_state_cmd {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u8 state;
- u8 rsvd2[3];
-};
-
-struct hinic_port_cap {
- u8 status;
- u8 version;
- u8 rsvd0[6];
-
- u16 func_idx;
- u16 rsvd1;
- u8 port_type;
- u8 autoneg_cap;
- u8 autoneg_state;
- u8 duplex;
- u8 speed;
- u8 rsvd2[3];
-};
-
-int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id);
-
-int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr,
- u16 vlan_id);
-
-int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr);
-
-int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu);
-
-int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id);
-
-int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id);
-
-int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode);
-
-int hinic_port_link_state(struct hinic_dev *nic_dev,
- enum hinic_port_link_state *link_state);
-
-int hinic_port_set_state(struct hinic_dev *nic_dev,
- enum hinic_port_state state);
-
-int hinic_port_set_func_state(struct hinic_dev *nic_dev,
- enum hinic_func_port_state state);
-
-int hinic_port_get_cap(struct hinic_dev *nic_dev,
- struct hinic_port_cap *port_cap);
-
-#endif
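
Since enum hinic_rx_mode is a plain bitmask, a receive configuration is just an OR of the modes a function should accept, and promiscuous mode is one more bit rather than a separate state. A tiny sketch, with local defines mirroring the enum above:

#include <stdio.h>

/* Bit flags mirroring enum hinic_rx_mode in the header above. */
#define RX_MODE_UC      (1u << 0)
#define RX_MODE_MC      (1u << 1)
#define RX_MODE_BC      (1u << 2)
#define RX_MODE_MC_ALL  (1u << 3)
#define RX_MODE_PROMISC (1u << 4)

int main(void)
{
	/* A typical default: unicast + multicast + broadcast. */
	unsigned int rx_mode = RX_MODE_UC | RX_MODE_MC | RX_MODE_BC;

	/* Entering promiscuous mode is one more bit, not a new state. */
	rx_mode |= RX_MODE_PROMISC;
	printf("rx_mode = 0x%x\n", rx_mode);
	return 0;
}
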
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h b/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
deleted file mode 100644
index b837dab..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_sml_table.h
+++ /dev/null
@@ -1,2728 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef __SML_TABLE_H__
-#define __SML_TABLE_H__
-
-#include "hinic_sml_table_pub.h"
-
-#ifdef __cplusplus
-#if __cplusplus
-extern "C" {
-#endif
-#endif /* __cplusplus */
-
-#define TBL_ID_CTR_DFX_S32_SM_NODE 11
-#define TBL_ID_CTR_DFX_S32_SM_INST 20
-
-#define TBL_ID_CTR_DFX_PAIR_SM_NODE 10
-#define TBL_ID_CTR_DFX_PAIR_SM_INST 24
-
-#define TBL_ID_CTR_DFX_S64_SM_NODE 11
-#define TBL_ID_CTR_DFX_S64_SM_INST 21
-
-#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)) && \
- (!defined(__FPGA__)))
-
-#define TBL_ID_GLOBAL_SM_NODE 10
-#define TBL_ID_GLOBAL_SM_INST 1
-
-#define TBL_ID_PORT_CFG_SM_NODE 10
-#define TBL_ID_PORT_CFG_SM_INST 2
-
-#define TBL_ID_VLAN_SM_NODE 10
-#define TBL_ID_VLAN_SM_INST 3
-
-#define TBL_ID_MULTICAST_SM_NODE 10
-#define TBL_ID_MULTICAST_SM_INST 4
-
-#define TBL_ID_MISC_RSS_HASH0_SM_NODE 10
-#define TBL_ID_MISC_RSS_HASH0_SM_INST 5
-
-#define TBL_ID_FIC_VOQ_MAP_SM_NODE 10
-#define TBL_ID_FIC_VOQ_MAP_SM_INST 6
-
-#define TBL_ID_CAR_SM_NODE 10
-#define TBL_ID_CAR_SM_INST 7
-
-#define TBL_ID_IPMAC_FILTER_SM_NODE 10
-#define TBL_ID_IPMAC_FILTER_SM_INST 8
-
-#define TBL_ID_GLOBAL_QUE_MAP_SM_NODE 10
-#define TBL_ID_GLOBAL_QUE_MAP_SM_INST 9
-
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_INST 10
-
-#define TBL_ID_UCODE_EXEC_INFO_SM_NODE 10
-#define TBL_ID_UCODE_EXEC_INFO_SM_INST 11
-
-#define TBL_ID_RQ_IQ_MAPPING_SM_NODE 10
-#define TBL_ID_RQ_IQ_MAPPING_SM_INST 12
-
-#define TBL_ID_MAC_SM_NODE 10
-#define TBL_ID_MAC_SM_INST 21
-
-#define TBL_ID_MAC_BHEAP_SM_NODE 10
-#define TBL_ID_MAC_BHEAP_SM_INST 22
-
-#define TBL_ID_MAC_MISC_SM_NODE 10
-#define TBL_ID_MAC_MISC_SM_INST 23
-
-#define TBL_ID_FUNC_CFG_SM_NODE 11
-#define TBL_ID_FUNC_CFG_SM_INST 1
-
-#define TBL_ID_TRUNK_FWD_SM_NODE 11
-#define TBL_ID_TRUNK_FWD_SM_INST 2
-
-#define TBL_ID_VLAN_FILTER_SM_NODE 11
-#define TBL_ID_VLAN_FILTER_SM_INST 3
-
-#define TBL_ID_ELB_SM_NODE 11
-#define TBL_ID_ELB_SM_INST 4
-
-#define TBL_ID_MISC_RSS_HASH1_SM_NODE 11
-#define TBL_ID_MISC_RSS_HASH1_SM_INST 5
-
-#define TBL_ID_RSS_CONTEXT_SM_NODE 11
-#define TBL_ID_RSS_CONTEXT_SM_INST 6
-
-#define TBL_ID_ETHERTYPE_FILTER_SM_NODE 11
-#define TBL_ID_ETHERTYPE_FILTER_SM_INST 7
-
-#define TBL_ID_VTEP_IP_SM_NODE 11
-#define TBL_ID_VTEP_IP_SM_INST 8
-
-#define TBL_ID_NAT_SM_NODE 11
-#define TBL_ID_NAT_SM_INST 9
-
-#define TBL_ID_BHEAP_LRO_AGING_SM_NODE 11
-#define TBL_ID_BHEAP_LRO_AGING_SM_INST 10
-
-#define TBL_ID_MISC_LRO_AGING_SM_NODE 11
-#define TBL_ID_MISC_LRO_AGING_SM_INST 11
-
-#define TBL_ID_BHEAP_CQE_AGING_SM_NODE 11
-#define TBL_ID_BHEAP_CQE_AGING_SM_INST 12
-
-#define TBL_ID_MISC_CQE_AGING_SM_NODE 11
-#define TBL_ID_MISC_CQE_AGING_SM_INST 13
-
-#define TBL_ID_DFX_LOG_POINTER_SM_NODE 11
-#define TBL_ID_DFX_LOG_POINTER_SM_INST 14
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_NODE 11
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_INST 15
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_NODE 11
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_INST 16
-
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_NODE 11
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_INST 17
-
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_NODE 11
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_INST 41
-
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_NODE 11
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_INST 42
-
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_NODE 11
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_INST 43
-
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_NODE 11
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_INST 44
-
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_NODE 11
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_INST 45
-
-#define TBL_ID_CTR_SYS_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_SYS_GLB_S32_SM_INST 46
-
-#define TBL_ID_CTR_VSW_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_VSW_GLB_S32_SM_INST 47
-
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_INST 48
-
-#define TBL_ID_CTR_COMM_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_COMM_GLB_S32_SM_INST 49
-
-#define TBL_ID_CTR_XOE_GLB_S32_SM_NODE 11
-#define TBL_ID_CTR_XOE_GLB_S32_SM_INST 50
-
-#define TBL_ID_CTR_OVS_GLB_S64_SM_NODE 11
-#define TBL_ID_CTR_OVS_GLB_S64_SM_INST 51
-
-#define TBL_ID_RWLOCK_ROCE_SM_NODE 11
-#define TBL_ID_RWLOCK_ROCE_SM_INST 30
-
-#define TBL_ID_CQE_ADDR_SM_NODE 11
-#define TBL_ID_CQE_ADDR_SM_INST 31
-
-#else
-
-#define TBL_ID_GLOBAL_SM_NODE 10
-#define TBL_ID_GLOBAL_SM_INST 1
-
-#define TBL_ID_PORT_CFG_SM_NODE 10
-#define TBL_ID_PORT_CFG_SM_INST 2
-
-#define TBL_ID_VLAN_SM_NODE 10
-#define TBL_ID_VLAN_SM_INST 3
-
-#define TBL_ID_MULTICAST_SM_NODE 10
-#define TBL_ID_MULTICAST_SM_INST 4
-
-#define TBL_ID_MISC_RSS_HASH0_SM_NODE 10
-#define TBL_ID_MISC_RSS_HASH0_SM_INST 5
-
-#define TBL_ID_FIC_VOQ_MAP_SM_NODE 10
-#define TBL_ID_FIC_VOQ_MAP_SM_INST 6
-
-#define TBL_ID_CAR_SM_NODE 10
-#define TBL_ID_CAR_SM_INST 7
-
-#define TBL_ID_IPMAC_FILTER_SM_NODE 10
-#define TBL_ID_IPMAC_FILTER_SM_INST 8
-
-#define TBL_ID_GLOBAL_QUE_MAP_SM_NODE 10
-#define TBL_ID_GLOBAL_QUE_MAP_SM_INST 9
-
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_MIB_SM_INST 10
-
-#define TBL_ID_UCODE_EXEC_INFO_SM_NODE 10
-#define TBL_ID_UCODE_EXEC_INFO_SM_INST 11
-
-#define TBL_ID_RQ_IQ_MAPPING_SM_NODE 10
-#define TBL_ID_RQ_IQ_MAPPING_SM_INST 12
-
-#define TBL_ID_MAC_SM_NODE 10
-#define TBL_ID_MAC_SM_INST 13
-
-#define TBL_ID_MAC_BHEAP_SM_NODE 10
-#define TBL_ID_MAC_BHEAP_SM_INST 14
-
-#define TBL_ID_MAC_MISC_SM_NODE 10
-#define TBL_ID_MAC_MISC_SM_INST 15
-
-#define TBL_ID_FUNC_CFG_SM_NODE 10
-#define TBL_ID_FUNC_CFG_SM_INST 16
-
-#define TBL_ID_TRUNK_FWD_SM_NODE 10
-#define TBL_ID_TRUNK_FWD_SM_INST 17
-
-#define TBL_ID_VLAN_FILTER_SM_NODE 10
-#define TBL_ID_VLAN_FILTER_SM_INST 18
-
-#define TBL_ID_ELB_SM_NODE 10
-#define TBL_ID_ELB_SM_INST 19
-
-#define TBL_ID_MISC_RSS_HASH1_SM_NODE 10
-#define TBL_ID_MISC_RSS_HASH1_SM_INST 20
-
-#define TBL_ID_RSS_CONTEXT_SM_NODE 10
-#define TBL_ID_RSS_CONTEXT_SM_INST 21
-
-#define TBL_ID_ETHERTYPE_FILTER_SM_NODE 10
-#define TBL_ID_ETHERTYPE_FILTER_SM_INST 22
-
-#define TBL_ID_VTEP_IP_SM_NODE 10
-#define TBL_ID_VTEP_IP_SM_INST 23
-
-#define TBL_ID_NAT_SM_NODE 10
-#define TBL_ID_NAT_SM_INST 24
-
-#define TBL_ID_BHEAP_LRO_AGING_SM_NODE 10
-#define TBL_ID_BHEAP_LRO_AGING_SM_INST 25
-
-#define TBL_ID_MISC_LRO_AGING_SM_NODE 10
-#define TBL_ID_MISC_LRO_AGING_SM_INST 26
-
-#define TBL_ID_BHEAP_CQE_AGING_SM_NODE 10
-#define TBL_ID_BHEAP_CQE_AGING_SM_INST 27
-
-#define TBL_ID_MISC_CQE_AGING_SM_NODE 10
-#define TBL_ID_MISC_CQE_AGING_SM_INST 28
-
-#define TBL_ID_DFX_LOG_POINTER_SM_NODE 10
-#define TBL_ID_DFX_LOG_POINTER_SM_INST 29
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_S32_DROP_ERR_SM_INST 40
-
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_NODE 10
-#define TBL_ID_CTR_VSW_FUNC_S32_DFX_SM_INST 41
-
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_NODE 10
-#define TBL_ID_CTR_COMM_FUNC_S32_SM_INST 42
-
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_NODE 10
-#define TBL_ID_CTR_SRIOV_FUNC_PAIR_SM_INST 43
-
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_NODE 10
-#define TBL_ID_CTR_SRIOV_FUNC_S32_SM_INST 44
-
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_NODE 10
-#define TBL_ID_CTR_OVS_FUNC_S64_SM_INST 45
-
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_NODE 10
-#define TBL_ID_CTR_XOE_FUNC_PAIR_SM_INST 46
-
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_NODE 10
-#define TBL_ID_CTR_XOE_FUNC_S32_SM_INST 47
-
-#define TBL_ID_CTR_SYS_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_SYS_GLB_S32_SM_INST 48
-
-#define TBL_ID_CTR_VSW_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_VSW_GLB_S32_SM_INST 49
-
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_ROCE_GLB_S32_SM_INST 50
-
-#define TBL_ID_CTR_COMM_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_COMM_GLB_S32_SM_INST 51
-
-#define TBL_ID_CTR_XOE_GLB_S32_SM_NODE 10
-#define TBL_ID_CTR_XOE_GLB_S32_SM_INST 52
-
-#define TBL_ID_CTR_OVS_GLB_S64_SM_NODE 10
-#define TBL_ID_CTR_OVS_GLB_S64_SM_INST 53
-
-#define TBL_ID_RWLOCK_ROCE_SM_NODE 10
-#define TBL_ID_RWLOCK_ROCE_SM_INST 30
-
-#define TBL_ID_CQE_ADDR_SM_NODE 10
-#define TBL_ID_CQE_ADDR_SM_INST 31
-
-#endif
-
-#define TBL_ID_MISC_RSS_HASH_SM_NODE TBL_ID_MISC_RSS_HASH0_SM_NODE
-#define TBL_ID_MISC_RSS_HASH_SM_INST TBL_ID_MISC_RSS_HASH0_SM_INST
-
-/* rx cqe checksum err */
-#define NIC_RX_CSUM_IP_CSUM_ERR BIT(0)
-#define NIC_RX_CSUM_TCP_CSUM_ERR BIT(1)
-#define NIC_RX_CSUM_UDP_CSUM_ERR BIT(2)
-#define NIC_RX_CSUM_IGMP_CSUM_ERR BIT(3)
-#define NIC_RX_CSUM_ICMPV4_CSUM_ERR BIT(4)
-#define NIC_RX_CSUM_ICMPV6_CSUM_ERR BIT(5)
-#define NIC_RX_CSUM_SCTP_CRC_ERR BIT(6)
-#define NIC_RX_CSUM_HW_BYPASS_ERR BIT(7)
-
-typedef struct tag_log_ctrl {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 mod_name:4;
- u32 log_level:4;
- u32 rsvd:8;
- u32 line_num:16;
-#else
- u32 line_num:16;
- u32 rsvd:8;
- u32 log_level:4;
- u32 mod_name:4;
-#endif
-} log_ctrl;
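
The pattern just above, declaring every bit-field twice under __BYTE_ORDER__, keeps the field names on the same wire bits for big- and little-endian CPUs. Because C bit-field layout is implementation-defined, an equivalent explicit form uses shift/mask accessors; here is a sketch matching the little-endian log_ctrl layout (field positions taken from the declaration, helper names invented for the example):

#include <stdint.h>
#include <stdio.h>

/* Explicit shift/mask view of the little-endian log_ctrl layout above:
 * line_num[15:0], rsvd[23:16], log_level[27:24], mod_name[31:28].
 */
#define LOG_LINE_NUM(v)   ((v) & 0xFFFFu)
#define LOG_LEVEL(v)      (((v) >> 24) & 0xFu)
#define LOG_MOD_NAME(v)   (((v) >> 28) & 0xFu)

static uint32_t log_ctrl_pack(uint32_t mod, uint32_t level, uint32_t line)
{
	return ((mod & 0xFu) << 28) | ((level & 0xFu) << 24) | (line & 0xFFFFu);
}

int main(void)
{
	uint32_t v = log_ctrl_pack(3, 2, 1234);

	printf("mod=%u level=%u line=%u\n",
	       LOG_MOD_NAME(v), LOG_LEVEL(v), LOG_LINE_NUM(v));
	return 0;
}
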
-
-/**
- * 1. Bank GPA addresses are HOST-based; every host has 4 bank GPAs,
- *    total size 4*32B.
- * 2. Allocated space for storing:
- *    two global entries (index5 and index6) are allocated for
- *    storing bank GPAs. (Note: indexing starts at 0.)
- *    The top 32B of index5 store the bank GPAs of host 0;
- *    the remaining 32B store those of host 1.
- *    The top 32B of index6 store the bank GPAs of host 2;
- *    the remaining 32B store those of host 3.
- * The bank GPAs of each host follow the format below.
- */
-typedef struct tag_sml_global_bank_gpa {
- u32 bank0_gpa_h32;
- u32 bank0_gpa_l32;
-
- u32 bank1_gpa_h32;
- u32 bank1_gpa_l32;
-
- u32 bank2_gpa_h32;
- u32 bank2_gpa_l32;
-
- u32 bank3_gpa_h32;
- u32 bank3_gpa_l32;
-} global_bank_gpa_s;
-
-/**
- * Struct name: sml_global_table_s
- * @brief: global_table structure
- * Description: global configuration table
- */
-typedef struct tag_sml_global_table {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 port_mode:1; /*portmode:0-eth;1-fic */
- /* dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
- /* fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- /* unicast/multicastmode:0-drop;
- * 1-broadcastinvlandomain
- */
- u32 un_mc_mode:1;
- /* maclearnenable:1-enable */
- u32 mac_learn_en:1;
- u32 qcn_en:1;
- u32 esl_run_flag:1;
-			/* 1-special protocol pkt to up; 0-to x86 */
- u32 special_pro_to_up_flag:1;
- u32 vf_mask:4;
- u32 dif_ser_type:2;
- u32 rsvd0:1;
- u32 board_num:16; /*boardnumber */
-#else
- u32 board_num:16; /*boardnumber */
- u32 rsvd0:1;
- u32 dif_ser_type:2;
- u32 vf_mask:4;
-			/* 1-special protocol pkt to up; 0-to x86 */
- u32 special_pro_to_up_flag:1;
- u32 esl_run_flag:1;
- u32 qcn_en:1;
- u32 mac_learn_en:1; /*maclearnenable:1-enable */
- /*unicast/multicastmode:0-drop;1-broadcastinvlandomain*/
- u32 un_mc_mode:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- /*fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /*dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
- u32 port_mode:1; /*portmode:0-eth;1-fic */
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 bc_offset:16; /*broadcastoffset */
- u32 mc_offset:16; /*multicastoffset */
-#else
- u32 mc_offset:16; /*multicastoffset */
- u32 bc_offset:16; /*broadcastoffset */
-#endif
- } bs;
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 net_src_type:8; /* eth-FWD_PORT, fic-FWD_FIC */
- u32 xrc_pl_dec:1;
- u32 sq_cqn:20;
- u32 qpc_stg:1;
- u32 qpc_state_err:1;
- u32 qpc_wb_flag:1;
-#else
- u32 qpc_wb_flag:1;
- u32 qpc_state_err:1;
- u32 qpc_stg:1;
- u32 sq_cqn:20;
- u32 xrc_pl_dec:1;
- u32 net_src_type:8; /* eth-FWD_PORT, fic-FWD_FIC */
-#endif
- } bs;
-
- u32 value;
- } dw2;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 drop_cause_id:16;
- u32 pkt_len:16;
-#else
- u32 pkt_len:16;
- u32 drop_cause_id:16;
-#endif
- } bs;
-
- u32 value;
- } dw3;
-
- u8 fcoe_vf_table[12];
-
- union {
- struct {
- /* [31:30]Pipeline number mode. */
- u32 cfg_mode_pn:2;
- /* [29:28]initial default fq mode for traffic
- * from rx side
- */
- u32 cfg_mode_init_def_fq:2;
- /* [27:16]base fqid for initial default fqs
-			 * (for packets from rx side only).
- */
- u32 cfg_base_init_def_fq:12;
- /* [15:15]push doorbell as new packet to tile
- * via command path enable.
- */
- u32 cfg_psh_msg_en:1;
- /* [14:14]1,enable asc for scanning
- * active fq.0,disable.
- */
- u32 enable_asc:1;
- /* [13:13]1,enable pro for commands process.0,disable.*/
- u32 enable_pro:1;
- /* [12:12]1,ngsf mode.0,ethernet mode. */
- u32 cfg_ngsf_mod:1;
- /* [11:11]Stateful process enable. */
- u32 enable_stf:1;
- /* [10:9]initial default fq mode for
- * traffic from tx side.
- */
- u32 cfg_mode_init_def_fq_tx:2;
- /* [8:0]maximum allocable oeid configuration. */
- u32 cfg_max_oeid:9;
- } bs;
- u32 value;
- } fq_mode;
-
- u32 rsvd2[8];
-} sml_global_table_s;
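
Note the recurring shape of these tables: each dword is a union of named bit-fields (bs) and a raw u32 (value), so microcode can move whole dwords while C code reads named fields. A trimmed stand-in for dw1 of sml_global_table_s shows the dual view (little-endian half only, for brevity):

#include <stdint.h>
#include <stdio.h>

/* Dual view used throughout these tables: named fields for the driver,
 * a raw dword for bulk moves. Trimmed stand-in for dw1 above.
 */
union global_dw1 {
	struct {
		uint32_t mc_offset:16;  /* multicast offset (LE layout) */
		uint32_t bc_offset:16;  /* broadcast offset */
	} bs;
	uint32_t value;
};

int main(void)
{
	union global_dw1 dw1 = { .value = 0 };

	dw1.bs.mc_offset = 0x100;                  /* set by name... */
	dw1.bs.bc_offset = 0x200;
	printf("raw dword: 0x%08x\n", dw1.value);  /* ...move as one dword */
	return 0;
}
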
-
-/**
- * Struct name: sml_fic_config_table_s
- * @brief: fic_config_table structure
- * Description: FIC configuration table
- */
-typedef struct tag_sml_fic_config_table {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /*dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
- /*fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- u32 mac_learn_en:1; /*maclearnenable:1-enable */
- u32 rsvd:12;
- u32 board_num:16; /*boardnumber */
-#else
- u32 board_num:16; /*boardnumber */
- u32 rsvd:12;
- u32 mac_learn_en:1;
- /* ficworkmode:0-fabric;1-fullmesh.*/
- u32 fic_work_mode:1;
- /* fourrouteenable:0-disable;1-enable */
- u32 four_route_en:1;
- /* dualplaneenable:0-disable;1-enable */
- u32 dual_plane_en:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 bc_offset:16; /*broadcastoffset */
- u32 mc_offset:16; /*multicastoffset */
-#else
- u32 mc_offset:16; /*multicastoffset */
- u32 bc_offset:16; /*broadcastoffset */
-#endif
- } bs;
- u32 value;
- } dw1;
-
- u32 rsvd2[14];
-} sml_fic_config_table_s;
-
-/**
- * Struct name: sml_ucode_version_info_table_s
- * @brief: microcode version information structure
- * Description: global configuration table entry data structure of index 1
- */
-typedef struct tag_sml_ucode_version_info_table {
- u32 ucode_version[4];
- u32 ucode_compile_time[5];
- u32 rsvd[7];
-} sml_ucode_version_info_table_s;
-
-/**
- * Struct name: sml_funcfg_tbl_s
- * @brief: Function Configuration Table
- * Description: Function Configuration attribute table
- */
-typedef struct tag_sml_funcfg_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* function valid: 0-invalid; 1-valid */
- u32 valid:1;
- /* mac learn enable: 0-disable; 1-enable */
- u32 learn_en:1;
- /* lli enable: 0-disable; 1-enable */
- u32 lli_en:1;
- /* rss enable: 0-disable; 1-enable */
- u32 rss_en:1;
- /* rx vlan offload enable: 0-disable; 1-enable */
- u32 rxvlan_offload_en:1;
- /* tso local coalesce enable: 0-disable; 1-enable */
- u32 tso_local_coalesce:1;
- u32 rsvd1:1;
- u32 rsvd2:1;
- /* qos rx car enable: 0-disable; 1-enable */
- u32 qos_rx_car_en:1;
- /* mac filter enable: 0-disable; 1-enable */
- u32 mac_filter_en:1;
- /* ipmac filter enable: 0-disable; 1-enable */
- u32 ipmac_filter_en:1;
- /* ethtype filter enable: 0-disable; 1-enable */
- u32 ethtype_filter_en:1;
- /* mc bc limit enable: 0-disable; 1-enable */
- u32 mc_bc_limit_en:1;
- /* acl tx enable: 0-disable; 1-enable */
- u32 acl_tx_en:1;
- /* acl rx enable: 0-disable; 1-enable */
- u32 acl_rx_en:1;
- /* ovs function enable: 0-disable; 1-enable */
- u32 ovs_func_en:1;
- /* ucode capture enable: 0-disable; 1-enable */
- u32 ucapture_en:1;
- /* fic car enable: 0-disable; 1-enable */
- u32 fic_car_en:1;
- u32 tso_en:1;
- u32 nic_rx_mode:5; /* nic_rx_mode:
- * 0b00001: unicast mode
- * 0b00010: multicast mode
- * 0b00100: broadcast mode
- * 0b01000: all multicast mode
-					 * 0b10000: promisc mode
- */
- u32 rsvd4:3;
- u32 def_pri:3; /* default priority */
- /* host id: [0~3]. support up to 4 Host. */
- u32 host_id:2;
-#else
- u32 host_id:2;
- u32 def_pri:3;
- u32 rsvd4:3;
- u32 nic_rx_mode:5;
- u32 tso_en:1;
- u32 fic_car_en:1;
- /* ucode capture enable: 0-disable; 1-enable */
- u32 ucapture_en:1;
- u32 ovs_func_en:1;
- u32 acl_rx_en:1;
- u32 acl_tx_en:1;
- u32 mc_bc_limit_en:1;
- u32 ethtype_filter_en:1;
- u32 ipmac_filter_en:1;
- u32 mac_filter_en:1;
- u32 qos_rx_car_en:1;
- u32 rsvd2:1;
- u32 rsvd1:1;
- u32 tso_local_coalesce:1;
- u32 rxvlan_offload_en:1;
- u32 rss_en:1;
- u32 lli_en:1;
- u32 learn_en:1;
- u32 valid:1;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 mtu:16; /* mtu value: [64-15500] */
- u32 rsvd1:1;
- /* vlan mode: 0-all; 1-access; 2-trunk;
-			 * 3-hybrid (unsupported); 4-qinq port;
- */
- u32 vlan_mode:3;
- u32 vlan_id:12; /* vlan id: [0~4095] */
-#else
- u32 vlan_id:12;
- u32 vlan_mode:3;
- u32 rsvd1:1;
- u32 mtu:16;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 lli_mode:1; /* lli mode */
- /* er forward trunk type: 0-ethernet type, 1-fic type */
- u32 er_fwd_trunk_type:1;
- /* er forward trunk mode:
- * 0-standby; 1-smac; 2-dmac; 3-smacdmac; 4-sip; 5-dip;
- * 6-sipdip; 7-5tuples; 8-lacp
- */
- u32 er_fwd_trunk_mode:4;
-			/* edge relay mode: 0-VEB; 1-VEPA (unsupported);
-			 * 2-Multi-Channel (unsupported)
- */
- u32 er_mode:2;
- /* edge relay id: [0~15]. support up to 16 er. */
- u32 er_id:4;
- /* er forward type: 2-port; 3-fic;
-			 * 4-trunk; others-unsupported
- */
- u32 er_fwd_type:4;
- /* er forward id:
- * fwd_type=2: forward ethernet port id
- * fwd_type=3: forward fic id(tb+tp)
- * fwd_type=4: forward trunk id
- */
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- u32 er_fwd_trunk_type:1;
- u32 lli_mode:1;
-#endif
- } bs;
-
- u32 value;
- } dw2;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 pfc_en:1;
- u32 rsvd1:7;
- u32 ovs_invld_tcp_action:1;
- u32 ovs_ip_frag_action:1;
- u32 rsvd2:2;
- u32 roce_en:1;
- u32 iwarp_en:1;
- u32 fcoe_en:1;
- u32 toe_en:1;
- u32 rsvd3:8;
- u32 ethtype_group_id:8;
-#else
- u32 ethtype_group_id:8;
- u32 rsvd3:8;
- u32 toe_en:1;
- u32 fcoe_en:1;
- u32 iwarp_en:1;
- u32 roce_en:1;
- u32 rsvd2:2;
- u32 ovs_ip_frag_action:1;
- u32 ovs_invld_tcp_action:1;
- u32 rsvd1:7;
- u32 pfc_en:1;
-#endif
- } bs;
-
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:8;
- u32 vni:24;
-#else
- u32 vni:24;
- u32 rsvd1:8;
-#endif
- } bs;
-
- u32 value;
- } dw4;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1;
-#else
- u32 rsvd1;
-#endif
- } bs;
-
- u32 value;
- } dw5;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:8;
- u32 rq_thd:13;
- u32 host_car_id:11; /* host car id */
-#else
- u32 host_car_id:11;
- u32 rq_thd:13;
- u32 rsvd1:8;
-#endif
- } bs;
-
- u32 value;
- } dw6;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:5;
- u32 fic_uc_car_id:11; /* fic unicast car id */
- u32 rsvd2:5;
- u32 fic_mc_car_id:11; /* fic multicast car id */
-#else
- u32 fic_mc_car_id:11;
- u32 rsvd2:5;
- u32 fic_uc_car_id:11;
- u32 rsvd1:5;
-#endif
- } fic_bs;
-
- u32 value;
- } dw7;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* safe group identifier valid: 0-invalid; 1-valid */
- u32 sg_id_valid:1;
- u32 sg_id:10; /* safe group identifier */
- u32 rsvd9:1;
- /* rq priority enable: 0-disable; 1-enable */
- u32 rq_pri_en:1;
- /* rq priority num: 0-1pri; 1-2pri; 2-4pri; 3-8pri */
- u32 rq_pri_num:3;
- /* one wqe buffer size, default is 2K bytes */
- u32 rx_wqe_buffer_size:16;
-#else
- u32 rx_wqe_buffer_size:16;
- u32 rq_pri_num:3;
- u32 rq_pri_en:1;
- u32 rsvd9:1;
- u32 sg_id:10;
- u32 sg_id_valid:1;
-#endif
- } bs;
-
- u32 value;
- } dw8;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* IPv4 LRO enable: 0-disable; 1-enable; */
- u32 lro_ipv4_en:1;
- /* IPv6 LRO enable: 0-disable; 1-enable; */
- u32 lro_ipv6_en:1;
- /* LRO pkt max wqe buffer number */
- u32 lro_max_wqe_num:6;
- /* Each group occupies 3bits,
- * 8 group share allocation 24bits,
- * group 0 corresponds to the low 3bits
- */
- u32 vlan_pri_map_group:24;
-#else
- u32 vlan_pri_map_group:24;
- u32 lro_max_wqe_num:6;
- u32 lro_ipv6_en:1;
- u32 lro_ipv4_en:1;
-#endif
- } bs;
-
- u32 value;
- } dw9;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rss_group_id:4;
- u32 lli_frame_size:12;
- u32 smac_h16:16;
-#else
- u32 smac_h16:16;
- u32 lli_frame_size:12;
- u32 rss_group_id:4;
-#endif
- } bs;
-
- u32 value;
- } dw10;
-
- u32 smac_l32;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 oqid:16;
- u32 vf_map_pf_id:4;
- /*lro change; 0:changing 1:change done */
- u32 lro_change_flag:1;
- u32 rsvd11:1;
- u32 base_qid:10;
-#else
- u32 base_qid:10;
- u32 rsvd11:1;
- u32 lro_change_flag:1;
- u32 vf_map_pf_id:4;
- u32 oqid:16;
-#endif
- } bs;
-
- u32 value;
- } dw12;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:2;
- u32 cfg_rq_depth:6;
- u32 cfg_q_num:6;
- u32 fc_port_id:4;
- u32 rsvd2:14;
-#else
- u32 rsvd2:14;
- u32 fc_port_id:4;
- u32 cfg_q_num:6;
- u32 cfg_rq_depth:6;
- u32 rsvd1:2;
-#endif
- } bs;
-
- u32 value;
- } dw13;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1;
-#else
- u32 rsvd1;
-#endif
- } bs;
-
- u32 value;
-
- } dw14;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd3:2;
- u32 bond3_hash_policy:3;
- u32 bond3_mode:3;
- u32 rsvd2:2;
- u32 bond2_hash_policy:3;
- u32 bond2_mode:3;
- u32 rsvd1:2;
- u32 bond1_hash_policy:3;
- u32 bond1_mode:3;
- u32 rsvd0:2;
- u32 bond0_hash_policy:3;
- u32 bond0_mode:3;
-#else
- u32 bond0_mode:3;
- u32 bond0_hash_policy:3;
- u32 rsvd0:2;
- u32 bond1_mode:3;
- u32 bond1_hash_policy:3;
- u32 rsvd1:2;
- u32 bond2_mode:3;
- u32 bond2_hash_policy:3;
- u32 rsvd2:2;
- u32 bond3_mode:3;
- u32 bond3_hash_policy:3;
- u32 rsvd3:2;
-#endif
- } bs;
-
- u32 value;
-
- } dw15;
-} sml_funcfg_tbl_s;
-
-/**
- * Struct name: sml_portcfg_tbl_s
- * @brief: Port Configuration Table
- * Description: Port Configuration attribute table
- */
-typedef struct tag_sml_portcfg_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1; /* valid:0-invalid; 1-valid */
- /* mac learn enable: 0-disable; 1-enable */
- u32 learn_en:1;
- u32 trunk_en:1; /* trunk enable: 0-disable; 1-enable */
- /* broadcast suppression enable: 0-disable; 1-enable */
- u32 bc_sups_en:1;
- /* unknown multicast suppression enable:
- * 0-disable; 1-enable
- */
- u32 un_mc_sups_en:1;
- /* unknown unicast suppression enable:
- * 0-disable; 1-enable
- */
- u32 un_uc_sups_en:1;
- u32 ovs_mirror_tx_en:1;
- /* ovs port enable: 0-disable; 1-enable */
- u32 ovs_port_en:1;
- u32 ovs_mirror_rx_en:1;
- u32 qcn_en:1; /* qcn enable: 0-disable; 1-enable */
- /* ucode capture enable: 0-disable; 1-enable */
- u32 ucapture_en:1;
- u32 ovs_invld_tcp_action:1;
- u32 ovs_ip_frag_action:1;
- u32 def_pri:3; /* default priority */
- u32 rsvd3:2;
-			/* edge relay mode: 0-VEB; 1-VEPA (unsupported);
-			 * 2-Multi-Channel (unsupported)
- */
- u32 er_mode:2;
- /* edge relay identifier: [0~15]. support up to 16 er */
- u32 er_id:4;
- u32 trunk_id:8; /* trunk identifier: [0~255] */
-#else
- u32 trunk_id:8;
- u32 er_id:4;
- u32 er_mode:2;
- u32 rsvd3:2;
- u32 def_pri:3;
- u32 ovs_ip_frag_action:1;
- u32 ovs_invld_tcp_action:1;
- u32 ucapture_en:1;
- u32 qcn_en:1;
- u32 ovs_mirror_rx_en:1;
- u32 ovs_port_en:1;
- u32 ovs_mirror_tx_en:1;
- u32 un_uc_sups_en:1;
- u32 un_mc_sups_en:1;
- u32 bc_sups_en:1;
- u32 trunk_en:1;
- u32 learn_en:1;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd2:2;
- u32 mtu:14;
- u32 rsvd3:1;
- u32 vlan_mode:3;
- u32 vlan_id:12;
-#else
- u32 vlan_id:12;
- u32 vlan_mode:3;
- u32 rsvd3:1;
- u32 mtu:14;
- u32 rsvd2:2;
-#endif
- } bs;
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* q7_cos : ... : q0_cos = 4bits : ... : 4bits */
- u32 ovs_queue_cos;
-#else
- u32 ovs_queue_cos;
-#endif
- } bs;
- u32 value;
- } dw2;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:10;
- u32 un_mc_car_id:11;
- u32 un_uc_car_id:11;
-#else
- u32 un_uc_car_id:11;
- u32 un_mc_car_id:11;
- u32 rsvd1:10;
-#endif
- } bs;
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd6:5;
- u32 bc_car_id:11;
- u32 pf_promiscuous_bitmap:16;
-#else
- u32 pf_promiscuous_bitmap:16;
- u32 bc_car_id:11;
- u32 rsvd6:5;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- union {
- struct {
- u32 fc_map;
-
- } fcoe_bs;
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 start_queue:8;
- u32 queue_size:8;
- u32 mirror_func_id:16;
-#else
- u32 mirror_func_id:16;
- u32 queue_size:8;
- u32 start_queue:8;
-#endif
- } ovs_mirror_bs;
- u32 value;
- } dw5;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u16 vlan;
- u16 dmac_h16;
-#else
- u16 dmac_h16;
- u16 vlan;
-#endif
- } fcoe_bs;
- u32 value;
- } dw6;
-
- union {
- struct {
- u32 dmac_l32;
-
- } fcoe_bs;
- u32 value;
- } dw7;
-
-} sml_portcfg_tbl_s;
-
-/**
- * Struct name: sml_taggedlist_tbl_s
- * @brief: Tagged List Table
- * Description: VLAN filtering Trunk/Hybrid type tagged list table
- */
-typedef struct tag_sml_taggedlist_tbl {
- u32 bitmap[TBL_ID_TAGGEDLIST_BITMAP32_NUM];
-} sml_taggedlist_tbl_s;
-
-/**
- * Struct name: sml_untaggedlist_tbl_s
- * @brief: Untagged List Table
- * Description: VLAN filtering Hybrid type Untagged list table
- */
-typedef struct tag_sml_untaggedlist_tbl {
- u32 bitmap[TBL_ID_UNTAGGEDLIST_BITMAP32_NUM];
-} sml_untaggedlist_tbl_s;
-
-/**
- * Struct name: sml_trunkfwd_tbl_s
- * @brief: Trunk Forward Table
- * Description: port aggregation Eth-Trunk forwarding table
- */
-typedef struct tag_sml_trunkfwd_tbl {
- u16 fwd_id[TBL_ID_TRUNKFWD_ENTRY_ELEM_NUM]; /* dw0-dw15 */
-} sml_trunkfwd_tbl_s;
-
-/**
- * Struct name: sml_mac_tbl_head_u
- * @brief: Mac table request/response head
- * Description: MAC table, Hash API header
- */
-typedef union tag_sml_mac_tbl_head {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 src:5;
- u32 instance_id:6;
- u32 opid:5;
- u32 A:1;
- u32 S:1;
- u32 rsvd:14;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 rsvd:14;
- u32 S:1;
- u32 A:1;
- u32 opid:5;
- u32 instance_id:6;
- u32 src:5;
-#endif
- } req_bs;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 code:2;
- u32 subcode:2;
- u32 node_index:28;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 node_index:28;
- u32 subcode:2;
- u32 code:2;
-#endif
- } rsp_bs;
-
- u32 value;
-} sml_mac_tbl_head_u;
-
-/**
- * Struct name: sml_mac_tbl_8_4_key_u
- * @brief: Mac Table Key
- * Description: MAC table key
- */
-typedef union tag_sml_mac_tbl_8_4_key {
- struct {
- u32 val0;
- u32 val1;
- } value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 er_id:4;
- u32 vlan_id:12;
- u32 mac_h16:16;
-
- u32 mac_m16:16;
- u32 mac_l16:16;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 mac_h16:16;
- u32 vlan_id:12;
- u32 er_id:4;
-
- u32 mac_l16:16;
- u32 mac_m16:16;
-#endif
- } bs;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 er_id:4;
- u32 vlan_id:12;
- u32 mac0:8;
- u32 mac1:8;
-
- u32 mac2:8;
- u32 mac3:8;
- u32 mac4:8;
- u32 mac5:8;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 mac1:8;
- u32 mac0:8;
- u32 vlan_id:12;
- u32 er_id:4;
-
- u32 mac5:8;
- u32 mac4:8;
- u32 mac3:8;
- u32 mac2:8;
-#endif
- } mac_bs;
-} sml_mac_tbl_8_4_key_u;
-
-/**
- * Struct name: sml_mac_tbl_8_4_item_u
- * @brief: Mac Table Item
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_sml_mac_tbl_8_4_item {
- u32 value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd:10;
- u32 host_id:2;
- u32 fwd_type:4;
- u32 fwd_id:16;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 fwd_id:16;
- u32 fwd_type:4;
- u32 host_id:2;
- u32 rsvd:10;
-#endif
- } bs;
-} sml_mac_tbl_8_4_item_u;
-
-/**
- * Struct name: sml_mac_tbl_key_item_s
- * @brief: Mac Table( 8 + 4 )
- * Description: MAC table Key + Item
- */
-typedef struct tag_sml_mac_tbl_8_4 {
- sml_mac_tbl_head_u head;
- sml_mac_tbl_8_4_key_u key;
- sml_mac_tbl_8_4_item_u item;
-} sml_mac_tbl_8_4_s;
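
The 8-byte key above packs <er_id, vlan_id, mac> into two dwords, with the 48-bit MAC split across mac_h16/mac_m16/mac_l16. A stand-alone sketch of building such a key from a byte-array MAC, following the big-endian bs view (the helper name is invented for the example):

#include <stdint.h>
#include <stdio.h>

/* Pack <er_id, vlan_id, mac[6]> into the two key dwords, matching the
 * big-endian bs view of sml_mac_tbl_8_4_key_u: er_id[31:28], vlan[27:16],
 * mac_h16[15:0] in val0; mac_m16/mac_l16 in val1.
 */
static void mac_key_pack(uint32_t er_id, uint32_t vlan_id,
			 const uint8_t mac[6], uint32_t *val0, uint32_t *val1)
{
	*val0 = ((er_id & 0xFu) << 28) | ((vlan_id & 0xFFFu) << 16) |
		((uint32_t)mac[0] << 8) | mac[1];
	*val1 = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
		((uint32_t)mac[4] << 8) | mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x18, 0x82, 0xaa, 0xbb, 0xcc };
	uint32_t v0, v1;

	mac_key_pack(2, 100, mac, &v0, &v1);
	printf("key = 0x%08x 0x%08x\n", v0, v1);
	return 0;
}
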
-
-/**
- * Struct name: sml_vtep_tbl_8_20_key_s
- * @brief: Vtep Table Key
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_tbl_8_20_key {
- u32 vtep_remote_ip;
- u32 rsvd;
-} sml_vtep_tbl_8_20_key_s;
-
-/**
- * Struct name: dmac_smac_u
- * @brief: Dmac & Smac for VxLAN encapsulation
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_dmac_smac {
- u16 mac_addr[6];
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u16 d_mac0:8;
- u16 d_mac1:8;
- u16 d_mac2:8;
- u16 d_mac3:8;
-
- u16 d_mac4:8;
- u16 d_mac5:8;
- u16 s_mac0:8;
- u16 s_mac1:8;
-
- u16 s_mac2:8;
- u16 s_mac3:8;
- u16 s_mac4:8;
- u16 s_mac5:8;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u16 d_mac1:8;
- u16 d_mac0:8;
- u16 d_mac3:8;
- u16 d_mac2:8;
-
- u16 d_mac5:8;
- u16 d_mac4:8;
- u16 s_mac1:8;
- u16 s_mac0:8;
-
- u16 s_mac3:8;
- u16 s_mac2:8;
- u16 s_mac5:8;
- u16 s_mac4:8;
-#endif
- } bs;
-} dmac_smac_u;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_item_s
- * @brief: Vtep Table Item
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_tbl_8_20_item {
- dmac_smac_u dmac_smac;
- u32 source_ip;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 er_id:4;
- u32 rsvd:12;
- u32 vlan:16; /* The PRI*/
-#else
- u32 vlan:16; /* The PRI*/
- u32 rsvd:12;
- u32 er_id:4;
-#endif
- } bs;
-
- u32 value;
- } misc;
-} sml_vtep_tbl_8_20_item_s;
-
-/**
- * Struct name: sml_vtep_tbl_8_20_s
- * @brief: Vtep Table( 8 + 20)
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_tbl_8_20 {
- sml_mac_tbl_head_u head; /*first 4 bytes , the same as mac tbl */
- sml_vtep_tbl_8_20_key_s key;
- sml_vtep_tbl_8_20_item_s item;
-} sml_vtep_tbl_8_20_s;
-
-/**
- * Struct name: sml_vxlan_udp_portcfg_4_8_key_s
- * @brief: Vxlan Udp Portcfg Table Key
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vxlan_udp_portcfg_4_8_key {
- u32 udp_dest_port;
- u32 rsvd;
-} sml_vxlan_udp_portcfg_4_8_key_s;
-
-/**
- * Struct name: sml_vxlan_udp_portcfg_4_8_item_s
- * @brief: Vxlan Udp Portcfg Table Item
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vxlan_udp_portcfg_4_8_item {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 odp_port:12;
- u32 dp_id:2;
- u32 resvd:20;
-#else
- u32 resvd:20;
- u32 dp_id:2;
- u32 odp_port:12;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-} sml_vxlan_udp_portcfg_4_8_item_s;
-
-/**
- * Struct name: sml_vxlan_udp_portcfg_4_8_s
- * @brief: Vxlan Dest Udp Port Table( 8 + 20)
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vxlan_udp_portcfg_4_8 {
- sml_mac_tbl_head_u head; /*first 4 bytes , the same as mac tbl */
- sml_vxlan_udp_portcfg_4_8_key_s key;
- sml_vxlan_udp_portcfg_4_8_item_s item;
-} sml_vxlan_udp_portcfg_4_8_s;
-
-/**
- * Struct name: sml_vtep_er_info_s
- * @brief: Vtep Er Info Table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_vtep_er_info {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 lli_mode:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * type (FIC/Port)
- */
- u32 er_fwd_trunk_type:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * port aggregation mode (Standby/LoadBalance/LACP)
- */
- u32 er_fwd_trunk_mode:4;
- u32 er_mode:2; /* ER mode (VEB/VEPA)*/
- /* er_id as LT index but also used as entries,
- * facilitating service
- */
- u32 er_id:4;
- /* Type of the ER bound to the outbound port
- * (Port/FIC/Eth-Trunk)
- */
- u32 er_fwd_type:4;
- /* ER bound egress ID(PortID/FICID/TrunkID)*/
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- u32 er_fwd_trunk_type:1;
- u32 lli_mode:1;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-} sml_vtep_er_info_s;
-
-/**
- * Struct name: sml_logic_port_cfg_tbl_s
- * @brief: Logic Port Cfg Table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sm_logic_port_cfg {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* Input switch port (or DP_MAX_PORTS). */
- u32 odp_port:12;
- u32 dp_id:2; /* datapath id */
- u32 er_id:4;
- /* logic port MAC Learning enable or disable */
- u32 learn_en:1;
- u32 resvd:13;
-#else
- u32 resvd:13;
- /* logic port MAC Learning enable or disable */
- u32 learn_en:1;
- u32 er_id:4;
- u32 dp_id:2; /* datapath id */
- /* Input switch port (or DP_MAX_PORTS). */
- u32 odp_port:12;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd4:1;
- u32 er_fwd_trunk_type:1;
- u32 er_fwd_trunk_mode:4;
- u32 er_mode:2;
- u32 er_id:4;
- u32 er_fwd_type:4;
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- u32 er_fwd_trunk_type:1;
- u32 rsvd4:1;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-} sml_logic_port_cfg_tbl_s;
-
-/* vport stats counter */
-typedef struct tag_vport_stats_ctr {
- u16 rx_packets; /* total packets received */
- u16 tx_packets; /* total packets transmitted */
- u16 rx_bytes; /* total bytes received */
- u16 tx_bytes; /* total bytes transmitted */
- u16 rx_errors; /* bad packets received */
- u16 tx_errors; /* packet transmit problems */
- u16 rx_dropped; /* no space in linux buffers */
- u16 tx_dropped; /* no space available in linux */
-} vport_stats_ctr_s;
-
-/**
- * Struct name: vport_s
- * @brief: Datapath Cfg Table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_vport {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* dw0 */
- u32 valid:1;
- u32 learn_en:1;
- u32 type:4;
- u32 dp_id:2;
- /* The type of Vport mapping port, 0:VF, 1:Logic Port */
- u32 mapping_type:4;
- u32 mapping_port:12; /* odp_port mapping on VF or ER Logic Port */
- u32 rsvd:8;
-
- /* dw1 */
- u32 srctagl:12; /* the function used by parent context */
- /* parent context XID used to upcall missed packet to ovs-vswitchd */
- u32 xid:20;
-
- /* dw2 */
- u32 odp_port:12; /* on datapath port id */
- /* parent context CID used to upcall missed packet to ovs-vswitchd */
- u32 cid:20;
-#else
- /* dw0 */
- u32 rsvd:8;
- u32 mapping_port:12; /* odp_port mapping on VF or ER Logic Port */
- /* The type of Vport mapping port, 0:VF, 1:Logic Port */
- u32 mapping_type:4;
- u32 dp_id:2;
- u32 type:4;
- u32 learn_en:1;
- u32 valid:1;
-
- /* dw1 */
- /* parent context XID used to upcall missed packet to ovs-vswitchd */
- u32 xid:20;
- u32 srctagl:12; /* the function used by parent context */
-
- /* dw2 */
- /* parent context CID used to upcall missed packet to ovs-vswitchd */
- u32 cid:20;
- u32 odp_port:12; /* on datapath port id */
-#endif
-
- /* dw3 is er information and it is valid only
- * when mapping_type=1(logic port)
- */
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 lli_mode:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * type (FIC/Port)
- */
- u32 er_fwd_trunk_type:1;
- /* ER bound to the outbound port is Eth-Trunk,
- * port aggregation mode (Standby/LoadBalance/LACP)
- */
- u32 er_fwd_trunk_mode:4;
- u32 er_mode:2; /* ER mode (VEB/VEPA)*/
- u32 er_id:4; /* ERID */
- /* Type of the ER bound to the outbound port
- * (Port/FIC/Eth-Trunk)
- */
- u32 er_fwd_type:4;
- /*ER bound egress ID(PortID/FICID/TrunkID)*/
- u32 er_fwd_id:16;
-#else
- u32 er_fwd_id:16;
- u32 er_fwd_type:4;
- u32 er_id:4;
- u32 er_mode:2;
- u32 er_fwd_trunk_mode:4;
- /* ER bound to the outbound port is Eth-Trunk,
- * type (FIC/Port)
- */
- u32 er_fwd_trunk_type:1;
- u32 lli_mode:1;
-#endif
- } bs;
- u32 value;
- } dw3;
-
- /* dw4~dw7 */
- vport_stats_ctr_s stats; /* vport stats counters */
-
-} vport_s;
-
-/**
- * Struct name: sml_elb_tbl_elem_u
- * @brief: ELB Table Elem
- * Description: ELB leaf table members
- */
-typedef union tag_sml_elb_tbl_elem {
- struct {
- u32 fwd_val;
- u32 next_val;
- } value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd0:12;
- u32 fwd_type:4;
- u32 fwd_id:16;
-
- u32 rsvd1:17;
- u32 elb_index_next:15;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 fwd_id:16;
- u32 fwd_type:4;
- u32 rsvd0:12;
-
- u32 elb_index_next:15;
- u32 rsvd1:17;
-#endif
- } bs;
-} sml_elb_tbl_elem_u;
-
-/**
- * Struct name: sml_elb_tbl_s
- * @brief ELB Table
- * Description: ELB leaf table Entry
- */
-typedef struct tag_sml_elb_tbl {
- sml_elb_tbl_elem_u elem[TBL_ID_ELB_ENTRY_ELEM_NUM];
-} sml_elb_tbl_s;
-
-/**
- * Struct name: sml_vlan_tbl_elem_u
- * @brief: VLAN Table Elem
- * Description: VLAN broadcast table members
- */
-typedef union tag_sml_vlan_tbl_elem {
- u16 value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u16 learn_en:1;
- u16 elb_index:15;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u16 elb_index:15;
- u16 learn_en:1;
-#endif
- } bs;
-} sml_vlan_tbl_elem_u;
-
-/**
- * Struct name: sml_vlan_tbl_s
- * @brief: VLAN Table
- * Entry Description: VLAN broadcast table
- */
-typedef struct tag_sml_vlan_tbl {
- sml_vlan_tbl_elem_u elem[TBL_ID_VLAN_ENTRY_ELEM_NUM];
-} sml_vlan_tbl_s;
-
-/**
- * Struct name: sml_multicast_tbl_array_u
- * @brief: Multicast Table Elem
- * Description: multicast table members
- */
-typedef union tag_sml_multicast_tbl_elem {
- struct {
- u32 route_val;
- u32 next_val;
- } value;
-
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd0:12;
- u32 route_fwd_type:4;
- u32 route_fwd_id:16;
-
- u32 rsvd1:17;
- u32 elb_index:15;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 route_fwd_id:16;
- u32 route_fwd_type:4;
- u32 rsvd0:12;
-
- u32 elb_index:15;
- u32 rsvd1:17;
-#endif
- } bs;
-} sml_multicast_tbl_elem_u;
-
-/* Struct name: sml_multicast_tbl_s
- * @brief: Multicast Table
- * Entry Description: multicast table
- */
-typedef struct tag_sml_multicast_tbl {
- sml_multicast_tbl_elem_u elem[TBL_ID_MULTICAST_ENTRY_ELEM_NUM];
-} sml_multicast_tbl_s;
-
-/* Struct name: sml_observe_port_s
- * @brief: Observe Port Table
- * Description: observing port entries defined
- */
-typedef struct tag_sml_observe_port {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1;
- u32 rsvd0:11;
- u32 dst_type:4;
- u32 dst_id:16;
-#else
- u32 dst_id:16;
- u32 dst_type:4;
- u32 rsvd0:11;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:4;
- u32 vlan_id:12;
- u32 rsvd2:2;
- u32 cut_len:14;
-#else
- u32 cut_len:14;
- u32 rsvd2:2;
- u32 vlan_id:12;
- u32 rsvd1:4;
-#endif
- } bs;
- u32 value;
- } dw1;
-
- u32 rsvd_pad[2];
-} sml_observe_port_s;
-
-/* Struct name: sml_ipmac_tbl_16_12_key_s
- * @brief ipmac filter table key
- * Description: ipmac filter key define
- */
-typedef struct tag_sml_ipmac_tbl_16_12_key {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 func_id:16;
- u32 mac_h16:16;
-#else
- u32 mac_h16:16;
- u32 func_id:16;
-#endif
-
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 mac_m16:16;
- u32 mac_l16:16;
-#else
- u32 mac_l16:16;
- u32 mac_m16:16;
-#endif
-
- u32 ip;
- u32 rsvd;
-} sml_ipmac_tbl_16_12_key_s;
-
-/* Struct name: sml_ipmac_tbl_16_12_item_s
- * @brief ipmac filter table item
- * Description: ipmac filter item define
- */
-typedef struct tag_sml_ipmac_tbl_16_12_item {
- u32 rsvd[3];
-} sml_ipmac_tbl_16_12_item_s;
-
-/* Struct name: sml_ethtype_tbl_8_4_key_s
- * @brief: ethtype filter table key
- * Description: ethtype filter key define
- */
-typedef struct tag_sml_ethtype_tbl_8_4_key {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 group_id:16;
- u32 ethtype:16;
-#else
- u32 ethtype:16;
- u32 group_id:16;
-#endif
-
- u32 rsvd;
-} sml_ethtype_tbl_8_4_key_s;
-
-/* Struct name: sml_ethtype_tbl_8_4_item_s
- * @brief ethtype filter table item
- * Description: ethtype filter item define
- */
-typedef struct tag_sml_ethtype_tbl_8_4_item {
- u32 rsvd;
-} sml_ethtype_tbl_8_4_item_s;
-
-/* ACL to dfx record packets*/
-typedef enum {
- ACL_PKT_TX = 0,
- ACL_PKT_RX = 1,
-} sml_acl_pkt_dir_e;
-
-/* ACL policy table item*/
-typedef struct tag_sml_acl_policy_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 drop:1;
- u32 car_en:1;
- u32 car_id:12;
- u32 counter_type:2;
- u32 counter_id:16;
-#else
- u32 counter_id:16;
- u32 counter_type:2;
- u32 car_id:12;
- u32 car_en:1;
- u32 drop:1;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd1:7;
- u32 mirrior_en:1;
- u32 observer_port:10;
- u32 change_dscp:1;
- u32 new_dscp:6;
- u32 change_pkt_pri:1;
- u32 new_pkt_pri:3;
- u32 redirect_en:3;
-#else
- u32 redirect_en:3;
- u32 new_pkt_pri:3;
- u32 change_pkt_pri:1;
- u32 new_dscp:6;
- u32 change_dscp:1;
- u32 observer_port:10;
- u32 mirrior_en:1;
- u32 rsvd1:7;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-
- u32 redirect_data;
- u32 rsvd2;
-} sml_acl_policy_tbl_s;
-
-typedef struct tag_sml_acl_ipv4_key {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* The alignment, match_key_type and
- * later field is a KEY value
- */
- u32 padding:16;
- u32 tid0:2;
- u32 match_key_type:3; /* Matching type*/
- u32 rsvd:11; /* Reserved field*/
-#else
- u32 rsvd:11;
- u32 match_key_type:3;
- u32 tid0:2;
- u32 padding:16;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- /* dw1&dw2 */
- u32 sipv4;
- u32 dipv4;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_sport:16;
- u32 l4_dport:16;
-#else
- u32 l4_dport:16;
- u32 l4_sport:16;
-#endif
- } bs;
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_protocol:8;
- u32 rsvd0:8;
- u32 seg_id:10;
- u32 rsvd1:6;
-#else
- u32 rsvd1:6;
- u32 seg_id:10;
- u32 rsvd0:8;
- u32 l4_protocol:8;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid1:2;
- u32 rsvd:14;
- u32 padding:16;
-#else
- u32 padding:16;
- u32 rsvd:14;
- u32 tid1:2;
-#endif
- } bs;
- u32 value;
- } dw5;
-} sml_acl_ipv4_key_s;
-
-typedef struct tag_sml_acl_ipv6_key {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* The alignment, match_key_type and
- * later field is a KEY value
- */
- u32 padding:16;
- u32 tid0:2;
- u32 match_key_type:3; /* Matching type*/
- u32 rsvd:11; /* Reserved field*/
-#else
- u32 rsvd:11;
- u32 match_key_type:3;
- u32 tid0:2;
- u32 padding:16;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- /*dw1~dw4 */
- u32 sipv6[4];
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid1:2;
- u32 rsvd1:14;
- u32 tid2:2;
- u32 rsvd2:14;
-#else
- u32 rsvd2:14;
- u32 tid2:2;
- u32 rsvd1:14;
- u32 tid1:2;
-#endif
- } bs;
- u32 value;
- } dw5;
-
- /*dw6~dw9 */
- u32 dipv6[4];
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid3:2;
- u32 rsvd3:14;
- u32 tid4:2;
- u32 rsvd4:14;
-#else
- u32 rsvd4:14;
- u32 tid4:2;
- u32 rsvd3:14;
- u32 tid3:2;
-#endif
- } bs;
- u32 value;
- } dw10;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_sport:16;
- u32 l4_dport:16;
-#else
- u32 l4_dport:16;
- u32 l4_sport:16;
-#endif
- } bs;
- u32 value;
- } dw11;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 l4_protocol:8;
- u32 rsvd0:8;
- u32 seg_id:10;
- u32 rsvd1:6;
-#else
- u32 rsvd1:6;
- u32 seg_id:10;
- u32 rsvd0:8;
- u32 l4_protocol:8;
-#endif
- } bs;
- u32 value;
- } dw12;
-
- u32 dw13;
- u32 dw14;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid5:2;
- u32 rsvd5:14;
- u32 tid6:2;
- u32 rsvd6:14;
-#else
- u32 rsvd6:14;
- u32 tid6:2;
- u32 rsvd5:14;
- u32 tid5:2;
-#endif
- } bs;
- u32 value;
- } dw15;
-
- u32 dw16;
- u32 dw17;
- u32 dw18;
- u32 dw19;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 tid7:2;
- u32 rsvd7:30;
-#else
- u32 rsvd7:30;
- u32 tid7:2;
-#endif
- } bs;
- u32 value;
- } dw20;
-} sml_acl_ipv6_key_s;
-
-/**
- * Struct name: sml_voq_map_table_s
- * @brief: voq_map_table
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_voq_map_table {
- u16 voq_base[8];
-} sml_voq_map_table_s;
-
-/**
- * Struct name: sml_rss_context_u
- * @brief: rss_context
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_sml_rss_context {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 udp_ipv4:1;
- u32 udp_ipv6:1;
- u32 ipv4:1;
- u32 tcp_ipv4:1;
- u32 ipv6:1;
- u32 tcp_ipv6:1;
- u32 ipv6_ext:1;
- u32 tcp_ipv6_ext:1;
- u32 valid:1;
- u32 rsvd1:13;
- u32 def_qpn:10;
-#else
- u32 def_qpn:10;
- u32 rsvd1:13;
- u32 valid:1;
- u32 tcp_ipv6_ext:1;
- u32 ipv6_ext:1;
- u32 tcp_ipv6:1;
- u32 ipv6:1;
- u32 tcp_ipv4:1;
- u32 ipv4:1;
- u32 udp_ipv6:1;
- u32 udp_ipv4:1;
-#endif
- } bs;
-
- u32 value;
-} sml_rss_context_u;
-
-typedef struct tag_sml_rss_context_tbl {
- sml_rss_context_u element[TBL_ID_RSS_CONTEXT_NUM];
-} sml_rss_context_tbl_s;
-
-/**
- * Struct name: sml_rss_hash_u
- * @brief: rss_hash
- * Description: xxxxxxxxxxxxxxx
- */
-typedef union tag_sml_rss_hash {
- u8 rq_index[256];
-} sml_rss_hash_u;
-
-typedef struct tag_sml_rss_hash_tbl {
- sml_rss_hash_u element[TBL_ID_RSS_HASH_NUM];
-} sml_rss_hash_tbl_s;
-
-/**
- * Struct name: sml_lli_5tuple_key_s
- * @brief: lli_5tuple_key
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_lli_5tuple_key {
- union {
- struct {
-/** Define the struct bits */
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 src:5;
- /* The tile need fill the Dest */
- u32 rt:1;
- u32 key_size:2;
- /* determines which action that engine will take */
- u32 profile_id:3;
- /* indicates that requestor expect
- * to receive a response data
- */
- u32 op_id:5;
- u32 a:1;
- u32 rsvd:12;
- u32 vld:1;
- u32 xy:1;
- u32 at:1;
-#else
- u32 at:1;
- u32 xy:1;
- u32 vld:1;
- /* indicates that requestor expect to
- * receive a response data
- */
- u32 rsvd:12;
- /* determines which action that engine will take*/
- u32 a:1;
- u32 op_id:5;
- u32 profile_id:3;
- u32 key_size:2;
- u32 rt:1;
- u32 src:5;
-#endif
- } bs;
-
-/* Define an unsigned member */
- u32 value;
- } dw0;
- union {
- struct {
- u32 rsvd:1;
- /* The tile need fill the Dest */
- u32 address:15;
-
- u32 table_type:5;
- u32 ip_type:1;
- u32 func_id:10;
- } bs;
-
- u32 value;
- } misc;
-
- u32 src_ip[4];
- u32 dst_ip[4];
-
- u16 src_port;
- u16 dst_port;
-
- u8 protocol;
- u8 tcp_flag;
- u8 fcoe_rctl;
- u8 fcoe_type;
- u16 eth_type;
-} sml_lli_5tuple_key_s;
-
-/**
- * Struct name: sml_lli_5tuple_rsp_s
- * @brief: lli_5tuple_rsp
- * Description: xxxxxxxxxxxxxxx
- */
-typedef struct tag_sml_lli_5tuple_rsp {
- union {
- struct {
- u32 state:4;
- u32 rsvd:28;
- } bs;
-
- u32 value;
- } dw0;
-
- u32 dw1;
-
- union {
- struct {
- u32 frame_size:16;
- u32 lli_en:8;
- u32 rsvd:8;
- } bs;
-
- u32 value;
- } dw2;
-
- u32 dw3;
-} sml_lli_5tuple_rsp_s;
-
-/**
- * Struct name: l2nic_rx_cqe_s.
- * @brief: l2nic_rx_cqe_s data structure.
- * Description:
- */
-typedef struct tag_l2nic_rx_cqe {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rx_done:1;
- u32 bp_en:1;
- u32 rsvd1:6;
- u32 lro_num:8;
- u32 checksum_err:16;
-#else
- u32 checksum_err:16;
- u32 lro_num:8;
- u32 rsvd1:6;
- u32 bp_en:1;
- u32 rx_done:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 length:16;
- u32 vlan:16;
-#else
- u32 vlan:16;
- u32 length:16;
-#endif
- } bs;
- u32 value;
- } dw1;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rss_type:8;
- u32 rsvd0:2;
- u32 vlan_offload_en:1;
- u32 umbcast:2;
- u32 rsvd1:7;
- u32 pkt_types:12;
-#else
- u32 pkt_types:12;
- u32 rsvd1:7;
- u32 umbcast:2;
- u32 vlan_offload_en:1;
- u32 rsvd0:2;
- u32 rss_type:8;
-#endif
- } bs;
- u32 value;
- } dw2;
-
- union {
- struct {
- u32 rss_hash_value;
- } bs;
- u32 value;
- } dw3;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 if_1588:1;
- u32 if_tx_ts:1;
- u32 if_rx_ts:1;
- u32 rsvd:1;
- u32 msg_1588_type:4;
- u32 msg_1588_offset:8;
- u32 tx_ts_seq:16;
-#else
- u32 tx_ts_seq:16;
- u32 msg_1588_offset:8;
- u32 msg_1588_type:4;
- u32 rsvd:1;
- u32 if_rx_ts:1;
- u32 if_tx_ts:1;
- u32 if_1588:1;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- union {
- struct {
- u32 msg_1588_ts;
- } bs;
-
- struct {
- u32 rsvd0:12;
- /* for ovs. traffic type: 0-default l2nic pkt,
- * 1-fallback traffic, 2-miss upcall traffic,
- * 2-command
- */
- u32 traffic_type:4;
- /* for ovs. traffic from: vf_id,
- * only support traffic_type=0(default l2nic)
- * or 2(miss upcall)
- */
- u32 traffic_from:16;
- } ovs_bs;
-
- u32 value;
- } dw5;
-
- union {
- struct {
- u32 lro_ts;
- } bs;
- u32 value;
- } dw6;
-
- union {
- struct {
- u32 rsvd0;
- } bs;
-
- u32 localtag; /* for ovs */
-
- u32 value;
- } dw7;
-} l2nic_rx_cqe_s;
-
-typedef union tag_sml_global_queue_tbl_elem {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 src_tag_l:16;
- u32 local_qid:8;
- u32 rsvd:8;
-#elif (__BYTE_ORDER__ == __LITTLE_ENDIAN__)
- u32 rsvd:8;
- u32 local_qid:8;
- u32 src_tag_l:16;
-#endif
- } bs;
-
- u32 value;
-} sml_global_queue_tbl_elem_u;
-
-typedef struct tag_sml_global_queue_tbl {
- sml_global_queue_tbl_elem_u element[TBL_ID_GLOBAL_QUEUE_NUM];
-} sml_global_queue_tbl_s;
-
-typedef struct tag_sml_dfx_log_tbl {
- u32 wr_init_pc_h32; /* Initial value of write_pc*/
- u32 wr_init_pc_l32;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 state:8;
- u32 func_en:1;
- u32 srctag:12;
- u32 max_num:11; /* Data block highest value*/
-#else
- u32 max_num:11;
- u32 srctag:12;
- u32 func_en:1;
- u32 state:8;
-#endif
- } bs;
- u32 value;
- } dw2;
-
- u32 ci_index;
-} sml_dfx_log_tbl_s;
-
-typedef struct tag_sml_glb_capture_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1;
- u32 max_num:15;
- u32 rsvd:16;
-#else
- u32 rsvd:16;
- u32 max_num:15;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw0;
-
- u32 discard_addr_h32;
- u32 discard_addr_l32;
-
- u32 rsvd0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 valid:1;
- u32 mode:5;
- u32 direct:2;
- u32 offset:8;
- u32 cos:3;
- u32 max_num:13;
-#else
- u32 max_num:13;
- u32 cos:3;
- u32 offset:8;
- u32 direct:2;
- u32 mode:5;
- u32 valid:1;
-#endif
- } bs;
- u32 value;
- } dw4;
-
- u32 data_vlan;
-
- u32 condition_addr_h32;
- u32 condition_addr_l32;
-
-} sml_glb_capture_tbl_s;
-
-typedef struct tag_sml_cqe_addr_tbl {
- u32 cqe_first_addr_h32;
- u32 cqe_first_addr_l32;
- u32 cqe_last_addr_h32;
- u32 cqe_last_addr_l32;
-
-} sml_cqe_addr_tbl_s;
-
-/**
- * Struct name: sml_ucode_exec_info_tbl_s
- * @brief: ucode execption info Table
- * Description: microcode exception information table
- */
-typedef struct tag_ucode_exec_info_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 wptr_cpb_ack_str:4;
- u32 mem_cpb_ack_cnums_dma:4;
- u32 mem_cpb_ack_cmd_mode:2;
- u32 pr_ret_vld:1;
- u32 oeid_pd_pkt:1;
- u32 rptr_cmd:4;
- u32 wptr_cmd:4;
- u32 src_tag_l:12;
-#else
- u32 src_tag_l:12;
- u32 wptr_cmd:4;
- u32 rptr_cmd:4;
- u32 oeid_pd_pkt:1;
- u32 pr_ret_vld:1;
- u32 mem_cpb_ack_cmd_mode:2;
- u32 mem_cpb_ack_cnums_dma:4;
- u32 wptr_cpb_ack_str:4;
-#endif
- } bs;
-
- u32 value;
- } dw0;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 fq:16;
- u32 exception_type:4;
- u32 rptr_cpb_ack_str:4;
- u32 header_oeid:8;
-#else
- u32 header_oeid:8;
- u32 rptr_cpb_ack_str:4;
- u32 exception_type:4;
- u32 fq:16;
-#endif
- } bs;
-
- u32 value;
- } dw1;
-
- u32 oeid_pd_data_l32;
- u32 oeid_pd_data_m32;
-} sml_ucode_exec_info_s;
-
-typedef struct rq_iq_mapping_tbl {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rqid:16;
- u32 iqid:8;
- u32 rsvd:8;
-#else
- u32 rsvd:8;
- u32 iqid:8;
- u32 rqid:16;
-#endif
- } bs;
- u32 value;
- } dw[4];
-} sml_rq_iq_mapping_tbl_s;
-
-/* nic_ucode_rq_ctx table define
- */
-typedef struct nic_ucode_rq_ctx {
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 max_count:10;
- u32 cqe_tmpl:6;
- u32 pkt_tmpl:6;
- u32 wqe_tmpl:6;
- u32 psge_valid:1;
- u32 rsvd1:1;
- u32 owner:1;
- u32 ceq_en:1;
-#else
- u32 ceq_en:1;
- u32 owner:1;
- u32 rsvd1:1;
- u32 psge_valid:1;
- u32 wqe_tmpl:6;
- u32 pkt_tmpl:6;
- u32 cqe_tmpl:6;
- u32 max_count:10;
-#endif
- } bs;
- u32 dw0;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* Interrupt number that L2NIC engine tell SW
- * if generate int instead of CEQ
- */
- u32 int_num:10;
- u32 ceq_count:10;
- /* product index */
- u32 pi:12;
-#else
- /* product index */
- u32 pi:12;
- u32 ceq_count:10;
- /* Interrupt number that L2NIC engine tell SW
- * if generate int instead of CEQ
- */
- u32 int_num:10;
-#endif
- } bs0;
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* CEQ arm, L2NIC engine will clear it after send ceq,
- * driver should set it by CMD Q after receive all pkt.
- */
- u32 ceq_arm:1;
- u32 eq_id:5;
- u32 rsvd2:4;
- u32 ceq_count:10;
- /* product index */
- u32 pi:12;
-#else
- /* product index */
- u32 pi:12;
- u32 ceq_count:10;
- u32 rsvd2:4;
- u32 eq_id:5;
- /* CEQ arm, L2NIC engine will clear it after send ceq,
- * driver should set it by CMD Q after receive all pkt.
- */
- u32 ceq_arm:1;
-#endif
- } bs1;
- u32 dw1;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- /* consumer index */
- u32 ci:12;
- /* WQE page address of current CI point to, high part */
- u32 ci_wqe_page_addr_hi:20;
-#else
- /* WQE page address of current CI point to, high part */
- u32 ci_wqe_page_addr_hi:20;
- /* consumer index */
- u32 ci:12;
-#endif
- } bs2;
- u32 dw2;
- };
-
- /* WQE page address of current CI point to, low part */
- u32 ci_wqe_page_addr_lo;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 prefetch_min:7;
- u32 prefetch_max:11;
- u32 prefetch_cache_threshold:14;
-#else
- u32 prefetch_cache_threshold:14;
- u32 prefetch_max:11;
- u32 prefetch_min:7;
-#endif
- } bs3;
- u32 dw3;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd3:31;
- /* ownership of WQE */
- u32 prefetch_owner:1;
-#else
- /* ownership of WQE */
- u32 prefetch_owner:1;
- u32 rsvd3:31;
-#endif
- } bs4;
- u32 dw4;
- };
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 prefetch_ci:12;
- /* high part */
- u32 prefetch_ci_wqe_page_addr_hi:20;
-#else
- /* high part */
- u32 prefetch_ci_wqe_page_addr_hi:20;
- u32 prefetch_ci:12;
-#endif
- } bs5;
- u32 dw5;
- };
-
- /* low part */
- u32 prefetch_ci_wqe_page_addr_lo;
- /* host mem GPA, high part */
- u32 pi_gpa_hi;
- /* host mem GPA, low part */
- u32 pi_gpa_lo;
-
- union {
- struct {
-#if (__BYTE_ORDER__ == __BIG_ENDIAN__)
- u32 rsvd4:9;
- u32 ci_cla_tbl_addr_hi:23;
-#else
- u32 ci_cla_tbl_addr_hi:23;
- u32 rsvd4:9;
-#endif
- } bs6;
- u32 dw6;
- };
-
- u32 ci_cla_tbl_addr_lo;
-
-} nic_ucode_rq_ctx_s;
-
-#define LRO_TSO_SPACE_SIZE (240) /* (15 * 16) */
-#define RQ_CTX_SIZE (48)
-
-#ifdef __cplusplus
-#if __cplusplus
-}
-#endif
-#endif /* __cplusplus */
-#endif /* __L2_TABLE_H__ */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h b/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
deleted file mode 100644
index 39d0516c..00000000
--- a/drivers/net/ethernet/huawei/hinic/hinic_sml_table_pub.h
+++ /dev/null
@@ -1,277 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0*/
-/* Huawei HiNIC PCI Express Linux driver
- * Copyright(c) 2017 Huawei Technologies Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef __SML_TABLE_PUB_H__
-#define __SML_TABLE_PUB_H__
-
-#ifdef __cplusplus
-#if __cplusplus
-extern "C" {
-#endif
-#endif /* __cplusplus */
-
-/* Un-FPGA(ESL/EMU/EDA) specification */
-#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)))
-/* ER specification*/
-#define L2_ER_SPEC (16)
-
-/* Entry specification*/
-#define TBL_ID_FUNC_CFG_SPEC (512)
-#define TBL_ID_PORT_CFG_SPEC (16)
-#define TBL_ID_MAC_SPEC (4096)
-#define TBL_ID_MULTICAST_SPEC (1024)
-#define TBL_ID_TRUNK_SPEC (256)
-#define TBL_ID_ELB_SPEC (18432)
-#define TBL_ID_TAGGEDLIST_SPEC (80)
-#define TBL_ID_UNTAGGEDLIST_SPEC (16)
-
-/* VLAN specification*/
-#define VSW_VLAN_SPEC (4096)
-
-#else /* FPGA scenario specifications */
-
-/* ER specification*/
-#define L2_ER_SPEC (4)
-
-/* Entry specification*/
-#define TBL_ID_FUNC_CFG_SPEC (64)
-#define TBL_ID_PORT_CFG_SPEC (16)
-#define TBL_ID_MAC_SPEC (256)
-#define TBL_ID_MULTICAST_SPEC (32)
-#define TBL_ID_TRUNK_SPEC (16)
-#define TBL_ID_ELB_SPEC (1152)
-#define TBL_ID_TAGGEDLIST_SPEC (20)
-#define TBL_ID_UNTAGGEDLIST_SPEC (4)
-
-/* VLAN specification*/
-#define VSW_VLAN_SPEC (1024)
-#endif
-
-/**
- * Number of entries elements defined
- */
-#define TBL_ID_ELB_ENTRY_ELEM_NUM 2
-#define TBL_ID_VLAN_ENTRY_ELEM_NUM 8
-#define TBL_ID_MULTICAST_ENTRY_ELEM_NUM 2
-#define TBL_ID_TRUNKFWD_ENTRY_ELEM_NUM 32
-#define TBL_ID_TAGGEDLIST_BITMAP32_NUM 4
-#define TBL_ID_UNTAGGEDLIST_BITMAP32_NUM 4
-#define TBL_ID_GLOBAL_QUEUE_NUM 4
-#define TBL_ID_RSS_CONTEXT_NUM 4
-#define TBL_ID_RSS_HASH_NUM 4
-
-/**
- * NIC receiving mode defined
- */
-#define NIC_RX_MODE_UC 0x01 /* 0b00001 */
-#define NIC_RX_MODE_MC 0x02 /* 0b00010 */
-#define NIC_RX_MODE_BC 0x04 /* 0b00100 */
-#define NIC_RX_MODE_MC_ALL 0x08 /* 0b01000 */
-#define NIC_RX_MODE_PROMISC 0x10 /* 0b10000 */
-
-/**
- * Maximum number of HCAR
- */
-#define QOS_MAX_HCAR_NUM (12)
-
-/**
- * VLAN Table, Multicast Table, ELB Table Definitions
- * The Table index and sub id index
- */
-#define VSW_DEFAULT_VLAN0 (0)
-#define INVALID_ELB_INDEX (0)
-
-#if (!defined(__UP_FPGA__) && (!defined(HI1822_MODE_FPGA)))
-/* Supports ESL/EMU/EDA 16ER * 4K VLAN, 1 entry stored 8 vlan*/
-#define GET_VLAN_TABLE_INDEX(er_id, vlan_id) \
- ((((er_id) & 0xF) << 9) | (((vlan_id) & 0xFFF) >> 3))
-#else
-/*FPGA supports only 4ER * 1K VLAN, 1 entry stored 8 vlan*/
-#define GET_VLAN_TABLE_INDEX(er_id, vlan_id) \
- ((((er_id) & 0x3) << 7) | (((vlan_id) & 0x3FF) >> 3))
-#endif
-#define GET_VLAN_ENTRY_SUBID(vlan_id) ((vlan_id) & 0x7)
-
-#define GET_MULTICAST_TABLE_INDEX(mc_id) ((mc_id) >> 1)
-#define GET_MULTICAST_ENTRY_SUBID(mc_id) ((mc_id) & 0x1)
-
-#define GET_ELB_TABLE_INDEX(elb_id) ((elb_id) >> 1)
-#define GET_ELB_ENTRY_SUBID(elb_id) ((elb_id) & 0x1)
-
-/**
- * taggedlist_table and untaggedlist_table access offset calculation
- */
-#define GET_TAGLIST_TABLE_INDEX(list_id, vlan_id) \
- (((list_id) << 5) | (((vlan_id) & 0xFFF) >> 7))
-#define GET_TAGLIST_TABLE_BITMAP_IDX(vlan_id) (((vlan_id) >> 5) & 0x3)
-#define GET_TAGLIST_TABLE_VLAN_BIT(vlan_id) \
- (0x1UL << ((vlan_id) & 0x1F))
-
-#define TRUNK_FWDID_NOPORT 0xFFFF
-
-/**
- * MAC type definition
- */
-typedef enum {
- MAC_TYPE_UC = 0,
- MAC_TYPE_BC,
- MAC_TYPE_MC,
- MAC_TYPE_RSV,
-} mac_type_e;
-
-/**
- * Ethernet port definition
- */
-typedef enum {
- MAG_ETH_PORT0 = 0,
- MAG_ETH_PORT1,
- MAG_ETH_PORT2,
- MAG_ETH_PORT3,
- MAG_ETH_PORT4,
- MAG_ETH_PORT5,
- MAG_ETH_PORT6,
- MAG_ETH_PORT7,
- MAG_ETH_PORT8,
- MAG_ETH_PORT9,
-} mag_eth_port_e;
-
-/**
- * vlan filter type defined
- */
-typedef enum {
- VSW_VLAN_MODE_ALL = 0,
- VSW_VLAN_MODE_ACCESS,
- VSW_VLAN_MODE_TRUNK,
- VSW_VLAN_MODE_HYBRID,
- VSW_VLAN_MODE_QINQ,
- VSW_VLAN_MODE_MAX,
-} vsw_vlan_mode_e;
-
-/**
- * MAC table query forwarding port type definition
- */
-typedef enum {
- VSW_FWD_TYPE_FUNCTION = 0, /* forward type function */
- VSW_FWD_TYPE_VMDQ, /* forward type function-queue(vmdq) */
- VSW_FWD_TYPE_PORT, /* forward type port */
- VSW_FWD_TYPE_FIC, /* forward type fic */
- VSW_FWD_TYPE_TRUNK, /* forward type trunk */
- VSW_FWD_TYPE_DP, /* forward type DP */
- VSW_FWD_TYPE_MC, /* forward type multicast */
-
- /* START: is not used and has to be removed */
- VSW_FWD_TYPE_BC, /* forward type broadcast */
- VSW_FWD_TYPE_PF, /* forward type pf */
- /* END: is not used and has to be removed */
-
- VSW_FWD_TYPE_NULL, /* forward type null */
-} vsw_fwd_type_e;
-
-/**
- * Eth-Trunk port aggregation mode
- */
-typedef enum {
- VSW_ETRK_MODE_STANDBY,
- VSW_ETRK_MODE_SMAC,
- VSW_ETRK_MODE_DMAC,
- VSW_ETRK_MODE_SMACDMAC,
- VSW_ETRK_MODE_SIP,
- VSW_ETRK_MODE_DIP,
- VSW_ETRK_MODE_SIPDIP,
- VSW_ETRK_MODE_5TUPLES,
- VSW_ETRK_MODE_LACP,
- VSW_ETRK_MODE_MAX,
-} vsw_etrk_mode_e;
-
-/**
- * Eth-Trunk port aggregation mode
- */
-typedef enum {
- TRUNK_MODE_STANDBY,
- TRUNK_MODE_SMAC,
- TRUNK_MODE_DMAC,
- TRUNK_MODE_SMACDMAC,
- TRUNK_MODE_SIP,
- TRUNK_MODE_DIP,
- TRUNK_MODE_SIPDIP,
- TRUNK_MODE_5TUPLES,
- TRUNK_MODE_SIPV6,
- TRUNK_MODE_DIPV6,
- TRUNK_MODE_SIPDIPV6,
- TRUNK_MODE_5TUPLESV6,
- TRUNK_MODE_LACP,
-} trunk_mode_s;
-
-/* ACL key type */
-enum {
- ACL_KEY_IPV4 = 0,
- ACL_KEY_IPV6
-};
-
-/* ACL filter action */
-enum {
- ACL_ACTION_PERMIT = 0,
- ACL_ACTION_DENY
-};
-
-/* ACL action button*/
-enum {
- ACL_ACTION_OFF = 0,
- ACL_ACTION_ON,
-};
-
-/* ACL statistic action*/
-enum {
- ACL_ACTION_NO_COUNTER = 0,
- ACL_ACTION_COUNT_PKT,
- ACL_ACTION_COUNT_PKT_LEN,
-};
-
-/* ACL redirect action*/
-enum {
- ACL_ACTION_FORWAR_UP = 1,
- ACL_ACTION_FORWAR_PORT,
- ACL_ACTION_FORWAR_NEXT_HOP,
- ACL_ACTION_FORWAR_OTHER,
-};
-
-enum {
- CEQ_TIMER_STOP = 0,
- CEQ_TIMER_START,
-};
-
-enum {
- CEQ_API_DISPATCH = 0,
- CEQ_API_NOT_DISPATCH,
-};
-
-enum {
- CEQ_MODE = 1,
- INT_MODE,
-};
-
-enum {
- ER_MODE_VEB,
- ER_MODE_VEPA,
- ER_MODE_MULTI,
- ER_MODE_NULL,
-};
-
-#ifdef __cplusplus
-#if __cplusplus
-}
-#endif
-#endif /* __cplusplus */
-#endif /* __L2_TABLE_PUB_H__ */
--
1.8.3
[PATCH 1/4] btrfs: delayed-inode: Kill the BUG_ON() in btrfs_delete_delayed_dir_index()
by Yang Yingliang 17 Apr '20
17 Apr '20
From: Qu Wenruo <wqu(a)suse.com>
mainline inclusion
from mainline-v5.4-rc1
commit 933c22a7512c5c09b1fdc46b557384efe8d03233
category: bugfix
bugzilla: 13690
CVE: CVE-2019-19813
-------------------------------------------------
There is one report of a fuzzed image which leads to BUG_ON() in
btrfs_delete_delayed_dir_index().
Although that fuzzed image can already be addressed by the enhanced
extent-tree error handler, it's still better to hunt down more BUG_ON()s.
This patch hunts down two BUG_ON()s in
btrfs_delete_delayed_dir_index():
- One for an error from btrfs_delayed_item_reserve_metadata()
Instead of BUG_ON(), we output an error message, free the item,
and return the error.
All callers of this function handle the error by aborting the current
transaction.
- One for a possible EEXIST from __btrfs_add_delayed_deletion_item()
That function can return -EEXIST.
We already have a good enough error message for that; we only need to
clean up the reserved metadata space and the allocated item.
To help with the above cleanup, also modify __btrfs_remove_delayed_item(),
called from btrfs_release_delayed_item(), to skip items not associated
with any delayed node.
Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=203253
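As a minimal caller-side sketch (not taken from the patch itself; the
call site and variable names are hypothetical, and the function
signature is assumed to match this 4.19-based tree), the returned error
is meant to be handled like:

	ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
	if (ret) {
		/* abort the transaction instead of crashing via BUG_ON() */
		btrfs_abort_transaction(trans, ret);
		return ret;
	}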
Signed-off-by: Qu Wenruo <wqu(a)suse.com>
Reviewed-by: David Sterba <dsterba(a)suse.com>
Signed-off-by: David Sterba <dsterba(a)suse.com>
Conflicts:
fs/btrfs/delayed-inode.c
[yyl: adjust context]
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/btrfs/delayed-inode.c | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index e9522f2..5dc6141 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -471,6 +471,9 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
struct rb_root *root;
struct btrfs_delayed_root *delayed_root;
+ /* Not associated with any delayed_node */
+ if (!delayed_item->delayed_node)
+ return;
delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
BUG_ON(!delayed_root);
@@ -1526,7 +1529,12 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
* we have reserved enough space when we start a new transaction,
* so reserving metadata failure is impossible.
*/
- BUG_ON(ret);
+ if (ret < 0) {
+ btrfs_err(trans->fs_info,
+"metadata reservation failed for delayed dir item deltiona, should have been reserved");
+ btrfs_release_delayed_item(item);
+ goto end;
+ }
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
@@ -1534,7 +1542,8 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
btrfs_err(trans->fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
index, node->root->objectid, node->inode_id, ret);
- BUG();
+ btrfs_delayed_item_release_metadata(dir->root, item);
+ btrfs_release_delayed_item(item);
}
mutex_unlock(&node->mutex);
end:
--
1.8.3
[PATCH 01/30] net: hns3: add one printing information in hnae3_unregister_client() function
by Yang Yingliang 17 Apr '20
17 Apr '20
From: shenhao <shenhao21(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
-----------------------------------------------------
This patch adds a log message to let the user know that the client does
not exist when unregistering a nonexistent client.
Signed-off-by: Guangbin Huang <huangguangbin2(a)huawei.com>
Signed-off-by: shenhao <shenhao21(a)huawei.com>
Reviewed-by: Zhong Zhaohui <zhongzhaohui(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/hisilicon/hns3/hnae3.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 4afa509..53a87b3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -156,6 +156,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
if (!existed) {
mutex_unlock(&hnae3_common_lock);
+ pr_err("client %s does not exist!\n", client->name);
return;
}
--
1.8.3
From: youshengzui <youshengzui(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
--------------------------
This patch updates the hns3 driver version to 1.9.37.4.
Signed-off-by: youshengzui <youshengzui(a)huawei.com>
Reviewed-by: Weiwei Deng <dengweiwei(a)huawei.com>
Reviewed-by: Zhaohui Zhong <zhongzhaohui(a)huawei.com>
Reviewed-by: Junxin Chen <chenjunxin1(a)huawei.com>
Reviewed-by: Zhong Zhaohui <zhongzhaohui(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 +-
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2 +-
5 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index f795bfd..98dfa7c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -30,7 +30,7 @@
#include <linux/pci.h>
#include <linux/types.h>
-#define HNAE3_MOD_VERSION "1.9.37.3"
+#define HNAE3_MOD_VERSION "1.9.37.4"
#define HNAE3_MIN_VECTOR_NUM 2 /* one for msi-x, another for IO */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h
index 3977883..630f642 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_cae/hns3_cae_version.h
@@ -4,7 +4,7 @@
#ifndef __HNS3_CAE_VERSION_H__
#define __HNS3_CAE_VERSION_H__
-#define HNS3_CAE_MOD_VERSION "1.9.37.3"
+#define HNS3_CAE_MOD_VERSION "1.9.37.4"
#define CMT_ID_LEN 8
#define RESV_LEN 3
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 5f1d5a3..9e11ec3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -8,7 +8,7 @@
#include "hnae3.h"
-#define HNS3_MOD_VERSION "1.9.37.3"
+#define HNS3_MOD_VERSION "1.9.37.4"
extern char hns3_driver_version[];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0146470..5e64d2a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -12,7 +12,7 @@
#include "hclge_cmd.h"
#include "hnae3.h"
-#define HCLGE_MOD_VERSION "1.9.37.3"
+#define HCLGE_MOD_VERSION "1.9.37.4"
#define HCLGE_DRIVER_NAME "hclge"
#define HCLGE_MAX_PF_NUM 8
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 51af1050..596618e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -10,7 +10,7 @@
#include "hclgevf_cmd.h"
#include "hnae3.h"
-#define HCLGEVF_MOD_VERSION "1.9.37.3"
+#define HCLGEVF_MOD_VERSION "1.9.37.4"
#define HCLGEVF_DRIVER_NAME "hclgevf"
#define HCLGEVF_MAX_VLAN_ID 4095
--
1.8.3
[PATCH 01/40] perf/amd/uncore: Replace manual sampling check with CAP_NO_INTERRUPT flag
by Yang Yingliang 17 Apr '20
17 Apr '20
From: Kim Phillips <kim.phillips(a)amd.com>
[ Upstream commit f967140dfb7442e2db0868b03b961f9c59418a1b ]
Enable the sampling check in kernel/events/core.c::perf_event_open(),
which returns the more appropriate -EOPNOTSUPP.
BEFORE:
$ sudo perf record -a -e instructions,l3_request_g1.caching_l3_cache_accesses true
Error:
The sys_perf_event_open() syscall returned with 22 (Invalid argument) for event (l3_request_g1.caching_l3_cache_accesses).
/bin/dmesg | grep -i perf may provide additional information.
With nothing relevant in dmesg.
AFTER:
$ sudo perf record -a -e instructions,l3_request_g1.caching_l3_cache_accesses true
Error:
l3_request_g1.caching_l3_cache_accesses: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'
Fixes: c43ca5091a37 ("perf/x86/amd: Add support for AMD NB and L2I "uncore" counters")
Signed-off-by: Kim Phillips <kim.phillips(a)amd.com>
Signed-off-by: Borislav Petkov <bp(a)suse.de>
Acked-by: Peter Zijlstra <peterz(a)infradead.org>
Cc: stable(a)vger.kernel.org
Link: https://lkml.kernel.org/r/20200311191323.13124-1-kim.phillips@amd.com
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/x86/events/amd/uncore.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index baa7e36..604a855 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -193,20 +193,18 @@ static int amd_uncore_event_init(struct perf_event *event)
/*
* NB and Last level cache counters (MSRs) are shared across all cores
- * that share the same NB / Last level cache. Interrupts can be directed
- * to a single target core, however, event counts generated by processes
- * running on other cores cannot be masked out. So we do not support
- * sampling and per-thread events.
+ * that share the same NB / Last level cache. On family 16h and below,
+ * Interrupts can be directed to a single target core, however, event
+ * counts generated by processes running on other cores cannot be masked
+ * out. So we do not support sampling and per-thread events via
+ * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
*/
- if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
- return -EINVAL;
/* NB and Last level cache counters do not have usr/os/guest/host bits */
if (event->attr.exclude_user || event->attr.exclude_kernel ||
event->attr.exclude_host || event->attr.exclude_guest)
return -EINVAL;
- /* and we do not enable counter overflow interrupts */
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1;
@@ -314,6 +312,7 @@ static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
};
static struct pmu amd_llc_pmu = {
@@ -324,6 +323,7 @@ static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
.start = amd_uncore_start,
.stop = amd_uncore_stop,
.read = amd_uncore_read,
+ .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
};
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
--
1.8.3
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: feature
bugzilla: NA
CVE: NA
This patch adds DFX statistics for I/O operations, covering send/
recv/send_fail/send_busy counters. An overtime_threshold can also be
defined to flag requests whose latency exceeds that threshold.
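As a usage sketch once the patch is applied (the debugfs mount point and
device directory below are assumptions; the files themselves are created
under an "hpre_dfx" directory by this patch, and the threshold unit is
microseconds per hpre_is_bd_timeout()):

	# read an accumulated counter (path is illustrative)
	cat /sys/kernel/debug/<hpre-dev>/hpre_dfx/send_cnt
	# set a 1000 us threshold; writing overtime_thrhld also clears over_thrhld_cnt
	echo 1000 > /sys/kernel/debug/<hpre-dev>/hpre_dfx/overtime_thrhld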
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Mingqiang Ling <lingmingqiang(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Ye Kai <yekai13(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/hpre/hpre.h | 16 ++++++
drivers/crypto/hisilicon/hpre/hpre_crypto.c | 89 ++++++++++++++++++++++++-----
drivers/crypto/hisilicon/hpre/hpre_main.c | 55 ++++++++++++++++++
3 files changed, 146 insertions(+), 14 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 42b2f2a..203eb2a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -25,6 +25,16 @@ enum hpre_ctrl_dbgfs_file {
HPRE_DEBUG_FILE_NUM,
};
+enum hpre_dfx_dbgfs_file {
+ HPRE_SEND_CNT,
+ HPRE_RECV_CNT,
+ HPRE_SEND_FAIL_CNT,
+ HPRE_SEND_BUSY_CNT,
+ HPRE_OVER_THRHLD_CNT,
+ HPRE_OVERTIME_THRHLD,
+ HPRE_DFX_FILE_NUM
+};
+
#define HPRE_DEBUGFS_FILE_NUM (HPRE_DEBUG_FILE_NUM + HPRE_CLUSTERS_NUM - 1)
struct hpre_debugfs_file {
@@ -34,12 +44,18 @@ struct hpre_debugfs_file {
struct hpre_debug *debug;
};
+struct hpre_dfx {
+ atomic64_t value;
+ enum hpre_dfx_dbgfs_file type;
+};
+
/*
* One HPRE controller has one PF and multiple VFs, some global configurations
* which PF has need this structure.
* Just relevant for PF.
*/
struct hpre_debug {
+ struct hpre_dfx dfx[HPRE_DFX_FILE_NUM];
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 7610e13..b68b30c 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -10,6 +10,7 @@
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
+#include <linux/time.h>
#include "hpre.h"
struct hpre_ctx;
@@ -68,6 +69,7 @@ struct hpre_dh_ctx {
struct hpre_ctx {
struct hisi_qp *qp;
struct hpre_asym_request **req_list;
+ struct hpre *hpre;
spinlock_t req_lock;
unsigned int key_sz;
bool crt_g2_mode;
@@ -90,6 +92,7 @@ struct hpre_asym_request {
int err;
int req_id;
hpre_cb cb;
+ struct timespec64 req_time;
};
static DEFINE_MUTEX(hpre_alg_lock);
@@ -119,6 +122,7 @@ static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
struct hpre_ctx *ctx;
+ struct hpre_dfx *dfx;
int id;
ctx = hpre_req->ctx;
@@ -129,6 +133,10 @@ static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
ctx->req_list[id] = hpre_req;
hpre_req->req_id = id;
+ dfx = ctx->hpre->debug.dfx;
+ if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
+ ktime_get_ts64(&hpre_req->req_time);
+
return id;
}
@@ -308,12 +316,16 @@ static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
+ struct hpre *hpre;
+
if (!ctx || !qp || qlen < 0)
return -EINVAL;
spin_lock_init(&ctx->req_lock);
ctx->qp = qp;
+ hpre = container_of(ctx->qp->qm, struct hpre, qm);
+ ctx->hpre = hpre;
ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
if (!ctx->req_list)
return -ENOMEM;
@@ -336,30 +348,67 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
ctx->key_sz = 0;
}
+static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
+ u64 overtime_thrhld)
+{
+ struct timespec64 reply_time;
+ u64 time_use_us;
+
+#define HPRE_DFX_SEC_TO_US 1000000
+#define HPRE_DFX_US_TO_NS 1000
+
+ ktime_get_ts64(&reply_time);
+ time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
+ HPRE_DFX_SEC_TO_US +
+ (reply_time.tv_nsec - req->req_time.tv_nsec) /
+ HPRE_DFX_US_TO_NS;
+
+ if (time_use_us <= overtime_thrhld)
+ return false;
+
+ return true;
+}
+
static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct kpp_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
areq = req->areq.dh;
areq->dst_len = ctx->key_sz;
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
kpp_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
struct hpre_asym_request *req;
struct akcipher_request *areq;
+ u64 overtime_thrhld;
int ret;
ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
+
+ overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
+ if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
+ atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
+
areq = req->areq.rsa;
areq->dst_len = ctx->key_sz;
hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
akcipher_request_complete(areq, ret);
+ atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
@@ -435,6 +484,29 @@ static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
return 0;
}
+static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
+{
+ struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
+ int ctr = 0;
+ int ret;
+
+ do {
+ atomic64_inc(&dfx[HPRE_SEND_CNT].value);
+ ret = hisi_qp_send(ctx->qp, msg);
+ if (ret != -EBUSY)
+ break;
+ atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
+ } while (ctr++ < HPRE_TRY_SEND_TIMES);
+
+ if (likely(!ret))
+ return ret;
+
+ if (ret != -EBUSY)
+ atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
+
+ return ret;
+}
+
#ifdef CONFIG_CRYPTO_DH
static int hpre_dh_compute_value(struct kpp_request *req)
{
@@ -443,7 +515,6 @@ static int hpre_dh_compute_value(struct kpp_request *req)
void *tmp = kpp_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
ret = hpre_msg_request_set(ctx, req, false);
@@ -464,11 +535,9 @@ static int hpre_dh_compute_value(struct kpp_request *req)
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
else
msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -646,7 +715,6 @@ static int hpre_rsa_enc(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -676,11 +744,8 @@ static int hpre_rsa_enc(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
@@ -698,7 +763,6 @@ static int hpre_rsa_dec(struct akcipher_request *req)
void *tmp = akcipher_request_ctx(req);
struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
struct hpre_sqe *msg = &hpre_req->req;
- int ctr = 0;
int ret;
/* For 512 and 1536 bits key size, use soft tfm instead */
@@ -735,11 +799,8 @@ static int hpre_rsa_dec(struct akcipher_request *req)
if (unlikely(ret))
goto clear_all;
- do {
- ret = hisi_qp_send(ctx->qp, msg);
- } while (ret == -EBUSY && ctr++ < HPRE_TRY_SEND_TIMES);
-
/* success */
+ ret = hpre_send(ctx, msg);
if (likely(!ret))
return -EINPROGRESS;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index f727158..2ede8d78 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -169,6 +169,15 @@ struct hpre_hw_error {
{"INT_STATUS ", HPRE_INT_STATUS},
};
+static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
+ "send_cnt",
+ "recv_cnt",
+ "send_fail_cnt",
+ "send_busy_cnt",
+ "over_thrhld_cnt",
+ "overtime_thrhld"
+};
+
#ifdef CONFIG_CRYPTO_QM_UACCE
static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
@@ -588,6 +597,33 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
.write = hpre_ctrl_debug_write,
};
+static int hpre_debugfs_atomic64_get(void *data, u64 *val)
+{
+ struct hpre_dfx *dfx_item = data;
+
+ *val = atomic64_read(&dfx_item->value);
+ return 0;
+}
+
+static int hpre_debugfs_atomic64_set(void *data, u64 val)
+{
+ struct hpre_dfx *dfx_item = data;
+
+ if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
+ struct hpre_dfx *hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
+
+ atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
+ } else if (val) {
+ return -EINVAL;
+ }
+
+ atomic64_set(&dfx_item->value, val);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
+ hpre_debugfs_atomic64_set, "%llu\n");
+
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
@@ -691,6 +727,22 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug)
return hpre_cluster_debugfs_init(debug);
}
+static void hpre_dfx_debug_init(struct hpre_debug *debug)
+{
+ struct hpre *hpre = container_of(debug, struct hpre, debug);
+ struct hpre_dfx *dfx = hpre->debug.dfx;
+ struct hisi_qm *qm = &hpre->qm;
+ struct dentry *parent;
+ int i;
+
+ parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
+ for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
+ dfx[i].type = i;
+ debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
+ &hpre_atomic64_ops);
+ }
+}
+
static int hpre_debugfs_init(struct hisi_qm *qm)
{
struct hpre *hpre = container_of(qm, struct hpre, qm);
@@ -709,6 +761,9 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
if (ret)
goto failed_to_create;
}
+
+ hpre_dfx_debug_init(&hpre->debug);
+
return 0;
failed_to_create:
--
1.8.3
From: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
When I backported 52918ed5fcf0 ("KVM: SVM: Override default MMIO mask if
memory encryption is enabled") to 4.19 (which resulted in commit
a4e761c9f63a ("KVM: SVM: Override default MMIO mask if memory encryption
is enabled")), I messed up the call to kvm_mmu_set_mmio_spte_mask()
Fix that here now.
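(In this tree, kvm_mmu_set_mmio_spte_mask() takes the MMIO SPTE mask and
the MMIO SPTE value, assuming the 4.19 two-argument signature; the bad
backport passed the PT_WRITABLE_MASK | PT_USER_MASK access bits as the
value, which the hunk below replaces with 'mask' for both arguments.)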
Reported-by: Tom Lendacky <thomas.lendacky(a)amd.com>
Cc: Sean Christopherson <sean.j.christopherson(a)intel.com>
Cc: Paolo Bonzini <pbonzini(a)redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/x86/kvm/svm.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 3f0565e..cc8f3b41 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1336,7 +1336,7 @@ static __init void svm_adjust_mmio_mask(void)
*/
mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
- kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
+ kvm_mmu_set_mmio_spte_mask(mask, mask);
}
static __init int svm_hardware_setup(void)
--
1.8.3
[PATCH 1/5] Revert "dm crypt: fix benbi IV constructor crash if used in authenticated mode"
by Yang Yingliang 17 Apr '20
17 Apr '20
From: Xiongfeng Wang <wangxiongfeng2(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 31797
CVE: NA
-------------------------
The next two patches to be reverted conflict with this patch, so revert
it first and merge the original patch instead.
This reverts commit 808f3768deec5e1a5c9d2a4a2d8593fbbaf3e4cc.
Signed-off-by: Xiongfeng Wang <wangxiongfeng2(a)huawei.com>
Reviewed-by: ZhangXiaoxu <zhangxiaoxu5(a)huawei.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/md/dm-crypt.c | 10 ++--------
1 file changed, 2 insertions(+), 8 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d451f98..aa7f741 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -563,14 +563,8 @@ static int crypt_iv_essiv_gen(struct geniv_ctx *ctx,
static int crypt_iv_benbi_ctr(struct geniv_ctx *ctx)
{
- unsigned bs;
- int log;
-
- if (test_bit(CRYPT_MODE_INTEGRITY_AEAD, &ctx->cipher_flags))
- bs = crypto_aead_blocksize(ctx->tfms.tfms_aead[0]);
- else
- bs = crypto_skcipher_blocksize(ctx->tfms.tfms[0]);
- log = ilog2(bs);
+ unsigned int bs = crypto_skcipher_blocksize(ctx->tfms.tfms[0]);
+ int log = ilog2(bs);
/* we need to calculate how far we must shift the sector count
* to get the cipher block count, we use this shift in _gen */
--
1.8.3
[PATCH] arm64: clear_page: Add new implementation of clear_page() by STNP
by Yang Yingliang 17 Apr '20
17 Apr '20
From: Wei Li <liwei391(a)huawei.com>
hulk inclusion
category: feature
bugzilla: 31400
CVE: NA
---------------------------
Currently, clear_page() clears the page with 'dc zva'; since the page is
usually not used immediately afterwards, pulling it into the cache is
wasted work.
Add an optimized implementation of clear_page() using 'stnp' (non-temporal
store pair) for better performance. It can be enabled via the boot cmdline
'mm.use_clearpage_stnp'.
In the hugetlb clear test, we gained about 53.7% performance improvement:
Set mm.use_clearpage_stnp = 0 | Set mm.use_clearpage_stnp = 1
[root@localhost liwei]# ./a.out 50 20 | [root@localhost liwei]# ./a.out 50 20
size is 50 Gib, test times is 20 | size is 50 Gib, test times is 20
test_time[0] : use 8.438046 sec | test_time[0] : use 3.722682 sec
test_time[1] : use 8.028493 sec | test_time[1] : use 3.640274 sec
test_time[2] : use 8.646547 sec | test_time[2] : use 4.095052 sec
test_time[3] : use 8.122490 sec | test_time[3] : use 3.998446 sec
test_time[4] : use 8.053038 sec | test_time[4] : use 4.084259 sec
test_time[5] : use 8.843512 sec | test_time[5] : use 3.933871 sec
test_time[6] : use 8.308906 sec | test_time[6] : use 3.934334 sec
test_time[7] : use 8.093817 sec | test_time[7] : use 3.869142 sec
test_time[8] : use 8.303504 sec | test_time[8] : use 3.902916 sec
test_time[9] : use 8.178336 sec | test_time[9] : use 3.541885 sec
test_time[10] : use 8.003625 sec | test_time[10] : use 3.595554 sec
test_time[11] : use 8.163807 sec | test_time[11] : use 3.583813 sec
test_time[12] : use 8.267464 sec | test_time[12] : use 3.863033 sec
test_time[13] : use 8.055326 sec | test_time[13] : use 3.770953 sec
test_time[14] : use 8.246986 sec | test_time[14] : use 3.808006 sec
test_time[15] : use 8.546992 sec | test_time[15] : use 3.653194 sec
test_time[16] : use 8.727256 sec | test_time[16] : use 3.722395 sec
test_time[17] : use 8.288951 sec | test_time[17] : use 3.683508 sec
test_time[18] : use 8.019322 sec | test_time[18] : use 4.253087 sec
test_time[19] : use 8.250685 sec | test_time[19] : use 4.082845 sec
hugetlb test end! | hugetlb test end!
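The test program itself is not part of the patch; a minimal sketch of such a hugetlb clearing benchmark (the 2 MiB hugepage size, the MAP_HUGETLB path and the output format are all assumptions) could look like:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <time.h>

int main(int argc, char **argv)
{
	size_t gib = (argc > 1) ? strtoul(argv[1], NULL, 0) : 1;
	int iters = (argc > 2) ? atoi(argv[2]) : 1;
	size_t len = gib << 30;
	const size_t hps = 2UL << 20;	/* assumed 2 MiB hugepage size */

	printf("size is %zu Gib, test times is %d\n", gib, iters);
	for (int i = 0; i < iters; i++) {
		struct timespec t0, t1;
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
			       -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		clock_gettime(CLOCK_MONOTONIC, &t0);
		for (char *q = p; q < p + len; q += hps)
			*q = 0;	/* first touch: the kernel clears the page */
		clock_gettime(CLOCK_MONOTONIC, &t1);

		printf("test_time[%d] : use %f sec\n", i,
		       (t1.tv_sec - t0.tv_sec) +
		       (t1.tv_nsec - t0.tv_nsec) / 1e9);
		munmap(p, len);
	}
	printf("hugetlb test end!\n");
	return 0;
}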
Signed-off-by: Wei Li <liwei391(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/arm64/include/asm/cpucaps.h | 3 ++-
arch/arm64/kernel/cpufeature.c | 34 ++++++++++++++++++++++++++++++++++
arch/arm64/lib/clear_page.S | 21 +++++++++++++++++++++
3 files changed, 57 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index a9090f2..3cd169f 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -56,7 +56,8 @@
#define ARM64_WORKAROUND_1463225 35
#define ARM64_HAS_CRC32 36
#define ARM64_SSBS 37
+#define ARM64_CLEARPAGE_STNP 38
-#define ARM64_NCAPS 38
+#define ARM64_NCAPS 39
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b1f621c..8b84a47 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1232,6 +1232,34 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
}
#endif
+static bool use_clearpage_stnp;
+
+static int __init early_use_clearpage_stnp(char *p)
+{
+ return strtobool(p, &use_clearpage_stnp);
+}
+early_param("mm.use_clearpage_stnp", early_use_clearpage_stnp);
+
+static bool has_mor_nontemporal(const struct arm64_cpu_capabilities *entry)
+{
+ /*
+ * List of CPUs which have memory ordering ruled non-temporal
+ * load and store.
+ */
+ static const struct midr_range cpus[] = {
+ MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
+ {},
+ };
+
+ return is_midr_in_range_list(read_cpuid_id(), cpus);
+}
+
+static bool can_clearpage_use_stnp(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ return use_clearpage_stnp && has_mor_nontemporal(entry);
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -1467,6 +1495,12 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
.cpu_enable = cpu_enable_ssbs,
},
#endif
+ {
+ .desc = "Clear Page by STNP",
+ .capability = ARM64_CLEARPAGE_STNP,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = can_clearpage_use_stnp,
+ },
{},
};
diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S
index ef08e90..9aa1de1 100644
--- a/arch/arm64/lib/clear_page.S
+++ b/arch/arm64/lib/clear_page.S
@@ -18,6 +18,25 @@
#include <linux/const.h>
#include <asm/assembler.h>
#include <asm/page.h>
+#include <asm/alternative.h>
+
+/*
+ * Clear page @dest
+ *
+ * Parameters:
+ * x0 - dest
+ */
+ENTRY(clear_page_stnp)
+ .align 6
+1: stnp xzr, xzr, [x0]
+ stnp xzr, xzr, [x0, #0x10]
+ stnp xzr, xzr, [x0, #0x20]
+ stnp xzr, xzr, [x0, #0x30]
+ add x0, x0, #0x40
+ tst x0, #(PAGE_SIZE - 1)
+ b.ne 1b
+ ret
+ENDPROC(clear_page_stnp)
/*
* Clear page @dest
@@ -26,6 +45,8 @@
* x0 - dest
*/
ENTRY(clear_page)
+ ALTERNATIVE("nop", "b clear_page_stnp", ARM64_CLEARPAGE_STNP)
+
mrs x1, dczid_el0
and w1, w1, #0xf
mov x2, #4
--
1.8.3

17 Apr '20
From: Tao Jihua <taojihua4(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
This modification mainly optimizes mtr management and fixes an mtr
addressing bug: when mtt_ba_pg_sz = 0, hem->start / step = 1, which
eventually results in an extra BA_BYTE_LEN being added to the offset
(a worked example follows below).
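A worked example of the offset computation (the concrete values are illustrative, not taken from hardware):

#include <stdio.h>

#define BA_BYTE_LEN 8

int main(void)
{
	int start = 256, r_offset = 256, step = 256;

	/* Old formula: absolute start divided by step, so a region that
	 * begins at 'step' gets a spurious extra BA_BYTE_LEN. */
	int old_off = start / step * BA_BYTE_LEN;		/* 8 */

	/* Fixed formula from this patch: offset relative to the region. */
	int new_off = (start - r_offset) / step * BA_BYTE_LEN;	/* 0 */

	printf("old=%d new=%d\n", old_off, new_off);
	return 0;
}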
Signed-off-by: Tao Jihua <taojihua4(a)huawei.com>
Reviewed-by: Hu Chunzhi <huchunzhi(a)huawei.com>
Reviewed-by: Wang Lin <wanglin137(a)huawei.com>
Reviewed-by: Zhao Weibo <zhaoweibo3(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/infiniband/hw/hns/hns_roce_hem.c | 43 ++++++++++++++++----------------
1 file changed, 21 insertions(+), 22 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 510c008..1911470 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1165,13 +1165,6 @@ struct roce_hem_item {
int end; /* end buf offset in this hem */
};
-#define hem_list_for_each(pos, n, head) \
- list_for_each_entry_safe(pos, n, head, list)
-
-#define hem_list_del_item(hem) list_del(&hem->list)
-#define hem_list_add_item(hem, head) list_add(&hem->list, head)
-#define hem_list_link_item(hem, head) list_add(&hem->sibling, head)
-
static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
int start, int end,
int count, bool exist_bt,
@@ -1216,8 +1209,8 @@ static void hem_list_free_all(struct hns_roce_dev *hr_dev,
{
struct roce_hem_item *hem, *temp_hem;
- hem_list_for_each(hem, temp_hem, head) {
- hem_list_del_item(hem);
+ list_for_each_entry_safe(hem, temp_hem, head, list) {
+ list_del(&hem->list);
hem_list_free_item(hr_dev, hem, exist_bt);
}
}
@@ -1249,7 +1242,7 @@ static struct roce_hem_item *hem_list_search_item(struct list_head *ba_list,
struct roce_hem_item *hem, *temp_hem;
struct roce_hem_item *found = NULL;
- hem_list_for_each(hem, temp_hem, ba_list) {
+ list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
if (hem_list_page_is_in_range(hem, page_offset)) {
found = hem;
break;
@@ -1391,9 +1384,9 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
goto err_exit;
}
hem_ptrs[level] = cur;
- hem_list_add_item(cur, &temp_list[level]);
+ list_add(&cur->list, &temp_list[level]);
if (hem_list_is_bottom_bt(hopnum, level))
- hem_list_link_item(cur, &temp_list[0]);
+ list_add(&cur->sibling, &temp_list[0]);
/* link bt to parent bt */
if (level > 1) {
@@ -1430,6 +1423,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
void *cpu_base;
u64 phy_base;
int ret = 0;
+ int ba_num;
int offset;
int total;
int step;
@@ -1440,15 +1434,19 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
if (root_hem)
return 0;
+ ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
+ if (ba_num < 1)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&temp_root);
- total = r->offset;
+ offset = r->offset;
/* indicate to last region */
r = &regions[region_cnt - 1];
- root_hem = hem_list_alloc_item(hr_dev, total, r->offset + r->count - 1,
- unit, true, 0);
+ root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
+ ba_num, true, 0);
if (!root_hem)
return -ENOMEM;
- hem_list_add_item(root_hem, &temp_root);
+ list_add(&root_hem->list, &temp_root);
hem_list->root_ba = root_hem->dma_addr;
@@ -1457,7 +1455,7 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
INIT_LIST_HEAD(&temp_list[i]);
total = 0;
- for (i = 0; i < region_cnt && total < unit; i++) {
+ for (i = 0; i < region_cnt && total < ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
@@ -1478,8 +1476,8 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
goto err_exit;
}
hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
- hem_list_add_item(hem, &temp_list[i]);
- hem_list_link_item(hem, &temp_btm);
+ list_add(&hem->list, &temp_list[i]);
+ list_add(&hem->sibling, &temp_btm);
total += r->count;
} else {
step = hem_list_calc_ba_range(r->hopnum, 1, unit);
@@ -1488,9 +1486,10 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
goto err_exit;
}
/* if exist mid bt, link L1 to L0 */
- hem_list_for_each(hem, temp_hem,
- &hem_list->mid_bt[i][1]) {
- offset = hem->start / step * BA_BYTE_LEN;
+ list_for_each_entry_safe(hem, temp_hem,
+ &hem_list->mid_bt[i][1], list) {
+ offset = ((hem->start - r->offset) / step) *
+ BA_BYTE_LEN;
hem_list_link_bt(hr_dev, cpu_base + offset,
hem->dma_addr);
total++;
--
1.8.3

17 Apr '20
From: Joe Perches <joe(a)perches.com>
[ Upstream commit 20faba848752901de23a4d45a1174d64d2069dde ]
Arguments are supposed to be ordered high then low.
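A standalone sketch makes the failure mode concrete (the macro body below is an assumption modeled on the kernel's GENMASK_ULL, which sets bits h..l inclusive):

#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* Intended mask: bits 15..0 of the target address. */
	printf("GENMASK_ULL(15, 0) = 0x%llx\n", GENMASK_ULL(15, 0));

	/* Swapped arguments shift left by 15 but keep only bit 0,
	 * yielding 0 -- so the WARN_ON_ONCE could never fire. */
	printf("GENMASK_ULL(0, 15) = 0x%llx\n", GENMASK_ULL(0, 15));
	return 0;
}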
Signed-off-by: Joe Perches <joe(a)perches.com>
Signed-off-by: Thomas Gleixner <tglx(a)linutronix.de>
Acked-by: Marc Zyngier <marc.zyngier(a)arm.com>
Link: https://lkml.kernel.org/r/ab5deb4fc3cd604cb620054770b7d00016d736bc.15627348…
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/irqchip/irq-gic-v3-its.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 4a9c14f..860f3ef 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -215,7 +215,7 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
static struct its_collection *valid_col(struct its_collection *col)
{
- if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
+ if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
return NULL;
return col;
--
1.8.3
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
1. Delete sec_usr_if.h, move the SEC hardware structure definitions
into sec_crypto.h, and normalize the two structure types.
2. In sec_main.c, remove fusion_limit/fusion_time, because this part of
the logic ended up unused. Also simplify the debugfs logic by not
checking some return codes, since their failure does not affect driver
loading. The probe flow is optimized as well, including adding
sec_iommu_used_check, modifying sec_probe_init, implementing
sec_qm_pre_init, and so on.
3. In sec.h, define struct sec_ctx, which holds the queue, cipher,
request and related state.
4. In sec_crypto.c, encapsulate independent interfaces (init/uninit,
map/unmap, callback, resource alloc/free, encrypt/decrypt, hardware
descriptor filling, key setting and so on), which removes the fusion
logic and makes it easier to add algorithms (see the queue-selection
sketch below). Meanwhile, remove DES algorithm support because of its
weak keys.
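For illustration, the cyclic en/decrypt queue selection that the reworked sec_crypto.c uses can be restated standalone (the queue count and the names here are assumptions for the sketch, not the driver's API):

#include <stdatomic.h>
#include <stdio.h>

#define HLF_Q_NUM 2	/* assumed: half of the context queues */

static atomic_int enc_qcyclic, dec_qcyclic;

/* Encrypt requests rotate over queues [0, HLF_Q_NUM); decrypt
 * requests over [HLF_Q_NUM, 2 * HLF_Q_NUM), balancing the load. */
static int alloc_queue_id(int encrypt)
{
	if (encrypt)
		return atomic_fetch_add(&enc_qcyclic, 1) % HLF_Q_NUM;

	return atomic_fetch_add(&dec_qcyclic, 1) % HLF_Q_NUM + HLF_Q_NUM;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("enc -> q%d, dec -> q%d\n",
		       alloc_queue_id(1), alloc_queue_id(0));
	return 0;
}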
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Ye Kai <yekai13(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/qm.c | 3 +-
drivers/crypto/hisilicon/sec2/sec.h | 166 ++-
drivers/crypto/hisilicon/sec2/sec_crypto.c | 1770 +++++++++++-----------------
drivers/crypto/hisilicon/sec2/sec_crypto.h | 237 +++-
drivers/crypto/hisilicon/sec2/sec_main.c | 541 ++++-----
drivers/crypto/hisilicon/sec2/sec_usr_if.h | 179 ---
6 files changed, 1246 insertions(+), 1650 deletions(-)
delete mode 100644 drivers/crypto/hisilicon/sec2/sec_usr_if.h
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index d3429e7..8b49902 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1389,6 +1389,7 @@ static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
+
dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
kfree(sqc);
@@ -1598,7 +1599,7 @@ static int hisi_qm_stop_qp_nolock(struct hisi_qp *qp)
else
flush_work(&qp->qm->work);
- /* wait for increase used count in qp send and last poll qp finish */
+ /* waiting for increase used count in qp send and last poll qp finish */
udelay(WAIT_PERIOD);
if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
qp_stop_fail_cb(qp);
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index f85dd06..e3b581a 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -1,19 +1,124 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#ifndef HISI_SEC_H
-#define HISI_SEC_H
+#ifndef __HISI_SEC_V2_H
+#define __HISI_SEC_V2_H
#include <linux/list.h>
+
#include "../qm.h"
-#include "sec_usr_if.h"
+#include "sec_crypto.h"
+
+/* Algorithm resource per hardware SEC queue */
+struct sec_alg_res {
+ u8 *pbuf;
+ dma_addr_t pbuf_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+ u8 *out_mac;
+ dma_addr_t out_mac_dma;
+};
+
+/* Cipher request of SEC private */
+struct sec_cipher_req {
+ struct hisi_acc_hw_sgl *c_in;
+ dma_addr_t c_in_dma;
+ struct hisi_acc_hw_sgl *c_out;
+ dma_addr_t c_out_dma;
+ u8 *c_ivin;
+ dma_addr_t c_ivin_dma;
+ struct skcipher_request *sk_req;
+ u32 c_len;
+ bool encrypt;
+};
+
+/* SEC request of Crypto */
+struct sec_req {
+ struct sec_sqe sec_sqe;
+ struct sec_ctx *ctx;
+ struct sec_qp_ctx *qp_ctx;
+
+ struct sec_cipher_req c_req;
+
+ int err_type;
+ int req_id;
+
+ /* Status of the SEC request */
+ bool fake_busy;
+};
+
+/**
+ * struct sec_req_op - Operations for SEC request
+ * @buf_map: DMA map the SGL buffers of the request
+ * @buf_unmap: DMA unmap the SGL buffers of the request
+ * @bd_fill: Fill the SEC queue BD
+ * @bd_send: Send the SEC BD into the hardware queue
+ * @callback: Call back for the request
+ * @process: Main processing logic of Skcipher
+ */
+struct sec_req_op {
+ int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
+ int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
+ void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
+ int (*process)(struct sec_ctx *ctx, struct sec_req *req);
+};
+
+/* SEC cipher context which holds the cipher's relatives */
+struct sec_cipher_ctx {
+ u8 *c_key;
+ dma_addr_t c_key_dma;
+ sector_t iv_offset;
+ u32 c_gran_size;
+ u32 ivsize;
+ u8 c_mode;
+ u8 c_alg;
+ u8 c_key_len;
+};
-#undef pr_fmt
-#define pr_fmt(fmt) "hisi_sec: " fmt
+/* SEC queue context which defines the queue's relatives */
+struct sec_qp_ctx {
+ struct hisi_qp *qp;
+ struct sec_req *req_list[QM_Q_DEPTH];
+ struct idr req_idr;
+ struct sec_alg_res res[QM_Q_DEPTH];
+ struct sec_ctx *ctx;
+ struct mutex req_lock;
+ struct hisi_acc_sgl_pool *c_in_pool;
+ struct hisi_acc_sgl_pool *c_out_pool;
+ atomic_t pending_reqs;
+};
+enum sec_alg_type {
+ SEC_SKCIPHER,
+ SEC_AEAD
+};
+
+/* SEC Crypto TFM context which defines queue, cipher and other relatives */
+struct sec_ctx {
+ struct sec_qp_ctx *qp_ctx;
+ struct sec_dev *sec;
+ const struct sec_req_op *req_op;
+ struct hisi_qp **qps;
+
+ /* Half queues for encipher, and half for decipher */
+ u32 hlf_q_num;
+
+ /* Threshold for fake busy, trigger to return -EBUSY to user */
+ u32 fake_req_limit;
+
+ /* Current cyclic index to select a queue for encipher */
+ atomic_t enc_qcyclic;
+
+ /* Current cyclic index to select a queue for decipher */
+ atomic_t dec_qcyclic;
-#define FUSION_LIMIT_DEF 1
-#define FUSION_LIMIT_MAX 64
-#define FUSION_TMOUT_NSEC_DEF (400 * 1000)
+ enum sec_alg_type alg_type;
+ bool pbuf_supported;
+ bool use_pbuf;
+ struct sec_cipher_ctx c_ctx;
+};
enum sec_endian {
SEC_LE = 0,
@@ -21,32 +126,37 @@ enum sec_endian {
SEC_64BE
};
-struct hisi_sec_ctrl;
+enum sec_debug_file_index {
+ SEC_CURRENT_QM,
+ SEC_CLEAR_ENABLE,
+ SEC_DEBUG_FILE_NUM,
+};
+
+struct sec_debug_file {
+ enum sec_debug_file_index index;
+ spinlock_t lock;
+ struct hisi_qm *qm;
+};
-struct hisi_sec_dfx {
- u64 send_cnt;
- u64 send_by_tmout;
- u64 send_by_full;
- u64 recv_cnt;
- u64 get_task_cnt;
- u64 put_task_cnt;
- u64 gran_task_cnt;
- u64 thread_cnt;
- u64 fake_busy_cnt;
- u64 busy_comp_cnt;
+struct sec_dfx {
+ atomic64_t send_cnt;
+ atomic64_t recv_cnt;
};
-struct hisi_sec {
+struct sec_debug {
+ struct sec_dfx dfx;
+ struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
+};
+
+struct sec_dev {
struct hisi_qm qm;
- struct hisi_sec_dfx sec_dfx;
- struct hisi_sec_ctrl *ctrl;
- int ctx_q_num;
- int fusion_limit;
- int fusion_tmout_nsec;
+ struct sec_debug debug;
+ u32 ctx_q_num;
+ bool iommu_used;
};
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
-struct hisi_sec *find_sec_device(int node);
-
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 0643955..52448d0 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -1,384 +1,329 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#include <linux/crypto.h>
-#include <linux/hrtimer.h>
-#include <linux/dma-mapping.h>
-#include <linux/ktime.h>
-
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/crypto.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
#include "sec.h"
#include "sec_crypto.h"
-static atomic_t sec_active_devs;
-
-#define SEC_ASYNC
-
-#define SEC_INVLD_REQ_ID (-1)
-#define SEC_PRIORITY 4001
-#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
-#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
-#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
-#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
-
-#define BUF_MAP_PER_SGL 64
-#define SEC_FUSION_BD
-
-enum C_ALG {
- C_ALG_DES = 0x0,
- C_ALG_3DES = 0x1,
- C_ALG_AES = 0x2,
- C_ALG_SM4 = 0x3,
-};
-
-enum C_MODE {
- C_MODE_ECB = 0x0,
- C_MODE_CBC = 0x1,
- C_MODE_CTR = 0x4,
- C_MODE_CCM = 0x5,
- C_MODE_GCM = 0x6,
- C_MODE_XTS = 0x7,
- C_MODE_CBC_CS = 0x9,
-};
-
-enum CKEY_LEN {
- CKEY_LEN_128_BIT = 0x0,
- CKEY_LEN_192_BIT = 0x1,
- CKEY_LEN_256_BIT = 0x2,
- CKEY_LEN_DES = 0x1,
- CKEY_LEN_3DES_3KEY = 0x1,
- CKEY_LEN_3DES_2KEY = 0x3,
-};
-
-enum SEC_BD_TYPE {
- BD_TYPE1 = 0x1,
- BD_TYPE2 = 0x2,
-};
-
-enum SEC_CIPHER_TYPE {
- SEC_CIPHER_ENC = 0x1,
- SEC_CIPHER_DEC = 0x2,
-};
-
-enum SEC_ADDR_TYPE {
- PBUF = 0x0,
- SGL = 0x1,
- PRP = 0x2,
-};
-
-enum SEC_CI_GEN {
- CI_GEN_BY_ADDR = 0x0,
- CI_GEN_BY_LBA = 0X3,
-};
-
-enum SEC_SCENE {
- SCENE_IPSEC = 0x0,
- SCENE_STORAGE = 0x5,
-};
-
-enum {
- SEC_NO_FUSION = 0x0,
- SEC_IV_FUSION = 0x1,
- SEC_FUSION_BUTT
-};
-
-enum SEC_REQ_OPS_TYPE {
- SEC_OPS_SKCIPHER_ALG = 0x0,
- SEC_OPS_MULTI_IV = 0x1,
- SEC_OPS_BUTT
-};
-
-struct cipher_res {
- struct skcipher_request_ctx **sk_reqs;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
- struct scatterlist *src;
- struct scatterlist *dst;
-};
-
-struct hisi_sec_cipher_req {
- struct hisi_acc_hw_sgl *c_in;
- dma_addr_t c_in_dma;
- struct hisi_acc_hw_sgl *c_out;
- dma_addr_t c_out_dma;
- u8 *c_ivin;
- dma_addr_t c_ivin_dma;
- struct skcipher_request *sk_req;
- struct scatterlist *src;
- struct scatterlist *dst;
- u32 c_len;
- u32 gran_num;
- u64 lba;
- bool encrypt;
-};
-
-struct hisi_sec_ctx;
-struct hisi_sec_qp_ctx;
-
-struct hisi_sec_req {
- struct hisi_sec_sqe sec_sqe;
- struct hisi_sec_ctx *ctx;
- struct hisi_sec_qp_ctx *qp_ctx;
- void **priv;
- struct hisi_sec_cipher_req c_req;
- ktime_t st_time;
- int err_type;
- int req_id;
- int req_cnt;
- int fusion_num;
- int fake_busy;
-};
+#define SEC_PRIORITY 4001
+#define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
+#define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
+#define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
+#define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
+
+/* SEC sqe(bd) bit operational relative MACRO */
+#define SEC_DE_OFFSET 1
+#define SEC_CI_GEN_OFFSET 6
+#define SEC_CIPHER_OFFSET 4
+#define SEC_SCENE_OFFSET 3
+#define SEC_DST_SGL_OFFSET 2
+#define SEC_SRC_SGL_OFFSET 7
+#define SEC_CKEY_OFFSET 9
+#define SEC_CMODE_OFFSET 12
+#define SEC_AKEY_OFFSET 5
+#define SEC_AEAD_ALG_OFFSET 11
+#define SEC_AUTH_OFFSET 6
+
+#define SEC_FLAG_OFFSET 7
+#define SEC_FLAG_MASK 0x0780
+#define SEC_TYPE_MASK 0x0F
+#define SEC_DONE_MASK 0x0001
+
+#define SEC_TOTAL_IV_SZ (SEC_IV_SIZE * QM_Q_DEPTH)
+#define SEC_SGL_SGE_NR 128
+#define SEC_CTX_DEV(ctx) (&(ctx)->sec->qm.pdev->dev)
+#define SEC_CIPHER_AUTH 0xfe
+#define SEC_AUTH_CIPHER 0x1
+#define SEC_MAX_MAC_LEN 64
+#define SEC_MAX_AAD_LEN 65535
+#define SEC_TOTAL_MAC_SZ (SEC_MAX_MAC_LEN * QM_Q_DEPTH)
+
+#define SEC_PBUF_SZ 512
+#define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
+#define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
+#define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
+ SEC_MAX_MAC_LEN * 2)
+#define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
+#define SEC_PBUF_PAGE_NUM (QM_Q_DEPTH / SEC_PBUF_NUM)
+#define SEC_PBUF_LEFT_SZ (SEC_PBUF_PKG * (QM_Q_DEPTH - \
+ SEC_PBUF_PAGE_NUM * SEC_PBUF_NUM))
+#define SEC_TOTAL_PBUF_SZ (PAGE_SIZE * SEC_PBUF_PAGE_NUM + \
+ SEC_PBUF_LEFT_SZ)
+
+#define SEC_SQE_LEN_RATE 4
+#define SEC_SQE_CFLAG 2
+#define SEC_SQE_AEAD_FLAG 3
+#define SEC_SQE_DONE 0x1
-struct hisi_sec_req_op {
- int fusion_type;
- int (*get_res)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*queue_alloc)(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
- int (*queue_free)(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
- int (*buf_map)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*buf_unmap)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*do_transfer)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*bd_fill)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*bd_send)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
- int (*callback)(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req);
-};
-
-struct hisi_sec_cipher_ctx {
- u8 *c_key;
- dma_addr_t c_key_dma;
- sector_t iv_offset;
- u32 c_gran_size;
- u8 c_mode;
- u8 c_alg;
- u8 c_key_len;
-};
-
-struct hisi_sec_qp_ctx {
- struct hisi_qp *qp;
- struct hisi_sec_req **req_list;
- struct hisi_sec_req *fusion_req;
- unsigned long *req_bitmap;
- void *priv_req_res;
- struct hisi_sec_ctx *ctx;
- struct mutex req_lock;
- atomic_t req_cnt;
- struct hisi_sec_sqe *sqe_list;
- struct hisi_acc_sgl_pool *c_in_pool;
- struct hisi_acc_sgl_pool *c_out_pool;
- int fusion_num;
- int fusion_limit;
-};
+static atomic_t sec_active_devs;
-struct hisi_sec_ctx {
- struct hisi_sec_qp_ctx *qp_ctx;
- struct hisi_sec *sec;
- struct device *dev;
- struct hisi_sec_req_op *req_op;
- struct hisi_qp **qps;
- struct hrtimer timer;
- struct work_struct work;
- atomic_t thread_cnt;
- int req_fake_limit;
- int req_limit;
- int q_num;
- int enc_q_num;
- atomic_t enc_qid;
- atomic_t dec_qid;
- struct hisi_sec_cipher_ctx c_ctx;
- int fusion_tmout_nsec;
- int fusion_limit;
- u64 enc_fusion_num;
- u64 dec_fusion_num;
- bool is_fusion;
-};
+/* Get an en/de-cipher queue cyclically to balance load over queues of TFM */
+static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ return atomic_inc_return(&ctx->enc_qcyclic) % ctx->hlf_q_num;
-#define DES_WEAK_KEY_NUM 4
-u64 des_weak_key[DES_WEAK_KEY_NUM] = {0x0101010101010101, 0xFEFEFEFEFEFEFEFE,
- 0xE0E0E0E0F1F1F1F1, 0x1F1F1F1F0E0E0E0E};
+ return atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
+ ctx->hlf_q_num;
+}
-static void hisi_sec_req_cb(struct hisi_qp *qp, void *);
+static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
+{
+ if (req->c_req.encrypt)
+ atomic_dec(&ctx->enc_qcyclic);
+ else
+ atomic_dec(&ctx->dec_qcyclic);
+}
-static int hisi_sec_alloc_req_id(struct hisi_sec_req *req,
- struct hisi_sec_qp_ctx *qp_ctx)
+static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
{
- struct hisi_sec_ctx *ctx = req->ctx;
int req_id;
- req_id = find_first_zero_bit(qp_ctx->req_bitmap, ctx->req_limit);
- if (req_id >= ctx->req_limit || req_id < 0) {
- dev_err(ctx->dev, "no free req id\n");
- return -ENOBUFS;
+ mutex_lock(&qp_ctx->req_lock);
+
+ req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL,
+ 0, QM_Q_DEPTH, GFP_ATOMIC);
+ mutex_unlock(&qp_ctx->req_lock);
+ if (unlikely(req_id < 0)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "alloc req id fail!\n");
+ return req_id;
}
- set_bit(req_id, qp_ctx->req_bitmap);
- qp_ctx->req_list[req_id] = req;
- req->req_id = req_id;
req->qp_ctx = qp_ctx;
-
- return 0;
+ qp_ctx->req_list[req_id] = req;
+ return req_id;
}
-static void hisi_sec_free_req_id(struct hisi_sec_qp_ctx *qp_ctx, int req_id)
+static void sec_free_req_id(struct sec_req *req)
{
- if (req_id < 0 || req_id >= qp_ctx->ctx->req_limit) {
- pr_err("invalid req_id[%d]\n", req_id);
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int req_id = req->req_id;
+
+ if (unlikely(req_id < 0 || req_id >= QM_Q_DEPTH)) {
+ dev_err(SEC_CTX_DEV(req->ctx), "free request id invalid!\n");
return;
}
qp_ctx->req_list[req_id] = NULL;
+ req->qp_ctx = NULL;
mutex_lock(&qp_ctx->req_lock);
- clear_bit(req_id, qp_ctx->req_bitmap);
- atomic_dec(&qp_ctx->req_cnt);
+ idr_remove(&qp_ctx->req_idr, req_id);
mutex_unlock(&qp_ctx->req_lock);
}
-static int sec_request_transfer(struct hisi_sec_ctx *, struct hisi_sec_req *);
-static int sec_request_send(struct hisi_sec_ctx *, struct hisi_sec_req *);
-
-void qp_ctx_work_process(struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_req_cb(struct hisi_qp *qp, void *resp)
{
- struct hisi_sec_req *req;
- struct hisi_sec_ctx *ctx;
- ktime_t cur_time = ktime_get();
- int ret;
-
- mutex_lock(&qp_ctx->req_lock);
-
- req = qp_ctx->fusion_req;
- if (req == NULL) {
- mutex_unlock(&qp_ctx->req_lock);
+ struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
+ struct sec_sqe *bd = resp;
+ struct sec_ctx *ctx;
+ struct sec_req *req;
+ u16 done, flag;
+ int err = 0;
+ u8 type;
+
+ type = bd->type_cipher_auth & SEC_TYPE_MASK;
+ if (unlikely(type != SEC_BD_TYPE2)) {
+ pr_err("err bd type [%d]\n", type);
return;
}
- ctx = req->ctx;
- if (ctx == NULL || req->fusion_num == qp_ctx->fusion_limit) {
- mutex_unlock(&qp_ctx->req_lock);
+ req = qp_ctx->req_list[le16_to_cpu(bd->type2.tag)];
+ if (unlikely(!req)) {
+ atomic_inc(&qp->qp_status.used);
return;
}
- if (cur_time - qp_ctx->fusion_req->st_time < ctx->fusion_tmout_nsec) {
- mutex_unlock(&qp_ctx->req_lock);
- return;
+ req->err_type = bd->type2.error_type;
+ ctx = req->ctx;
+ done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
+ flag = (le16_to_cpu(bd->type2.done_flag) &
+ SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
+ if (unlikely(req->err_type || done != SEC_SQE_DONE ||
+ (ctx->alg_type == SEC_SKCIPHER && flag != SEC_SQE_CFLAG))) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx),
+ "err_type[%d],done[%d],flag[%d]\n",
+ req->err_type, done, flag);
+ err = -EIO;
}
- qp_ctx->fusion_req = NULL;
+ atomic64_inc(&ctx->sec->debug.dfx.recv_cnt);
+
+ ctx->req_op->buf_unmap(ctx, req);
+
+ ctx->req_op->callback(ctx, req, err);
+}
+
+static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ int ret;
+ mutex_lock(&qp_ctx->req_lock);
+ ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
mutex_unlock(&qp_ctx->req_lock);
+ atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
- ret = sec_request_transfer(ctx, req);
- if (ret)
- goto err_free_req;
-
- ret = sec_request_send(ctx, req);
- __sync_add_and_fetch(&ctx->sec->sec_dfx.send_by_tmout, 1);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err(ctx->dev, "[%s][%d] ret[%d]\n", __func__,
- __LINE__, ret);
- goto err_unmap_req;
- }
+ if (unlikely(ret == -EBUSY))
+ return -ENOBUFS;
- return;
+ if (!ret) {
+ if (req->fake_busy)
+ ret = -EBUSY;
+ else
+ ret = -EINPROGRESS;
+ }
-err_unmap_req:
- ctx->req_op->buf_unmap(ctx, req);
-err_free_req:
- hisi_sec_free_req_id(qp_ctx, req->req_id);
- atomic_dec(&ctx->thread_cnt);
+ return ret;
}
-void ctx_work_process(struct work_struct *work)
+/* Get DMA memory resources */
+static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
{
- struct hisi_sec_ctx *ctx;
int i;
- ctx = container_of(work, struct hisi_sec_ctx, work);
- for (i = 0; i < ctx->q_num; i++)
- qp_ctx_work_process(&ctx->qp_ctx[i]);
+ res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ,
+ &res->c_ivin_dma, GFP_KERNEL);
+ if (!res->c_ivin)
+ return -ENOMEM;
+
+ for (i = 1; i < QM_Q_DEPTH; i++) {
+ res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
+ res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
+ }
+
+ return 0;
}
-static enum hrtimer_restart hrtimer_handler(struct hrtimer *timer)
+static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
{
- struct hisi_sec_ctx *ctx;
- ktime_t tim;
+ if (res->c_ivin)
+ dma_free_coherent(dev, SEC_TOTAL_IV_SZ,
+ res->c_ivin, res->c_ivin_dma);
+}
- ctx = container_of(timer, struct hisi_sec_ctx, timer);
- tim = ktime_set(0, ctx->fusion_tmout_nsec);
+static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ if (res->pbuf)
+ dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ,
+ res->pbuf, res->pbuf_dma);
+}
- if (ctx->sec->qm.wq)
- queue_work(ctx->sec->qm.wq, &ctx->work);
- else
- schedule_work(&ctx->work);
+/*
+ * To improve performance, pbuffer is used for
+ * small packets (< 576 bytes) when the IOMMU is in use.
+ */
+static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
+{
+ int pbuf_page_offset;
+ int i, j, k;
- hrtimer_forward(timer, timer->base->get_time(), tim);
+ res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ, &res->pbuf_dma,
+ GFP_KERNEL);
+ if (!res->pbuf)
+ return -ENOMEM;
- return HRTIMER_RESTART;
+ /*
+ * SEC_PBUF_PKG contains data pbuf, iv and
+ * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC>
+ * Every PAGE contains six SEC_PBUF_PKG
+ * The sec_qp_ctx contains QM_Q_DEPTH numbers of SEC_PBUF_PKG
+ * So we need SEC_PBUF_PAGE_NUM numbers of PAGE
+ * for the SEC_TOTAL_PBUF_SZ
+ */
+ for (i = 0; i <= SEC_PBUF_PAGE_NUM; i++) {
+ pbuf_page_offset = PAGE_SIZE * i;
+ for (j = 0; j < SEC_PBUF_NUM; j++) {
+ k = i * SEC_PBUF_NUM + j;
+ if (k == QM_Q_DEPTH)
+ break;
+ res[k].pbuf = res->pbuf +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ res[k].pbuf_dma = res->pbuf_dma +
+ j * SEC_PBUF_PKG + pbuf_page_offset;
+ }
+ }
+ return 0;
}
-static int hisi_sec_create_qp_ctx(struct hisi_sec_ctx *ctx,
- int qp_ctx_id, int req_type)
+static int sec_alg_resource_alloc(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
- struct hisi_sec_qp_ctx *qp_ctx;
- struct device *dev = ctx->dev;
- struct hisi_qp *qp;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_alg_res *res = qp_ctx->res;
int ret;
+ ret = sec_alloc_civ_resource(dev, res);
+ if (ret)
+ return ret;
+
+ if (ctx->pbuf_supported) {
+ ret = sec_alloc_pbuf_resource(dev, res);
+ if (ret) {
+ dev_err(dev, "fail to alloc pbuf dma resource!\n");
+ goto alloc_fail;
+ }
+ }
+ return 0;
+alloc_fail:
+ sec_free_civ_resource(dev, res);
+
+ return ret;
+}
+
+static void sec_alg_resource_free(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+
+ sec_free_civ_resource(dev, qp_ctx->res);
+
+ if (ctx->pbuf_supported)
+ sec_free_pbuf_resource(dev, qp_ctx->res);
+}
+
+static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id, int alg_type)
+{
+ struct device *dev = SEC_CTX_DEV(ctx);
+ struct sec_qp_ctx *qp_ctx;
+ struct hisi_qp *qp;
+ int ret = -ENOMEM;
+
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
qp = ctx->qps[qp_ctx_id];
- qp->req_type = req_type;
+ qp->req_type = 0;
qp->qp_ctx = qp_ctx;
-#ifdef SEC_ASYNC
- qp->req_cb = hisi_sec_req_cb;
-#endif
+ qp->req_cb = sec_req_cb;
qp_ctx->qp = qp;
- qp_ctx->fusion_num = 0;
- qp_ctx->fusion_req = NULL;
- qp_ctx->fusion_limit = ctx->fusion_limit;
qp_ctx->ctx = ctx;
mutex_init(&qp_ctx->req_lock);
- atomic_set(&qp_ctx->req_cnt, 0);
-
- qp_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(QM_Q_DEPTH), sizeof(long),
- GFP_ATOMIC);
- if (!qp_ctx->req_bitmap)
- return -ENOMEM;
-
- qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
- if (!qp_ctx->req_list) {
- ret = -ENOMEM;
- goto err_free_req_bitmap;
- }
-
- qp_ctx->sqe_list = kcalloc(ctx->fusion_limit,
- sizeof(struct hisi_sec_sqe), GFP_KERNEL);
- if (!qp_ctx->sqe_list) {
- ret = -ENOMEM;
- goto err_free_req_list;
- }
+ atomic_set(&qp_ctx->pending_reqs, 0);
+ idr_init(&qp_ctx->req_idr);
qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- FUSION_LIMIT_MAX);
+ SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_in_pool)) {
- ret = PTR_ERR(qp_ctx->c_in_pool);
- goto err_free_sqe_list;
+ dev_err(dev, "fail to create sgl pool for input!\n");
+ goto err_destroy_idr;
}
qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, QM_Q_DEPTH,
- FUSION_LIMIT_MAX);
+ SEC_SGL_SGE_NR);
if (IS_ERR(qp_ctx->c_out_pool)) {
- ret = PTR_ERR(qp_ctx->c_out_pool);
+ dev_err(dev, "fail to create sgl pool for output!\n");
goto err_free_c_in_pool;
}
- ret = ctx->req_op->queue_alloc(ctx, qp_ctx);
+ ret = sec_alg_resource_alloc(ctx, qp_ctx);
if (ret)
goto err_free_c_out_pool;
@@ -389,304 +334,153 @@ static int hisi_sec_create_qp_ctx(struct hisi_sec_ctx *ctx,
return 0;
err_queue_free:
- ctx->req_op->queue_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
err_free_c_out_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
err_free_c_in_pool:
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
-err_free_sqe_list:
- kfree(qp_ctx->sqe_list);
-err_free_req_list:
- kfree(qp_ctx->req_list);
-err_free_req_bitmap:
- kfree(qp_ctx->req_bitmap);
+err_destroy_idr:
+ idr_destroy(&qp_ctx->req_idr);
return ret;
}
-static void hisi_sec_release_qp_ctx(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_release_qp_ctx(struct sec_ctx *ctx,
+ struct sec_qp_ctx *qp_ctx)
{
- struct device *dev = ctx->dev;
+ struct device *dev = SEC_CTX_DEV(ctx);
hisi_qm_stop_qp(qp_ctx->qp);
- ctx->req_op->queue_free(ctx, qp_ctx);
+ sec_alg_resource_free(ctx, qp_ctx);
+
hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
- kfree(qp_ctx->req_bitmap);
- kfree(qp_ctx->req_list);
- kfree(qp_ctx->sqe_list);
-}
-
-static int __hisi_sec_ctx_init(struct hisi_sec_ctx *ctx, int qlen)
-{
- if (!ctx || qlen < 0)
- return -EINVAL;
-
- ctx->req_limit = qlen;
- ctx->req_fake_limit = qlen / 2;
- atomic_set(&ctx->thread_cnt, 0);
- atomic_set(&ctx->enc_qid, 0);
- atomic_set(&ctx->dec_qid, ctx->enc_q_num);
- if (ctx->fusion_limit > 1 && ctx->fusion_tmout_nsec > 0) {
- ktime_t tim = ktime_set(0, ctx->fusion_tmout_nsec);
-
- hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ctx->timer.function = hrtimer_handler;
- hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
- INIT_WORK(&ctx->work, ctx_work_process);
- }
-
- return 0;
-}
-
-static void hisi_sec_get_fusion_param(struct hisi_sec_ctx *ctx,
- struct hisi_sec *sec)
-{
- if (ctx->is_fusion) {
- ctx->fusion_tmout_nsec = sec->fusion_tmout_nsec;
- ctx->fusion_limit = sec->fusion_limit;
- } else {
- ctx->fusion_tmout_nsec = 0;
- ctx->fusion_limit = 1;
- }
+ idr_destroy(&qp_ctx->req_idr);
}
-static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
+static int sec_ctx_base_init(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx;
- struct hisi_sec *sec;
+ struct sec_dev *sec;
int i, ret;
- crypto_skcipher_set_reqsize(tfm, sizeof(struct hisi_sec_req));
-
ctx->qps = sec_create_qps();
if (!ctx->qps) {
pr_err("Can not create sec qps!\n");
return -ENODEV;
}
- sec = container_of(ctx->qps[0]->qm, struct hisi_sec, qm);
+ sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
+ ctx->hlf_q_num = sec->ctx_q_num >> 1;
- ctx->dev = &sec->qm.pdev->dev;
-
- ctx->q_num = sec->ctx_q_num;
-
- ctx->enc_q_num = ctx->q_num / 2;
- ctx->qp_ctx = kcalloc(ctx->q_num, sizeof(struct hisi_sec_qp_ctx),
- GFP_KERNEL);
- if (!ctx->qp_ctx) {
- dev_err(ctx->dev, "failed to alloc qp_ctx");
+ if (ctx->sec->iommu_used)
+ ctx->pbuf_supported = true;
+ else
+ ctx->pbuf_supported = false;
+ ctx->use_pbuf = false;
+
+ /* Half of queue depth is taken as fake requests limit in the queue. */
+ ctx->fake_req_limit = QM_Q_DEPTH >> 1;
+ ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
+ GFP_KERNEL);
+ if (!ctx->qp_ctx)
return -ENOMEM;
- }
-
- hisi_sec_get_fusion_param(ctx, sec);
- for (i = 0; i < ctx->q_num; i++) {
- ret = hisi_sec_create_qp_ctx(ctx, i, 0);
+ for (i = 0; i < sec->ctx_q_num; i++) {
+ ret = sec_create_qp_ctx(ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
-
- c_ctx = &ctx->c_ctx;
- c_ctx->c_key = dma_alloc_coherent(ctx->dev,
- SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL);
-
- if (!ctx->c_ctx.c_key) {
- ret = -ENOMEM;
- goto err_sec_release_qp_ctx;
- }
-
- return __hisi_sec_ctx_init(ctx, QM_Q_DEPTH);
-
+ return 0;
err_sec_release_qp_ctx:
for (i = i - 1; i >= 0; i--)
- hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
+
return ret;
}
-static void hisi_sec_cipher_ctx_exit(struct crypto_skcipher *tfm)
+static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx;
- int i = 0;
-
- c_ctx = &ctx->c_ctx;
-
- if (ctx->fusion_limit > 1 && ctx->fusion_tmout_nsec > 0)
- hrtimer_cancel(&ctx->timer);
-
- if (c_ctx->c_key) {
- memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
- dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key,
- c_ctx->c_key_dma);
- c_ctx->c_key = NULL;
- }
+ int i;
- for (i = 0; i < ctx->q_num; i++)
- hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ for (i = 0; i < ctx->sec->ctx_q_num; i++)
+ sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
- sec_destroy_qps(ctx->qps, ctx->q_num);
+ sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
kfree(ctx->qp_ctx);
}
-static int hisi_sec_skcipher_get_res(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_queue_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
-static int hisi_sec_skcipher_queue_free(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx);
-static int hisi_sec_skcipher_buf_map(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_buf_unmap(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_copy_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_base(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_storage(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_bd_fill_multi_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_bd_send_asyn(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-static int hisi_sec_skcipher_callback(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req);
-
-struct hisi_sec_req_op sec_req_ops_tbl[] = {
- {
- .fusion_type = SEC_NO_FUSION,
- .get_res = hisi_sec_skcipher_get_res,
- .queue_alloc = hisi_sec_skcipher_queue_alloc,
- .queue_free = hisi_sec_skcipher_queue_free,
- .buf_map = hisi_sec_skcipher_buf_map,
- .buf_unmap = hisi_sec_skcipher_buf_unmap,
- .do_transfer = hisi_sec_skcipher_copy_iv,
- .bd_fill = hisi_sec_skcipher_bd_fill_base,
- .bd_send = hisi_sec_bd_send_asyn,
- .callback = hisi_sec_skcipher_callback,
- }, {
- .fusion_type = SEC_IV_FUSION,
- .get_res = hisi_sec_skcipher_get_res,
- .queue_alloc = hisi_sec_skcipher_queue_alloc,
- .queue_free = hisi_sec_skcipher_queue_free,
- .buf_map = hisi_sec_skcipher_buf_map,
- .buf_unmap = hisi_sec_skcipher_buf_unmap,
- .do_transfer = hisi_sec_skcipher_copy_iv,
- .bd_fill = hisi_sec_skcipher_bd_fill_multi_iv,
- .bd_send = hisi_sec_bd_send_asyn,
- .callback = hisi_sec_skcipher_callback,
- }
-};
-
-static int hisi_sec_cipher_ctx_init_alg(struct crypto_skcipher *tfm)
+static int sec_cipher_init(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- ctx->req_op = &sec_req_ops_tbl[SEC_OPS_SKCIPHER_ALG];
- ctx->is_fusion = ctx->req_op->fusion_type;
+ c_ctx->c_key = dma_alloc_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ &c_ctx->c_key_dma, GFP_KERNEL);
+ if (!c_ctx->c_key)
+ return -ENOMEM;
- return hisi_sec_cipher_ctx_init(tfm);
+ return 0;
}
-static int hisi_sec_cipher_ctx_init_multi_iv(struct crypto_skcipher *tfm)
+static void sec_cipher_uninit(struct sec_ctx *ctx)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- ctx->req_op = &sec_req_ops_tbl[SEC_OPS_MULTI_IV];
- ctx->is_fusion = ctx->req_op->fusion_type;
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- return hisi_sec_cipher_ctx_init(tfm);
+ memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
+ dma_free_coherent(SEC_CTX_DEV(ctx), SEC_MAX_KEY_SIZE,
+ c_ctx->c_key, c_ctx->c_key_dma);
}
-static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp)
+static int sec_skcipher_init(struct crypto_skcipher *tfm)
{
- struct hisi_sec_sqe *sec_sqe = (struct hisi_sec_sqe *)resp;
- struct hisi_sec_qp_ctx *qp_ctx = qp->qp_ctx;
- struct device *dev = &qp->qm->pdev->dev;
- struct hisi_sec_req *req;
- struct hisi_sec_dfx *dfx;
- u32 req_id;
-
- if (sec_sqe->type == 1) {
- req_id = sec_sqe->type1.tag;
- req = qp_ctx->req_list[req_id];
-
- req->err_type = sec_sqe->type1.error_type;
- if (req->err_type || sec_sqe->type1.done != 0x1 ||
- sec_sqe->type1.flag != 0x2) {
- dev_err_ratelimited(dev,
- "err_type[%d] done[%d] flag[%d]\n",
- req->err_type,
- sec_sqe->type1.done,
- sec_sqe->type1.flag);
- }
- } else if (sec_sqe->type == 2) {
- req_id = sec_sqe->type2.tag;
- req = qp_ctx->req_list[req_id];
-
- req->err_type = sec_sqe->type2.error_type;
- if (req->err_type || sec_sqe->type2.done != 0x1 ||
- sec_sqe->type2.flag != 0x2) {
- dev_err_ratelimited(dev,
- "err_type[%d] done[%d] flag[%d]\n",
- req->err_type,
- sec_sqe->type2.done,
- sec_sqe->type2.flag);
- }
- } else {
- dev_err_ratelimited(dev, "err bd type [%d]\n", sec_sqe->type);
- return;
- }
-
- dfx = &req->ctx->sec->sec_dfx;
-
- req->ctx->req_op->buf_unmap(req->ctx, req);
- req->ctx->req_op->callback(req->ctx, req);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- __sync_add_and_fetch(&dfx->recv_cnt, 1);
-}
+ ctx->alg_type = SEC_SKCIPHER;
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
+ ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
+ if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
+ dev_err(SEC_CTX_DEV(ctx), "get error skcipher iv size!\n");
+ return -EINVAL;
+ }
-static int sec_des_weak_key(const u64 *key, const u32 keylen)
-{
- int i;
+ ret = sec_ctx_base_init(ctx);
+ if (ret)
+ return ret;
- for (i = 0; i < DES_WEAK_KEY_NUM; i++)
- if (*key == des_weak_key[i])
- return 1;
+ ret = sec_cipher_init(ctx);
+ if (ret)
+ goto err_cipher_init;
return 0;
+err_cipher_init:
+ sec_ctx_base_uninit(ctx);
+
+ return ret;
}
-static int sec_skcipher_des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const u8 *key)
+static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
{
- if (keylen != DES_KEY_SIZE)
- return -EINVAL;
-
- if (sec_des_weak_key((const u64 *)key, keylen))
- return -EKEYREJECTED;
-
- c_ctx->c_key_len = CKEY_LEN_DES;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- return 0;
+ sec_cipher_uninit(ctx);
+ sec_ctx_base_uninit(ctx);
}
-static int sec_skcipher_3des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const enum C_MODE c_mode)
+static int sec_skcipher_3des_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
{
switch (keylen) {
case SEC_DES3_2KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_3DES_2KEY;
+ c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
break;
case SEC_DES3_3KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_3DES_3KEY;
+ c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
break;
default:
return -EINVAL;
@@ -695,32 +489,35 @@ static int sec_skcipher_3des_setkey(struct hisi_sec_cipher_ctx *c_ctx,
return 0;
}
-static int sec_skcipher_aes_sm4_setkey(struct hisi_sec_cipher_ctx *c_ctx,
- const u32 keylen, const enum C_MODE c_mode)
+static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
+ const u32 keylen,
+ const enum sec_cmode c_mode)
{
- if (c_mode == C_MODE_XTS) {
+ if (c_mode == SEC_CMODE_XTS) {
switch (keylen) {
case SEC_XTS_MIN_KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_128_BIT;
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
break;
case SEC_XTS_MAX_KEY_SIZE:
- c_ctx->c_key_len = CKEY_LEN_256_BIT;
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
break;
default:
+ pr_err("hisi_sec2: xts mode key error!\n");
return -EINVAL;
}
} else {
switch (keylen) {
case AES_KEYSIZE_128:
- c_ctx->c_key_len = CKEY_LEN_128_BIT;
+ c_ctx->c_key_len = SEC_CKEY_128BIT;
break;
case AES_KEYSIZE_192:
- c_ctx->c_key_len = CKEY_LEN_192_BIT;
+ c_ctx->c_key_len = SEC_CKEY_192BIT;
break;
case AES_KEYSIZE_256:
- c_ctx->c_key_len = CKEY_LEN_256_BIT;
+ c_ctx->c_key_len = SEC_CKEY_256BIT;
break;
default:
+ pr_err("hisi_sec2: aes key error!\n");
return -EINVAL;
}
}
@@ -729,38 +526,40 @@ static int sec_skcipher_aes_sm4_setkey(struct hisi_sec_cipher_ctx *c_ctx,
}
static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
- const u32 keylen, const enum C_ALG c_alg, const enum C_MODE c_mode)
+ const u32 keylen, const enum sec_calg c_alg,
+ const enum sec_cmode c_mode)
{
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
int ret;
- if (c_mode == C_MODE_XTS) {
+ if (c_mode == SEC_CMODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
- if (ret)
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "xts mode key err!\n");
return ret;
+ }
}
c_ctx->c_alg = c_alg;
c_ctx->c_mode = c_mode;
switch (c_alg) {
- case C_ALG_DES:
- ret = sec_skcipher_des_setkey(c_ctx, keylen, key);
- break;
- case C_ALG_3DES:
+ case SEC_CALG_3DES:
ret = sec_skcipher_3des_setkey(c_ctx, keylen, c_mode);
break;
- case C_ALG_AES:
- case C_ALG_SM4:
+ case SEC_CALG_AES:
+ case SEC_CALG_SM4:
ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
break;
default:
return -EINVAL;
}
- if (ret)
+ if (ret) {
+ dev_err(SEC_CTX_DEV(ctx), "set sec key err!\n");
return ret;
+ }
memcpy(c_ctx->c_key, key, keylen);
@@ -769,639 +568,423 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
#define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \
static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
- u32 keylen)\
+ u32 keylen) \
{ \
return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \
}
-GEN_SEC_SETKEY_FUNC(aes_ecb, C_ALG_AES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(aes_cbc, C_ALG_AES, C_MODE_CBC)
-GEN_SEC_SETKEY_FUNC(sm4_cbc, C_ALG_SM4, C_MODE_CBC)
-
-GEN_SEC_SETKEY_FUNC(des_ecb, C_ALG_DES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(des_cbc, C_ALG_DES, C_MODE_CBC)
-GEN_SEC_SETKEY_FUNC(3des_ecb, C_ALG_3DES, C_MODE_ECB)
-GEN_SEC_SETKEY_FUNC(3des_cbc, C_ALG_3DES, C_MODE_CBC)
-
-GEN_SEC_SETKEY_FUNC(aes_xts, C_ALG_AES, C_MODE_XTS)
-GEN_SEC_SETKEY_FUNC(sm4_xts, C_ALG_SM4, C_MODE_XTS)
+GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
+GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
-static int hisi_sec_get_async_ret(int ret, int req_cnt, int req_fake_limit)
-{
- if (ret == 0) {
- if (req_cnt >= req_fake_limit)
- ret = -EBUSY;
- else
- ret = -EINPROGRESS;
- } else {
- if (ret == -EBUSY)
- ret = -ENOBUFS;
- }
+GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
+GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
- return ret;
-}
+GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
+GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-static int hisi_sec_skcipher_get_res(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- struct cipher_res *c_res = (struct cipher_res *)qp_ctx->priv_req_res;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
int req_id = req->req_id;
- c_req->c_ivin = c_res[req_id].c_ivin;
- c_req->c_ivin_dma = c_res[req_id].c_ivin_dma;
- req->priv = (void **)c_res[req_id].sk_reqs;
- c_req->src = c_res[req_id].src;
- c_req->dst = c_res[req_id].dst;
-
- return 0;
-}
-
-static int hisi_sec_skcipher_queue_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
-{
- struct cipher_res *c_res;
- int req_num = ctx->fusion_limit;
- int alloc_num = QM_Q_DEPTH * ctx->fusion_limit;
- int buf_map_num = QM_Q_DEPTH * ctx->fusion_limit;
- struct device *dev = ctx->dev;
- int i, ret;
-
- c_res = kcalloc(QM_Q_DEPTH, sizeof(struct cipher_res), GFP_KERNEL);
- if (!c_res)
- return -ENOMEM;
-
- qp_ctx->priv_req_res = (void *)c_res;
-
- c_res[0].sk_reqs = kcalloc(alloc_num,
- sizeof(struct skcipher_request_ctx *), GFP_KERNEL);
- if (!c_res[0].sk_reqs) {
- ret = -ENOMEM;
- goto err_free_c_res;
- }
-
- c_res[0].c_ivin = dma_alloc_coherent(dev,
- SEC_IV_SIZE * alloc_num, &c_res[0].c_ivin_dma, GFP_KERNEL);
- if (!c_res[0].c_ivin) {
- ret = -ENOMEM;
- goto err_free_sk_reqs;
- }
+ copy_size = c_req->c_len;
- c_res[0].src = kcalloc(buf_map_num, sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!c_res[0].src) {
- ret = -ENOMEM;
- goto err_free_c_ivin;
+ pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
+ qp_ctx->res[req_id].pbuf, copy_size);
+ if (unlikely(pbuf_length != copy_size)) {
+ dev_err(dev, "copy src data to pbuf error!\n");
+ return -EINVAL;
}
- c_res[0].dst = kcalloc(buf_map_num, sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!c_res[0].dst) {
- ret = -ENOMEM;
- goto err_free_src;
+ c_req->c_in_dma = qp_ctx->res[req_id].pbuf_dma;
+ if (!c_req->c_in_dma) {
+ dev_err(dev, "fail to set pbuffer address!\n");
+ return -ENOMEM;
}
- for (i = 1; i < QM_Q_DEPTH; i++) {
- c_res[i].sk_reqs = c_res[0].sk_reqs + i * req_num;
- c_res[i].c_ivin = c_res[0].c_ivin
- + i * req_num * SEC_IV_SIZE;
- c_res[i].c_ivin_dma = c_res[0].c_ivin_dma
- + i * req_num * SEC_IV_SIZE;
- c_res[i].src = c_res[0].src + i * req_num;
- c_res[i].dst = c_res[0].dst + i * req_num;
- }
+ c_req->c_out_dma = c_req->c_in_dma;
return 0;
-
-err_free_src:
- kfree(c_res[0].src);
-err_free_c_ivin:
- dma_free_coherent(dev, SEC_IV_SIZE * alloc_num, c_res[0].c_ivin,
- c_res[0].c_ivin_dma);
-err_free_sk_reqs:
- kfree(c_res[0].sk_reqs);
-err_free_c_res:
- kfree(c_res);
-
- return ret;
}
-static int hisi_sec_skcipher_queue_free(struct hisi_sec_ctx *ctx,
- struct hisi_sec_qp_ctx *qp_ctx)
+static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *dst)
{
- struct cipher_res *c_res = (struct cipher_res *)qp_ctx->priv_req_res;
- struct device *dev = ctx->dev;
- int alloc_num = QM_Q_DEPTH * ctx->fusion_limit;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ int copy_size, pbuf_length;
+ int req_id = req->req_id;
- kfree(c_res[0].dst);
- kfree(c_res[0].src);
- dma_free_coherent(dev, SEC_IV_SIZE * alloc_num, c_res[0].c_ivin,
- c_res[0].c_ivin_dma);
- kfree(c_res[0].sk_reqs);
- kfree(c_res);
+ copy_size = c_req->c_len;
- return 0;
+ pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
+ qp_ctx->res[req_id].pbuf, copy_size);
+ if (unlikely(pbuf_length != copy_size))
+ dev_err(dev, "copy pbuf data to dst error!\n");
}
-static int hisi_sec_skcipher_buf_map(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct device *dev = ctx->dev;
- struct skcipher_request *sk_next;
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int src_nents, src_nents_sum, copyed_src_nents;
- int dst_nents, dst_nents_sum, copyed_dst_nents;
- int i, ret, buf_map_limit;
-
- src_nents_sum = 0;
- dst_nents_sum = 0;
- for (i = 0; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- if (sk_next == NULL) {
- dev_err(ctx->dev, "nullptr at [%d]\n", i);
- return -EFAULT;
- }
- src_nents_sum += sg_nents(sk_next->src);
- dst_nents_sum += sg_nents(sk_next->dst);
- if (sk_next->src == sk_next->dst && i > 0) {
- dev_err(ctx->dev, "err: src == dst\n");
- return -EFAULT;
- }
- }
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
+ struct device *dev = SEC_CTX_DEV(ctx);
- buf_map_limit = FUSION_LIMIT_MAX;
- if (src_nents_sum > buf_map_limit || dst_nents_sum > buf_map_limit) {
- dev_err(ctx->dev, "src[%d] or dst[%d] bigger than %d\n",
- src_nents_sum, dst_nents_sum, buf_map_limit);
- return -ENOBUFS;
- }
-
- copyed_src_nents = 0;
- copyed_dst_nents = 0;
- for (i = 0; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- src_nents = sg_nents(sk_next->src);
- dst_nents = sg_nents(sk_next->dst);
-
- if (i != req->fusion_num - 1) {
- sg_unmark_end(&sk_next->src[src_nents - 1]);
- sg_unmark_end(&sk_next->dst[dst_nents - 1]);
- }
-
- memcpy(c_req->src + copyed_src_nents, sk_next->src,
- src_nents * sizeof(struct scatterlist));
- memcpy(c_req->dst + copyed_dst_nents, sk_next->dst,
- dst_nents * sizeof(struct scatterlist));
+ if (ctx->use_pbuf)
+ return sec_cipher_pbuf_map(ctx, req, src);
- copyed_src_nents += src_nents;
- copyed_dst_nents += dst_nents;
- }
-
- c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, c_req->src,
- qp_ctx->c_in_pool, req->req_id, &c_req->c_in_dma);
+ c_req->c_in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
+ qp_ctx->c_in_pool,
+ req->req_id,
+ &c_req->c_in_dma);
- if (IS_ERR(c_req->c_in))
+ if (IS_ERR(c_req->c_in)) {
+ dev_err(dev, "fail to dma map input sgl buffers!\n");
return PTR_ERR(c_req->c_in);
+ }
- if (c_req->dst == c_req->src) {
+ if (dst == src) {
c_req->c_out = c_req->c_in;
c_req->c_out_dma = c_req->c_in_dma;
} else {
- c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, c_req->dst,
- qp_ctx->c_out_pool, req->req_id, &c_req->c_out_dma);
+ c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
+ qp_ctx->c_out_pool,
+ req->req_id,
+ &c_req->c_out_dma);
+
if (IS_ERR(c_req->c_out)) {
- ret = PTR_ERR(c_req->c_out);
- goto err_unmap_src;
+ dev_err(dev, "fail to dma map output sgl buffers!\n");
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
+ return PTR_ERR(c_req->c_out);
}
}
return 0;
-
-err_unmap_src:
- hisi_acc_sg_buf_unmap(dev, c_req->src, c_req->c_in);
-
- return ret;
}
-static int hisi_sec_skcipher_buf_unmap(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
+ struct scatterlist *src, struct scatterlist *dst)
{
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct device *dev = ctx->dev;
-
- if (c_req->dst != c_req->src)
- hisi_acc_sg_buf_unmap(dev, c_req->src, c_req->c_in);
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct device *dev = SEC_CTX_DEV(ctx);
- hisi_acc_sg_buf_unmap(dev, c_req->dst, c_req->c_out);
+ if (ctx->use_pbuf) {
+ sec_cipher_pbuf_unmap(ctx, req, dst);
+ } else {
+ if (dst != src)
+ hisi_acc_sg_buf_unmap(dev, src, c_req->c_in);
- return 0;
+ hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
+ }
}
-static int hisi_sec_skcipher_copy_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct skcipher_request *sk_req =
- (struct skcipher_request *)req->priv[0];
- struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
- struct skcipher_request *sk_next;
- int i, iv_size;
-
- c_req->c_len = sk_req->cryptlen;
-
- iv_size = crypto_skcipher_ivsize(atfm);
- if (iv_size > SEC_IV_SIZE)
- return -EINVAL;
+ struct skcipher_request *sq = req->c_req.sk_req;
- memcpy(c_req->c_ivin, sk_req->iv, iv_size);
-
- if (ctx->is_fusion) {
- for (i = 1; i < req->fusion_num; i++) {
- sk_next = (struct skcipher_request *)req->priv[i];
- memcpy(c_req->c_ivin + i * iv_size, sk_next->iv,
- iv_size);
- }
-
- c_req->gran_num = req->fusion_num;
- c_ctx->c_gran_size = sk_req->cryptlen;
- }
-
- return 0;
+ return sec_cipher_map(ctx, req, sq->src, sq->dst);
}
-static int hisi_sec_skcipher_bd_fill_storage(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
+ struct skcipher_request *sq = req->c_req.sk_req;
- if (!c_req->c_len)
- return -EINVAL;
-
- sec_sqe->type1.c_key_addr_l = lower_32_bits(c_ctx->c_key_dma);
- sec_sqe->type1.c_key_addr_h = upper_32_bits(c_ctx->c_key_dma);
- sec_sqe->type1.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
- sec_sqe->type1.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
- sec_sqe->type1.data_src_addr_l = lower_32_bits(c_req->c_in_dma);
- sec_sqe->type1.data_src_addr_h = upper_32_bits(c_req->c_in_dma);
- sec_sqe->type1.data_dst_addr_l = lower_32_bits(c_req->c_out_dma);
- sec_sqe->type1.data_dst_addr_h = upper_32_bits(c_req->c_out_dma);
-
- sec_sqe->type1.c_mode = c_ctx->c_mode;
- sec_sqe->type1.c_alg = c_ctx->c_alg;
- sec_sqe->type1.c_key_len = c_ctx->c_key_len;
-
- sec_sqe->src_addr_type = SGL;
- sec_sqe->dst_addr_type = SGL;
- sec_sqe->type = BD_TYPE1;
- sec_sqe->scene = SCENE_STORAGE;
- sec_sqe->de = c_req->c_in_dma != c_req->c_out_dma;
-
- if (c_req->encrypt)
- sec_sqe->cipher = SEC_CIPHER_ENC;
- else
- sec_sqe->cipher = SEC_CIPHER_DEC;
-
- if (c_ctx->c_mode == C_MODE_XTS)
- sec_sqe->type1.ci_gen = CI_GEN_BY_LBA;
-
- sec_sqe->type1.cipher_gran_size = c_ctx->c_gran_size;
- sec_sqe->type1.gran_num = c_req->gran_num;
- __sync_fetch_and_add(&ctx->sec->sec_dfx.gran_task_cnt, c_req->gran_num);
- sec_sqe->type1.block_size = c_req->c_len;
-
- sec_sqe->type1.lba_l = lower_32_bits(c_req->lba);
- sec_sqe->type1.lba_h = upper_32_bits(c_req->lba);
-
- sec_sqe->type1.tag = req->req_id;
-
- return 0;
+ sec_cipher_unmap(ctx, req, sq->src, sq->dst);
}
-static int hisi_sec_skcipher_bd_fill_multi_iv(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
{
int ret;
- ret = hisi_sec_skcipher_bd_fill_storage(ctx, req);
- if (ret)
+ ret = ctx->req_op->buf_map(ctx, req);
+ if (unlikely(ret))
return ret;
- req->sec_sqe.type1.ci_gen = CI_GEN_BY_ADDR;
-
- return 0;
-}
-
-static int hisi_sec_skcipher_bd_fill_base(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
-{
- struct hisi_sec_cipher_ctx *c_ctx = &ctx->c_ctx;
- struct hisi_sec_cipher_req *c_req = &req->c_req;
- struct hisi_sec_sqe *sec_sqe = &req->sec_sqe;
-
- if (!c_req->c_len)
- return -EINVAL;
-
- sec_sqe->type2.c_key_addr_l = lower_32_bits(c_ctx->c_key_dma);
- sec_sqe->type2.c_key_addr_h = upper_32_bits(c_ctx->c_key_dma);
- sec_sqe->type2.c_ivin_addr_l = lower_32_bits(c_req->c_ivin_dma);
- sec_sqe->type2.c_ivin_addr_h = upper_32_bits(c_req->c_ivin_dma);
- sec_sqe->type2.data_src_addr_l = lower_32_bits(c_req->c_in_dma);
- sec_sqe->type2.data_src_addr_h = upper_32_bits(c_req->c_in_dma);
- sec_sqe->type2.data_dst_addr_l = lower_32_bits(c_req->c_out_dma);
- sec_sqe->type2.data_dst_addr_h = upper_32_bits(c_req->c_out_dma);
+ ctx->req_op->do_transfer(ctx, req);
- sec_sqe->type2.c_mode = c_ctx->c_mode;
- sec_sqe->type2.c_alg = c_ctx->c_alg;
- sec_sqe->type2.c_key_len = c_ctx->c_key_len;
-
- sec_sqe->src_addr_type = SGL;
- sec_sqe->dst_addr_type = SGL;
- sec_sqe->type = BD_TYPE2;
- sec_sqe->scene = SCENE_IPSEC;
- sec_sqe->de = c_req->c_in_dma != c_req->c_out_dma;
+ ret = ctx->req_op->bd_fill(ctx, req);
+ if (unlikely(ret))
+ goto unmap_req_buf;
- __sync_fetch_and_add(&ctx->sec->sec_dfx.gran_task_cnt, 1);
+ return ret;
- if (c_req->encrypt)
- sec_sqe->cipher = SEC_CIPHER_ENC;
- else
- sec_sqe->cipher = SEC_CIPHER_DEC;
+unmap_req_buf:
+ ctx->req_op->buf_unmap(ctx, req);
- sec_sqe->type2.c_len = c_req->c_len;
- sec_sqe->type2.tag = req->req_id;
+ return ret;
+}
- return 0;
+static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
+{
+ ctx->req_op->buf_unmap(ctx, req);
}
-static int hisi_sec_bd_send_asyn(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int req_cnt = req->req_cnt;
- int ret;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_alg_res *res = &req->qp_ctx->res[req->req_id];
- mutex_lock(&qp_ctx->req_lock);
- ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
- if (ret == 0)
- ctx->sec->sec_dfx.send_cnt++;
- mutex_unlock(&qp_ctx->req_lock);
+ if (ctx->use_pbuf) {
+ c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
+ c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
+ } else {
+ c_req->c_ivin = res->c_ivin;
+ c_req->c_ivin_dma = res->c_ivin_dma;
+ }
- return hisi_sec_get_async_ret(ret, req_cnt, ctx->req_fake_limit);
+ memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
}
-static void hisi_sec_skcipher_complete(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req, int err_code)
+static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
- struct skcipher_request **sk_reqs =
- (struct skcipher_request **)req->priv;
- int i, req_fusion_num;
-
- if (ctx->is_fusion == SEC_NO_FUSION)
- req_fusion_num = 1;
+ struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
+ struct sec_cipher_req *c_req = &req->c_req;
+ struct sec_sqe *sec_sqe = &req->sec_sqe;
+ u8 scene, sa_type, da_type;
+ u8 bd_type, cipher;
+ u8 de = 0;
+
+ memset(sec_sqe, 0, sizeof(struct sec_sqe));
+
+ sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
+ sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
+ sec_sqe->type2.data_src_addr = cpu_to_le64(c_req->c_in_dma);
+ sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
+
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
+ SEC_CMODE_OFFSET);
+ sec_sqe->type2.c_alg = c_ctx->c_alg;
+ sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
+ SEC_CKEY_OFFSET);
+
+ bd_type = SEC_BD_TYPE2;
+ if (c_req->encrypt)
+ cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
else
- req_fusion_num = req->fusion_num;
+ cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
+ sec_sqe->type_cipher_auth = bd_type | cipher;
- for (i = 0; i < req_fusion_num; i++)
- sk_reqs[i]->base.complete(&sk_reqs[i]->base, err_code);
-
- /* free sk_reqs if this request is completed */
- if (err_code != -EINPROGRESS)
- __sync_add_and_fetch(&ctx->sec->sec_dfx.put_task_cnt,
- req_fusion_num);
+ if (ctx->use_pbuf)
+ sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
else
- __sync_add_and_fetch(&ctx->sec->sec_dfx.busy_comp_cnt,
- req_fusion_num);
-}
-
-static int hisi_sec_skcipher_callback(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
-{
- struct hisi_sec_qp_ctx *qp_ctx = req->qp_ctx;
- int req_id = req->req_id;
+ sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
+ scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
+ if (c_req->c_in_dma != c_req->c_out_dma)
+ de = 0x1 << SEC_DE_OFFSET;
- if (__sync_bool_compare_and_swap(&req->fake_busy, 1, 0))
- hisi_sec_skcipher_complete(ctx, req, -EINPROGRESS);
+ sec_sqe->sds_sa_type = (de | scene | sa_type);
- hisi_sec_skcipher_complete(ctx, req, req->err_type);
+ /* Just set DST address type */
+ if (ctx->use_pbuf)
+ da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
+ else
+ da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
+ sec_sqe->sdm_addr_type |= da_type;
- hisi_sec_free_req_id(qp_ctx, req_id);
+ sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
+ sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
return 0;
}
-static int sec_get_issue_id_range(atomic_t *qid, int start, int end)
+static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
{
- int issue_id;
- int issue_len = end - start;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ u32 iv_size = req->ctx->c_ctx.ivsize;
+ struct scatterlist *sgl;
+ unsigned int cryptlen;
+ size_t sz;
+ u8 *iv;
+
+ if (req->c_req.encrypt)
+ sgl = sk_req->dst;
+ else
+ sgl = sk_req->src;
- issue_id = (atomic_inc_return(qid) - start) % issue_len + start;
- if (issue_id % issue_len == 0 && atomic_read(qid) > issue_len)
- atomic_sub(issue_len, qid);
+ iv = sk_req->iv;
+ cryptlen = sk_req->cryptlen;
- return issue_id;
+ sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
+ cryptlen - iv_size);
+ if (unlikely(sz != iv_size))
+ dev_err(SEC_CTX_DEV(req->ctx), "copy output iv error!\n");
}
-static inline int sec_get_issue_id(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
+ int err)
{
- int issue_id;
+ struct skcipher_request *sk_req = req->c_req.sk_req;
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- if (req->c_req.encrypt == 1)
- issue_id = sec_get_issue_id_range(&ctx->enc_qid, 0,
- ctx->enc_q_num);
- else
- issue_id = sec_get_issue_id_range(&ctx->dec_qid, ctx->enc_q_num,
- ctx->q_num);
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
- return issue_id;
-}
+	/* Update the output IV after CBC-mode encryption */
+ if (!err && ctx->c_ctx.c_mode == SEC_CMODE_CBC && req->c_req.encrypt)
+ sec_update_iv(req, SEC_SKCIPHER);
-static inline void hisi_sec_inc_thread_cnt(struct hisi_sec_ctx *ctx)
-{
- int thread_cnt = atomic_inc_return(&ctx->thread_cnt);
+ if (req->fake_busy)
+ sk_req->base.complete(&sk_req->base, -EINPROGRESS);
- if (thread_cnt > ctx->sec->sec_dfx.thread_cnt)
- ctx->sec->sec_dfx.thread_cnt = thread_cnt;
+ sk_req->base.complete(&sk_req->base, err);
}
-static struct hisi_sec_req *sec_request_alloc(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *in_req, int *fusion_send, int *fake_busy)
+static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
- struct hisi_sec_qp_ctx *qp_ctx;
- struct hisi_sec_req *req;
- int issue_id, ret;
-
- __sync_add_and_fetch(&ctx->sec->sec_dfx.get_task_cnt, 1);
-
- issue_id = sec_get_issue_id(ctx, in_req);
- hisi_sec_inc_thread_cnt(ctx);
-
- qp_ctx = &ctx->qp_ctx[issue_id];
-
- mutex_lock(&qp_ctx->req_lock);
-
- if (in_req->c_req.sk_req->src == in_req->c_req.sk_req->dst) {
- *fusion_send = 1;
- } else if (qp_ctx->fusion_req &&
- qp_ctx->fusion_req->fusion_num < qp_ctx->fusion_limit) {
- req = qp_ctx->fusion_req;
-
- *fake_busy = req->fake_busy;
- __sync_add_and_fetch(&ctx->sec->sec_dfx.fake_busy_cnt,
- *fake_busy);
-
- req->priv[req->fusion_num] = in_req->c_req.sk_req;
- req->fusion_num++;
- in_req->fusion_num = req->fusion_num;
- if (req->fusion_num == qp_ctx->fusion_limit) {
- *fusion_send = 1;
- qp_ctx->fusion_req = NULL;
- }
- mutex_unlock(&qp_ctx->req_lock);
- return req;
- }
+ struct sec_qp_ctx *qp_ctx = req->qp_ctx;
- req = in_req;
-
- if (hisi_sec_alloc_req_id(req, qp_ctx)) {
- mutex_unlock(&qp_ctx->req_lock);
- return NULL;
- }
+ atomic_dec(&qp_ctx->pending_reqs);
+ sec_free_req_id(req);
+ sec_free_queue_id(ctx, req);
+}
- req->fake_busy = 0;
+static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
+{
+ struct sec_qp_ctx *qp_ctx;
+ int queue_id;
- req->req_cnt = atomic_inc_return(&qp_ctx->req_cnt);
- if (req->req_cnt >= ctx->req_fake_limit) {
- req->fake_busy = 1;
- *fake_busy = 1;
- __sync_add_and_fetch(&ctx->sec->sec_dfx.fake_busy_cnt, 1);
- }
+ /* To load balance */
+ queue_id = sec_alloc_queue_id(ctx, req);
+ qp_ctx = &ctx->qp_ctx[queue_id];
- ret = ctx->req_op->get_res(ctx, req);
- if (ret) {
- dev_err(ctx->dev, "req_op get_res failed\n");
- mutex_unlock(&qp_ctx->req_lock);
- goto err_free_req_id;
+ req->req_id = sec_alloc_req_id(req, qp_ctx);
+ if (unlikely(req->req_id < 0)) {
+ sec_free_queue_id(ctx, req);
+ return req->req_id;
}
- if (ctx->fusion_limit <= 1 || ctx->fusion_tmout_nsec == 0)
- *fusion_send = 1;
-
- if (ctx->is_fusion && *fusion_send == 0)
- qp_ctx->fusion_req = req;
-
- req->fusion_num = 1;
-
- req->priv[0] = in_req->c_req.sk_req;
- req->st_time = ktime_get();
-
- mutex_unlock(&qp_ctx->req_lock);
-
- return req;
+ if (ctx->fake_req_limit <= atomic_inc_return(&qp_ctx->pending_reqs))
+ req->fake_busy = true;
+ else
+ req->fake_busy = false;
-err_free_req_id:
- hisi_sec_free_req_id(qp_ctx, req->req_id);
- return NULL;
+ return 0;
}
-static int sec_request_transfer(struct hisi_sec_ctx *ctx,
- struct hisi_sec_req *req)
+static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
+ struct sec_cipher_req *c_req = &req->c_req;
int ret;
- ret = ctx->req_op->buf_map(ctx, req);
- if (ret)
+ ret = sec_request_init(ctx, req);
+ if (unlikely(ret))
return ret;
- ret = ctx->req_op->do_transfer(ctx, req);
- if (ret)
- goto unmap_req_buf;
+ ret = sec_request_transfer(ctx, req);
+ if (unlikely(ret))
+ goto err_uninit_req;
- memset(&req->sec_sqe, 0, sizeof(struct hisi_sec_sqe));
- ret = ctx->req_op->bd_fill(ctx, req);
- if (ret)
- goto unmap_req_buf;
+	/* Save the output IV before decryption */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt)
+ sec_update_iv(req, ctx->alg_type);
- return 0;
+ ret = ctx->req_op->bd_send(ctx, req);
+ if (unlikely(ret != -EBUSY && ret != -EINPROGRESS)) {
+ dev_err_ratelimited(SEC_CTX_DEV(ctx),
+ "send sec request failed!\n");
+ goto err_send_req;
+ }
-unmap_req_buf:
- ctx->req_op->buf_unmap(ctx, req);
return ret;
-}
-
-static int sec_request_send(struct hisi_sec_ctx *ctx, struct hisi_sec_req *req)
-{
- int ret;
- ret = ctx->req_op->bd_send(ctx, req);
+err_send_req:
+	/* On failure, restore the user's IV */
+ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
+ if (ctx->alg_type == SEC_SKCIPHER)
+ memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
+ ctx->c_ctx.ivsize);
+ }
- if (ret == 0 || ret == -EBUSY || ret == -EINPROGRESS)
- atomic_dec(&ctx->thread_cnt);
+ sec_request_untransfer(ctx, req);
+err_uninit_req:
+ sec_request_uninit(ctx, req);
return ret;
}
-static int sec_io_proc(struct hisi_sec_ctx *ctx, struct hisi_sec_req *in_req)
+static const struct sec_req_op sec_skcipher_req_ops = {
+ .buf_map = sec_skcipher_sgl_map,
+ .buf_unmap = sec_skcipher_sgl_unmap,
+ .do_transfer = sec_skcipher_copy_iv,
+ .bd_fill = sec_skcipher_bd_fill,
+ .bd_send = sec_bd_send,
+ .callback = sec_skcipher_callback,
+ .process = sec_process,
+};
+
+static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
- struct hisi_sec_req *req;
- int fusion_send = 0;
- int fake_busy = 0;
- int ret;
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- in_req->fusion_num = 1;
+ ctx->req_op = &sec_skcipher_req_ops;
- req = sec_request_alloc(ctx, in_req, &fusion_send, &fake_busy);
+ return sec_skcipher_init(tfm);
+}
- if (!req) {
- dev_err_ratelimited(ctx->dev, "sec_request_alloc failed\n");
- return -ENOMEM;
- }
+static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
+{
+ sec_skcipher_uninit(tfm);
+}
- if (ctx->is_fusion && fusion_send == 0)
- return fake_busy ? -EBUSY : -EINPROGRESS;
+static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+{
+ struct skcipher_request *sk_req = sreq->c_req.sk_req;
+ struct device *dev = SEC_CTX_DEV(ctx);
+ u8 c_alg = ctx->c_ctx.c_alg;
- ret = sec_request_transfer(ctx, req);
- if (ret) {
- dev_err_ratelimited(ctx->dev, "sec_transfer ret[%d]\n", ret);
- goto err_free_req;
+ if (unlikely(!sk_req->src || !sk_req->dst)) {
+ dev_err(dev, "skcipher input param error!\n");
+ return -EINVAL;
}
+ sreq->c_req.c_len = sk_req->cryptlen;
- ret = sec_request_send(ctx, req);
- __sync_add_and_fetch(&ctx->sec->sec_dfx.send_by_full, 1);
- if (ret != -EBUSY && ret != -EINPROGRESS) {
- dev_err_ratelimited(ctx->dev, "sec_send ret[%d]\n", ret);
- goto err_unmap_req;
- }
+ if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
+ ctx->use_pbuf = true;
- return ret;
+ if (c_alg == SEC_CALG_3DES) {
+ if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
+ dev_err(dev, "skcipher 3des input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
+ if (unlikely(sk_req->cryptlen & (AES_BLOCK_SIZE - 1))) {
+ dev_err(dev, "skcipher aes input length error!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
-err_unmap_req:
- ctx->req_op->buf_unmap(ctx, req);
-err_free_req:
- hisi_sec_free_req_id(req->qp_ctx, req->req_id);
- atomic_dec(&ctx->thread_cnt);
- return ret;
+ dev_err(dev, "skcipher algorithm error!\n");
+ return -EINVAL;
}
static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
- struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(sk_req);
- struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(atfm);
- struct hisi_sec_req *req = skcipher_request_ctx(sk_req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
+ struct sec_req *req = skcipher_request_ctx(sk_req);
+ struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int ret;
- if (!sk_req->src || !sk_req->dst || !sk_req->cryptlen)
- return -EINVAL;
+ if (!sk_req->cryptlen)
+ return 0;
- req->c_req.sk_req = sk_req;
+ req->c_req.sk_req = sk_req;
req->c_req.encrypt = encrypt;
- req->ctx = ctx;
+ req->ctx = ctx;
+
+ ret = sec_skcipher_param_check(ctx, req);
+ if (unlikely(ret))
+ return -EINVAL;
- return sec_io_proc(ctx, req);
+ return ctx->req_op->process(ctx, req);
}
static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
@@ -1415,7 +998,7 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
}
#define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \
- sec_max_key_size, hisi_sec_cipher_ctx_init_func, blk_size, iv_size)\
+ sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\
{\
.base = {\
.cra_name = sec_cra_name,\
@@ -1423,12 +1006,11 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.cra_priority = SEC_PRIORITY,\
.cra_flags = CRYPTO_ALG_ASYNC,\
.cra_blocksize = blk_size,\
- .cra_ctxsize = sizeof(struct hisi_sec_ctx),\
- .cra_alignmask = 0,\
+ .cra_ctxsize = sizeof(struct sec_ctx),\
.cra_module = THIS_MODULE,\
},\
- .init = hisi_sec_cipher_ctx_init_func,\
- .exit = hisi_sec_cipher_ctx_exit,\
+ .init = ctx_init,\
+ .exit = ctx_exit,\
.setkey = sec_set_key,\
.decrypt = sec_skcipher_decrypt,\
.encrypt = sec_skcipher_encrypt,\
@@ -1437,75 +1019,55 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
.ivsize = iv_size,\
},
-#define SEC_SKCIPHER_NORMAL_ALG(name, key_func, min_key_size, \
+#define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \
max_key_size, blk_size, iv_size) \
SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
- hisi_sec_cipher_ctx_init_alg, blk_size, iv_size)
+ sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size)
-#define SEC_SKCIPHER_FUSION_ALG(name, key_func, min_key_size, \
- max_key_size, blk_size, iv_size) \
- SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \
- hisi_sec_cipher_ctx_init_multi_iv, blk_size, iv_size)
-
-static struct skcipher_alg sec_normal_algs[] = {
- SEC_SKCIPHER_NORMAL_ALG("ecb(aes)", sec_setkey_aes_ecb,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(aes)", sec_setkey_aes_cbc,
- AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("xts(aes)", sec_setkey_aes_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("ecb(des)", sec_setkey_des_ecb,
- DES_KEY_SIZE, DES_KEY_SIZE, DES_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(des)", sec_setkey_des_cbc,
- DES_KEY_SIZE, DES_KEY_SIZE, DES_BLOCK_SIZE, DES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0)
- SEC_SKCIPHER_NORMAL_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
- SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
- DES3_EDE_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_NORMAL_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
-};
+static struct skcipher_alg sec_skciphers[] = {
+ SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc,
+ AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, 0)
+
+ SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc,
+ SEC_DES3_2KEY_SIZE, SEC_DES3_3KEY_SIZE,
+ DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts,
+ SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
+
+ SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
+ AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE,
+ AES_BLOCK_SIZE, AES_BLOCK_SIZE)
-static struct skcipher_alg sec_fusion_algs[] = {
- SEC_SKCIPHER_FUSION_ALG("xts(sm4)", sec_setkey_sm4_xts,
- SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
- SEC_SKCIPHER_FUSION_ALG("cbc(sm4)", sec_setkey_sm4_cbc,
- AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, AES_BLOCK_SIZE,
- AES_BLOCK_SIZE)
};
-int hisi_sec_register_to_crypto(int fusion_limit)
+int sec_register_to_crypto(void)
{
/* To avoid repeat register */
- if (atomic_add_return(1, &sec_active_devs) == 1) {
- if (fusion_limit == 1)
- return crypto_register_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- return crypto_register_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
- }
+ if (atomic_add_return(1, &sec_active_devs) == 1)
+ return crypto_register_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
return 0;
}
-void hisi_sec_unregister_from_crypto(int fusion_limit)
+void sec_unregister_from_crypto(void)
{
- if (atomic_sub_return(1, &sec_active_devs) == 0) {
- if (fusion_limit == 1)
- crypto_unregister_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- crypto_unregister_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
- }
+ if (atomic_sub_return(1, &sec_active_devs) == 0)
+ crypto_unregister_skciphers(sec_skciphers,
+ ARRAY_SIZE(sec_skciphers));
}
-
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index bffbeba..221257e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -1,13 +1,238 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/* Copyright (c) 2018-2019 HiSilicon Limited. */
-#ifndef HISI_SEC_CRYPTO_H
-#define HISI_SEC_CRYPTO_H
+#ifndef __HISI_SEC_V2_CRYPTO_H
+#define __HISI_SEC_V2_CRYPTO_H
-#define SEC_IV_SIZE 24
-#define SEC_MAX_KEY_SIZE 64
+#define SEC_IV_SIZE 24
+#define SEC_MAX_KEY_SIZE 64
+#define SEC_MAX_AUTH_KEY_SIZE 64
-int hisi_sec_register_to_crypto(int fusion_limit);
-void hisi_sec_unregister_from_crypto(int fusion_limit);
+#define SEC_COMM_SCENE 0
+enum sec_calg {
+ SEC_CALG_3DES = 0x1,
+ SEC_CALG_AES = 0x2,
+ SEC_CALG_SM4 = 0x3,
+};
+
+enum sec_hash_alg {
+ SEC_A_HMAC_SHA1 = 0x10,
+ SEC_A_HMAC_SHA256 = 0x11,
+ SEC_A_HMAC_SHA512 = 0x15,
+};
+
+enum sec_mac_len {
+ SEC_HMAC_SHA1_MAC = 20,
+ SEC_HMAC_SHA256_MAC = 32,
+ SEC_HMAC_SHA512_MAC = 64,
+};
+
+enum sec_cmode {
+ SEC_CMODE_ECB = 0x0,
+ SEC_CMODE_CBC = 0x1,
+ SEC_CMODE_CTR = 0x4,
+ SEC_CMODE_XTS = 0x7,
+};
+
+enum sec_ckey_type {
+ SEC_CKEY_128BIT = 0x0,
+ SEC_CKEY_192BIT = 0x1,
+ SEC_CKEY_256BIT = 0x2,
+ SEC_CKEY_3DES_3KEY = 0x1,
+ SEC_CKEY_3DES_2KEY = 0x3,
+};
+
+enum sec_bd_type {
+ SEC_BD_TYPE1 = 0x1,
+ SEC_BD_TYPE2 = 0x2,
+};
+
+enum sec_auth {
+ SEC_NO_AUTH = 0x0,
+ SEC_AUTH_TYPE1 = 0x1,
+ SEC_AUTH_TYPE2 = 0x2,
+};
+
+enum sec_cipher_dir {
+ SEC_CIPHER_ENC = 0x1,
+ SEC_CIPHER_DEC = 0x2,
+};
+
+enum sec_addr_type {
+ SEC_PBUF = 0x0,
+ SEC_SGL = 0x1,
+ SEC_PRP = 0x2,
+};
+
+enum sec_ci_gen {
+ SEC_CI_GEN_BY_ADDR = 0x0,
+ SEC_CI_GEN_BY_LBA = 0X3,
+};
+
+enum sec_scene {
+ SEC_SCENE_IPSEC = 0x1,
+ SEC_SCENE_STORAGE = 0x5,
+};
+
+enum sec_work_mode {
+ SEC_NO_FUSION = 0x0,
+ SEC_IV_FUSION = 0x1,
+ SEC_FUSION_BUTT
+};
+
+enum sec_req_ops_type {
+ SEC_OPS_SKCIPHER_ALG = 0x0,
+ SEC_OPS_DMCRYPT = 0x1,
+ SEC_OPS_MULTI_IV = 0x2,
+ SEC_OPS_BUTT
+};
+
+struct sec_sqe_type2 {
+ /*
+ * mac_len: 0~4 bits
+ * a_key_len: 5~10 bits
+ * a_alg: 11~16 bits
+ */
+ __le32 mac_key_alg;
+
+ /*
+ * c_icv_len: 0~5 bits
+ * c_width: 6~8 bits
+ * c_key_len: 9~11 bits
+ * c_mode: 12~15 bits
+ */
+ __le16 icvw_kmode;
+
+ /* c_alg: 0~3 bits */
+ __u8 c_alg;
+ __u8 rsvd4;
+
+ /*
+ * a_len: 0~23 bits
+ * iv_offset_l: 24~31 bits
+ */
+ __le32 alen_ivllen;
+
+ /*
+ * c_len: 0~23 bits
+ * iv_offset_h: 24~31 bits
+ */
+ __le32 clen_ivhlen;
+
+ __le16 auth_src_offset;
+ __le16 cipher_src_offset;
+ __le16 cs_ip_header_offset;
+ __le16 cs_udp_header_offset;
+ __le16 pass_word_len;
+ __le16 dk_len;
+ __u8 salt3;
+ __u8 salt2;
+ __u8 salt1;
+ __u8 salt0;
+
+ __le16 tag;
+ __le16 rsvd5;
+
+ /*
+ * c_pad_type: 0~3 bits
+ * c_pad_len: 4~11 bits
+ * c_pad_data_type: 12~15 bits
+ */
+ __le16 cph_pad;
+
+ /* c_pad_len_field: 0~1 bits */
+ __le16 c_pad_len_field;
+
+ __le64 long_a_data_len;
+ __le64 a_ivin_addr;
+ __le64 a_key_addr;
+ __le64 mac_addr;
+ __le64 c_ivin_addr;
+ __le64 c_key_addr;
+
+ __le64 data_src_addr;
+ __le64 data_dst_addr;
+
+ /*
+ * done: 0 bit
+ * icv: 1~3 bits
+ * csc: 4~6 bits
+ * flag: 7-10 bits
+ * dif_check: 11~13 bits
+ */
+ __le16 done_flag;
+
+ __u8 error_type;
+ __u8 warning_type;
+ __u8 mac_i3;
+ __u8 mac_i2;
+ __u8 mac_i1;
+ __u8 mac_i0;
+ __le16 check_sum_i;
+ __u8 tls_pad_len_i;
+ __u8 rsvd12;
+ __le32 counter;
+};
+
+struct sec_sqe {
+ /*
+ * type: 0~3 bits
+ * cipher: 4~5 bits
+	 * auth: 6~7 bits
+ */
+ __u8 type_cipher_auth;
+
+ /*
+ * seq: 0 bit
+ * de: 1~2 bits
+ * scene: 3~6 bits
+ * src_addr_type: ~7 bit, with sdm_addr_type 0-1 bits
+ */
+ __u8 sds_sa_type;
+
+ /*
+	 * src_addr_type: 0~1 bits, not used now;
+	 * if PRP is supported, set this field, otherwise set it to zero.
+ * dst_addr_type: 2~4 bits
+ * mac_addr_type: 5~7 bits
+ */
+ __u8 sdm_addr_type;
+ __u8 rsvd0;
+
+ /*
+ * nonce_len(type2): 0~3 bits
+ * huk(type2): 4 bit
+ * key_s(type2): 5 bit
+ * ci_gen: 6~7 bits
+ */
+ __u8 huk_key_ci;
+
+ /*
+ * ai_gen: 0~1 bits
+ * a_pad(type2): 2~3 bits
+ * c_s(type2): 4~5 bits
+ */
+ __u8 ai_apd_cs;
+
+ /*
+ * rhf(type2): 0 bit
+ * c_key_type: 1~2 bits
+ * a_key_type: 3~4 bits
+ * write_frame_len(type2): 5~7 bits
+ */
+ __u8 rca_key_frm;
+
+ /*
+ * cal_iv_addr_en(type2): 0 bit
+ * tls_up(type2): 1 bit
+ * inveld: 7 bit
+ */
+ __u8 iv_tls_ld;
+
+ struct sec_sqe_type2 type2;
+};
+
+int sec_register_to_crypto(void);
+void sec_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index b4e5d57f..b3340c0 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -1,33 +1,30 @@
// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (c) 2018-2019 HiSilicon Limited.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
+#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
-#include <linux/uacce.h>
+
#include "sec.h"
-#include "sec_crypto.h"
#define SEC_QUEUE_NUM_V1 4096
#define SEC_QUEUE_NUM_V2 1024
#define SEC_PF_PCI_DEVICE_ID 0xa255
#define SEC_VF_PCI_DEVICE_ID 0xa256
+#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0x7ffff7fd
+#define SEC_BD_ERR_CHK_EN3 0xffffbfff
+
#define SEC_SQE_SIZE 128
#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM 64
@@ -35,8 +32,6 @@
#define SEC_CTX_Q_NUM_DEF 24
#define SEC_CTX_Q_NUM_MAX 32
-#define SEC_AM_CFG_SIG_PORT_MAX_TRANS 0x300014
-#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
#define SEC_CTRL_CNT_CLR_CE 0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
#define SEC_ENGINE_PF_CFG_OFF 0x300000
@@ -44,13 +39,13 @@
#define SEC_CORE_INT_SOURCE 0x301010
#define SEC_CORE_INT_MASK 0x301000
#define SEC_CORE_INT_STATUS 0x301008
-#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
-#define SEC_CORE_ECC_INFO 0x301C14
-#define SEC_ECC_NUM(err_val) (((err_val) >> 16) & 0xFFFF)
-#define SEC_ECC_ADDR(err_val) ((err_val) & 0xFFFF)
+#define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14
+#define SEC_ECC_NUM(err) (((err) >> 16) & 0xFFFF)
+#define SEC_ECC_ADDR(err) ((err) >> 0)
#define SEC_CORE_INT_DISABLE 0x0
#define SEC_CORE_INT_ENABLE 0x1ff
#define SEC_CORE_INT_CLEAR 0x1ff
+#define SEC_SAA_ENABLE 0x17f
#define SEC_RAS_CE_REG 0x301050
#define SEC_RAS_FE_REG 0x301054
@@ -64,6 +59,7 @@
#define SEC_CONTROL_REG 0x0200
#define SEC_TRNG_EN_SHIFT 8
+#define SEC_CLK_GATE_ENABLE BIT(3)
#define SEC_CLK_GATE_DISABLE (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
@@ -71,26 +67,24 @@
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
-#define SEC_SAA_EN_REG 0x270
-#define SEC_SAA_EN 0x17F
+#define SEC_SAA_EN_REG 0x0270
#define SEC_BD_ERR_CHK_EN_REG0 0x0380
#define SEC_BD_ERR_CHK_EN_REG1 0x0384
#define SEC_BD_ERR_CHK_EN_REG3 0x038c
-#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
-#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD
-#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
+#define SEC_CORE_INT_STATUS_M_ECC BIT(2)
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
#define SEC_DBGFS_VAL_MAX_LEN 20
+#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
-struct hisi_sec_hw_error {
+struct sec_hw_error {
u32 int_msk;
const char *msg;
};
@@ -98,9 +92,8 @@ struct hisi_sec_hw_error {
static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;
static struct hisi_qm_list sec_devices;
-static struct workqueue_struct *sec_wq;
-static const struct hisi_sec_hw_error sec_hw_error[] = {
+static const struct sec_hw_error sec_hw_errors[] = {
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
{.int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint"},
@@ -113,36 +106,13 @@ struct hisi_sec_hw_error {
{ /* sentinel */ }
};
-enum ctrl_debug_file_index {
- SEC_CURRENT_QM,
- SEC_CLEAR_ENABLE,
- SEC_DEBUG_FILE_NUM,
-};
-
-static const char * const ctrl_debug_file_name[] = {
+static const char * const sec_dbg_file_name[] = {
[SEC_CURRENT_QM] = "current_qm",
[SEC_CLEAR_ENABLE] = "clear_enable",
};
-struct ctrl_debug_file {
- enum ctrl_debug_file_index index;
- spinlock_t lock;
- struct hisi_sec_ctrl *ctrl;
-};
-
-/*
- * One SEC controller has one PF and multiple VFs, some global configurations
- * which PF has need this structure.
- *
- * Just relevant for PF.
- */
-struct hisi_sec_ctrl {
- struct hisi_sec *hisi_sec;
- struct ctrl_debug_file files[SEC_DEBUG_FILE_NUM];
-};
-
static struct debugfs_reg32 sec_dfx_regs[] = {
- {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
+ {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
{"SEC_SAA_EN ", 0x301270},
{"SEC_BD_LATENCY_MIN ", 0x301600},
{"SEC_BD_LATENCY_MAX ", 0x301608},
@@ -262,71 +232,12 @@ static int vfs_num_set(const char *val, const struct kernel_param *kp)
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
-static int sec_fusion_limit_set(const char *val, const struct kernel_param *kp)
-{
- u32 fusion_limit;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &fusion_limit);
- if (ret)
- return ret;
-
- if (!fusion_limit || fusion_limit > FUSION_LIMIT_MAX) {
- pr_err("fusion_limit[%u] is't at range(0, %d)", fusion_limit,
- FUSION_LIMIT_MAX);
- return -EINVAL;
- }
-
- return param_set_int(val, kp);
-}
-
-static const struct kernel_param_ops sec_fusion_limit_ops = {
- .set = sec_fusion_limit_set,
- .get = param_get_int,
-};
-static u32 fusion_limit = FUSION_LIMIT_DEF;
-
-module_param_cb(fusion_limit, &sec_fusion_limit_ops, &fusion_limit, 0444);
-MODULE_PARM_DESC(fusion_limit, "(1, acc_sgl_sge_nr of hisilicon QM)");
-
-static int sec_fusion_tmout_ns_set(const char *val,
- const struct kernel_param *kp)
-{
- u32 fusion_tmout_nsec;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &fusion_tmout_nsec);
- if (ret)
- return ret;
-
- if (fusion_tmout_nsec > NSEC_PER_SEC) {
- pr_err("fusion_tmout_nsec[%u] is too large", fusion_tmout_nsec);
- return -EINVAL;
- }
-
- return param_set_int(val, kp);
-}
-
-static const struct kernel_param_ops sec_fusion_time_ops = {
- .set = sec_fusion_tmout_ns_set,
- .get = param_get_int,
-};
-static u32 fusion_time = FUSION_TMOUT_NSEC_DEF; /* ns */
-module_param_cb(fusion_time, &sec_fusion_time_ops, &fusion_time, 0444);
-MODULE_PARM_DESC(fusion_time, "(0, NSEC_PER_SEC)");
-
-static const struct pci_device_id hisi_sec_dev_ids[] = {
+static const struct pci_device_id sec_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
{ 0, }
};
-MODULE_DEVICE_TABLE(pci, hisi_sec_dev_ids);
+MODULE_DEVICE_TABLE(pci, sec_dev_ids);
static u8 sec_get_endian(struct hisi_qm *qm)
{
@@ -390,9 +301,9 @@ static int sec_engine_init(struct hisi_qm *qm)
writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
writel(SEC_SINGLE_PORT_MAX_TRANS,
- qm->io_base + SEC_AM_CFG_SIG_PORT_MAX_TRANS);
+ qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);
- writel(SEC_SAA_EN, SEC_ADDR(qm, SEC_SAA_EN_REG));
+ writel(SEC_SAA_ENABLE, SEC_ADDR(qm, SEC_SAA_EN_REG));
/* Enable sm4 extra mode, as ctr/ecb */
writel_relaxed(SEC_BD_ERR_CHK_EN0,
@@ -436,6 +347,7 @@ static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
return sec_engine_init(qm);
}
+/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
/* clear current_qm */
@@ -497,23 +409,16 @@ static void sec_hw_error_disable(struct hisi_qm *qm)
writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
-static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
+static u32 sec_current_qm_read(struct sec_debug_file *file)
{
- struct hisi_sec *hisi_sec = file->ctrl->hisi_sec;
-
- return &hisi_sec->qm;
-}
-
-static u32 current_qm_read(struct ctrl_debug_file *file)
-{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
return readl(qm->io_base + QM_DFX_MB_CNT_VF);
}
-static int current_qm_write(struct ctrl_debug_file *file, u32 val)
+static int sec_current_qm_write(struct sec_debug_file *file, u32 val)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
u32 vfq_num;
u32 tmp;
@@ -521,17 +426,17 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
return -EINVAL;
/* According PF or VF Dev ID to calculation curr_qm_qp_num and store */
- if (val == 0) {
+ if (!val) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num;
- if (val == qm->vfs_num) {
+
+ if (val == qm->vfs_num)
qm->debug.curr_qm_qp_num =
qm->ctrl_q_num - qm->qp_num -
(qm->vfs_num - 1) * vfq_num;
- } else {
+ else
qm->debug.curr_qm_qp_num = vfq_num;
- }
}
writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
@@ -548,33 +453,33 @@ static int current_qm_write(struct ctrl_debug_file *file, u32 val)
return 0;
}
-static u32 clear_enable_read(struct ctrl_debug_file *file)
+static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
SEC_CTRL_CNT_CLR_CE_BIT;
}
-static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
+static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
- struct hisi_qm *qm = file_to_qm(file);
+ struct hisi_qm *qm = file->qm;
u32 tmp;
if (val != 1 && val)
return -EINVAL;
tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
- ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
+ ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
return 0;
}
-static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
+static ssize_t sec_debug_read(struct file *filp, char __user *buf,
size_t count, loff_t *pos)
{
- struct ctrl_debug_file *file = filp->private_data;
+ struct sec_debug_file *file = filp->private_data;
char tbuf[SEC_DBGFS_VAL_MAX_LEN];
u32 val;
int ret;
@@ -583,10 +488,10 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
switch (file->index) {
case SEC_CURRENT_QM:
- val = current_qm_read(file);
+ val = sec_current_qm_read(file);
break;
case SEC_CLEAR_ENABLE:
- val = clear_enable_read(file);
+ val = sec_clear_enable_read(file);
break;
default:
spin_unlock_irq(&file->lock);
@@ -599,10 +504,10 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
-static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
{
- struct ctrl_debug_file *file = filp->private_data;
+ struct sec_debug_file *file = filp->private_data;
char tbuf[SEC_DBGFS_VAL_MAX_LEN];
unsigned long val;
int len, ret;
@@ -626,12 +531,12 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
switch (file->index) {
case SEC_CURRENT_QM:
- ret = current_qm_write(file, val);
+ ret = sec_current_qm_write(file, val);
if (ret)
goto err_input;
break;
case SEC_CLEAR_ENABLE:
- ret = clear_enable_write(file, val);
+ ret = sec_clear_enable_write(file, val);
if (ret)
goto err_input;
break;
@@ -649,30 +554,30 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
return ret;
}
-static const struct file_operations ctrl_debug_fops = {
+static const struct file_operations sec_dbg_fops = {
.owner = THIS_MODULE,
.open = simple_open,
- .read = ctrl_debug_read,
- .write = ctrl_debug_write,
+ .read = sec_debug_read,
+ .write = sec_debug_write,
};
-static int hisi_sec_core_debug_init(struct hisi_qm *qm)
+static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
- struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
+ *val = atomic64_read((atomic64_t *)data);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
+ NULL, "%lld\n");
+
+static int sec_core_debug_init(struct hisi_qm *qm)
+{
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
struct device *dev = &qm->pdev->dev;
- struct hisi_sec_dfx *dfx = &sec->sec_dfx;
+ struct sec_dfx *dfx = &sec->debug.dfx;
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
- char buf[SEC_DBGFS_VAL_MAX_LEN];
- int ret;
+ struct dentry *tmp_d;
- ret = snprintf(buf, SEC_DBGFS_VAL_MAX_LEN, "sec_dfx");
- if (ret < 0)
- return -ENOENT;
-
- tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
- if (!tmp_d)
- return -ENOENT;
+ tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -682,123 +587,69 @@ static int hisi_sec_core_debug_init(struct hisi_qm *qm)
regset->nregs = ARRAY_SIZE(sec_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("send_cnt", 0444, tmp_d, &dfx->send_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
- tmp = debugfs_create_u64("send_by_tmout", 0444, tmp_d,
- &dfx->send_by_tmout);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("send_by_full", 0444, tmp_d,
- &dfx->send_by_full);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("recv_cnt", 0444, tmp_d, &dfx->recv_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("get_task_cnt", 0444, tmp_d,
- &dfx->get_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("put_task_cnt", 0444, tmp_d,
- &dfx->put_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("gran_task_cnt", 0444, tmp_d,
- &dfx->gran_task_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("thread_cnt", 0444, tmp_d, &dfx->thread_cnt);
- if (!tmp)
- return -ENOENT;
-
- tmp = debugfs_create_u64("fake_busy_cnt", 0444,
- tmp_d, &dfx->fake_busy_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file("send_cnt", 0444, tmp_d,
+ &dfx->send_cnt, &sec_atomic64_ops);
- tmp = debugfs_create_u64("busy_comp_cnt", 0444, tmp_d,
- &dfx->busy_comp_cnt);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_file("recv_cnt", 0444, tmp_d,
+ &dfx->recv_cnt, &sec_atomic64_ops);
return 0;
}
-static int hisi_sec_ctrl_debug_init(struct hisi_qm *qm)
+static int sec_debug_init(struct hisi_qm *qm)
{
- struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
- struct dentry *tmp;
+ struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
int i;
for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&sec->ctrl->files[i].lock);
- sec->ctrl->files[i].ctrl = sec->ctrl;
- sec->ctrl->files[i].index = i;
+ spin_lock_init(&sec->debug.files[i].lock);
+ sec->debug.files[i].index = i;
+ sec->debug.files[i].qm = qm;
- tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
+ debugfs_create_file(sec_dbg_file_name[i], 0600,
qm->debug.debug_root,
- sec->ctrl->files + i,
- &ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+ sec->debug.files + i,
+ &sec_dbg_fops);
}
- return hisi_sec_core_debug_init(qm);
+ return sec_core_debug_init(qm);
}
-static int hisi_sec_debugfs_init(struct hisi_qm *qm)
+static int sec_debugfs_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
- struct dentry *dev_d;
int ret;
- dev_d = debugfs_create_dir(dev_name(dev), sec_debugfs_root);
- if (!dev_d)
- return -ENOENT;
-
- qm->debug.debug_root = dev_d;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ sec_debugfs_root);
ret = hisi_qm_debug_init(qm);
if (ret)
goto failed_to_create;
if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
- ret = hisi_sec_ctrl_debug_init(qm);
+ ret = sec_debug_init(qm);
if (ret)
goto failed_to_create;
}
return 0;
- failed_to_create:
+failed_to_create:
debugfs_remove_recursive(sec_debugfs_root);
+
return ret;
}
-static void hisi_sec_debugfs_exit(struct hisi_qm *qm)
+static void sec_debugfs_exit(struct hisi_qm *qm)
{
debugfs_remove_recursive(qm->debug.debug_root);
-
- if (qm->fun_type == QM_HW_PF) {
- sec_debug_regs_clear(qm);
- qm->debug.curr_qm_qp_num = 0;
- }
}
static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- const struct hisi_sec_hw_error *errs = sec_hw_error;
+ const struct sec_hw_error *errs = sec_hw_errors;
struct device *dev = &qm->pdev->dev;
u32 err_val;
@@ -809,7 +660,7 @@ static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
err_val = readl(qm->io_base +
- SEC_CORE_ECC_INFO);
+ SEC_CORE_SRAM_ECC_ERR_INFO);
dev_err(dev, "multi ecc sram num=0x%x\n",
SEC_ECC_NUM(err_val));
}
@@ -837,19 +688,10 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm)
writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
}
-static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
+static int sec_pf_probe_init(struct hisi_qm *qm)
{
- struct hisi_sec *hisi_sec = container_of(qm, struct hisi_sec, qm);
- struct hisi_sec_ctrl *ctrl;
int ret;
- ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return -ENOMEM;
-
- hisi_sec->ctrl = ctrl;
- ctrl->hisi_sec = hisi_sec;
-
switch (qm->ver) {
case QM_HW_V1:
qm->ctrl_q_num = SEC_QUEUE_NUM_V1;
@@ -868,7 +710,7 @@ static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
qm->err_ini.err_info.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
qm->err_ini.err_info.ce = QM_BASE_CE;
qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
- QM_ACC_WB_NOT_READY_TIMEOUT;
+ QM_ACC_WB_NOT_READY_TIMEOUT;
qm->err_ini.err_info.fe = 0;
qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
qm->err_ini.err_info.acpi_rst = "SRST";
@@ -884,42 +726,32 @@ static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
return ret;
hisi_qm_dev_err_init(qm);
- qm->err_ini.open_axi_master_ooo(qm);
sec_debug_regs_clear(qm);
return 0;
}
-static int hisi_sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+static int sec_probe_init(struct hisi_qm *qm)
{
int ret;
-#ifdef CONFIG_CRYPTO_QM_UACCE
- qm->algs = "sec\ncipher\ndigest\n";
- qm->uacce_mode = uacce_mode;
-#endif
- qm->pdev = pdev;
- ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
- if (ret)
- return ret;
- qm->sqe_size = SEC_SQE_SIZE;
- qm->dev_name = sec_name;
- qm->qm_list = &sec_devices;
- qm->wq = sec_wq;
-
- return 0;
-}
+ qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE |
+ WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(),
+ pci_name(qm->pdev));
+ if (!qm->wq) {
+ pci_err(qm->pdev, "fail to alloc workqueue\n");
+ return -ENOMEM;
+ }
-static int hisi_sec_probe_init(struct hisi_qm *qm)
-{
if (qm->fun_type == QM_HW_PF) {
- return hisi_sec_pf_probe_init(qm);
+ ret = sec_pf_probe_init(qm);
+ if (ret)
+ goto err_probe_uninit;
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
* so currently force PF to uses SEC_PF_DEF_Q_NUM, and force
* to trigger only one VF in v1 hardware.
- *
* v2 hardware has no such problem.
*/
if (qm->ver == QM_HW_V1) {
@@ -927,41 +759,92 @@ static int hisi_sec_probe_init(struct hisi_qm *qm)
qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
} else if (qm->ver == QM_HW_V2) {
/* v2 starts to support get vft by mailbox */
- return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
+ if (ret)
+ goto err_probe_uninit;
}
+ } else {
+ ret = -ENODEV;
+ goto err_probe_uninit;
}
return 0;
+
+err_probe_uninit:
+ destroy_workqueue(qm->wq);
+ return ret;
+}
+
+static void sec_probe_uninit(struct hisi_qm *qm)
+{
+ if (qm->fun_type == QM_HW_PF)
+ hisi_qm_dev_err_uninit(qm);
+ destroy_workqueue(qm->wq);
+}
+
+static int sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ int ret;
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "sec\ncipher\ndigest\n";
+ qm->uacce_mode = uacce_mode;
+#endif
+ qm->pdev = pdev;
+ ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+
+ qm->qm_list = &sec_devices;
+ qm->sqe_size = SEC_SQE_SIZE;
+ qm->dev_name = sec_name;
+
+ return 0;
+}
+
+static void sec_iommu_used_check(struct sec_dev *sec)
+{
+ struct iommu_domain *domain;
+ struct device *dev = &sec->qm.pdev->dev;
+
+ domain = iommu_get_domain_for_dev(dev);
+
+ /* Check if iommu is used */
+ sec->iommu_used = false;
+ if (domain) {
+ if (domain->type & __IOMMU_DOMAIN_PAGING)
+ sec->iommu_used = true;
+ dev_info(dev, "SMMU Opened! the iommu type:= %d!\n",
+ domain->type);
+ }
}
-static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct hisi_sec *hisi_sec;
+ struct sec_dev *sec;
struct hisi_qm *qm;
int ret;
- hisi_sec = devm_kzalloc(&pdev->dev, sizeof(*hisi_sec), GFP_KERNEL);
- if (!hisi_sec)
+ sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
+ if (!sec)
return -ENOMEM;
- qm = &hisi_sec->qm;
+ qm = &sec->qm;
qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
- ret = hisi_sec_qm_pre_init(qm, pdev);
+ ret = sec_qm_pre_init(qm, pdev);
if (ret)
return ret;
- hisi_sec->ctx_q_num = ctx_q_num;
- hisi_sec->fusion_limit = fusion_limit;
- hisi_sec->fusion_tmout_nsec = fusion_time;
-
+ sec->ctx_q_num = ctx_q_num;
+ sec_iommu_used_check(sec);
ret = hisi_qm_init(qm);
if (ret) {
pci_err(pdev, "Failed to init qm (%d)!\n", ret);
return ret;
}
- ret = hisi_sec_probe_init(qm);
+ ret = sec_probe_init(qm);
if (ret) {
pci_err(pdev, "Failed to probe init (%d)!\n", ret);
goto err_qm_uninit;
@@ -970,18 +853,18 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = hisi_qm_start(qm);
if (ret) {
pci_err(pdev, "Failed to start qm (%d)!\n", ret);
- goto err_qm_uninit;
+ goto err_probe_uninit;
}
- ret = hisi_sec_debugfs_init(qm);
+ ret = sec_debugfs_init(qm);
if (ret)
pci_warn(pdev, "Failed to init debugfs (%d)!\n", ret);
hisi_qm_add_to_list(qm, &sec_devices);
- ret = hisi_sec_register_to_crypto(fusion_limit);
+ ret = sec_register_to_crypto();
if (ret < 0) {
- pci_err(pdev, "Failed to register driver to crypto!\n");
+ pr_err("Failed to register driver to crypto!\n");
goto err_remove_from_list;
}
@@ -994,121 +877,115 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_crypto_unregister:
- hisi_sec_unregister_from_crypto(fusion_limit);
+ sec_unregister_from_crypto();
err_remove_from_list:
hisi_qm_del_from_list(qm, &sec_devices);
- hisi_sec_debugfs_exit(qm);
+ sec_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
+err_probe_uninit:
+ sec_probe_uninit(qm);
+
err_qm_uninit:
hisi_qm_uninit(qm);
return ret;
}
-static int hisi_sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
+static int sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
- if (num_vfs == 0)
- return hisi_qm_sriov_disable(pdev, &sec_devices);
- else
+ if (num_vfs)
return hisi_qm_sriov_enable(pdev, num_vfs);
+ else
+ return hisi_qm_sriov_disable(pdev, &sec_devices);
}
-static void hisi_sec_remove(struct pci_dev *pdev)
+static void sec_remove(struct pci_dev *pdev)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
- if (uacce_mode != UACCE_MODE_NOUACCE)
- hisi_qm_remove_wait_delay(qm, &sec_devices);
+ hisi_qm_remove_wait_delay(qm, &sec_devices);
+
+ sec_unregister_from_crypto();
+
+ hisi_qm_del_from_list(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
(void)hisi_qm_sriov_disable(pdev, NULL);
- hisi_sec_unregister_from_crypto(fusion_limit);
+ sec_debugfs_exit(qm);
- hisi_qm_del_from_list(qm, &sec_devices);
- hisi_sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hisi_qm_dev_err_uninit(qm);
+ sec_debug_regs_clear(qm);
+
+ sec_probe_uninit(qm);
hisi_qm_uninit(qm);
}
-static const struct pci_error_handlers hisi_sec_err_handler = {
+static const struct pci_error_handlers sec_err_handler = {
.error_detected = hisi_qm_dev_err_detected,
- .slot_reset = hisi_qm_dev_slot_reset,
- .reset_prepare = hisi_qm_reset_prepare,
- .reset_done = hisi_qm_reset_done,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
-static struct pci_driver hisi_sec_pci_driver = {
+static struct pci_driver sec_pci_driver = {
.name = "hisi_sec2",
- .id_table = hisi_sec_dev_ids,
- .probe = hisi_sec_probe,
- .remove = hisi_sec_remove,
- .sriov_configure = hisi_sec_sriov_configure,
- .err_handler = &hisi_sec_err_handler,
+ .id_table = sec_dev_ids,
+ .probe = sec_probe,
+ .remove = sec_remove,
+ .err_handler = &sec_err_handler,
+ .sriov_configure = sec_sriov_configure,
.shutdown = hisi_qm_dev_shutdown,
};
-static void hisi_sec_register_debugfs(void)
+static void sec_register_debugfs(void)
{
if (!debugfs_initialized())
return;
sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
- if (IS_ERR_OR_NULL(sec_debugfs_root))
- sec_debugfs_root = NULL;
}
-static void hisi_sec_unregister_debugfs(void)
+static void sec_unregister_debugfs(void)
{
debugfs_remove_recursive(sec_debugfs_root);
}
-static int __init hisi_sec_init(void)
+static int __init sec_init(void)
{
int ret;
- sec_wq = alloc_workqueue("hisi_sec2", WQ_HIGHPRI | WQ_CPU_INTENSIVE |
- WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
-
- if (!sec_wq) {
- pr_err("Fallied to alloc workqueue\n");
- return -ENOMEM;
- }
-
INIT_LIST_HEAD(&sec_devices.list);
mutex_init(&sec_devices.lock);
sec_devices.check = NULL;
+ sec_register_debugfs();
- hisi_sec_register_debugfs();
-
- ret = pci_register_driver(&hisi_sec_pci_driver);
+ ret = pci_register_driver(&sec_pci_driver);
if (ret < 0) {
- hisi_sec_unregister_debugfs();
- if (sec_wq)
- destroy_workqueue(sec_wq);
+ sec_unregister_debugfs();
pr_err("Failed to register pci driver.\n");
+ return ret;
}
- return ret;
+ return 0;
}
-static void __exit hisi_sec_exit(void)
+static void __exit sec_exit(void)
{
- pci_unregister_driver(&hisi_sec_pci_driver);
- hisi_sec_unregister_debugfs();
- if (sec_wq)
- destroy_workqueue(sec_wq);
+ pci_unregister_driver(&sec_pci_driver);
+ sec_unregister_debugfs();
}
-module_init(hisi_sec_init);
-module_exit(hisi_sec_exit);
+module_init(sec_init);
+module_exit(sec_exit);
MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Zaibo Xu <xuzaibo(a)huawei.com>");
+MODULE_AUTHOR("Longfang Liu <liulongfang(a)huawei.com>");
MODULE_AUTHOR("Zhang Wei <zhangwei375(a)huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
diff --git a/drivers/crypto/hisilicon/sec2/sec_usr_if.h b/drivers/crypto/hisilicon/sec2/sec_usr_if.h
deleted file mode 100644
index 7c76e19..00000000
--- a/drivers/crypto/hisilicon/sec2/sec_usr_if.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/* Copyright (c) 2018-2019 HiSilicon Limited. */
-
-#ifndef HISI_SEC_USR_IF_H
-#define HISI_SEC_USR_IF_H
-
-struct hisi_sec_sqe_type1 {
- __u32 rsvd2:6;
- __u32 ci_gen:2;
- __u32 ai_gen:2;
- __u32 rsvd1:7;
- __u32 c_key_type:2;
- __u32 a_key_type:2;
- __u32 rsvd0:10;
- __u32 inveld:1;
-
- __u32 mac_len:6;
- __u32 a_key_len:5;
- __u32 a_alg:6;
- __u32 rsvd3:15;
- __u32 c_icv_len:6;
- __u32 c_width:3;
- __u32 c_key_len:3;
- __u32 c_mode:4;
- __u32 c_alg:4;
- __u32 rsvd4:12;
- __u32 auth_gran_size:24;
- __u32:8;
- __u32 cipher_gran_size:24;
- __u32:8;
- __u32 auth_src_offset:16;
- __u32 cipher_src_offset:16;
- __u32 gran_num:16;
- __u32 rsvd5:16;
- __u32 src_skip_data_len:24;
- __u32 rsvd6:8;
- __u32 dst_skip_data_len:24;
- __u32 rsvd7:8;
- __u32 tag:16;
- __u32 rsvd8:16;
- __u32 gen_page_pad_ctrl:4;
- __u32 gen_grd_ctrl:4;
- __u32 gen_ver_ctrl:4;
- __u32 gen_app_ctrl:4;
- __u32 gen_ver_val:8;
- __u32 gen_app_val:8;
- __u32 private_info;
- __u32 gen_ref_ctrl:4;
- __u32 page_pad_type:2;
- __u32 rsvd9:2;
- __u32 chk_grd_ctrl:4;
- __u32 chk_ref_ctrl:4;
- __u32 block_size:16;
- __u32 lba_l;
- __u32 lba_h;
- __u32 a_key_addr_l;
- __u32 a_key_addr_h;
- __u32 mac_addr_l;
- __u32 mac_addr_h;
- __u32 c_ivin_addr_l;
- __u32 c_ivin_addr_h;
- __u32 c_key_addr_l;
- __u32 c_key_addr_h;
- __u32 data_src_addr_l;
- __u32 data_src_addr_h;
- __u32 data_dst_addr_l;
- __u32 data_dst_addr_h;
- __u32 done:1;
- __u32 icv:3;
- __u32 rsvd11:3;
- __u32 flag:4;
- __u32 dif_check:3;
- __u32 rsvd10:2;
- __u32 error_type:8;
- __u32 warning_type:8;
- __u32 dw29;
- __u32 dw30;
- __u32 dw31;
-};
-
-struct hisi_sec_sqe_type2 {
- __u32 nonce_len:4;
- __u32 huk:1;
- __u32 key_s:1;
- __u32 ci_gen:2;
- __u32 ai_gen:2;
- __u32 a_pad:2;
- __u32 c_s:2;
- __u32 rsvd1:2;
- __u32 rhf:1;
- __u32 c_key_type:2;
- __u32 a_key_type:2;
- __u32 write_frame_len:3;
- __u32 cal_iv_addr_en:1;
- __u32 tls_up:1;
- __u32 rsvd0:5;
- __u32 inveld:1;
- __u32 mac_len:5;
- __u32 a_key_len:6;
- __u32 a_alg:6;
- __u32 rsvd3:15;
- __u32 c_icv_len:6;
- __u32 c_width:3;
- __u32 c_key_len:3;
- __u32 c_mode:4;
- __u32 c_alg:4;
- __u32 rsvd4:12;
- __u32 a_len:24;
- __u32 iv_offset_l:8;
- __u32 c_len:24;
- __u32 iv_offset_h:8;
- __u32 auth_src_offset:16;
- __u32 cipher_src_offset:16;
- __u32 cs_ip_header_offset:16;
- __u32 cs_udp_header_offset:16;
- __u32 pass_word_len:16;
- __u32 dk_len:16;
- __u32 salt3:8;
- __u32 salt2:8;
- __u32 salt1:8;
- __u32 salt0:8;
- __u32 tag:16;
- __u32 rsvd5:16;
- __u32 c_pad_type:4;
- __u32 c_pad_len:8;
- __u32 c_pad_data_type:4;
- __u32 c_pad_len_field:2;
- __u32 rsvd6:14;
- __u32 long_a_data_len_l;
- __u32 long_a_data_len_h;
- __u32 a_ivin_addr_l;
- __u32 a_ivin_addr_h;
- __u32 a_key_addr_l;
- __u32 a_key_addr_h;
- __u32 mac_addr_l;
- __u32 mac_addr_h;
- __u32 c_ivin_addr_l;
- __u32 c_ivin_addr_h;
- __u32 c_key_addr_l;
- __u32 c_key_addr_h;
- __u32 data_src_addr_l;
- __u32 data_src_addr_h;
- __u32 data_dst_addr_l;
- __u32 data_dst_addr_h;
- __u32 done:1;
- __u32 icv:3;
- __u32 rsvd11:3;
- __u32 flag:4;
- __u32 rsvd10:5;
- __u32 error_type:8;
- __u32 warning_type:8;
- __u32 mac_i3:8;
- __u32 mac_i2:8;
- __u32 mac_i1:8;
- __u32 mac_i0:8;
- __u32 check_sum_i:16;
- __u32 tls_pad_len_i:8;
- __u32 rsvd12:8;
- __u32 counter;
-};
-
-struct hisi_sec_sqe {
- __u32 type:4;
- __u32 cipher:2;
- __u32 auth:2;
- __u32 seq:1;
- __u32 de:2;
- __u32 scene:4;
- __u32 src_addr_type:3;
- __u32 dst_addr_type:3;
- __u32 mac_addr_type:3;
- __u32 rsvd0:8;
- union {
- struct hisi_sec_sqe_type1 type1;
- struct hisi_sec_sqe_type2 type2;
- };
-};
-
-#endif
--
1.8.3
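
As an editorial aside: the refactor above funnels every skcipher request
through one table of operations, with a single unwind path replacing the
per-callsite error labels that were removed. Below is a minimal
stand-alone sketch of that pipeline in C; the op names mirror the diff,
but the opaque types and the transfer() wrapper are illustrative
assumptions, not the driver code.

/*
 * Hypothetical model of the sec_req_op pipeline from the patch above.
 * sec_ctx/sec_req are left opaque; only the control flow matters here.
 */
struct sec_ctx;
struct sec_req;

struct sec_req_op {
	int  (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int  (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int  (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
};

/* Mirrors sec_request_transfer(): map buffers, copy the IV, fill the
 * BD, and unmap again if BD filling fails (one unwind point only). */
static int transfer(const struct sec_req_op *op,
		    struct sec_ctx *ctx, struct sec_req *req)
{
	int ret = op->buf_map(ctx, req);

	if (ret)
		return ret;

	op->do_transfer(ctx, req);

	ret = op->bd_fill(ctx, req);
	if (ret)
		op->buf_unmap(ctx, req);

	return ret;
}

Centralizing the unwind this way is what lets sec_process() drive the
whole request without knowing which concrete buf_map/bd_fill it is using.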
[PATCH] arm64: kprobes: Recover pstate.D in single-step exception handler
by Yang Yingliang 17 Apr '20
From: Masami Hiramatsu <mhiramat(a)kernel.org>
mainline inclusion
from mainline-5.3-rc3
commit b3980e48528c
category: bugfix
bugzilla: 20080
CVE: NA
-------------------------------------------------
kprobes manipulates the interrupted PSTATE for single step and
doesn't restore it. Thus, if we put a kprobe where pstate.D
(debug) is masked, the mask will be cleared after the kprobe hits.
Moreover, in the most complicated case, this can lead to a kernel
crash with the message below when a nested kprobe hits.
[ 152.118921] Unexpected kernel single-step exception at EL1
When the 1st kprobe hits, do_debug_exception() will be called.
At this point, the debug exception (= pstate.D) must be masked (= 1).
But if another kprobe hits before the single-step of the first kprobe
(e.g. inside a user pre_handler), it unmasks the debug exception
(pstate.D = 0) and returns.
Then, when the 1st kprobe sets up single-step, it saves the current
DAIF, masks DAIF, enables single-step, and restores DAIF.
However, since the "D" flag in DAIF was cleared by the 2nd kprobe, the
single-step exception happens soon after DAIF is restored.
This was introduced by commit 7419333fa15e ("arm64: kprobe:
Always clear pstate.D in breakpoint exception handler").
To solve this issue, this patch stores all DAIF bits and restores them
after single stepping.
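
(Editorial note: a small user-space model of the fix may help. The
PSR_*_BIT values below match arm64's ptrace definitions; everything
else is an illustrative sketch, not kernel code.)

#include <assert.h>
#include <stdint.h>

#define PSR_F_BIT (1u << 6)
#define PSR_I_BIT (1u << 7)
#define PSR_A_BIT (1u << 8)
#define PSR_D_BIT (1u << 9)
#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)

static uint32_t saved_irqflag;

/* Like the fixed kprobes_save_local_irqflag(): save all four DAIF
 * bits, mask interrupts, and unmask D so single-step can fire. */
static void save_and_prepare(uint32_t *pstate)
{
	saved_irqflag = *pstate & DAIF_MASK;
	*pstate |= PSR_I_BIT;
	*pstate &= ~PSR_D_BIT;
}

/* Like the fixed kprobes_restore_local_irqflag(): restore the saved
 * DAIF bits exactly, instead of recomputing only PSR_I_BIT. */
static void restore(uint32_t *pstate)
{
	*pstate &= ~DAIF_MASK;
	*pstate |= saved_irqflag;
}

int main(void)
{
	uint32_t pstate = PSR_D_BIT | PSR_I_BIT; /* entered with D masked */

	save_and_prepare(&pstate);
	assert(!(pstate & PSR_D_BIT)); /* single-step can be taken */
	restore(&pstate);
	assert(pstate & PSR_D_BIT);    /* D recovered, unlike the old code */
	return 0;
}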
Reported-by: Naresh Kamboju <naresh.kamboju(a)linaro.org>
Fixes: 7419333fa15e ("arm64: kprobe: Always clear pstate.D in breakpoint exception handler")
Reviewed-by: James Morse <james.morse(a)arm.com>
Tested-by: James Morse <james.morse(a)arm.com>
Signed-off-by: Masami Hiramatsu <mhiramat(a)kernel.org>
Signed-off-by: Will Deacon <will(a)kernel.org>
Signed-off-by: Wei Li <liwei391(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/arm64/include/asm/daifflags.h | 1 +
arch/arm64/kernel/probes/kprobes.c | 40 ++++++--------------------------------
2 files changed, 7 insertions(+), 34 deletions(-)
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 3441ca0..1230923 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -24,6 +24,7 @@
#define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
+#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 2d63df1..fe9d207 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -30,6 +30,7 @@
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/daifflags.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
@@ -180,33 +181,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
}
/*
- * When PSTATE.D is set (masked), then software step exceptions can not be
- * generated.
- * SPSR's D bit shows the value of PSTATE.D immediately before the
- * exception was taken. PSTATE.D is set while entering into any exception
- * mode, however software clears it for any normal (none-debug-exception)
- * mode in the exception entry. Therefore, when we are entering into kprobe
- * breakpoint handler from any normal mode then SPSR.D bit is already
- * cleared, however it is set when we are entering from any debug exception
- * mode.
- * Since we always need to generate single step exception after a kprobe
- * breakpoint exception therefore we need to clear it unconditionally, when
- * we become sure that the current breakpoint exception is for kprobe.
- */
-static void __kprobes
-spsr_set_debug_flag(struct pt_regs *regs, int mask)
-{
- unsigned long spsr = regs->pstate;
-
- if (mask)
- spsr |= PSR_D_BIT;
- else
- spsr &= ~PSR_D_BIT;
-
- regs->pstate = spsr;
-}
-
-/*
* Interrupts need to be disabled before single-step mode is set, and not
* reenabled until after single-step mode ends.
* Without disabling interrupt on local CPU, there is a chance of
@@ -217,17 +191,17 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- kcb->saved_irqflag = regs->pstate;
+ kcb->saved_irqflag = regs->pstate & DAIF_MASK;
regs->pstate |= PSR_I_BIT;
+ /* Unmask PSTATE.D for enabling software step exceptions. */
+ regs->pstate &= ~PSR_D_BIT;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- if (kcb->saved_irqflag & PSR_I_BIT)
- regs->pstate |= PSR_I_BIT;
- else
- regs->pstate &= ~PSR_I_BIT;
+ regs->pstate &= ~DAIF_MASK;
+ regs->pstate |= kcb->saved_irqflag;
}
static void __kprobes
@@ -264,8 +238,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
set_ss_context(kcb, slot); /* mark pending ss */
- spsr_set_debug_flag(regs, 0);
-
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
kernel_enable_single_step(regs);
--
1.8.3

[PATCH 1/2] qm: optimize the maximum number of VF and delete invalid addr
by Yang Yingliang 17 Apr '20
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
In this patch, we optimize the way the maximum number of VFs is
set, so that it stays compatible with the next hardware revision.
We also remove an invalid address parameter definition and its
assignment. Meanwhile, the return-code checks of the debugfs-related
functions are dropped, because their failure does not affect the
main function of the driver.
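The core of the change is the clamp below; this is a sketch of the
pattern, with the real hunk in hisi_qm_sriov_enable() further down:
	/* Clamp the request to what the PF actually advertises,
	 * instead of the hard-coded QM_MAX_VFS_NUM. */
	int total_vfs = pci_sriov_get_totalvfs(pdev);
	int num_vfs = min_t(int, max_vfs, total_vfs);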
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Junxian Liu <liujunxian3(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/hpre/hpre.h | 1 -
drivers/crypto/hisilicon/hpre/hpre_main.c | 54 ++++++++++++++-----------------
drivers/crypto/hisilicon/qm.c | 22 ++++---------
drivers/crypto/hisilicon/qm.h | 6 ++--
drivers/crypto/hisilicon/rde/rde_main.c | 2 ++
drivers/crypto/hisilicon/zip/zip_main.c | 2 ++
6 files changed, 38 insertions(+), 49 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index 3ac02ef..42b2f2a 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -18,7 +18,6 @@ enum {
HPRE_CLUSTERS_NUM,
};
-
enum hpre_ctrl_dbgfs_file {
HPRE_CURRENT_QM,
HPRE_CLEAR_ENABLE,
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 4dc0d3e..f727158 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -435,8 +435,7 @@ static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
vfq_num = (qm->ctrl_q_num - qm->qp_num) / num_vfs;
if (val == num_vfs) {
qm->debug.curr_qm_qp_num =
- qm->ctrl_q_num - qm->qp_num -
- (num_vfs - 1) * vfq_num;
+ qm->ctrl_q_num - qm->qp_num - (num_vfs - 1) * vfq_num;
} else {
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -592,7 +591,7 @@ static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
- struct dentry *tmp, *file_dir;
+ struct dentry *file_dir;
struct hpre *hpre;
if (dir) {
@@ -609,10 +608,9 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
dbg->files[indx].debug = dbg;
dbg->files[indx].type = type;
dbg->files[indx].index = indx;
- tmp = debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
- dbg->files + indx, &hpre_ctrl_debug_fops);
- if (!tmp)
- return -ENOENT;
+
+ debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
+ dbg->files + indx, &hpre_ctrl_debug_fops);
return 0;
}
@@ -623,7 +621,6 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
struct hisi_qm *qm = &hpre->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
- struct dentry *tmp;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -633,11 +630,7 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, qm->debug.debug_root,
- regset);
- if (!tmp)
- return -ENOENT;
-
+ debugfs_create_regset32("regs", 0444, qm->debug.debug_root, regset);
return 0;
}
@@ -648,7 +641,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
struct device *dev = &qm->pdev->dev;
char buf[HPRE_DBGFS_VAL_MAX_LEN];
struct debugfs_regset32 *regset;
- struct dentry *tmp_d, *tmp;
+ struct dentry *tmp_d;
int i, ret;
for (i = 0; i < HPRE_CLUSTERS_NUM; i++) {
@@ -657,8 +650,6 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
return -EINVAL;
tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
- if (!tmp_d)
- return -ENOENT;
regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
if (!regset)
@@ -668,9 +659,8 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
regset->base = qm->io_base + hpre_cluster_offsets[i];
- tmp = debugfs_create_regset32("regs", 0444, tmp_d, regset);
- if (!tmp)
- return -ENOENT;
+ debugfs_create_regset32("regs", 0444, tmp_d, regset);
+
ret = hpre_create_debugfs_file(debug, tmp_d, HPRE_CLUSTER_CTRL,
i + HPRE_CLUSTER_CTRL);
if (ret)
@@ -705,14 +695,10 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
{
struct hpre *hpre = container_of(qm, struct hpre, qm);
struct device *dev = &qm->pdev->dev;
- struct dentry *dir;
int ret;
- dir = debugfs_create_dir(dev_name(dev), hpre_debugfs_root);
- if (!dir)
- return -ENOENT;
-
- qm->debug.debug_root = dir;
+ qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
+ hpre_debugfs_root);
ret = hisi_qm_debug_init(qm);
if (ret)
@@ -730,6 +716,11 @@ static int hpre_debugfs_init(struct hisi_qm *qm)
return ret;
}
+static void hpre_debugfs_exit(struct hisi_qm *qm)
+{
+ debugfs_remove_recursive(qm->debug.debug_root);
+}
+
static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
int ret;
@@ -929,7 +920,8 @@ static void hpre_remove(struct pci_dev *pdev)
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
- debugfs_remove_recursive(qm->debug.debug_root);
+
+ hpre_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
@@ -967,19 +959,23 @@ static void hpre_register_debugfs(void)
hpre_debugfs_root = NULL;
}
+static void hpre_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hpre_debugfs_root);
+}
+
static int __init hpre_init(void)
{
int ret;
INIT_LIST_HEAD(&hpre_devices.list);
mutex_init(&hpre_devices.lock);
- hpre_devices.check = NULL;
hpre_register_debugfs();
ret = pci_register_driver(&hpre_pci_driver);
if (ret) {
- debugfs_remove_recursive(hpre_debugfs_root);
+ hpre_unregister_debugfs();
pr_err("hpre: can't register hisi hpre driver.\n");
}
@@ -989,7 +985,7 @@ static int __init hpre_init(void)
static void __exit hpre_exit(void)
{
pci_unregister_driver(&hpre_pci_driver);
- debugfs_remove_recursive(hpre_debugfs_root);
+ hpre_unregister_debugfs();
}
module_init(hpre_init);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 6a5337a..d3429e7 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1428,6 +1428,7 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, int pasid)
qp->c_flag << QM_CQ_FLAG_SHIFT);
ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
+
dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
kfree(cqc);
@@ -2330,6 +2331,7 @@ static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
+
dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
kfree(aeqc);
@@ -2384,13 +2386,6 @@ static int __hisi_qm_start(struct hisi_qm *qm)
QM_INIT_BUF(qm, sqc, qm->qp_num);
QM_INIT_BUF(qm, cqc, qm->qp_num);
-#ifdef CONFIG_CRYPTO_QM_UACCE
- /* get reserved dma memory */
- qm->reserve = qm->qdma.va + off;
- qm->reserve_dma = qm->qdma.dma + off;
- off += PAGE_SIZE;
-#endif
-
ret = qm_eq_aeq_ctx_cfg(qm);
if (ret)
return ret;
@@ -2681,7 +2676,7 @@ void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
*/
int hisi_qm_debug_init(struct hisi_qm *qm)
{
- struct dentry *qm_d, *qm_regs;
+ struct dentry *qm_d;
int i, ret;
qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
@@ -2697,12 +2692,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- qm_regs = debugfs_create_file("regs", 0444, qm->debug.qm_d, qm,
- &qm_regs_fops);
- if (IS_ERR(qm_regs)) {
- ret = -ENOENT;
- goto failed_to_create;
- }
+ debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
return 0;
@@ -3038,7 +3028,9 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
int pre_existing_vfs, num_vfs, ret;
+ int total_vfs;
+ total_vfs = pci_sriov_get_totalvfs(pdev);
pre_existing_vfs = pci_num_vf(pdev);
if (pre_existing_vfs) {
pci_err(pdev,
@@ -3046,7 +3038,7 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
return 0;
}
- num_vfs = min_t(int, max_vfs, QM_MAX_VFS_NUM);
+ num_vfs = min_t(int, max_vfs, total_vfs);
ret = qm_vf_q_assign(qm, num_vfs);
if (ret) {
pci_err(pdev, "Can't assign queues for VF!\n");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 36e888f..79e29ee 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -19,7 +19,7 @@
#define QNUM_V1 4096
#define QNUM_V2 1024
-#define QM_MAX_VFS_NUM 63
+#define QM_MAX_VFS_NUM_V2 63
/* qm user domain */
#define QM_ARUSER_M_CFG_1 0x100088
#define AXUSER_SNOOP_ENABLE BIT(30)
@@ -322,9 +322,7 @@ struct hisi_qm {
resource_size_t size;
struct uacce uacce;
const char *algs;
- void *reserve;
int uacce_mode;
- dma_addr_t reserve_dma;
#endif
struct workqueue_struct *wq;
struct work_struct work;
@@ -423,7 +421,7 @@ static inline int vf_num_set(const char *val, const struct kernel_param *kp)
if (ret < 0)
return ret;
- if (n > QM_MAX_VFS_NUM)
+ if (n > QM_MAX_VFS_NUM_V2)
return -ERANGE;
return param_set_int(val, kp);
diff --git a/drivers/crypto/hisilicon/rde/rde_main.c b/drivers/crypto/hisilicon/rde/rde_main.c
index 318d4a0..946532f 100644
--- a/drivers/crypto/hisilicon/rde/rde_main.c
+++ b/drivers/crypto/hisilicon/rde/rde_main.c
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
+#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -20,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
+
#include "rde.h"
#define HRDE_QUEUE_NUM_V1 4096
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 54681dc..83e2869 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -858,8 +858,10 @@ static void hisi_zip_remove(struct pci_dev *pdev)
{
struct hisi_qm *qm = pci_get_drvdata(pdev);
+#ifdef CONFIG_CRYPTO_QM_UACCE
if (uacce_mode != UACCE_MODE_NOUACCE)
hisi_qm_remove_wait_delay(qm, &zip_devices);
+#endif
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev, NULL);
--
1.8.3

[PATCH 1/4] Revert "dm crypt: use WQ_HIGHPRI for the IO and crypt workqueues"
by Yang Yingliang 17 Apr '20
From: Mike Snitzer <snitzer(a)redhat.com>
mainline inclusion
from mainline-5.5-rc1
commit f612b2132db529feac4f965f28a1b9258ea7c22b
category: bugfix
bugzilla: 25149
CVE: NA
---------------------------
This reverts commit a1b89132dc4f61071bdeaab92ea958e0953380a1.
Revert required hand-patching due to subsequent changes that were
applied since commit a1b89132dc4f61071bdeaab92ea958e0953380a1.
Requires: ed0302e83098d ("dm crypt: make workqueue names device-specific")
Cc: stable(a)vger.kernel.org
Bug: https://bugzilla.kernel.org/show_bug.cgi?id=199857
Reported-by: Vito Caputo <vcaputo(a)pengaru.com>
Signed-off-by: Mike Snitzer <snitzer(a)redhat.com>
Signed-off-by: Sun Ke <sunke32(a)huawei.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/md/dm-crypt.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f68b9bd..d451f98 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -3996,17 +3996,16 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
else
- cc->crypt_queue = alloc_workqueue("kcryptd",
- WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+ cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
num_online_cpus());
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
--
1.8.3

17 Apr '20
From: Bob Moore <robert.moore(a)intel.com>
mainline inclusion
from mainline-v5.5-rc1
commit 197aba2090e3
category: bugfix
bugzilla: 25975
CVE: NA
-------------------------------------------------
ACPICA commit 7bc16c650317001bc82d4bae227b888a49c51f5e
Avoid possible overflow from get_tick_count. Also, cast math
using ACPI_100NSEC_PER_MSEC to uint64.
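The overflow comes from where the cast sits: casting the product
widens it only after the 32-bit multiply may already have wrapped.
A standalone illustration (hypothetical values, not the driver code):
	u32 loops = acpi_gbl_max_loop_iterations;	/* 32-bit operand */
	u64 bad  = (u64)(loops * ACPI_100NSEC_PER_SEC);	/* 32-bit multiply, then widen */
	u64 good = (u64)loops * ACPI_100NSEC_PER_SEC;	/* widen first, multiply in 64 bits */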
Link: https://github.com/acpica/acpica/commit/7bc16c65
Signed-off-by: Bob Moore <robert.moore(a)intel.com>
Signed-off-by: Erik Schmauss <erik.schmauss(a)intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki(a)intel.com>
Signed-off-by: Xiongfeng Wang <wangxiongfeng2(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/acpi/acpica/dscontrol.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 0da9626..ebaa74d 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -85,7 +85,7 @@
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
control_state->control.loop_timeout = acpi_os_get_timer() +
- (u64)(acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
+ ((u64)acpi_gbl_max_loop_iterations * ACPI_100NSEC_PER_SEC);
/* Push the control state on this walk's control stack */
--
1.8.3

[PATCH 001/276] core: Don't skip generic XDP program execution for cloned SKBs
by Yang Yingliang 17 Apr '20
From: Toke Høiland-Jørgensen <toke(a)redhat.com>
[ Upstream commit ad1e03b2b3d4430baaa109b77bc308dc73050de3 ]
The current generic XDP handler skips execution of XDP programs entirely if
an SKB is marked as cloned. This leads to some surprising behaviour, as
packets can end up being cloned in various ways, which will make an XDP
program not see all the traffic on an interface.
This was discovered by a simple test case where an XDP program that always
returns XDP_DROP is installed on a veth device. When combining this with
the Scapy packet sniffer (which uses an AF_PACKET) socket on the sending
side, SKBs reliably end up in the cloned state, causing them to be passed
through to the receiving interface instead of being dropped. A minimal
reproducer script for this is included below.
This patch fixes the issue by simply triggering the existing linearisation
code for cloned SKBs instead of skipping the XDP program execution. This
behaviour is in line with the behaviour of the native XDP implementation
for the veth driver, which will reallocate and copy the SKB data if the SKB
is marked as shared.
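The resulting ordering, in sketch form (the actual two-line hunk is
at the end of this mail): cloned SKBs no longer bypass generic XDP,
they simply take the copy path:
	if (skb_is_tc_redirected(skb))
		return XDP_PASS;	/* reinjected packets still skip XDP */

	/* Cloned or nonlinear SKBs are reallocated so the program sees
	 * a private, linear buffer, as native XDP guarantees. */
	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
		/* expand headroom and linearise before running the program */
	}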
Reproducer Python script (requires BCC and Scapy):
from scapy.all import TCP, IP, Ether, sendp, sniff, AsyncSniffer, Raw, UDP
from bcc import BPF
import time, sys, subprocess, shlex
SKB_MODE = (1 << 1)
DRV_MODE = (1 << 2)
PYTHON=sys.executable
def client():
time.sleep(2)
# Sniffing on the sender causes skb_cloned() to be set
s = AsyncSniffer()
s.start()
for p in range(10):
sendp(Ether(dst="aa:aa:aa:aa:aa:aa", src="cc:cc:cc:cc:cc:cc")/IP()/UDP()/Raw("Test"),
verbose=False)
time.sleep(0.1)
s.stop()
return 0
def server(mode):
prog = BPF(text="int dummy_drop(struct xdp_md *ctx) {return XDP_DROP;}")
func = prog.load_func("dummy_drop", BPF.XDP)
prog.attach_xdp("a_to_b", func, mode)
time.sleep(1)
s = sniff(iface="a_to_b", count=10, timeout=15)
if len(s):
print(f"Got {len(s)} packets - should have gotten 0")
return 1
else:
print("Got no packets - as expected")
return 0
if len(sys.argv) < 2:
print(f"Usage: {sys.argv[0]} <skb|drv>")
sys.exit(1)
if sys.argv[1] == "client":
sys.exit(client())
elif sys.argv[1] == "server":
mode = SKB_MODE if sys.argv[2] == 'skb' else DRV_MODE
sys.exit(server(mode))
else:
try:
mode = sys.argv[1]
if mode not in ('skb', 'drv'):
print(f"Usage: {sys.argv[0]} <skb|drv>")
sys.exit(1)
print(f"Running in {mode} mode")
for cmd in [
'ip netns add netns_a',
'ip netns add netns_b',
'ip -n netns_a link add a_to_b type veth peer name b_to_a netns netns_b',
# Disable ipv6 to make sure there's no address autoconf traffic
'ip netns exec netns_a sysctl -qw net.ipv6.conf.a_to_b.disable_ipv6=1',
'ip netns exec netns_b sysctl -qw net.ipv6.conf.b_to_a.disable_ipv6=1',
'ip -n netns_a link set dev a_to_b address aa:aa:aa:aa:aa:aa',
'ip -n netns_b link set dev b_to_a address cc:cc:cc:cc:cc:cc',
'ip -n netns_a link set dev a_to_b up',
'ip -n netns_b link set dev b_to_a up']:
subprocess.check_call(shlex.split(cmd))
server = subprocess.Popen(shlex.split(f"ip netns exec netns_a {PYTHON} {sys.argv[0]} server {mode}"))
client = subprocess.Popen(shlex.split(f"ip netns exec netns_b {PYTHON} {sys.argv[0]} client"))
client.wait()
server.wait()
sys.exit(server.returncode)
finally:
subprocess.run(shlex.split("ip netns delete netns_a"))
subprocess.run(shlex.split("ip netns delete netns_b"))
Fixes: d445516966dc ("net: xdp: support xdp generic on virtual devices")
Reported-by: Stepan Horacek <shoracek(a)redhat.com>
Suggested-by: Paolo Abeni <pabeni(a)redhat.com>
Signed-off-by: Toke Høiland-Jørgensen <toke(a)redhat.com>
Signed-off-by: David S. Miller <davem(a)davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
net/core/dev.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 1c0224e..c1a3baf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4306,14 +4306,14 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
*/
- if (skb_cloned(skb) || skb_is_tc_redirected(skb))
+ if (skb_is_tc_redirected(skb))
return XDP_PASS;
/* XDP packets must be linear and must have sufficient headroom
* of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
* native XDP provides, thus we need to do it here as well.
*/
- if (skb_is_nonlinear(skb) ||
+ if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
int troom = skb->tail + skb->data_len - skb->end;
--
1.8.3
From: Al Viro <viro(a)zeniv.linux.org.uk>
mainline inclusion
from mainline-5.1-rc1
commit 35ac1184244f1329783e1d897f74926d8bb1103a
category: bugfix
bugzilla: 30217
CVE: NA
---------------------------
* make the reference from superblock to cgroup_root counting -
do cgroup_put() in cgroup_kill_sb() whether we'd done
percpu_ref_kill() or not; matching grab is done when we allocate
a new root. That gives the same refcounting rules for all callers
of cgroup_do_mount() - a reference to cgroup_root has been grabbed
by caller and it either is transferred to new superblock or dropped.
* have cgroup_kill_sb() treat an already killed refcount as "just
don't bother killing it, then".
* after successful cgroup_do_mount() have cgroup1_mount() recheck
if we'd raced with mount/umount from somebody else and cgroup_root
got killed. In that case we drop the superblock and bugger off
with -ERESTARTSYS, same as if we'd found it in the list already
dying.
* don't bother with delayed initialization of refcount - it's
unreliable and not needed. No need to prevent attempts to bump
the refcount if we find cgroup_root of another mount in progress -
sget will reuse an existing superblock just fine and if the
other sb manages to die before we get there, we'll catch
that immediately after cgroup_do_mount().
* don't bother with kernfs_pin_sb() - no need for doing that
either.
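The net effect on cgroup_kill_sb(), in sketch form (the full hunk is
below): kill the refcount at most once, and always drop the
superblock's reference:
	if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
	    !percpu_ref_is_dying(&root->cgrp.self.refcnt))
		percpu_ref_kill(&root->cgrp.self.refcnt);
	cgroup_put(&root->cgrp);	/* matches the grab done at root allocation */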
Signed-off-by: Al Viro <viro(a)zeniv.linux.org.uk>
Signed-off-by: yangerkun <yangerkun(a)huawei.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
kernel/cgroup/cgroup-internal.h | 2 +-
kernel/cgroup/cgroup-v1.c | 58 +++++++++--------------------------------
kernel/cgroup/cgroup.c | 16 +++++-------
3 files changed, 21 insertions(+), 55 deletions(-)
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 75568fc..6f02be1 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -196,7 +196,7 @@ int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
void cgroup_free_root(struct cgroup_root *root);
void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts);
-int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags);
+int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
struct cgroup_root *root, unsigned long magic,
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index ff3e1aa..e66bb45 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1112,13 +1112,11 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
void *data, unsigned long magic,
struct cgroup_namespace *ns)
{
- struct super_block *pinned_sb = NULL;
struct cgroup_sb_opts opts;
struct cgroup_root *root;
struct cgroup_subsys *ss;
struct dentry *dentry;
int i, ret;
- bool new_root = false;
cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
@@ -1180,29 +1178,6 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
if (root->flags ^ opts.flags)
pr_warn("new mount options do not match the existing superblock, will be ignored\n");
- /*
- * We want to reuse @root whose lifetime is governed by its
- * ->cgrp. Let's check whether @root is alive and keep it
- * that way. As cgroup_kill_sb() can happen anytime, we
- * want to block it by pinning the sb so that @root doesn't
- * get killed before mount is complete.
- *
- * With the sb pinned, tryget_live can reliably indicate
- * whether @root can be reused. If it's being killed,
- * drain it. We can use wait_queue for the wait but this
- * path is super cold. Let's just sleep a bit and retry.
- */
- pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
- if (IS_ERR(pinned_sb) ||
- !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
- mutex_unlock(&cgroup_mutex);
- if (!IS_ERR_OR_NULL(pinned_sb))
- deactivate_super(pinned_sb);
- msleep(10);
- ret = restart_syscall();
- goto out_free;
- }
-
ret = 0;
goto out_unlock;
}
@@ -1228,15 +1203,20 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
ret = -ENOMEM;
goto out_unlock;
}
- new_root = true;
init_cgroup_root(root, &opts);
- ret = cgroup_setup_root(root, opts.subsys_mask, PERCPU_REF_INIT_DEAD);
+ ret = cgroup_setup_root(root, opts.subsys_mask);
if (ret)
cgroup_free_root(root);
out_unlock:
+ if (!ret && !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
+ mutex_unlock(&cgroup_mutex);
+ msleep(10);
+ ret = restart_syscall();
+ goto out_free;
+ }
mutex_unlock(&cgroup_mutex);
out_free:
kfree(opts.release_agent);
@@ -1248,25 +1228,13 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
CGROUP_SUPER_MAGIC, ns);
- /*
- * There's a race window after we release cgroup_mutex and before
- * allocating a superblock. Make sure a concurrent process won't
- * be able to re-use the root during this window by delaying the
- * initialization of root refcnt.
- */
- if (new_root) {
- mutex_lock(&cgroup_mutex);
- percpu_ref_reinit(&root->cgrp.self.refcnt);
- mutex_unlock(&cgroup_mutex);
+ if (!IS_ERR(dentry) && percpu_ref_is_dying(&root->cgrp.self.refcnt)) {
+ struct super_block *sb = dentry->d_sb;
+ dput(dentry);
+ deactivate_locked_super(sb);
+ msleep(10);
+ dentry = ERR_PTR(restart_syscall());
}
-
- /*
- * If @pinned_sb, we're reusing an existing root and holding an
- * extra ref on its sb. Mount is complete. Put the extra ref.
- */
- if (pinned_sb)
- deactivate_super(pinned_sb);
-
return dentry;
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 08bd40d..eaee21d 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1897,7 +1897,7 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}
-int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags)
+int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
{
LIST_HEAD(tmp_links);
struct cgroup *root_cgrp = &root->cgrp;
@@ -1914,7 +1914,7 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags)
root_cgrp->ancestor_ids[0] = ret;
ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,
- ref_flags, GFP_KERNEL);
+ 0, GFP_KERNEL);
if (ret)
goto out;
@@ -2091,18 +2091,16 @@ static void cgroup_kill_sb(struct super_block *sb)
struct cgroup_root *root = cgroup_root_from_kf(kf_root);
/*
- * If @root doesn't have any mounts or children, start killing it.
+ * If @root doesn't have any children, start killing it.
* This prevents new mounts by disabling percpu_ref_tryget_live().
* cgroup_mount() may wait for @root's release.
*
* And don't kill the default root.
*/
- if (!list_empty(&root->cgrp.self.children) ||
- root == &cgrp_dfl_root)
- cgroup_put(&root->cgrp);
- else
+ if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root &&
+ !percpu_ref_is_dying(&root->cgrp.self.refcnt))
percpu_ref_kill(&root->cgrp.self.refcnt);
-
+ cgroup_put(&root->cgrp);
kernfs_kill_sb(sb);
}
@@ -5371,7 +5369,7 @@ int __init cgroup_init(void)
hash_add(css_set_table, &init_css_set.hlist,
css_set_hash(init_css_set.subsys));
- BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0, 0));
+ BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
mutex_unlock(&cgroup_mutex);
--
1.8.3

17 Apr '20
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
In this patch, we fix a wrong judgement of the 'used' counter.
When the accelerator driver registers with crypto, the self-test
program sends tasks to the hardware, and 'used' is decreased in the
interrupt thread; meanwhile the crypto exit flow calls
hisi_qm_stop_qp_nolock() to stop the queue, which reads 'used'.
In this scenario the value can be read first and decreased
afterwards, which causes a NULL pointer dereference. So we should
distinguish the fault-handling process from the normal stop process.
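Condensed, the fix is the guard below (the full hunk follows): run
the stop-fail callback only when the queue is actually being reset,
so the normal exit path cannot race with the interrupt thread over
'used':
	/* qp_stop_fail_cb() drains the requests left on a failed qp. */
	if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
		qp_stop_fail_cb(qp);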
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Guangwei Zhou <zhouguangwei5(a)huawei.com>
Reviewed-by: Junxian Liu <liujunxian3(a)huawei.com>
Reviewed-by: Shukun Tan <tanshukun1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/qm.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index e89a770..86f4a12 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1591,10 +1591,12 @@ static int hisi_qm_stop_qp_nolock(struct hisi_qp *qp)
if (qp->qm->wq)
flush_workqueue(qp->qm->wq);
+ else
+ flush_work(&qp->qm->work);
/* wait for increase used count in qp send and last poll qp finish */
udelay(WAIT_PERIOD);
- if (atomic_read(&qp->qp_status.used))
+ if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
qp_stop_fail_cb(qp);
dev_dbg(dev, "stop queue %u!", qp->qp_id);
--
1.8.3

[PATCH 1/9] nfsd: Ensure CLONE persists data and metadata changes to the target file
by Yang Yingliang 17 Apr '20
From: Trond Myklebust <trondmy(a)gmail.com>
mainline inclusion
from mainline-v5.5-rc1
commit a25e3726b32c746c0098125d4c7463bb84df72bb
category: bugfix
bugzilla: 27346
CVE: NA
-------------------------------------------------
The NFSv4.2 CLONE operation has implicit persistence requirements on the
target file, since there is no protocol requirement that the client issue
a separate operation to persist data.
For that reason, we should call vfs_fsync_range() on the destination file
after a successful call to vfs_clone_file_range().
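In outline, the fixed helper does the following (a sketch of the
hunk below; a zero count means "clone to EOF", hence LLONG_MAX as
the fsync end):
	cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count);
	if (cloned < 0)
		return nfserrno(cloned);
	if (sync) {
		loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
		int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);

		if (status < 0)
			return nfserrno(status);
	}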
Fixes: ffa0160a1039 ("nfsd: implement the NFSv4.2 CLONE operation")
Signed-off-by: Trond Myklebust <trond.myklebust(a)hammerspace.com>
Cc: stable(a)vger.kernel.org # v4.5+
Signed-off-by: J. Bruce Fields <bfields(a)redhat.com>
Conflicts:
fs/nfsd/nfs4proc.c
fs/nfsd/vfs.c
42ec3d4c0218 ("vfs: make remap_file_range functions take and return bytes
completed")
2e5dfc99f2e6 ("vfs: combine the clone and dedupe into a single
remap_file_range")
Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5(a)huawei.com>
Reviewed-by: zhangyi (F) <yi.zhang(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/nfsd/nfs4proc.c | 3 ++-
fs/nfsd/vfs.c | 16 +++++++++++++---
fs/nfsd/vfs.h | 2 +-
3 files changed, 16 insertions(+), 5 deletions(-)
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index f35aa9f..1c3e6de 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1082,7 +1082,8 @@ static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh)
goto out;
status = nfsd4_clone_file_range(src, clone->cl_src_pos,
- dst, clone->cl_dst_pos, clone->cl_count);
+ dst, clone->cl_dst_pos, clone->cl_count,
+ EX_ISSYNC(cstate->current_fh.fh_export));
fput(dst);
fput(src);
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 80ceded..90e97c8 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -552,10 +552,20 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
#endif
__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
- u64 dst_pos, u64 count)
+ u64 dst_pos, u64 count, bool sync)
{
- return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
- count));
+ int cloned;
+
+ cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count);
+ if (cloned < 0)
+ return nfserrno(cloned);
+ if (sync) {
+ loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
+ int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
+ if (status < 0)
+ return nfserrno(status);
+ }
+ return 0;
}
ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index db35124..02b0a14 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -58,7 +58,7 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *,
__be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
struct file *, loff_t, loff_t, int);
__be32 nfsd4_clone_file_range(struct file *, u64, struct file *,
- u64, u64);
+ u64, u64, bool);
#endif /* CONFIG_NFSD_V4 */
__be32 nfsd_create_locked(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
--
1.8.3

[PATCH] qm: Move all the same logic functions of hisilicon crypto to qm
by Yang Yingliang 16 Apr '20
From: Yu'an Wang <wangyuan46(a)huawei.com>
driver inclusion
category: bugfix
bugzilla: NA
CVE: NA
In this patch, we move the logic shared by the accelerator drivers
into the qm module to simplify the code, covering RAS/FLR/SRIOV
handling and the uacce_mode/pf_q_num/vfs_num module parameters.
In qm.h we add mode_set/q_num_set/vf_num_set so that each
accelerator can reuse them to set uacce_mode/pf_q_num/vfs_num.
In qm.c, hisi_qm_add_to_list and hisi_qm_del_from_list can be called
to manage accelerators through hisi_qm_list. We additionally add
hisi_qm_alloc_qps_node to fix the case where a device is found but
the queue request then fails (see the allocation sketch below).
Because the RAS, FLR and SRIOV flows are consistent across the
different accelerator drivers, we add the corresponding common
interfaces.
Meanwhile, the zip/hpre/sec/rde accelerator drivers are adapted to
the qm changes, including RAS/FLR/SRIOV processing, module parameter
setting and queue allocation.
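As an example of the new allocation helper, a driver can now request
a qp near the current NUMA node in one call; this is exactly how
hpre_create_qp() uses it in the hunks below:
	int node = cpu_to_node(smp_processor_id());
	struct hisi_qp *qp = NULL;

	/* Ask the qm core for one qp (alg_type 0) from the hpre list. */
	if (!hisi_qm_alloc_qps_node(node, &hpre_devices, &qp, 1, 0))
		return qp;	/* ready for hisi_qm_start_qp() */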
Signed-off-by: Yu'an Wang <wangyuan46(a)huawei.com>
Reviewed-by: Cheng Hu <hucheng.hu(a)huawei.com>
Reviewed-by: Wei Zhang <zhangwei375(a)huawei.com>
Reviewed-by: Guangwei Zhang <zhouguangwei5(a)huawei.com>
Reviewed-by: Junxian Liu <liujunxian3(a)huawei.com>
Reviewed-by: Shukun Tan <tanshukun1(a)huawei.com>
Reviewed-by: Hao Fang <fanghao11(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/crypto/hisilicon/hpre/hpre.h | 9 +-
drivers/crypto/hisilicon/hpre/hpre_crypto.c | 20 +-
drivers/crypto/hisilicon/hpre/hpre_main.c | 944 +++---------------
drivers/crypto/hisilicon/qm.c | 1093 +++++++++++++++++----
drivers/crypto/hisilicon/qm.h | 209 +++-
drivers/crypto/hisilicon/rde/rde.h | 11 +-
drivers/crypto/hisilicon/rde/rde_api.c | 29 +-
drivers/crypto/hisilicon/rde/rde_api.h | 2 +-
drivers/crypto/hisilicon/rde/rde_main.c | 717 ++++----------
drivers/crypto/hisilicon/sec2/sec.h | 13 +-
drivers/crypto/hisilicon/sec2/sec_crypto.c | 83 +-
drivers/crypto/hisilicon/sec2/sec_main.c | 1364 +++++++--------------------
drivers/crypto/hisilicon/zip/zip.h | 9 +-
drivers/crypto/hisilicon/zip/zip_crypto.c | 30 +-
drivers/crypto/hisilicon/zip/zip_main.c | 1152 +++++-----------------
15 files changed, 2053 insertions(+), 3632 deletions(-)
diff --git a/drivers/crypto/hisilicon/hpre/hpre.h b/drivers/crypto/hisilicon/hpre/hpre.h
index ba7c88e..3ac02ef 100644
--- a/drivers/crypto/hisilicon/hpre/hpre.h
+++ b/drivers/crypto/hisilicon/hpre/hpre.h
@@ -35,25 +35,18 @@ struct hpre_debugfs_file {
struct hpre_debug *debug;
};
-#define HPRE_RESET 0
-#define HPRE_WAIT_DELAY 1000
-
/*
* One HPRE controller has one PF and multiple VFs, some global configurations
* which PF has need this structure.
* Just relevant for PF.
*/
struct hpre_debug {
- struct dentry *debug_root;
struct hpre_debugfs_file files[HPRE_DEBUGFS_FILE_NUM];
};
struct hpre {
struct hisi_qm qm;
- struct list_head list;
struct hpre_debug debug;
- u32 num_vfs;
- unsigned long status;
};
enum hpre_alg_type {
@@ -80,7 +73,7 @@ struct hpre_sqe {
__le32 rsvd1[_HPRE_SQE_ALIGN_EXT];
};
-struct hpre *hpre_find_device(int node);
+struct hisi_qp *hpre_create_qp(void);
int hpre_algs_register(void);
void hpre_algs_unregister(void);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index aadc975..7610e13 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -147,26 +147,18 @@ static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
static struct hisi_qp *hpre_get_qp_and_start(void)
{
struct hisi_qp *qp;
- struct hpre *hpre;
int ret;
- /* find the proper hpre device, which is near the current CPU core */
- hpre = hpre_find_device(cpu_to_node(smp_processor_id()));
- if (!hpre) {
- pr_err("Can not find proper hpre device!\n");
- return ERR_PTR(-ENODEV);
- }
-
- qp = hisi_qm_create_qp(&hpre->qm, 0);
- if (IS_ERR(qp)) {
- pci_err(hpre->qm.pdev, "Can not create qp!\n");
+ qp = hpre_create_qp();
+ if (!qp) {
+ pr_err("Can not create hpre qp!\n");
return ERR_PTR(-ENODEV);
}
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
- hisi_qm_release_qp(qp);
- pci_err(hpre->qm.pdev, "Can not start qp!\n");
+ hisi_qm_free_qps(&qp, 1);
+ pci_err(qp->qm->pdev, "Can not start qp!\n");
return ERR_PTR(-EINVAL);
}
@@ -337,7 +329,7 @@ static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
if (is_clear_all) {
idr_destroy(&ctx->req_idr);
kfree(ctx->req_list);
- hisi_qm_release_qp(ctx->qp);
+ hisi_qm_free_qps(&ctx->qp, 1);
}
ctx->crt_g2_mode = false;
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index 6a3bce2..4dc0d3e 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -13,9 +13,6 @@
#include <linux/uacce.h>
#include "hpre.h"
-#define HPRE_ENABLE 1
-#define HPRE_DISABLE 0
-#define HPRE_VF_NUM 63
#define HPRE_QUEUE_NUM_V2 1024
#define HPRE_QUEUE_NUM_V1 4096
#define HPRE_QM_ABNML_INT_MASK 0x100004
@@ -63,10 +60,6 @@
#define HPRE_HAC_ECC2_CNT 0x301a08
#define HPRE_HAC_INT_STATUS 0x301800
#define HPRE_HAC_SOURCE_INT 0x301600
-#define MASTER_GLOBAL_CTRL_SHUTDOWN 1
-#define MASTER_TRANS_RETURN_RW 3
-#define HPRE_MASTER_TRANS_RETURN 0x300150
-#define HPRE_MASTER_GLOBAL_CTRL 0x300000
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
@@ -83,24 +76,18 @@
#define HPRE_QM_VFG_AX_MASK 0xff
#define HPRE_BD_USR_MASK 0x3
#define HPRE_CLUSTER_CORE_MASK 0xf
-#define HPRE_RESET_WAIT_TIMEOUT 400
#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define AM_OOO_SHUTDOWN_ENABLE BIT(0)
#define AM_OOO_SHUTDOWN_DISABLE 0xFFFFFFFE
-#define HPRE_WR_MSI_PORT 0xFFFB
+#define HPRE_WR_MSI_PORT BIT(2)
-#define HPRE_HW_ERROR_IRQ_ENABLE 1
-#define HPRE_HW_ERROR_IRQ_DISABLE 0
-#define HPRE_PCI_COMMAND_INVALID 0xFFFFFFFF
#define HPRE_CORE_ECC_2BIT_ERR BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR BIT(5)
-#define HPRE_QM_BME_FLR BIT(7)
-#define HPRE_QM_PM_FLR BIT(11)
-#define HPRE_QM_SRIOV_FLR BIT(12)
-
-#define HPRE_USLEEP 10
+#define HPRE_QM_BME_FLR BIT(7)
+#define HPRE_QM_PM_FLR BIT(11)
+#define HPRE_QM_SRIOV_FLR BIT(12)
/* function index:
* 1 for hpre bypass mode,
@@ -108,8 +95,7 @@
*/
#define HPRE_VIA_MSI_DSM 1
-static LIST_HEAD(hpre_list);
-static DEFINE_MUTEX(hpre_list_lock);
+static struct hisi_qm_list hpre_devices;
static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
@@ -183,59 +169,29 @@ struct hpre_hw_error {
{"INT_STATUS ", HPRE_INT_STATUS},
};
-static int hpre_pf_q_num_set(const char *val, const struct kernel_param *kp)
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev;
- u32 q_num;
- u32 n = 0;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HPRE_PCI_DEVICE_ID, NULL);
- if (!pdev) {
- q_num = HPRE_QUEUE_NUM_V2;
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- if (rev_id != QM_HW_V2)
- return -EINVAL;
-
- q_num = HPRE_QUEUE_NUM_V2;
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n == 0 || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return mode_set(val, kp);
}
-static const struct kernel_param_ops hpre_pf_q_num_ops = {
- .set = hpre_pf_q_num_set,
+static const struct kernel_param_ops uacce_mode_ops = {
+ .set = uacce_mode_set,
.get = param_get_int,
};
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
-{
- u32 n;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+static int uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+#endif
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, HPRE_PCI_DEVICE_ID);
}
-static const struct kernel_param_ops uacce_mode_ops = {
- .set = uacce_mode_set,
+static const struct kernel_param_ops hpre_pf_q_num_ops = {
+ .set = pf_q_num_set,
.get = param_get_int,
};
@@ -243,46 +199,31 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(1-1024)");
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-static inline void hpre_add_to_list(struct hpre *hpre)
+static int vfs_num_set(const char *val, const struct kernel_param *kp)
{
- mutex_lock(&hpre_list_lock);
- list_add_tail(&hpre->list, &hpre_list);
- mutex_unlock(&hpre_list_lock);
+ return vf_num_set(val, kp);
}
-static inline void hpre_remove_from_list(struct hpre *hpre)
-{
- mutex_lock(&hpre_list_lock);
- list_del(&hpre->list);
- mutex_unlock(&hpre_list_lock);
-}
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
+ .get = param_get_int,
+};
-struct hpre *hpre_find_device(int node)
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+
+struct hisi_qp *hpre_create_qp(void)
{
- struct hpre *hpre, *ret = NULL;
- int min_distance = INT_MAX;
- struct device *dev;
- int dev_node = 0;
-
- mutex_lock(&hpre_list_lock);
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = &hpre->qm.pdev->dev;
-#ifdef CONFIG_NUMA
- dev_node = dev->numa_node;
- if (dev_node < 0)
- dev_node = 0;
-#endif
- if (node_distance(dev_node, node) < min_distance) {
- ret = hpre;
- min_distance = node_distance(dev_node, node);
- }
- }
- mutex_unlock(&hpre_list_lock);
+ int node = cpu_to_node(smp_processor_id());
+ struct hisi_qp *qp = NULL;
+ int ret;
- return ret;
+ ret = hisi_qm_alloc_qps_node(node, &hpre_devices, &qp, 1, 0);
+ if (!ret)
+ return qp;
+
+ return NULL;
}
static void hpre_pasid_enable(struct hisi_qm *qm)
@@ -351,9 +292,8 @@ static int hpre_set_cluster(struct hisi_qm *qm)
return 0;
}
-static int hpre_set_user_domain_and_cache(struct hpre *hpre)
+static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
struct pci_dev *pdev = qm->pdev;
u32 val;
int ret;
@@ -403,7 +343,7 @@ static int hpre_set_user_domain_and_cache(struct hpre *hpre)
pci_err(pdev, "acpi_evaluate_dsm err.\n");
/* disable FLR triggered by BME(bus master enable) */
- val = readl(hpre->qm.io_base + QM_PEH_AXUSER_CFG);
+ val = readl(HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
val |= HPRE_QM_PM_FLR;
writel(val, HPRE_ADDR(qm, QM_PEH_AXUSER_CFG));
@@ -433,23 +373,21 @@ static void hpre_cnt_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void hpre_hw_error_disable(struct hpre *hpre)
+static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
u32 val;
/* disable hpre hw error interrupts */
writel(HPRE_CORE_INT_DISABLE, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when m-bit error occur */
- val = readl(hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
val &= AM_OOO_SHUTDOWN_DISABLE;
- writel(val, hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
-static void hpre_hw_error_enable(struct hpre *hpre)
+static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
u32 val;
/* clear HPRE hw error source if having */
@@ -462,9 +400,9 @@ static void hpre_hw_error_enable(struct hpre *hpre)
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when m-bit error occur */
- val = readl(hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
val |= AM_OOO_SHUTDOWN_ENABLE;
- writel(val, hpre->qm.io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(val, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -484,9 +422,7 @@ static u32 hpre_current_qm_read(struct hpre_debugfs_file *file)
static int hpre_current_qm_write(struct hpre_debugfs_file *file, u32 val)
{
struct hisi_qm *qm = hpre_file_to_qm(file);
- struct hpre_debug *debug = file->debug;
- struct hpre *hpre = container_of(debug, struct hpre, debug);
- u32 num_vfs = hpre->num_vfs;
+ u32 num_vfs = qm->vfs_num;
u32 vfq_num, tmp;
if (val > num_vfs)
@@ -657,11 +593,14 @@ static int hpre_create_debugfs_file(struct hpre_debug *dbg, struct dentry *dir,
enum hpre_ctrl_dbgfs_file type, int indx)
{
struct dentry *tmp, *file_dir;
+ struct hpre *hpre;
- if (dir)
+ if (dir) {
file_dir = dir;
- else
- file_dir = dbg->debug_root;
+ } else {
+ hpre = container_of(dbg, struct hpre, debug);
+ file_dir = hpre->qm.debug.debug_root;
+ }
if (type >= HPRE_DEBUG_FILE_NUM)
return -EINVAL;
@@ -694,7 +633,8 @@ static int hpre_pf_comm_regs_debugfs_init(struct hpre_debug *debug)
regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
regset->base = qm->io_base;
- tmp = debugfs_create_regset32("regs", 0444, debug->debug_root, regset);
+ tmp = debugfs_create_regset32("regs", 0444, qm->debug.debug_root,
+ regset);
if (!tmp)
return -ENOENT;
@@ -716,7 +656,7 @@ static int hpre_cluster_debugfs_init(struct hpre_debug *debug)
if (ret < 0)
return -EINVAL;
- tmp_d = debugfs_create_dir(buf, debug->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -761,9 +701,9 @@ static int hpre_ctrl_debug_init(struct hpre_debug *debug)
return hpre_cluster_debugfs_init(debug);
}
-static int hpre_debugfs_init(struct hpre *hpre)
+static int hpre_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
+ struct hpre *hpre = container_of(qm, struct hpre, qm);
struct device *dev = &qm->pdev->dev;
struct dentry *dir;
int ret;
@@ -779,7 +719,6 @@ static int hpre_debugfs_init(struct hpre *hpre)
goto failed_to_create;
if (qm->pdev->device == HPRE_PCI_DEVICE_ID) {
- hpre->debug.debug_root = dir;
ret = hpre_ctrl_debug_init(&hpre->debug);
if (ret)
goto failed_to_create;
@@ -791,69 +730,41 @@ static int hpre_debugfs_init(struct hpre *hpre)
return ret;
}
-static void hpre_debugfs_exit(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
-
- debugfs_remove_recursive(qm->debug.debug_root);
-}
-
static int hpre_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id < 0)
- return -ENODEV;
+ int ret;
- if (rev_id == QM_HW_V1) {
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "rsa\ndh\n";
+ qm->uacce_mode = uacce_mode;
+#endif
+ qm->pdev = pdev;
+ ret = hisi_qm_pre_init(qm, pf_q_num, HPRE_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+ if (qm->ver == QM_HW_V1) {
pci_warn(pdev, "HPRE version 1 is not supported!\n");
return -EINVAL;
}
- qm->pdev = pdev;
- qm->ver = rev_id;
+ qm->qm_list = &hpre_devices;
qm->sqe_size = HPRE_SQE_SIZE;
qm->dev_name = hpre_name;
- qm->fun_type = (pdev->device == HPRE_PCI_DEVICE_ID) ?
- QM_HW_PF : QM_HW_VF;
- qm->algs = "rsa\ndh\n";
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- return -EINVAL;
- }
- if (pdev->is_physfn) {
- qm->qp_base = HPRE_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
- }
return 0;
}
-static void hpre_hw_err_init(struct hpre *hpre)
-{
- hisi_qm_hw_error_init(&hpre->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- hpre_hw_error_enable(hpre);
-}
-
-static void hpre_open_master_ooo(struct hisi_qm *qm)
+static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- u32 val;
+ const struct hpre_hw_error *err = hpre_hw_errors;
+ struct device *dev = &qm->pdev->dev;
- val = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
- writel(val & AM_OOO_SHUTDOWN_DISABLE,
- HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
- writel(val | AM_OOO_SHUTDOWN_ENABLE,
- HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ while (err->msg) {
+ if (err->int_msk & err_sts)
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg, err->int_msk);
+ err++;
+ }
}
static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
@@ -866,41 +777,47 @@ static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}
-static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
+static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
- const struct hpre_hw_error *err = hpre_hw_errors;
- struct device *dev = &qm->pdev->dev;
+ u32 value;
- while (err->msg) {
- if (err->int_msk & err_sts)
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
- err++;
- }
+ value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
+ writel(value & AM_OOO_SHUTDOWN_DISABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
+ writel(value | AM_OOO_SHUTDOWN_ENABLE,
+ HPRE_ADDR(qm, HPRE_AM_OOO_SHUTDOWN_ENB));
}
-static int hpre_pf_probe_init(struct hpre *hpre)
+static int hpre_pf_probe_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hpre->qm;
int ret;
if (qm->ver != QM_HW_V2)
return -EINVAL;
qm->ctrl_q_num = HPRE_QUEUE_NUM_V2;
- qm->err_ini.qm_wr_port = HPRE_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = (HPRE_CORE_ECC_2BIT_ERR |
- HPRE_OOO_ECC_2BIT_ERR);
- qm->err_ini.open_axi_master_ooo = hpre_open_master_ooo;
qm->err_ini.get_dev_hw_err_status = hpre_get_hw_err_status;
qm->err_ini.clear_dev_hw_err_status = hpre_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR |
+ HPRE_OOO_ECC_2BIT_ERR;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
+ qm->err_ini.err_info.acpi_rst = "HRST";
+
+ qm->err_ini.hw_err_disable = hpre_hw_error_disable;
+ qm->err_ini.hw_err_enable = hpre_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = hpre_set_user_domain_and_cache;
qm->err_ini.log_dev_hw_err = hpre_log_hw_error;
+ qm->err_ini.open_axi_master_ooo = hpre_open_axi_master_ooo;
+ qm->err_ini.err_info.msi_wr_port = HPRE_WR_MSI_PORT;
- ret = hpre_set_user_domain_and_cache(hpre);
+ ret = qm->err_ini.set_usr_domain_cache(qm);
if (ret)
return ret;
- hpre_hw_err_init(hpre);
+ hisi_qm_dev_err_init(qm);
return 0;
}
@@ -914,10 +831,9 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
if (!hpre)
return -ENOMEM;
-
- pci_set_drvdata(pdev, hpre);
-
qm = &hpre->qm;
+ qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
+
ret = hpre_qm_pre_init(qm, pdev);
if (ret)
return ret;
@@ -929,7 +845,7 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (pdev->is_physfn) {
- ret = hpre_pf_probe_init(hpre);
+ ret = hpre_pf_probe_init(qm);
if (ret) {
pci_err(pdev, "Failed to init pf probe (%d)!\n", ret);
goto err_with_qm_init;
@@ -947,26 +863,35 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_with_err_init;
}
- ret = hpre_debugfs_init(hpre);
+ ret = hpre_debugfs_init(qm);
if (ret)
pci_warn(pdev, "init debugfs fail!\n");
- hpre_add_to_list(hpre);
+ hisi_qm_add_to_list(qm, &hpre_devices);
ret = hpre_algs_register();
if (ret < 0) {
- hpre_remove_from_list(hpre);
pci_err(pdev, "fail to register algs to crypto!\n");
goto err_with_qm_start;
}
+
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_with_crypto_register;
+ }
+
return 0;
+err_with_crypto_register:
+ hpre_algs_unregister();
+
err_with_qm_start:
+ hisi_qm_del_from_list(qm, &hpre_devices);
hisi_qm_stop(qm, QM_NORMAL);
err_with_err_init:
- if (pdev->is_physfn)
- hpre_hw_error_disable(hpre);
+ hisi_qm_dev_err_uninit(qm);
err_with_qm_init:
hisi_qm_uninit(qm);
@@ -974,627 +899,51 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
-static int hpre_vf_q_assign(struct hpre *hpre, int num_vfs)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 qp_num = qm->qp_num;
- int q_num, remain_q_num, i;
- u32 q_base = qp_num;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_q_num - qp_num;
- /* If remain queues not enough, return error. */
- if (remain_q_num < num_vfs)
- return -EINVAL;
-
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, (u32)q_num);
- if (ret)
- return ret;
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int hpre_clear_vft_config(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- u32 num_vfs = hpre->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
- hpre->num_vfs = 0;
-
- return 0;
-}
-
-static int hpre_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
- if (pre_existing_vfs) {
- pci_err(pdev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HPRE_VF_NUM);
- ret = hpre_vf_q_assign(hpre, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hpre->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- pci_err(pdev, "Can't enable VF!\n");
- hpre_clear_vft_config(hpre);
- return ret;
- }
- return num_vfs;
-}
-
-static int hpre_try_frozen_vfs(struct pci_dev *pdev)
-{
- int ret = 0;
- struct hpre *hpre, *vf_hpre;
- struct pci_dev *dev;
-
- /* Try to frozen all the VFs as disable SRIOV */
- mutex_lock(&hpre_list_lock);
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = hpre->qm.pdev;
- if (dev == pdev)
- continue;
- if (pci_physfn(dev) == pdev) {
- vf_hpre = pci_get_drvdata(dev);
- ret = hisi_qm_frozen(&vf_hpre->qm);
- if (ret)
- goto frozen_fail;
- }
- }
-
-frozen_fail:
- mutex_unlock(&hpre_list_lock);
- return ret;
-}
-
-static int hpre_sriov_disable(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- pci_err(pdev, "Failed to disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- /* While VF is in used, SRIOV cannot be disabled.
- * However, there is a risk that the behavior is uncertain if the
- * device is in hardware resetting.
- */
- if (hpre_try_frozen_vfs(pdev)) {
- dev_err(&pdev->dev,
- "Uacce user space task is using its VF!\n");
- return -EBUSY;
- }
-
- /* remove in hpre_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
- return hpre_clear_vft_config(hpre);
-}
-
static int hpre_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs)
- return hpre_sriov_enable(pdev, num_vfs);
+ return hisi_qm_sriov_enable(pdev, num_vfs);
else
- return hpre_sriov_disable(pdev);
-}
-
-static void hpre_remove_wait_delay(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
-
- while (hisi_qm_frozen(&hpre->qm) ||
- ((qm->fun_type == QM_HW_PF) &&
- hpre_try_frozen_vfs(hpre->qm.pdev)))
- usleep_range(HPRE_USLEEP, HPRE_USLEEP);
- udelay(HPRE_WAIT_DELAY);
+ return hisi_qm_sriov_disable(pdev, &hpre_devices);
}
static void hpre_remove(struct pci_dev *pdev)
{
- struct hpre *hpre = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hpre->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+#ifdef CONFIG_CRYPTO_QM_UACCE
if (uacce_mode != UACCE_MODE_NOUACCE)
- hpre_remove_wait_delay(hpre);
-
+ hisi_qm_remove_wait_delay(qm, &hpre_devices);
+#endif
hpre_algs_unregister();
- hpre_remove_from_list(hpre);
- if (qm->fun_type == QM_HW_PF && hpre->num_vfs != 0)
- hpre_sriov_disable(pdev);
-
+ hisi_qm_del_from_list(qm, &hpre_devices);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num) {
+ ret = hisi_qm_sriov_disable(pdev, NULL);
+ if (ret) {
+ pci_err(pdev, "Disable SRIOV fail!\n");
+ return;
+ }
+ }
if (qm->fun_type == QM_HW_PF) {
hpre_cnt_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
-
- hpre_debugfs_exit(hpre);
+ debugfs_remove_recursive(qm->debug.debug_root);
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hpre_hw_error_disable(hpre);
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
}
-static void hpre_shutdown(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hpre->qm, QM_NORMAL);
-}
-
-static pci_ers_result_t hpre_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hisi_qm_process_dev_error(pdev);
-}
-
-static int hpre_vf_reset_prepare(struct pci_dev *pdev,
- enum qm_stop_reason stop_reason)
-{
- struct pci_dev *dev;
- struct hisi_qm *qm;
- struct hpre *hpre;
- int ret = 0;
-
- mutex_lock(&hpre_list_lock);
- if (pdev->is_physfn) {
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = hpre->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hpre->qm;
-
- ret = hisi_qm_stop(qm, stop_reason);
- if (ret)
- goto prepare_fail;
- }
- }
- }
-
-prepare_fail:
- mutex_unlock(&hpre_list_lock);
- return ret;
-}
-
-static int hpre_reset_prepare_rdy(struct hpre *hpre)
-{
- struct pci_dev *pdev = hpre->qm.pdev;
- struct hpre *hisi_hpre = pci_get_drvdata(pci_physfn(pdev));
- int delay = 0;
-
- while (test_and_set_bit(HPRE_RESET, &hisi_hpre->status)) {
- msleep(++delay);
- if (delay > HPRE_RESET_WAIT_TIMEOUT)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hpre_controller_reset_prepare(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hpre_reset_prepare_rdy(hpre);
- if (ret) {
- dev_err(&pdev->dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hpre_vf_reset_prepare(pdev, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop VFs!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop QM!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(&pdev->dev, "Fails to isolate hw err!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hpre_soft_reset(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value = 0;
- int ret;
- u32 val;
-
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- ret = hisi_qm_set_vf_mse(qm, HPRE_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable vf mse bit.\n");
- return ret;
- }
-
- ret = hisi_qm_set_msi(qm, HPRE_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable peh msi bit.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(MASTER_GLOBAL_CTRL_SHUTDOWN,
- hpre->qm.io_base + HPRE_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hpre->qm.io_base +
- HPRE_MASTER_TRANS_RETURN, val,
- (val == MASTER_TRANS_RETURN_RW),
- HPRE_REG_RD_INTVRL_US,
- HPRE_REG_RD_TMOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HPRE_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable pf mse bit.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "HRST",
- NULL, &value);
- if (ACPI_FAILURE(s)) {
- dev_err(dev, "NO controller reset method!\n");
- return -EIO;
- }
-
- if (value) {
- dev_err(dev, "Reset step %llu failed!\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hpre_vf_reset_done(struct pci_dev *pdev)
-{
- struct pci_dev *dev;
- struct hisi_qm *qm;
- struct hpre *hpre;
- int ret = 0;
-
- mutex_lock(&hpre_list_lock);
- list_for_each_entry(hpre, &hpre_list, list) {
- dev = hpre->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hpre->qm;
-
- ret = hisi_qm_restart(qm);
- if (ret)
- goto reset_fail;
- }
- }
-
-reset_fail:
- mutex_unlock(&hpre_list_lock);
- return ret;
-}
-
-static int hpre_controller_reset_done(struct hpre *hpre)
-{
- struct hisi_qm *qm = &hpre->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hisi_qm_set_msi(qm, HPRE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Fails to enable peh msi bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HPRE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Fails to enable pf mse bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_vf_mse(qm, HPRE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Fails to enable vf mse bit!\n");
- return ret;
- }
-
- ret = hpre_set_user_domain_and_cache(hpre);
- if (ret)
- return ret;
-
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(&pdev->dev, "Failed to start QM!\n");
- return ret;
- }
-
- if (hpre->num_vfs)
- hpre_vf_q_assign(hpre, hpre->num_vfs);
-
- ret = hpre_vf_reset_done(pdev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to start VFs!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hpre_hw_err_init(hpre);
-
- return 0;
-}
-
-static int hpre_controller_reset(struct hpre *hpre)
-{
- struct device *dev = &hpre->qm.pdev->dev;
- int ret;
-
- dev_info(dev, "Controller resetting...\n");
-
- ret = hpre_controller_reset_prepare(hpre);
- if (ret)
- return ret;
-
- ret = hpre_soft_reset(hpre);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d)\n", ret);
- return ret;
- }
-
- ret = hpre_controller_reset_done(hpre);
- if (ret)
- return ret;
-
- clear_bit(HPRE_RESET, &hpre->status);
- dev_info(dev, "Controller reset complete\n");
-
- return 0;
-}
-
-static pci_ers_result_t hpre_slot_reset(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- int ret;
-
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_RECOVERED;
-
- dev_info(&pdev->dev, "Requesting reset due to PCI error\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- /* reset hpre controller */
- ret = hpre_controller_reset(hpre);
- if (ret) {
- dev_err(&pdev->dev, "hpre controller reset failed (%d)\n",
- ret);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void hpre_set_hw_error(struct hpre *hisi_hpre, bool enable)
-{
- struct pci_dev *pdev = hisi_hpre->qm.pdev;
- struct hpre *hpre = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &hpre->qm;
-
- if (qm->fun_type == QM_HW_VF)
- return;
-
- if (enable) {
- hisi_qm_hw_error_init(qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- hpre_hw_error_enable(hpre);
- } else {
- hisi_qm_hw_error_uninit(qm);
- hpre_hw_error_disable(hpre);
- }
-}
-
-static int hpre_get_hw_error_status(struct hpre *hpre)
-{
- u32 err_sts;
-
- err_sts = readl(hpre->qm.io_base + HPRE_HAC_INT_STATUS) &
- (HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR);
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-/* check the interrupt is ecc-zbit error or not */
-static int hpre_check_hw_error(struct hpre *hisi_hpre)
-{
- struct pci_dev *pdev = hisi_hpre->qm.pdev;
- struct hpre *hpre = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &hpre->qm;
- int ret;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- ret = hisi_qm_get_hw_error_status(qm);
- if (ret)
- return ret;
-
- /* Now the ecc-2bit is ce_err, so this func is always return 0 */
- return hpre_get_hw_error_status(hpre);
-}
-
-static void hpre_reset_prepare(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hpre->qm;
- struct device *dev = &pdev->dev;
- u32 delay = 0;
- int ret;
-
- hpre_set_hw_error(hpre, HPRE_HW_ERROR_IRQ_DISABLE);
-
- while (hpre_check_hw_error(hpre)) {
- msleep(++delay);
- if (delay > HPRE_RESET_WAIT_TIMEOUT)
- return;
- }
-
- ret = hpre_reset_prepare_rdy(hpre);
- if (ret) {
- dev_err(dev, "FLR not ready!\n");
- return;
- }
-
- ret = hpre_vf_reset_prepare(pdev, QM_FLR);
- if (ret) {
- dev_err(&pdev->dev, "Fails to prepare reset!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop QM!\n");
- return;
- }
-
- dev_info(dev, "FLR resetting...\n");
-}
-
-static bool hpre_flr_reset_complete(struct pci_dev *pdev)
-{
- struct pci_dev *pf_pdev = pci_physfn(pdev);
- struct hpre *hpre = pci_get_drvdata(pf_pdev);
- struct device *dev = &hpre->qm.pdev->dev;
- u32 id;
-
- pci_read_config_dword(hpre->qm.pdev, PCI_COMMAND, &id);
- if (id == HPRE_PCI_COMMAND_INVALID) {
- dev_err(dev, "Device HPRE can not be used!\n");
- return false;
- }
-
- clear_bit(HPRE_RESET, &hpre->status);
- return true;
-}
-
-static void hpre_reset_done(struct pci_dev *pdev)
-{
- struct hpre *hpre = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hpre->qm;
- struct device *dev = &pdev->dev;
- int ret;
-
- hpre_set_hw_error(hpre, HPRE_HW_ERROR_IRQ_ENABLE);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- return;
- }
-
- if (pdev->is_physfn) {
- ret = hpre_set_user_domain_and_cache(hpre);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- goto flr_done;
- }
-
- hpre_hw_err_init(hpre);
-
- if (hpre->num_vfs)
- hpre_vf_q_assign(hpre, hpre->num_vfs);
-
- ret = hpre_vf_reset_done(pdev);
- if (ret) {
- dev_err(&pdev->dev, "Failed to start VFs!\n");
- return;
- }
- }
-
-flr_done:
- if (hpre_flr_reset_complete(pdev))
- dev_info(dev, "FLR reset complete\n");
-}
-
static const struct pci_error_handlers hpre_err_handler = {
- .error_detected = hpre_error_detected,
- .slot_reset = hpre_slot_reset,
+ .error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
#ifdef CONFIG_CRYPTO_QM_UACCE
- .reset_prepare = hpre_reset_prepare,
- .reset_done = hpre_reset_done,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
#endif
};
@@ -1605,7 +954,7 @@ static void hpre_reset_done(struct pci_dev *pdev)
.remove = hpre_remove,
.sriov_configure = hpre_sriov_configure,
.err_handler = &hpre_err_handler,
- .shutdown = hpre_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hpre_register_debugfs(void)
@@ -1618,20 +967,19 @@ static void hpre_register_debugfs(void)
hpre_debugfs_root = NULL;
}
-static void hpre_unregister_debugfs(void)
-{
- debugfs_remove_recursive(hpre_debugfs_root);
-}
-
static int __init hpre_init(void)
{
int ret;
+ INIT_LIST_HEAD(&hpre_devices.list);
+ mutex_init(&hpre_devices.lock);
+ hpre_devices.check = NULL;
+
hpre_register_debugfs();
ret = pci_register_driver(&hpre_pci_driver);
if (ret) {
- hpre_unregister_debugfs();
+ debugfs_remove_recursive(hpre_debugfs_root);
pr_err("hpre: can't register hisi hpre driver.\n");
}
@@ -1641,7 +989,7 @@ static int __init hpre_init(void)
static void __exit hpre_exit(void)
{
pci_unregister_driver(&hpre_pci_driver);
- hpre_unregister_debugfs();
+ debugfs_remove_recursive(hpre_debugfs_root);
}
module_init(hpre_init);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 4bd7739..e89a770 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <asm/page.h>
+#include <linux/acpi.h>
+#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
@@ -117,7 +119,7 @@
#define QM_ABNORMAL_INT_MASK 0x100004
#define QM_HW_ERROR_IRQ_DISABLE GENMASK(12, 0)
#define QM_ABNORMAL_INT_STATUS 0x100008
-#define QM_ABNORMAL_INT_SET 0x10000c
+#define QM_PF_ABNORMAL_INT_SET 0x10000c
#define QM_ABNORMAL_INF00 0x100010
#define QM_FIFO_OVERFLOW_TYPE 0xc0
#define QM_FIFO_OVERFLOW_VF 0x3f
@@ -167,17 +169,30 @@
#define TASK_TIMEOUT 10000
#define WAIT_PERIOD 20
-#define MAX_WAIT_COUNTS 1000
#define WAIT_PERIOD_US_MAX 200
#define WAIT_PERIOD_US_MIN 100
-#define MAX_WAIT_TASK_COUNTS 10
-
-#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
+#define REMOVE_WAIT_DELAY 10
+#define MAX_WAIT_COUNTS 1000
+#define DELAY_PERIOD_MS 100
+#define QM_DEV_RESET_STATUS 0
+#define QM_RESET_WAIT_TIMEOUT 400
+#define QM_PCI_COMMAND_INVALID 0xFFFFFFFF
+#define MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
+#define MASTER_TRANS_RETURN_RW 3
+#define MASTER_TRANS_RETURN 0x300150
+#define MASTER_GLOBAL_CTRL 0x300000
+#define QM_REG_RD_INTVRL_US 10
+#define QM_REG_RD_TMOUT_US 1000
+#define AM_CFG_PORT_RD_EN 0x300018
#define AM_CFG_PORT_WR_EN 0x30001C
-#define AM_CFG_PORT_WR_EN_VALUE 0xFFFF
+#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
#define AM_ROB_ECC_INT_STS 0x300104
#define ROB_ECC_ERR_MULTPL BIT(1)
+#define QM_DBG_READ_LEN 256
+#define QM_DBG_WRITE_LEN 1024
+#define QM_DBG_SHOW_SHIFT 16
+
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
@@ -219,6 +234,12 @@ enum vft_type {
CQC_VFT,
};
+struct hisi_qm_resource {
+ struct hisi_qm *qm;
+ int distance;
+ struct list_head list;
+};
+
struct hisi_qm_hw_ops {
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
void (*qm_db)(struct hisi_qm *qm, u16 qn,
@@ -237,11 +258,6 @@ struct hisi_qm_hw_ops {
[QM_STATE] = "qm_state",
};
-struct hisi_qm_hw_error {
- u32 int_msk;
- const char *msg;
-};
-
static const struct hisi_qm_hw_error qm_hw_error[] = {
{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
@@ -1115,13 +1131,20 @@ static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
{
u32 irq_enable = ce | nfe | fe | msi;
u32 irq_unmask = ~irq_enable;
+ u32 error_status;
qm->error_mask = ce | nfe | fe;
qm->msi_mask = msi;
/* clear QM hw residual error source */
- writel(QM_ABNORMAL_INT_SOURCE_CLR, qm->io_base +
- QM_ABNORMAL_INT_SOURCE);
+ error_status = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
+ if (!(qm->hw_status & BIT(QM_DEV_RESET_STATUS)) ||
+ !error_status)
+ error_status = QM_ABNORMAL_INT_SOURCE_CLR;
+ else
+ error_status &= qm->error_mask;
+
+ writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
@@ -1190,9 +1213,7 @@ static pci_ers_result_t qm_hw_error_handle_v2(struct hisi_qm *qm)
error_status = qm->error_mask & tmp;
if (error_status) {
if (error_status & QM_ECC_MBIT)
- qm->err_ini.is_qm_ecc_mbit = 1;
- else
- qm->err_ini.is_qm_ecc_mbit = 0;
+ qm->err_ini.err_info.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
return PCI_ERS_RESULT_NEED_RESET;
@@ -1513,7 +1534,8 @@ static void qm_qp_has_no_task(struct hisi_qp *qp)
int i = 0;
int ret;
- if (qp->qm->err_ini.is_qm_ecc_mbit || qp->qm->err_ini.is_dev_ecc_mbit)
+ if (qp->qm->err_ini.err_info.is_qm_ecc_mbit ||
+ qp->qm->err_ini.err_info.is_dev_ecc_mbit)
return;
addr = qm_ctx_alloc(qp->qm, size, &dma_addr);
@@ -1967,6 +1989,74 @@ static int qm_unregister_uacce(struct hisi_qm *qm)
#endif
/**
+ * hisi_qm_frozen() - Try to freeze the QM to cut off continuous queue
+ * requests. If there is a user on the QM, return failure without doing
+ * anything.
+ * @qm: The qm to be frozen.
+ *
+ * This function freezes the QM so that SRIOV can then be disabled.
+ */
+static int hisi_qm_frozen(struct hisi_qm *qm)
+{
+ int count, i;
+
+ down_write(&qm->qps_lock);
+ for (i = 0, count = 0; i < qm->qp_num; i++)
+ if (!qm->qp_array[i])
+ count++;
+
+ if (count == qm->qp_num) {
+ bitmap_set(qm->qp_bitmap, 0, qm->qp_num);
+ } else {
+ up_write(&qm->qps_lock);
+ return -EBUSY;
+ }
+ up_write(&qm->qps_lock);
+
+ return 0;
+}
+
+static int qm_try_frozen_vfs(struct pci_dev *pdev,
+ struct hisi_qm_list *qm_list)
+{
+ struct hisi_qm *qm, *vf_qm;
+ struct pci_dev *dev;
+ int ret = 0;
+
+ if (!qm_list || !pdev)
+ return -EINVAL;
+
+ /* Try to freeze all the VFs before disabling SRIOV */
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = qm->pdev;
+ if (dev == pdev)
+ continue;
+ if (pci_physfn(dev) == pdev) {
+ vf_qm = pci_get_drvdata(dev);
+ ret = hisi_qm_frozen(vf_qm);
+ if (ret)
+ goto frozen_fail;
+ }
+ }
+
+frozen_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+void hisi_qm_remove_wait_delay(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ while (hisi_qm_frozen(qm) ||
+ ((qm->fun_type == QM_HW_PF) &&
+ qm_try_frozen_vfs(qm->pdev, qm_list))) {
+ msleep(WAIT_PERIOD);
+ }
+ udelay(REMOVE_WAIT_DELAY);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_remove_wait_delay);
+
+/**
* hisi_qm_init() - Initialize configures about qm.
* @qm: The qm needed init.
*
@@ -2107,32 +2197,21 @@ void hisi_qm_uninit(struct hisi_qm *qm)
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
/**
- * hisi_qm_frozen() - Try to froze QM to cut continuous queue request. If
- * there is user on the QM, return failure without doing anything.
- * @qm: The qm needed to be fronzen.
+ * hisi_qm_dev_shutdown() - Shut down the device.
+ * @pdev: The device to be shut down.
*
- * This function frozes QM, then we can do SRIOV disabling.
+ * This function stops the qm when the OS shuts down or reboots.
*/
-int hisi_qm_frozen(struct hisi_qm *qm)
+void hisi_qm_dev_shutdown(struct pci_dev *pdev)
{
- int count, i;
-
- down_write(&qm->qps_lock);
- for (i = 0, count = 0; i < qm->qp_num; i++)
- if (!qm->qp_array[i])
- count++;
-
- if (count == qm->qp_num) {
- bitmap_set(qm->qp_bitmap, 0, qm->qp_num);
- } else {
- up_write(&qm->qps_lock);
- return -EBUSY;
- }
- up_write(&qm->qps_lock);
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
- return 0;
+ ret = hisi_qm_stop(qm, QM_NORMAL);
+ if (ret)
+ dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
}
-EXPORT_SYMBOL_GPL(hisi_qm_frozen);
+EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
/**
* hisi_qm_get_vft() - Get vft from a qm.
@@ -2174,7 +2253,7 @@ int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
* Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
* (VF function number 0x2)
*/
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
+static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
u32 number)
{
u32 max_q_num = qm->ctrl_q_num;
@@ -2185,7 +2264,6 @@ int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_vft);
static void qm_init_eq_aeq_status(struct hisi_qm *qm)
{
@@ -2483,6 +2561,28 @@ static int qm_stop_started_qp(struct hisi_qm *qm)
}
/**
+ * qm_clear_queues() - Clear memory of queues in a qm.
+ * @qm: The qm whose queue memory needs clearing.
+ *
+ * This function clears the memory of all queues in a qm. An accelerator
+ * reset can use this to clear the queues.
+ */
+static void qm_clear_queues(struct hisi_qm *qm)
+{
+ struct hisi_qp *qp;
+ int i;
+
+ for (i = 0; i < qm->qp_num; i++) {
+ qp = qm->qp_array[i];
+ if (qp)
+ /* device state uses the last page */
+ memset(qp->qdma.va, 0, qp->qdma.size - PAGE_SIZE);
+ }
+
+ memset(qm->qdma.va, 0, qm->qdma.size);
+}
+
+/**
* hisi_qm_stop() - Stop a qm.
* @qm: The qm which will be stopped.
* @r: The reason to stop qm.
@@ -2528,7 +2628,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
}
- hisi_qm_clear_queues(qm);
+ qm_clear_queues(qm);
atomic_set(&qm->status.flags, QM_STOP);
err_unlock:
@@ -2589,7 +2689,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
goto failed_to_create;
}
- qm_regs = debugfs_create_file("qm_regs", 0444, qm->debug.qm_d, qm,
+ qm_regs = debugfs_create_file("regs", 0444, qm->debug.qm_d, qm,
&qm_regs_fops);
if (IS_ERR(qm_regs)) {
ret = -ENOENT;
@@ -2605,7 +2705,7 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
/**
- * hisi_qm_hw_error_init() - Configure qm hardware error report method.
+ * qm_hw_error_init() - Configure qm hardware error report method.
* @qm: The qm which we want to configure.
* @ce: Correctable error configure.
* @nfe: Non-fatal error configure.
@@ -2622,9 +2722,13 @@ int hisi_qm_debug_init(struct hisi_qm *qm)
* related report methods. Error report will be masked if related error bit
* does not configure.
*/
-void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi)
+static void qm_hw_error_init(struct hisi_qm *qm)
{
+ u32 nfe = qm->err_ini.err_info.nfe;
+ u32 msi = qm->err_ini.err_info.msi;
+ u32 ce = qm->err_ini.err_info.ce;
+ u32 fe = qm->err_ini.err_info.fe;
+
if (!qm->ops->hw_error_init) {
dev_err(&qm->pdev->dev,
"QM version %d doesn't support hw error handling!\n",
@@ -2634,9 +2738,8 @@ void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
qm->ops->hw_error_init(qm, ce, nfe, fe, msi);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_init);
-void hisi_qm_hw_error_uninit(struct hisi_qm *qm)
+static void qm_hw_error_uninit(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_uninit) {
dev_err(&qm->pdev->dev,
@@ -2647,15 +2750,14 @@ void hisi_qm_hw_error_uninit(struct hisi_qm *qm)
qm->ops->hw_error_uninit(qm);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_uninit);
/**
- * hisi_qm_hw_error_handle() - Handle qm non-fatal hardware errors.
+ * qm_hw_error_handle() - Handle qm non-fatal hardware errors.
* @qm: The qm which has non-fatal hardware errors.
*
* Accelerators use this function to handle qm non-fatal hardware errors.
*/
-pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
+static pci_ers_result_t qm_hw_error_handle(struct hisi_qm *qm)
{
if (!qm->ops->hw_error_handle) {
dev_err(&qm->pdev->dev,
@@ -2666,104 +2768,19 @@ pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm)
return qm->ops->hw_error_handle(qm);
}
-EXPORT_SYMBOL_GPL(hisi_qm_hw_error_handle);
-
-/**
- * hisi_qm_clear_queues() - Clear memory of queues in a qm.
- * @qm: The qm which memory needs clear.
- *
- * This function clears all queues memory in a qm. Reset of accelerator can
- * use this to clear queues.
- */
-void hisi_qm_clear_queues(struct hisi_qm *qm)
-{
- struct hisi_qp *qp;
- int i;
-
- for (i = 0; i < qm->qp_num; i++) {
- qp = qm->qp_array[i];
- if (qp)
- /* device state use the last page */
- memset(qp->qdma.va, 0, qp->qdma.size - PAGE_SIZE);
- }
-
- memset(qm->qdma.va, 0, qm->qdma.size);
-}
-EXPORT_SYMBOL_GPL(hisi_qm_clear_queues);
-
-/**
- * hisi_qm_get_hw_version() - Get hardware version of a qm.
- * @pdev: The device which hardware version we want to get.
- *
- * This function gets the hardware version of a qm. Return QM_HW_UNKNOWN
- * if the hardware version is not supported.
- */
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev)
-{
- switch (pdev->revision) {
- case QM_HW_V1:
- case QM_HW_V2:
- return pdev->revision;
- default:
- return QM_HW_UNKNOWN;
- }
-}
-EXPORT_SYMBOL_GPL(hisi_qm_get_hw_version);
-int hisi_qm_get_hw_error_status(struct hisi_qm *qm)
+static int qm_get_hw_error_status(struct hisi_qm *qm)
{
u32 err_sts;
- err_sts = readl(qm->io_base + QM_ABNORMAL_INT_STATUS) &
- QM_ECC_MBIT;
+ err_sts = readl(qm->io_base + QM_ABNORMAL_INT_STATUS) & QM_ECC_MBIT;
if (err_sts)
return err_sts;
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_get_hw_error_status);
-
-static pci_ers_result_t hisi_qm_dev_err_handle(struct hisi_qm *qm)
-{
- u32 err_sts;
-
- if (!qm->err_ini.get_dev_hw_err_status ||
- !qm->err_ini.log_dev_hw_err)
- return PCI_ERS_RESULT_RECOVERED;
-
- /* read err sts */
- err_sts = qm->err_ini.get_dev_hw_err_status(qm);
- if (err_sts) {
- if (err_sts & qm->err_ini.ecc_2bits_mask)
- qm->err_ini.is_dev_ecc_mbit = 1;
- else
- qm->err_ini.is_dev_ecc_mbit = 0;
-
- qm->err_ini.log_dev_hw_err(qm, err_sts);
- return PCI_ERS_RESULT_NEED_RESET;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev)
-{
- struct hisi_qm *qm = pci_get_drvdata(pdev);
- pci_ers_result_t qm_ret, dev_ret;
-
- /* log qm error */
- qm_ret = hisi_qm_hw_error_handle(qm);
-
- /* log device error */
- dev_ret = hisi_qm_dev_err_handle(qm);
-
- return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
- dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
- PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
-}
-EXPORT_SYMBOL_GPL(hisi_qm_process_dev_error);
-int hisi_qm_reg_test(struct hisi_qm *qm)
+static int qm_reg_test(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
int ret;
@@ -2782,16 +2799,13 @@ int hisi_qm_reg_test(struct hisi_qm *qm)
ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
(val == PCI_VENDOR_ID_HUAWEI),
POLL_PERIOD, POLL_TIMEOUT);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");
- return ret;
- }
return ret;
}
-EXPORT_SYMBOL_GPL(hisi_qm_reg_test);
-int hisi_qm_set_pf_mse(struct hisi_qm *qm, bool set)
+static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
{
struct pci_dev *pdev = qm->pdev;
u16 cmd;
@@ -2814,9 +2828,8 @@ int hisi_qm_set_pf_mse(struct hisi_qm *qm, bool set)
return -ETIMEDOUT;
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_pf_mse);
-int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set)
+static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
{
struct pci_dev *pdev = qm->pdev;
u16 sriov_ctrl;
@@ -2833,8 +2846,8 @@ int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set)
for (i = 0; i < MAX_WAIT_COUNTS; i++) {
pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
- if (set == ((sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
- PEH_SRIOV_CTRL_VF_MSE_SHIFT))
+ if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
+ PEH_SRIOV_CTRL_VF_MSE_SHIFT)
return 0;
udelay(1);
@@ -2842,9 +2855,8 @@ int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set)
return -ETIMEDOUT;
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_vf_mse);
-int hisi_qm_set_msi(struct hisi_qm *qm, bool set)
+static int qm_set_msi(struct hisi_qm *qm, bool set)
{
struct pci_dev *pdev = qm->pdev;
@@ -2854,7 +2866,8 @@ int hisi_qm_set_msi(struct hisi_qm *qm, bool set)
} else {
pci_write_config_dword(pdev, pdev->msi_cap +
PCI_MSI_MASK_64, PEH_MSI_DISABLE);
- if (qm->err_ini.is_qm_ecc_mbit || qm->err_ini.is_dev_ecc_mbit)
+ if (qm->err_ini.err_info.is_qm_ecc_mbit ||
+ qm->err_ini.err_info.is_dev_ecc_mbit)
return 0;
mdelay(1);
@@ -2864,64 +2877,768 @@ int hisi_qm_set_msi(struct hisi_qm *qm, bool set)
return 0;
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_msi);
-void hisi_qm_set_ecc(struct hisi_qm *qm)
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
{
- u32 nfe_enb;
+ int i;
- if ((!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit) ||
- (qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.inject_dev_hw_err) ||
- (qm->err_ini.is_dev_ecc_mbit && qm->err_ini.inject_dev_hw_err))
+ if (!qps || qp_num < 0)
return;
- if (qm->err_ini.inject_dev_hw_err)
- qm->err_ini.inject_dev_hw_err(qm);
- else {
- nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
- writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
- qm->io_base + QM_RAS_NFE_ENABLE);
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
- qm->err_ini.is_qm_ecc_mbit = 1;
+ for (i = qp_num - 1; i >= 0; i--)
+ hisi_qm_release_qp(qps[i]);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
+
+static void free_list(struct list_head *head)
+{
+ struct hisi_qm_resource *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, head, list) {
+ list_del(&res->list);
+ kfree(res);
}
}
-EXPORT_SYMBOL_GPL(hisi_qm_set_ecc);
-void hisi_qm_restart_prepare(struct hisi_qm *qm)
+static int hisi_qm_sort_devices(int node, struct list_head *head,
+ struct hisi_qm_list *qm_list)
{
- if (!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit)
- return;
+ struct hisi_qm_resource *res, *tmp;
+ struct hisi_qm *qm;
+ struct list_head *n;
+ struct device *dev;
+ int dev_node = 0;
+
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = &qm->pdev->dev;
+
+ if (IS_ENABLED(CONFIG_NUMA)) {
+ dev_node = dev->numa_node;
+ if (dev_node < 0)
+ dev_node = 0;
+ }
- /* close AM wr msi port */
- writel(qm->err_ini.qm_wr_port, qm->io_base + AM_CFG_PORT_WR_EN);
+ if (qm_list->check && !qm_list->check(qm))
+ continue;
+
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
- /* clear dev ecc 2bit error source */
- if (qm->err_ini.clear_dev_hw_err_status) {
- qm->err_ini.clear_dev_hw_err_status(qm,
- qm->err_ini.ecc_2bits_mask);
+ res->qm = qm;
+ res->distance = node_distance(dev_node, node);
+ n = head;
+ list_for_each_entry(tmp, head, list) {
+ if (res->distance < tmp->distance) {
+ n = &tmp->list;
+ break;
+ }
+ }
+ list_add_tail(&res->list, n);
}
- /* clear QM ecc mbit error source */
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ return 0;
+}
- /* clear AM Reorder Buffer ecc mbit source */
- writel(ROB_ECC_ERR_MULTPL, qm->io_base + AM_ROB_ECC_INT_STS);
+int hisi_qm_alloc_qps_node(int node, struct hisi_qm_list *qm_list,
+ struct hisi_qp **qps, int qp_num, u8 alg_type)
+{
+ struct hisi_qm_resource *tmp;
+ int ret = -ENODEV;
+ LIST_HEAD(head);
+ int i;
- if (qm->err_ini.open_axi_master_ooo)
- qm->err_ini.open_axi_master_ooo(qm);
+ if (!qps || !qm_list || qp_num <= 0)
+ return -EINVAL;
+
+ mutex_lock(&qm_list->lock);
+ if (hisi_qm_sort_devices(node, &head, qm_list)) {
+ mutex_unlock(&qm_list->lock);
+ goto err;
+ }
+
+ list_for_each_entry(tmp, &head, list) {
+ for (i = 0; i < qp_num; i++) {
+ qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
+ if (IS_ERR(qps[i])) {
+ hisi_qm_free_qps(qps, i);
+ break;
+ }
+ }
+
+ if (i == qp_num) {
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&qm_list->lock);
+ if (ret)
+ pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
+ node, alg_type, qp_num);
+
+err:
+ free_list(&head);
+ return ret;
}
-EXPORT_SYMBOL_GPL(hisi_qm_restart_prepare);
+EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
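As a caller-side sketch of the two helpers above, a driver allocates a group of qps as close as possible to a NUMA node and releases them together on teardown; the qp count, the alg_type of 0 and the use of hpre_devices are assumptions for illustration, not part of this patch:

	struct hisi_qp *qps[2] = {NULL};
	int node = cpu_to_node(smp_processor_id());
	int ret;

	/* walks the registered devices, nearest NUMA node first */
	ret = hisi_qm_alloc_qps_node(node, &hpre_devices, qps, 2, 0);
	if (ret)
		return ret;	/* no single device could supply both qps */

	/* ... start and use qps[0] / qps[1] ... */

	hisi_qm_free_qps(qps, 2);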
-void hisi_qm_restart_done(struct hisi_qm *qm)
+static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
{
- if (!qm->err_ini.is_qm_ecc_mbit && !qm->err_ini.is_dev_ecc_mbit)
- return;
+ u32 q_num, i, remain_q_num;
+ u32 q_base = qm->qp_num;
+ int ret;
+
+ if (!num_vfs)
+ return -EINVAL;
+
+ remain_q_num = qm->ctrl_q_num - qm->qp_num;
+
+ /* If remain queues not enough, return error. */
+ if (qm->ctrl_q_num < qm->qp_num || remain_q_num < num_vfs)
+ return -EINVAL;
+
+ q_num = remain_q_num / num_vfs;
+ for (i = 1; i <= num_vfs; i++) {
+ if (i == num_vfs)
+ q_num += remain_q_num % num_vfs;
+ ret = hisi_qm_set_vft(qm, i, q_base, q_num);
+ if (ret)
+ return ret;
+ q_base += q_num;
+ }
+
+ return 0;
+}
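A worked example of the split above, with assumed numbers: if ctrl_q_num = 1024 and the PF keeps qp_num = 256, then remain_q_num = 768. With num_vfs = 5, each VF is granted 768 / 5 = 153 queues, and the last VF also absorbs the remainder (768 % 5 = 3) for a total of 156, so all 768 spare queues are handed out contiguously starting at q_base = 256.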
+
+static int qm_clear_vft_config(struct hisi_qm *qm)
+{
+ int ret;
+ u32 i;
+
+ for (i = 1; i <= qm->vfs_num; i++) {
+ ret = hisi_qm_set_vft(qm, i, 0, 0);
+ if (ret)
+ return ret;
+ }
+ qm->vfs_num = 0;
+
+ return 0;
+}
+
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int pre_existing_vfs, num_vfs, ret;
+
+ pre_existing_vfs = pci_num_vf(pdev);
+ if (pre_existing_vfs) {
+ pci_err(pdev,
+ "Can't enable VF. Please disable pre-enabled VFs!\n");
+ return 0;
+ }
+
+ num_vfs = min_t(int, max_vfs, QM_MAX_VFS_NUM);
+ ret = qm_vf_q_assign(qm, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't assign queues for VF!\n");
+ return ret;
+ }
+
+ qm->vfs_num = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ pci_err(pdev, "Can't enable VF!\n");
+ qm_clear_vft_config(qm);
+ return ret;
+ }
+
+ pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
+
+ return num_vfs;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
+
+int hisi_qm_sriov_disable(struct pci_dev *pdev, struct hisi_qm_list *qm_list)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+
+ if (pci_vfs_assigned(pdev)) {
+ pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
+ return -EPERM;
+ }
+
+ /* While a VF is in use, SRIOV cannot be disabled.
+ * However, there is a risk that the behavior is uncertain if the
+ * device is undergoing a hardware reset.
+ */
+ if (qm_list && qm_try_frozen_vfs(pdev, qm_list)) {
+ pci_err(pdev, "Uacce user space task is using its VF!\n");
+ return -EBUSY;
+ }
+
+ /* The PCI driver's remove callback will be called to free VF resources */
+ pci_disable_sriov(pdev);
+ return qm_clear_vft_config(qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
+
+void hisi_qm_dev_err_init(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ if (pf_qm->fun_type == QM_HW_VF)
+ return;
+
+ qm_hw_error_init(pf_qm);
+ pf_qm->err_ini.hw_err_enable(pf_qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
+
+/**
+ * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
+ * @qm: The qm for which we want to do error uninitialization.
+ *
+ * Uninitialize QM and device error related configuration. It may be called
+ * by the PF or a VF; the caller should make the calling context explicit.
+ */
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+
+ if (pf_qm->fun_type == QM_HW_VF)
+ return;
+
+ qm_hw_error_uninit(pf_qm);
+ pf_qm->err_ini.hw_err_disable(pf_qm);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
+
+static pci_ers_result_t qm_dev_err_handle(struct hisi_qm *qm)
+{
+ u32 err_sts;
+
+ /* read err sts */
+ err_sts = qm->err_ini.get_dev_hw_err_status(qm);
+ if (err_sts) {
+ if (err_sts & qm->err_ini.err_info.ecc_2bits_mask)
+ qm->err_ini.err_info.is_dev_ecc_mbit = true;
+
+ qm->err_ini.log_dev_hw_err(qm, err_sts);
+ return PCI_ERS_RESULT_NEED_RESET;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ pci_ers_result_t qm_ret, dev_ret;
+
+ /* log qm error */
+ qm_ret = qm_hw_error_handle(qm);
+
+ /* log device error */
+ dev_ret = qm_dev_err_handle(qm);
+
+ return (qm_ret == PCI_ERS_RESULT_NEED_RESET ||
+ dev_ret == PCI_ERS_RESULT_NEED_RESET) ?
+ PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_RECOVERED;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_process_dev_error);
+
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_NONE;
+
+ pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return hisi_qm_process_dev_error(pdev);
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
+
+static int qm_vf_reset_prepare(struct pci_dev *pdev,
+ struct hisi_qm_list *qm_list,
+ enum qm_stop_reason stop_reason)
+{
+ struct pci_dev *dev;
+ struct hisi_qm *qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = qm->pdev;
+ if (dev == pdev)
+ continue;
+
+ if (pci_physfn(dev) == pdev) {
+ /* save VFs PCIe BAR configuration */
+ pci_save_state(dev);
+
+ ret = hisi_qm_stop(qm, stop_reason);
+ if (ret)
+ goto prepare_fail;
+ }
+ }
+
+prepare_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_reset_prepare_ready(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ int delay = 0;
+
+ while (test_and_set_bit(QM_DEV_RESET_STATUS, &pf_qm->hw_status)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qm_controller_reset_prepare(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset not ready!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(pdev, qm->qm_list, QM_SOFT_RESET);
+ if (ret) {
+ pci_err(pdev, "Fails to stop VFs!\n");
+ return ret;
+ }
+ }
+
+ ret = hisi_qm_stop(qm, QM_SOFT_RESET);
+ if (ret) {
+ pci_err(pdev, "Fails to stop QM!\n");
+ return ret;
+ }
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ if (qm->use_uacce) {
+ ret = uacce_hw_err_isolate(&qm->uacce);
+ if (ret) {
+ pci_err(pdev, "Fails to isolate hw err!\n");
+ return ret;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
+{
+ u32 nfe_enb = 0;
+
+ if (!qm->err_ini.err_info.is_dev_ecc_mbit &&
+ qm->err_ini.err_info.is_qm_ecc_mbit &&
+ qm->err_ini.close_axi_master_ooo) {
+
+ qm->err_ini.close_axi_master_ooo(qm);
+
+ } else if (qm->err_ini.err_info.is_dev_ecc_mbit &&
+ !qm->err_ini.err_info.is_qm_ecc_mbit &&
+ !qm->err_ini.close_axi_master_ooo) {
+
+ nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(QM_ECC_MBIT, qm->io_base + QM_PF_ABNORMAL_INT_SET);
+ }
+}
+
+static int qm_soft_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+ u32 val;
+
+ ret = qm_reg_test(qm);
+ if (ret)
+ return ret;
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable vf mse bit.\n");
+ return ret;
+ }
+ }
+
+ ret = qm_set_msi(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable peh msi bit.\n");
+ return ret;
+ }
+
+ qm_dev_ecc_mbit_handle(qm);
+
+ mdelay(DELAY_PERIOD_MS);
+
+ /* OOO register set and check */
+ writel(MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + MASTER_GLOBAL_CTRL);
+
+ /* If bus lock, reset chip */
+ ret = readl_relaxed_poll_timeout(qm->io_base + MASTER_TRANS_RETURN,
+ val, (val == MASTER_TRANS_RETURN_RW),
+ QM_REG_RD_INTVRL_US,
+ QM_REG_RD_TMOUT_US);
+ if (ret) {
+ pci_emerg(pdev, "Bus lock! Please reset system.\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, false);
+ if (ret) {
+ pci_err(pdev, "Fails to disable pf mse bit.\n");
+ return ret;
+ }
+
+ /* The reset related sub-control registers are not in PCI BAR */
+ if (ACPI_HANDLE(&pdev->dev)) {
+ unsigned long long value = 0;
+ acpi_status s;
+
+ s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
+ qm->err_ini.err_info.acpi_rst,
+ NULL, &value);
+ if (ACPI_FAILURE(s)) {
+ pci_err(pdev, "NO controller reset method!\n");
+ return -EIO;
+ }
+
+ if (value) {
+ pci_err(pdev, "Reset step %llu failed!\n", value);
+ return -EIO;
+ }
+ } else {
+ pci_err(pdev, "No reset method!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qm_vf_reset_done(struct pci_dev *pdev,
+ struct hisi_qm_list *qm_list)
+{
+ struct pci_dev *dev;
+ struct hisi_qm *qm;
+ int ret = 0;
+
+ mutex_lock(&qm_list->lock);
+ list_for_each_entry(qm, &qm_list->list, list) {
+ dev = qm->pdev;
+ if (dev == pdev)
+ continue;
+
+ if (pci_physfn(dev) == pdev) {
+ /* restore VFs PCIe BAR configuration */
+ pci_restore_state(dev);
+
+ ret = hisi_qm_restart(qm);
+ if (ret)
+ goto reset_fail;
+ }
+ }
+
+reset_fail:
+ mutex_unlock(&qm_list->lock);
+ return ret;
+}
+
+static int qm_get_dev_err_status(struct hisi_qm *qm)
+{
+ u32 err_sts;
+
+ err_sts = qm->err_ini.get_dev_hw_err_status(qm) &
+ qm->err_ini.err_info.ecc_2bits_mask;
+ if (err_sts)
+ return err_sts;
+
+ return 0;
+}
+
+static void hisi_qm_restart_prepare(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_ini.err_info.is_qm_ecc_mbit &&
+ !qm->err_ini.err_info.is_dev_ecc_mbit)
+ return;
+
+ value = readl(qm->io_base + AM_CFG_PORT_WR_EN);
+ writel(value & ~qm->err_ini.err_info.msi_wr_port,
+ qm->io_base + AM_CFG_PORT_WR_EN);
+
+ /* clear dev ecc 2bit error source if present */
+ value = qm_get_dev_err_status(qm);
+ if (value && qm->err_ini.clear_dev_hw_err_status)
+ qm->err_ini.clear_dev_hw_err_status(qm, value);
+
+ /* clear QM ecc mbit error source */
+ writel(QM_ECC_MBIT, qm->io_base +
+ QM_ABNORMAL_INT_SOURCE);
+
+ /* clear AM Reorder Buffer ecc mbit source */
+ writel(ROB_ECC_ERR_MULTPL, qm->io_base +
+ AM_ROB_ECC_INT_STS);
+
+ if (qm->err_ini.open_axi_master_ooo)
+ qm->err_ini.open_axi_master_ooo(qm);
+}
+
+static void hisi_qm_restart_done(struct hisi_qm *qm)
+{
+ u32 value;
+
+ if (!qm->err_ini.err_info.is_qm_ecc_mbit &&
+ !qm->err_ini.err_info.is_dev_ecc_mbit)
+ return;
+
+ value = readl(qm->io_base + AM_CFG_PORT_WR_EN);
+ value |= qm->err_ini.err_info.msi_wr_port;
+
+ writel(value, qm->io_base + AM_CFG_PORT_WR_EN);
+ qm->err_ini.err_info.is_qm_ecc_mbit = false;
+ qm->err_ini.err_info.is_dev_ecc_mbit = false;
+}
+
+static int qm_controller_reset_done(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ ret = qm_set_msi(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable peh msi bit!\n");
+ return ret;
+ }
+
+ ret = qm_set_pf_mse(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable pf mse bit!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_set_vf_mse(qm, true);
+ if (ret) {
+ pci_err(pdev, "Fails to enable vf mse bit!\n");
+ return ret;
+ }
+ }
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_restart_prepare(qm);
+
+ ret = hisi_qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ return ret;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_q_assign(qm, qm->vfs_num);
+ if (ret) {
+ pci_err(pdev, "Failed to assign queue!\n");
+ return ret;
+ }
+ }
+
+ ret = qm_vf_reset_done(pdev, qm->qm_list);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs!\n");
+ return -EPERM;
+ }
+
+ hisi_qm_dev_err_init(qm);
+
+ hisi_qm_restart_done(qm);
+
+ return 0;
+}
+
+int hisi_qm_controller_reset(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ int ret;
+
+ pci_info(pdev, "Controller resetting...\n");
+
+ ret = qm_controller_reset_prepare(qm);
+ if (ret)
+ return ret;
+
+ ret = qm_soft_reset(qm);
+ if (ret) {
+ pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ return ret;
+ }
+
+ ret = qm_controller_reset_done(qm);
+ if (ret)
+ return ret;
+
+ clear_bit(QM_DEV_RESET_STATUS, &qm->hw_status);
+ pci_info(pdev, "Controller reset complete\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_controller_reset);
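A minimal sketch of how a driver could drive this export from a RAS work item; the struct and field names follow the RDE driver further down, but the body is illustrative rather than part of this patch:

static void hisi_rde_ras_proc(struct work_struct *work)
{
	struct hisi_rde *hisi_rde = container_of(work, struct hisi_rde,
						 reset_work);

	/* recover the device after an uncorrectable hardware error */
	if (hisi_qm_controller_reset(&hisi_rde->qm))
		dev_err(&hisi_rde->qm.pdev->dev, "controller reset failed\n");
}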
+
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ if (pdev->is_virtfn)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ pci_info(pdev, "Requesting reset due to PCI error\n");
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ /* reset pcie device controller */
+ ret = hisi_qm_controller_reset(qm);
+ if (ret) {
+ pci_err(pdev, "controller reset failed (%d)\n", ret);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
+
+/* check whether the interrupt is an ecc-mbit error */
+static int qm_check_dev_error(struct hisi_qm *qm)
+{
+ struct pci_dev *pdev = qm->pdev;
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
+ int ret;
+
+ if (pf_qm->fun_type == QM_HW_VF)
+ return 0;
+
+ ret = qm_get_hw_error_status(pf_qm);
+ if (ret)
+ return ret;
+
+ return qm_get_dev_err_status(pf_qm);
+}
+
+void hisi_qm_reset_prepare(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ u32 delay = 0;
+ int ret;
+
+ hisi_qm_dev_err_uninit(qm);
+
+ while (qm_check_dev_error(qm)) {
+ msleep(++delay);
+ if (delay > QM_RESET_WAIT_TIMEOUT)
+ return;
+ }
+
+ ret = qm_reset_prepare_ready(qm);
+ if (ret) {
+ pci_err(pdev, "FLR not ready!\n");
+ return;
+ }
+
+ if (qm->vfs_num) {
+ ret = qm_vf_reset_prepare(pdev, qm->qm_list, QM_FLR);
+ if (ret) {
+ pci_err(pdev, "Fails to prepare reset!\n");
+ return;
+ }
+ }
+
+ ret = hisi_qm_stop(qm, QM_FLR);
+ if (ret) {
+ pci_err(pdev, "Fails to stop QM!\n");
+ return;
+ }
+
+ pci_info(pdev, "FLR resetting...\n");
+}
+EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
+
+static bool qm_flr_reset_complete(struct pci_dev *pdev)
+{
+ struct pci_dev *pf_pdev = pci_physfn(pdev);
+ struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
+ u32 id;
+
+ pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
+ if (id == QM_PCI_COMMAND_INVALID) {
+ pci_err(pdev, "Device can not be used!\n");
+ return false;
+ }
+
+ clear_bit(QM_DEV_RESET_STATUS, &qm->hw_status);
+ return true;
+}
+
+void hisi_qm_reset_done(struct pci_dev *pdev)
+{
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ int ret;
+
+ hisi_qm_dev_err_init(qm);
+
+ ret = hisi_qm_restart(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ goto flr_done;
+ }
+
+ if (qm->fun_type == QM_HW_PF) {
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret) {
+ pci_err(pdev, "Failed to start QM!\n");
+ goto flr_done;
+ }
+
+ if (qm->vfs_num)
+ qm_vf_q_assign(qm, qm->vfs_num);
+
+ ret = qm_vf_reset_done(pdev, qm->qm_list);
+ if (ret) {
+ pci_err(pdev, "Failed to start VFs!\n");
+ goto flr_done;
+ }
+ }
- writel(AM_CFG_PORT_WR_EN_VALUE, qm->io_base + AM_CFG_PORT_WR_EN);
- qm->err_ini.is_qm_ecc_mbit = 0;
- qm->err_ini.is_dev_ecc_mbit = 0;
+flr_done:
+ if (qm_flr_reset_complete(pdev))
+ pci_info(pdev, "FLR reset complete\n");
}
-EXPORT_SYMBOL_GPL(hisi_qm_restart_done);
+EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1(a)hisilicon.com>");
diff --git a/drivers/crypto/hisilicon/qm.h b/drivers/crypto/hisilicon/qm.h
index 24b3609..36e888f 100644
--- a/drivers/crypto/hisilicon/qm.h
+++ b/drivers/crypto/hisilicon/qm.h
@@ -17,6 +17,9 @@
#include "qm_usr_if.h"
+#define QNUM_V1 4096
+#define QNUM_V2 1024
+#define QM_MAX_VFS_NUM 63
/* qm user domain */
#define QM_ARUSER_M_CFG_1 0x100088
#define AXUSER_SNOOP_ENABLE BIT(30)
@@ -49,6 +52,7 @@
#define QM_AXI_M_CFG 0x1000ac
#define AXI_M_CFG 0xffff
#define QM_AXI_M_CFG_ENABLE 0x1000b0
+#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014
#define AXI_M_CFG_ENABLE 0xffffffff
#define QM_PEH_AXUSER_CFG 0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0
@@ -235,19 +239,41 @@ struct hisi_qm_status {
int stop_reason;
};
+struct hisi_qm_hw_error {
+ u32 int_msk;
+ const char *msg;
+};
+
struct hisi_qm;
-struct hisi_qm_err_ini {
- u32 qm_wr_port;
+struct hisi_qm_err_info {
+ char *acpi_rst;
+ u32 msi_wr_port;
+ u32 ecc_2bits_mask;
u32 is_qm_ecc_mbit;
u32 is_dev_ecc_mbit;
- u32 ecc_2bits_mask;
- void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ u32 ce;
+ u32 nfe;
+ u32 fe;
+ u32 msi;
+};
+
+struct hisi_qm_err_ini {
u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
+ void (*hw_err_enable)(struct hisi_qm *qm);
+ void (*hw_err_disable)(struct hisi_qm *qm);
+ int (*set_usr_domain_cache)(struct hisi_qm *qm);
void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts);
- /* design for module can not hold on ooo through qm, such as zip */
- void (*inject_dev_hw_err)(struct hisi_qm *qm);
+ void (*open_axi_master_ooo)(struct hisi_qm *qm);
+ void (*close_axi_master_ooo)(struct hisi_qm *qm);
+ struct hisi_qm_err_info err_info;
+};
+
+struct hisi_qm_list {
+ struct mutex lock;
+ struct list_head list;
+ bool (*check)(struct hisi_qm *qm);
};
struct hisi_qm {
@@ -260,7 +286,9 @@ struct hisi_qm {
u32 qp_base;
u32 qp_num;
u32 ctrl_q_num;
-
+ u32 vfs_num;
+ struct list_head list;
+ struct hisi_qm_list *qm_list;
struct qm_dma qdma;
struct qm_sqc *sqc;
struct qm_cqc *cqc;
@@ -285,8 +313,7 @@ struct hisi_qm {
u32 error_mask;
u32 msi_mask;
-
- const char *algs;
+ unsigned long hw_status;
bool use_uacce; /* register to uacce */
bool use_sva;
@@ -294,7 +321,9 @@ struct hisi_qm {
resource_size_t phys_base;
resource_size_t size;
struct uacce uacce;
+ const char *algs;
void *reserve;
+ int uacce_mode;
dma_addr_t reserve_dma;
#endif
struct workqueue_struct *wq;
@@ -345,9 +374,144 @@ struct hisi_qp {
#endif
};
+static inline int q_num_set(const char *val, const struct kernel_param *kp,
+ unsigned int device)
+{
+ struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
+ device, NULL);
+ u32 n, q_num;
+ u8 rev_id;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ if (!pdev) {
+ q_num = min_t(u32, QNUM_V1, QNUM_V2);
+ pr_info("No device found currently, suppose queue number is %d\n",
+ q_num);
+ } else {
+ rev_id = pdev->revision;
+ switch (rev_id) {
+ case QM_HW_V1:
+ q_num = QNUM_V1;
+ break;
+ case QM_HW_V2:
+ q_num = QNUM_V2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret || !n || n > q_num)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
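A sketch of how a driver wires q_num_set() into a module parameter; the parameter name and default are illustrative, and HRDE_PCI_DEVICE_ID merely stands in for the driver's own PF device ID:

static u32 pf_q_num = 64;

static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, HRDE_PCI_DEVICE_ID);
}

static const struct kernel_param_ops pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF");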
+
+static inline int vf_num_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret < 0)
+ return ret;
+
+ if (n > QM_MAX_VFS_NUM)
+ return -ERANGE;
+
+ return param_set_int(val, kp);
+}
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static inline int mode_set(const char *val, const struct kernel_param *kp)
+{
+ u32 n;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+
+ ret = kstrtou32(val, 10, &n);
+ if (ret != 0 || (n != UACCE_MODE_NOIOMMU &&
+ n != UACCE_MODE_NOUACCE))
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+#endif
+
+static inline void hisi_qm_add_to_list(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline void hisi_qm_del_from_list(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline int hisi_qm_pre_init(struct hisi_qm *qm,
+ u32 pf_q_num, u32 def_q_num)
+{
+ struct pci_dev *pdev = qm->pdev;
+
+ switch (pdev->revision) {
+ case QM_HW_V1:
+ case QM_HW_V2:
+ qm->ver = pdev->revision;
+ break;
+ default:
+ pci_err(pdev, "hardware version err!\n");
+ return -ENODEV;
+ }
+
+ pci_set_drvdata(pdev, qm);
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ switch (qm->uacce_mode) {
+ case UACCE_MODE_NOUACCE:
+ qm->use_uacce = false;
+ break;
+ case UACCE_MODE_NOIOMMU:
+ qm->use_uacce = true;
+ break;
+ default:
+ pci_err(pdev, "uacce mode error!\n");
+ return -EINVAL;
+ }
+#else
+ qm->use_uacce = false;
+#endif
+ if (qm->fun_type == QM_HW_PF) {
+ qm->qp_base = def_q_num;
+ qm->qp_num = pf_q_num;
+ qm->debug.curr_qm_qp_num = pf_q_num;
+ }
+
+ return 0;
+}
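For context, a probe-path sketch of the fields a driver is expected to fill in before calling hisi_qm_pre_init(); pf_q_num, uacce_mode and the HRDE_PF_DEF_Q_BASE constant are assumed module parameters/constants borrowed from the drivers below:

	struct hisi_qm *qm = &hisi_rde->qm;
	int ret;

	qm->pdev = pdev;
	qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
	qm->qm_list = &rde_devices;	/* per-driver device list */
#ifdef CONFIG_CRYPTO_QM_UACCE
	qm->uacce_mode = uacce_mode;	/* module parameter */
#endif
	ret = hisi_qm_pre_init(qm, pf_q_num, HRDE_PF_DEF_Q_BASE);
	if (ret)
		return ret;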
+
+void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
+int hisi_qm_alloc_qps_node(int node, struct hisi_qm_list *qm_list,
+ struct hisi_qp **qps, int qp_num, u8 alg_type);
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
-int hisi_qm_frozen(struct hisi_qm *qm);
+void hisi_qm_dev_shutdown(struct pci_dev *pdev);
+void hisi_qm_remove_wait_delay(struct hisi_qm *qm,
+ struct hisi_qm_list *qm_list);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type);
@@ -358,25 +522,20 @@ struct hisi_qp {
int hisi_qp_wait(struct hisi_qp *qp);
int hisi_qm_get_free_qp_num(struct hisi_qm *qm);
int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number);
-int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_debug_init(struct hisi_qm *qm);
-void hisi_qm_hw_error_init(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe,
- u32 msi);
-void hisi_qm_hw_error_uninit(struct hisi_qm *qm);
-pci_ers_result_t hisi_qm_hw_error_handle(struct hisi_qm *qm);
-void hisi_qm_clear_queues(struct hisi_qm *qm);
-enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev);
int hisi_qm_restart(struct hisi_qm *qm);
-int hisi_qm_get_hw_error_status(struct hisi_qm *qm);
+int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
+int hisi_qm_sriov_disable(struct pci_dev *pdev, struct hisi_qm_list *qm_list);
+void hisi_qm_dev_err_init(struct hisi_qm *qm);
+void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
+pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
+void hisi_qm_reset_prepare(struct pci_dev *pdev);
+void hisi_qm_reset_done(struct pci_dev *pdev);
pci_ers_result_t hisi_qm_process_dev_error(struct pci_dev *pdev);
-int hisi_qm_reg_test(struct hisi_qm *qm);
-int hisi_qm_set_pf_mse(struct hisi_qm *qm, bool set);
-int hisi_qm_set_vf_mse(struct hisi_qm *qm, bool set);
-int hisi_qm_set_msi(struct hisi_qm *qm, bool set);
-void hisi_qm_set_ecc(struct hisi_qm *qm);
-void hisi_qm_restart_prepare(struct hisi_qm *qm);
-void hisi_qm_restart_done(struct hisi_qm *qm);
+int hisi_qm_controller_reset(struct hisi_qm *qm);
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
diff --git a/drivers/crypto/hisilicon/rde/rde.h b/drivers/crypto/hisilicon/rde/rde.h
index aa7887a..e06efc7 100644
--- a/drivers/crypto/hisilicon/rde/rde.h
+++ b/drivers/crypto/hisilicon/rde/rde.h
@@ -22,19 +22,11 @@
struct hisi_rde_ctrl;
-enum hisi_rde_status {
- HISI_RDE_RESET,
-};
-
struct hisi_rde {
struct hisi_qm qm;
- struct list_head list;
struct hisi_rde_ctrl *ctrl;
struct work_struct reset_work;
- struct mutex *rde_list_lock;
- unsigned long status;
u32 smmu_state;
- int q_ref;
};
#define RDE_CM_LOAD_ENABLE 1
@@ -134,7 +126,6 @@ struct hisi_rde_msg {
struct hisi_rde_ctx {
struct device *dev;
struct hisi_qp *qp;
- struct hisi_rde *rde_dev;
struct hisi_rde_msg *req_list;
unsigned long *req_bitmap;
spinlock_t req_lock;
@@ -323,7 +314,7 @@ static inline void rde_table_dump(const struct hisi_rde_msg *req)
}
}
-struct hisi_rde *find_rde_device(int node);
+struct hisi_qp *rde_create_qp(void);
int hisi_rde_abnormal_fix(struct hisi_qm *qm);
#endif
diff --git a/drivers/crypto/hisilicon/rde/rde_api.c b/drivers/crypto/hisilicon/rde/rde_api.c
index 1be468a..f1330f1 100644
--- a/drivers/crypto/hisilicon/rde/rde_api.c
+++ b/drivers/crypto/hisilicon/rde/rde_api.c
@@ -835,17 +835,12 @@ int hisi_rde_io_proc(struct acc_ctx *ctx, struct raid_ec_ctrl *ctrl,
return ret;
}
-static int hisi_rde_create_qp(struct hisi_qm *qm, struct acc_ctx *ctx,
- int alg_type, int req_type)
+static int hisi_rde_start_qp(struct hisi_qp *qp, struct acc_ctx *ctx,
+ int req_type)
{
- struct hisi_qp *qp;
struct hisi_rde_ctx *rde_ctx;
int ret;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
-
qp->req_type = req_type;
qp->qp_ctx = ctx;
@@ -994,9 +989,10 @@ static int hisi_rde_ctx_init(struct hisi_rde_ctx *rde_ctx, int qlen)
int acc_init(struct acc_ctx *ctx)
{
+ struct hisi_rde_ctx *rde_ctx;
struct hisi_rde *hisi_rde;
+ struct hisi_qp *qp;
struct hisi_qm *qm;
- struct hisi_rde_ctx *rde_ctx;
int ret;
if (unlikely(!ctx)) {
@@ -1004,9 +1000,9 @@ int acc_init(struct acc_ctx *ctx)
return -EINVAL;
}
- hisi_rde = find_rde_device(cpu_to_node(smp_processor_id()));
- if (unlikely(!hisi_rde)) {
- pr_err("[%s]Can not find proper RDE device.\n", __func__);
+ qp = rde_create_qp();
+ if (unlikely(!qp)) {
+ pr_err("[%s]Can not create RDE qp.\n", __func__);
return -ENODEV;
}
/* alloc inner private struct */
@@ -1017,20 +1013,20 @@ int acc_init(struct acc_ctx *ctx)
}
ctx->inner = (void *)rde_ctx;
- qm = &hisi_rde->qm;
+ qm = qp->qm;
if (unlikely(!qm->pdev)) {
pr_err("[%s] Pdev is NULL.\n", __func__);
return -ENODEV;
}
rde_ctx->dev = &qm->pdev->dev;
- ret = hisi_rde_create_qp(qm, ctx, 0, 0);
+ ret = hisi_rde_start_qp(qp, ctx, 0);
if (ret) {
- dev_err(rde_ctx->dev, "[%s] Create qp failed.\n", __func__);
+ dev_err(rde_ctx->dev, "[%s] start qp failed.\n", __func__);
goto qp_err;
}
- rde_ctx->rde_dev = hisi_rde;
+ hisi_rde = container_of(qm, struct hisi_rde, qm);
rde_ctx->smmu_state = hisi_rde->smmu_state;
rde_ctx->addr_type = ctx->addr_type;
hisi_rde_session_init(rde_ctx);
@@ -1081,9 +1077,6 @@ int acc_clear(struct acc_ctx *ctx)
rde_ctx->req_list = NULL;
hisi_rde_release_qp(rde_ctx);
- mutex_lock(rde_ctx->rde_dev->rde_list_lock);
- rde_ctx->rde_dev->q_ref = rde_ctx->rde_dev->q_ref - 1;
- mutex_unlock(rde_ctx->rde_dev->rde_list_lock);
kfree(rde_ctx);
ctx->inner = NULL;
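Note the ordering acc_init() now follows: the queue pair is acquired first via rde_create_qp(), so every later failure path has to unwind it. A minimal compilable sketch of that acquire-then-unwind shape (stand-in names, not the driver API):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins: acquire a queue pair, then a context bound to it. */
static void *create_qp(void)      { return malloc(16); }
static void  release_qp(void *qp) { free(qp); }

static int acc_init_flow(void)
{
        void *qp, *ctx;

        qp = create_qp();          /* 1: pick a device + queue first */
        if (!qp)
                return -1;

        ctx = malloc(32);          /* 2: only then allocate the context */
        if (!ctx)
                goto err_release_qp;

        printf("ctx ready\n");
        free(ctx);
        release_qp(qp);
        return 0;

err_release_qp:                    /* unwind in reverse order */
        release_qp(qp);
        return -1;
}

int main(void) { return acc_init_flow(); }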
diff --git a/drivers/crypto/hisilicon/rde/rde_api.h b/drivers/crypto/hisilicon/rde/rde_api.h
index 0f9021b..167607e 100644
--- a/drivers/crypto/hisilicon/rde/rde_api.h
+++ b/drivers/crypto/hisilicon/rde/rde_api.h
@@ -308,7 +308,7 @@ struct acc_dif {
* @input_block: number of sector
* @data_len: data len of per disk, block_size (with dif)* input_block
* @buf_type: denoted by ACC_BUF_TYPE_E
- * @src_dif��dif information of source disks
+ * @src_dif: dif information of source disks
* @dst_dif: dif information of dest disks
* @cm_load: coe_matrix reload control, 0: do not load, 1: load
* @cm_len: length of loaded coe_matrix, equal to src_num
diff --git a/drivers/crypto/hisilicon/rde/rde_main.c b/drivers/crypto/hisilicon/rde/rde_main.c
index 453657a..318d4a0 100644
--- a/drivers/crypto/hisilicon/rde/rde_main.c
+++ b/drivers/crypto/hisilicon/rde/rde_main.c
@@ -22,7 +22,6 @@
#include <linux/uacce.h>
#include "rde.h"
-#define HRDE_VF_NUM 63
#define HRDE_QUEUE_NUM_V1 4096
#define HRDE_QUEUE_NUM_V2 1024
#define HRDE_PCI_DEVICE_ID 0xa25a
@@ -32,7 +31,6 @@
#define HRDE_PF_DEF_Q_BASE 0
#define HRDE_RD_INTVRL_US 10
#define HRDE_RD_TMOUT_US 1000
-#define FORMAT_DECIMAL 10
#define HRDE_RST_TMOUT_MS 400
#define HRDE_ENABLE 1
#define HRDE_DISABLE 0
@@ -68,7 +66,7 @@
#define CHN_CFG 0x5010101
#define HRDE_AXI_SHUTDOWN_EN BIT(26)
#define HRDE_AXI_SHUTDOWN_DIS 0xFBFFFFFF
-#define HRDE_WR_MSI_PORT 0xFFFE
+#define HRDE_WR_MSI_PORT BIT(0)
#define HRDE_AWUSER_BD_1 0x310104
#define HRDE_ARUSER_BD_1 0x310114
#define HRDE_ARUSER_SGL_1 0x310124
@@ -87,9 +85,6 @@
#define HRDE_QM_IDEL_STATUS 0x1040e4
#define HRDE_QM_PEH_DFX_INFO0 0x1000fc
#define PEH_MSI_MASK_SHIFT 0x90
-#define HRDE_MASTER_GLOBAL_CTRL 0x300000
-#define MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
-#define MASTER_TRANS_RETURN_RW 0x3
#define CACHE_CTL 0x1833
#define HRDE_DBGFS_VAL_MAX_LEN 20
#define HRDE_PROBE_ADDR 0x31025c
@@ -100,16 +95,9 @@
static const char hisi_rde_name[] = "hisi_rde";
static struct dentry *hrde_debugfs_root;
-LIST_HEAD(hisi_rde_list);
-DEFINE_MUTEX(hisi_rde_list_lock);
+static struct hisi_qm_list rde_devices;
static void hisi_rde_ras_proc(struct work_struct *work);
-struct hisi_rde_resource {
- struct hisi_rde *hrde;
- int distance;
- struct list_head list;
-};
-
static const struct hisi_rde_hw_error rde_hw_error[] = {
{.int_msk = BIT(0), .msg = "Rde_ecc_1bitt_err"},
{.int_msk = BIT(1), .msg = "Rde_ecc_2bit_err"},
@@ -157,7 +145,6 @@ struct ctrl_debug_file {
*/
struct hisi_rde_ctrl {
struct hisi_rde *hisi_rde;
- struct dentry *debug_root;
struct ctrl_debug_file files[HRDE_DEBUG_FILE_NUM];
};
@@ -199,78 +186,36 @@ struct hisi_rde_ctrl {
{"HRDE_AM_CURR_WR_TXID_STS_2", 0x300178ull},
};
-static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev;
- u32 n;
- u32 q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, HRDE_PCI_DEVICE_ID, NULL);
- if (unlikely(!pdev)) {
- q_num = min_t(u32, HRDE_QUEUE_NUM_V1, HRDE_QUEUE_NUM_V2);
- pr_info
- ("No device found currently, suppose queue number is %d.\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = HRDE_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = HRDE_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
-
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > q_num)
- return -EINVAL;
-
- return param_set_int(val, kp);
+ return mode_set(val, kp);
}
-static const struct kernel_param_ops pf_q_num_ops = {
- .set = pf_q_num_set,
+static const struct kernel_param_ops uacce_mode_ops = {
+ .set = uacce_mode_set,
.get = param_get_int,
};
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
-{
- u32 n;
- int ret;
-
- if (!val)
- return -EINVAL;
-
- ret = kstrtou32(val, FORMAT_DECIMAL, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+static int uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+#endif
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, HRDE_PCI_DEVICE_ID);
}
-static const struct kernel_param_ops uacce_mode_ops = {
- .set = uacce_mode_set,
+static const struct kernel_param_ops pf_q_num_ops = {
+ .set = pf_q_num_set,
.get = param_get_int,
};
-
static u32 pf_q_num = HRDE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-
static const struct pci_device_id hisi_rde_dev_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HRDE_PCI_DEVICE_ID)},
{0,}
@@ -278,125 +223,59 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
MODULE_DEVICE_TABLE(pci, hisi_rde_dev_ids);
-static void free_list(struct list_head *head)
-{
- struct hisi_rde_resource *res;
- struct hisi_rde_resource *tmp;
-
- list_for_each_entry_safe(res, tmp, head, list) {
- list_del(&res->list);
- kfree(res);
- }
-}
-
-struct hisi_rde *find_rde_device(int node)
+struct hisi_qp *rde_create_qp(void)
{
- struct hisi_rde *ret = NULL;
-#ifdef CONFIG_NUMA
- struct hisi_rde_resource *res, *tmp;
- struct hisi_rde *hisi_rde;
- struct list_head *n;
- struct device *dev;
- LIST_HEAD(head);
-
- mutex_lock(&hisi_rde_list_lock);
-
- list_for_each_entry(hisi_rde, &hisi_rde_list, list) {
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err;
-
- dev = &hisi_rde->qm.pdev->dev;
- res->hrde = hisi_rde;
- res->distance = node_distance(dev->numa_node, node);
- n = &head;
- list_for_each_entry(tmp, &head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
- }
-
- list_for_each_entry(tmp, &head, list) {
- if (tmp->hrde->q_ref + 1 <= pf_q_num) {
- tmp->hrde->q_ref = tmp->hrde->q_ref + 1;
- ret = tmp->hrde;
- break;
- }
- }
+ int node = cpu_to_node(smp_processor_id());
+ struct hisi_qp *qp;
+ int ret;
- free_list(&head);
-#else
- mutex_lock(&hisi_rde_list_lock);
- ret = list_first_entry(&hisi_rde_list, struct hisi_rde, list);
-#endif
- mutex_unlock(&hisi_rde_list_lock);
- return ret;
+ ret = hisi_qm_alloc_qps_node(node, &rde_devices, &qp, 1, 0);
+ if (!ret)
+ return qp;
-err:
- free_list(&head);
- mutex_unlock(&hisi_rde_list_lock);
return NULL;
}
-static inline void hisi_rde_add_to_list(struct hisi_rde *hisi_rde)
-{
- mutex_lock(&hisi_rde_list_lock);
- list_add_tail(&hisi_rde->list, &hisi_rde_list);
- mutex_unlock(&hisi_rde_list_lock);
-}
-
-static inline void hisi_rde_remove_from_list(struct hisi_rde *hisi_rde)
-{
- mutex_lock(&hisi_rde_list_lock);
- list_del(&hisi_rde->list);
- mutex_unlock(&hisi_rde_list_lock);
-}
-
-static void hisi_rde_engine_init(struct hisi_rde *hisi_rde)
+static int hisi_rde_engine_init(struct hisi_qm *qm)
{
- writel(DFX_CTRL0, hisi_rde->qm.io_base + HRDE_DFX_CTRL_0);
+ writel(DFX_CTRL0, qm->io_base + HRDE_DFX_CTRL_0);
/* usr domain */
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_AWUSER_BD_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_ARUSER_BD_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_AWUSER_DAT_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_ARUSER_DAT_1);
- writel(HRDE_USER_SMMU, hisi_rde->qm.io_base + HRDE_ARUSER_SGL_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_AWUSER_BD_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_ARUSER_BD_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_AWUSER_DAT_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_ARUSER_DAT_1);
+ writel(HRDE_USER_SMMU, qm->io_base + HRDE_ARUSER_SGL_1);
/* rde cache */
- writel(AWCACHE, hisi_rde->qm.io_base + HRDE_AWCACHE);
- writel(ARCACHE, hisi_rde->qm.io_base + HRDE_ARCACHE);
+ writel(AWCACHE, qm->io_base + HRDE_AWCACHE);
+ writel(ARCACHE, qm->io_base + HRDE_ARCACHE);
/* rde chn enable + outstanding config */
- writel(CHN_CFG, hisi_rde->qm.io_base + HRDE_CFG);
+ writel(CHN_CFG, qm->io_base + HRDE_CFG);
+
+ return 0;
}
-static void hisi_rde_set_user_domain_and_cache(struct hisi_rde *hisi_rde)
+static int hisi_rde_set_user_domain_and_cache(struct hisi_qm *qm)
{
/* qm user domain */
- writel(AXUSER_BASE, hisi_rde->qm.io_base + QM_ARUSER_M_CFG_1);
- writel(ARUSER_M_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_ARUSER_M_CFG_ENABLE);
- writel(AXUSER_BASE, hisi_rde->qm.io_base + QM_AWUSER_M_CFG_1);
- writel(AWUSER_M_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_AWUSER_M_CFG_ENABLE);
- writel(WUSER_M_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_WUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
+ writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
+ writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
+ writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);
/* qm cache */
- writel(AXI_M_CFG, hisi_rde->qm.io_base + QM_AXI_M_CFG);
- writel(AXI_M_CFG_ENABLE, hisi_rde->qm.io_base + QM_AXI_M_CFG_ENABLE);
+ writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
+ writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);
/* disable BME/PM/SRIOV FLR*/
- writel(PEH_AXUSER_CFG, hisi_rde->qm.io_base + QM_PEH_AXUSER_CFG);
- writel(PEH_AXUSER_CFG_ENABLE, hisi_rde->qm.io_base +
- QM_PEH_AXUSER_CFG_ENABLE);
+ writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
+ writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
- writel(CACHE_CTL, hisi_rde->qm.io_base + QM_CACHE_CTL);
+ writel(CACHE_CTL, qm->io_base + QM_CACHE_CTL);
- hisi_rde_engine_init(hisi_rde);
+ return hisi_rde_engine_init(qm);
}
static void hisi_rde_debug_regs_clear(struct hisi_qm *qm)
@@ -418,30 +297,38 @@ static void hisi_rde_debug_regs_clear(struct hisi_qm *qm)
hisi_qm_debug_regs_clear(qm);
}
-static void hisi_rde_hw_error_set_state(struct hisi_rde *hisi_rde, bool state)
+static void hisi_rde_hw_error_enable(struct hisi_qm *qm)
{
- u32 ras_msk = (HRDE_RAS_CE_MSK | HRDE_RAS_NFE_MSK);
u32 val;
- val = readl(hisi_rde->qm.io_base + HRDE_CFG);
- if (state) {
- writel(HRDE_INT_SOURCE_CLEAR,
- hisi_rde->qm.io_base + HRDE_INT_SOURCE);
- writel(HRDE_RAS_ENABLE,
- hisi_rde->qm.io_base + HRDE_RAS_INT_MSK);
- /* bd prefetch should bd masked to prevent misreport */
- writel((HRDE_INT_ENABLE | HRDE_BD_PREFETCH),
- hisi_rde->qm.io_base + HRDE_INT_MSK);
- /* make master ooo close, when m-bits error happens*/
- val = val | HRDE_AXI_SHUTDOWN_EN;
- } else {
- writel(ras_msk, hisi_rde->qm.io_base + HRDE_RAS_INT_MSK);
- writel(HRDE_INT_DISABLE, hisi_rde->qm.io_base + HRDE_INT_MSK);
- /* make master ooo open, when m-bits error happens*/
- val = val & HRDE_AXI_SHUTDOWN_DIS;
- }
+ val = readl(qm->io_base + HRDE_CFG);
+
+ /* clear RDE hw error source if any */
+ writel(HRDE_INT_SOURCE_CLEAR, qm->io_base + HRDE_INT_SOURCE);
+ writel(HRDE_RAS_ENABLE, qm->io_base + HRDE_RAS_INT_MSK);
+
+ /* bd prefetch should be masked to prevent misreport */
+ writel((HRDE_INT_ENABLE | HRDE_BD_PREFETCH),
+ qm->io_base + HRDE_INT_MSK);
- writel(val, hisi_rde->qm.io_base + HRDE_CFG);
+ /* when m-bit error occurs, master ooo will close */
+ val = val | HRDE_AXI_SHUTDOWN_EN;
+ writel(val, qm->io_base + HRDE_CFG);
+}
+
+static void hisi_rde_hw_error_disable(struct hisi_qm *qm)
+{
+ u32 ras_msk = HRDE_RAS_CE_MSK | HRDE_RAS_NFE_MSK;
+ u32 val;
+
+ val = readl(qm->io_base + HRDE_CFG);
+
+ writel(ras_msk, qm->io_base + HRDE_RAS_INT_MSK);
+ writel(HRDE_INT_DISABLE, qm->io_base + HRDE_INT_MSK);
+
+ /* when m-bit error occurs, master ooo will not close */
+ val = val & HRDE_AXI_SHUTDOWN_DIS;
+ writel(val, qm->io_base + HRDE_CFG);
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -587,10 +474,8 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
.write = ctrl_debug_write,
};
-static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl)
+static int hisi_rde_chn_debug_init(struct hisi_qm *qm)
{
- struct hisi_rde *hisi_rde = ctrl->hisi_rde;
- struct hisi_qm *qm = &hisi_rde->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset, *regset_ooo;
struct dentry *tmp_d, *tmp;
@@ -601,7 +486,7 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl)
if (ret < 0)
return -ENOENT;
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -628,29 +513,30 @@ static int hisi_rde_chn_debug_init(struct hisi_rde_ctrl *ctrl)
return 0;
}
-static int hisi_rde_ctrl_debug_init(struct hisi_rde_ctrl *ctrl)
+static int hisi_rde_ctrl_debug_init(struct hisi_qm *qm)
{
+ struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm);
struct dentry *tmp;
int i;
for (i = HRDE_CURRENT_FUNCTION; i < HRDE_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&ctrl->files[i].lock);
- ctrl->files[i].ctrl = ctrl;
- ctrl->files[i].index = i;
+ spin_lock_init(&hisi_rde->ctrl->files[i].lock);
+ hisi_rde->ctrl->files[i].ctrl = hisi_rde->ctrl;
+ hisi_rde->ctrl->files[i].index = i;
tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
+ qm->debug.debug_root,
+ hisi_rde->ctrl->files + i,
&ctrl_debug_fops);
if (!tmp)
return -ENOENT;
}
- return hisi_rde_chn_debug_init(ctrl);
+ return hisi_rde_chn_debug_init(qm);
}
-static int hisi_rde_debugfs_init(struct hisi_rde *hisi_rde)
+static int hisi_rde_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_rde->qm;
struct device *dev = &qm->pdev->dev;
struct dentry *dev_d;
int ret;
@@ -665,8 +551,7 @@ static int hisi_rde_debugfs_init(struct hisi_rde *hisi_rde)
goto failed_to_create;
if (qm->pdev->device == HRDE_PCI_DEVICE_ID) {
- hisi_rde->ctrl->debug_root = dev_d;
- ret = hisi_rde_ctrl_debug_init(hisi_rde->ctrl);
+ ret = hisi_rde_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
@@ -678,49 +563,17 @@ static int hisi_rde_debugfs_init(struct hisi_rde *hisi_rde)
return ret;
}
-static void hisi_rde_debugfs_exit(struct hisi_rde *hisi_rde)
+static void hisi_rde_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_rde->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
+
if (qm->fun_type == QM_HW_PF) {
hisi_rde_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
}
-static void hisi_rde_set_hw_error(struct hisi_rde *hisi_rde, bool state)
-{
- if (state)
- hisi_qm_hw_error_init(&hisi_rde->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT,
- 0, 0);
- else
- hisi_qm_hw_error_uninit(&hisi_rde->qm);
-
- hisi_rde_hw_error_set_state(hisi_rde, state);
-}
-
-static void hisi_rde_open_master_ooo(struct hisi_qm *qm)
-{
- u32 val;
-
- val = readl(qm->io_base + HRDE_CFG);
- writel(val & HRDE_AXI_SHUTDOWN_DIS, qm->io_base + HRDE_CFG);
- writel(val | HRDE_AXI_SHUTDOWN_EN, qm->io_base + HRDE_CFG);
-}
-
-static u32 hisi_rde_get_hw_err_status(struct hisi_qm *qm)
-{
- return readl(qm->io_base + HRDE_INT_STATUS);
-}
-
-static void hisi_rde_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
-{
- writel(err_sts, qm->io_base + HRDE_INT_SOURCE);
-}
-
-static void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts)
+void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts)
{
const struct hisi_rde_hw_error *err = rde_hw_error;
struct device *dev = &qm->pdev->dev;
@@ -751,10 +604,30 @@ static void hisi_rde_hw_error_log(struct hisi_qm *qm, u32 err_sts)
}
}
-static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde)
+u32 hisi_rde_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + HRDE_INT_STATUS);
+}
+
+void hisi_rde_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
- struct hisi_qm *qm = &hisi_rde->qm;
+ writel(err_sts, qm->io_base + HRDE_INT_SOURCE);
+}
+
+static void hisi_rde_open_master_ooo(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(qm->io_base + HRDE_CFG);
+ writel(val & HRDE_AXI_SHUTDOWN_DIS, qm->io_base + HRDE_CFG);
+ writel(val | HRDE_AXI_SHUTDOWN_EN, qm->io_base + HRDE_CFG);
+}
+
+static int hisi_rde_pf_probe_init(struct hisi_qm *qm)
+{
+ struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm);
struct hisi_rde_ctrl *ctrl;
+ int ret;
ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -776,14 +649,26 @@ static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde)
return -EINVAL;
}
- qm->err_ini.qm_wr_port = HRDE_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = HRDE_ECC_2BIT_ERR;
- qm->err_ini.open_axi_master_ooo = hisi_rde_open_master_ooo;
qm->err_ini.get_dev_hw_err_status = hisi_rde_get_hw_err_status;
qm->err_ini.clear_dev_hw_err_status = hisi_rde_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = HRDE_ECC_2BIT_ERR;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = 0;
+ qm->err_ini.err_info.acpi_rst = "RRST";
+ qm->err_ini.hw_err_disable = hisi_rde_hw_error_disable;
+ qm->err_ini.hw_err_enable = hisi_rde_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = hisi_rde_set_user_domain_and_cache;
qm->err_ini.log_dev_hw_err = hisi_rde_hw_error_log;
- hisi_rde_set_user_domain_and_cache(hisi_rde);
- hisi_rde_set_hw_error(hisi_rde, true);
+ qm->err_ini.open_axi_master_ooo = hisi_rde_open_master_ooo;
+ qm->err_ini.err_info.msi_wr_port = HRDE_WR_MSI_PORT;
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_dev_err_init(qm);
qm->err_ini.open_axi_master_ooo(qm);
hisi_rde_debug_regs_clear(qm);
@@ -792,33 +677,21 @@ static int hisi_rde_pf_probe_init(struct hisi_rde *hisi_rde)
static int hisi_rde_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
+ int ret;
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "ec\n";
+ qm->uacce_mode = uacce_mode;
+#endif
qm->pdev = pdev;
- qm->ver = rev_id;
+ ret = hisi_qm_pre_init(qm, pf_q_num, HRDE_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+
+ qm->qm_list = &rde_devices;
qm->sqe_size = HRDE_SQE_SIZE;
qm->dev_name = hisi_rde_name;
- qm->fun_type = QM_HW_PF;
- qm->algs = "ec\n";
-
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- return -EINVAL;
- }
-
- qm->qp_base = HRDE_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
qm->abnormal_fix = hisi_rde_abnormal_fix;
return 0;
@@ -849,11 +722,12 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!hisi_rde)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_rde);
INIT_WORK(&hisi_rde->reset_work, hisi_rde_ras_proc);
hisi_rde->smmu_state = hisi_rde_smmu_state(&pdev->dev);
qm = &hisi_rde->qm;
+ qm->fun_type = QM_HW_PF;
+
ret = hisi_rde_qm_pre_init(qm, pdev);
if (ret) {
pci_err(pdev, "Pre init qm failed!\n");
@@ -866,7 +740,7 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- ret = hisi_rde_pf_probe_init(hisi_rde);
+ ret = hisi_rde_pf_probe_init(qm);
if (ret) {
pci_err(pdev, "Init pf failed!\n");
goto err_qm_uninit;
@@ -878,16 +752,15 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_qm_uninit;
}
- ret = hisi_rde_debugfs_init(hisi_rde);
+ ret = hisi_rde_debugfs_init(qm);
if (ret)
pci_warn(pdev, "Init debugfs failed!\n");
- hisi_rde_add_to_list(hisi_rde);
- hisi_rde->rde_list_lock = &hisi_rde_list_lock;
+ hisi_qm_add_to_list(qm, &rde_devices);
return 0;
- err_qm_uninit:
+err_qm_uninit:
hisi_qm_uninit(qm);
return ret;
@@ -895,198 +768,20 @@ static int hisi_rde_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void hisi_rde_remove(struct pci_dev *pdev)
{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_rde->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
+ struct hisi_rde *hisi_rde = container_of(qm, struct hisi_rde, qm);
+
+ hisi_qm_remove_wait_delay(qm, &rde_devices);
qm->abnormal_fix = NULL;
- hisi_rde_hw_error_set_state(hisi_rde, false);
+ hisi_qm_dev_err_uninit(qm);
cancel_work_sync(&hisi_rde->reset_work);
- hisi_rde_remove_from_list(hisi_rde);
- hisi_rde_debugfs_exit(hisi_rde);
+ hisi_qm_del_from_list(qm, &rde_devices);
+ hisi_rde_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
hisi_qm_uninit(qm);
}
-static void hisi_rde_shutdown(struct pci_dev *pdev)
-{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hisi_rde->qm, QM_NORMAL);
-}
-
-static int hisi_rde_reset_prepare_rdy(struct hisi_rde *hisi_rde)
-{
- int delay = 0;
-
- while (test_and_set_bit(HISI_RDE_RESET, &hisi_rde->status)) {
- msleep(++delay);
- if (delay > HRDE_RST_TMOUT_MS)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hisi_rde_controller_reset_prepare(struct hisi_rde *hisi_rde)
-{
- struct hisi_qm *qm = &hisi_rde->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hisi_rde_reset_prepare_rdy(hisi_rde);
- if (ret) {
- dev_err(&pdev->dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Stop QM failed!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(&pdev->dev, "Isolate hw err failed!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hisi_rde_soft_reset(struct hisi_rde *hisi_rde)
-{
- struct hisi_qm *qm = &hisi_rde->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value;
- int ret;
- u32 val;
-
- /* Check PF stream stop */
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- /* Disable PEH MSI */
- ret = hisi_qm_set_msi(qm, HRDE_DISABLE);
- if (ret) {
- dev_err(dev, "Disable peh msi bit failed.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(MASTER_GLOBAL_CTRL_SHUTDOWN,
- hisi_rde->qm.io_base + HRDE_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hisi_rde->qm.io_base +
- HRDE_MASTER_TRANS_RET, val,
- (val == MASTER_TRANS_RETURN_RW),
- HRDE_RD_INTVRL_US, HRDE_RD_TMOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- /* Disable PF MSE bit */
- ret = hisi_qm_set_pf_mse(qm, HRDE_DISABLE);
- if (ret) {
- dev_err(dev, "Disable pf mse bit failed.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "RRST",
- NULL, &value);
- if (ACPI_FAILURE(s)) {
- dev_err(dev, "No controller reset method.\n");
- return -EIO;
- }
-
- if (value) {
- dev_err(dev, "Reset step %llu failed.\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hisi_rde_controller_reset_done(struct hisi_rde *hisi_rde)
-{
- struct hisi_qm *qm = &hisi_rde->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- /* Enable PEH MSI */
- ret = hisi_qm_set_msi(qm, HRDE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Enable peh msi bit failed!\n");
- return ret;
- }
-
- /* Enable PF MSE bit */
- ret = hisi_qm_set_pf_mse(qm, HRDE_ENABLE);
- if (ret) {
- dev_err(&pdev->dev, "Enable pf mse bit failed!\n");
- return ret;
- }
-
- hisi_rde_set_user_domain_and_cache(hisi_rde);
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(&pdev->dev, "Start QM failed!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hisi_rde_set_hw_error(hisi_rde, true);
-
- return 0;
-}
-
-static int hisi_rde_controller_reset(struct hisi_rde *hisi_rde)
-{
- struct device *dev = &hisi_rde->qm.pdev->dev;
- int ret;
-
- dev_info_ratelimited(dev, "Controller resetting...\n");
-
- ret = hisi_rde_controller_reset_prepare(hisi_rde);
- if (ret)
- return ret;
-
- ret = hisi_rde_soft_reset(hisi_rde);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d).\n", ret);
- return ret;
- }
-
- ret = hisi_rde_controller_reset_done(hisi_rde);
- if (ret)
- return ret;
-
- clear_bit(HISI_RDE_RESET, &hisi_rde->status);
- dev_info_ratelimited(dev, "Controller reset complete.\n");
-
- return 0;
-}
-
static void hisi_rde_ras_proc(struct work_struct *work)
{
struct pci_dev *pdev;
@@ -1100,121 +795,26 @@ static void hisi_rde_ras_proc(struct work_struct *work)
ret = hisi_qm_process_dev_error(pdev);
if (ret == PCI_ERS_RESULT_NEED_RESET)
- if (hisi_rde_controller_reset(hisi_rde))
+ if (hisi_qm_controller_reset(&hisi_rde->qm))
dev_err(&pdev->dev, "Hisi_rde reset fail.\n");
}
int hisi_rde_abnormal_fix(struct hisi_qm *qm)
{
- struct pci_dev *pdev;
struct hisi_rde *hisi_rde;
if (!qm)
return -EINVAL;
- pdev = qm->pdev;
- if (!pdev)
- return -EINVAL;
-
- hisi_rde = pci_get_drvdata(pdev);
- if (!hisi_rde) {
- dev_err(&pdev->dev, "Hisi_rde is NULL.\n");
- return -EINVAL;
- }
+ hisi_rde = container_of(qm, struct hisi_rde, qm);
return schedule_work(&hisi_rde->reset_work);
}
-static int hisi_rde_get_hw_error_status(struct hisi_rde *hisi_rde)
-{
- u32 err_sts;
-
- err_sts = readl(hisi_rde->qm.io_base + HRDE_INT_STATUS) &
- HRDE_ECC_2BIT_ERR;
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-static int hisi_rde_check_hw_error(struct hisi_rde *hisi_rde)
-{
- int ret;
-
- ret = hisi_qm_get_hw_error_status(&hisi_rde->qm);
- if (ret)
- return ret;
-
- return hisi_rde_get_hw_error_status(hisi_rde);
-}
-
-static void hisi_rde_reset_prepare(struct pci_dev *pdev)
-{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_rde->qm;
- u32 delay = 0;
- int ret;
-
- hisi_rde_set_hw_error(hisi_rde, false);
-
- while (hisi_rde_check_hw_error(hisi_rde)) {
- msleep(++delay);
- if (delay > HRDE_RST_TMOUT_MS)
- return;
- }
-
- ret = hisi_rde_reset_prepare_rdy(hisi_rde);
- if (ret) {
- dev_err(&pdev->dev, "FLR not ready!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(&pdev->dev, "Stop QM failed!\n");
- return;
- }
-
- dev_info(&pdev->dev, "FLR resetting...\n");
-}
-
-static void hisi_rde_flr_reset_complete(struct pci_dev *pdev,
- struct hisi_rde *hisi_rde)
-{
- u32 id;
-
- pci_read_config_dword(pdev, PCI_COMMAND, &id);
- if (id == HRDE_PCI_COMMAND_INVALID)
- dev_err(&pdev->dev, "Device can not be used!\n");
-
- clear_bit(HISI_RDE_RESET, &hisi_rde->status);
-}
-
-static void hisi_rde_reset_done(struct pci_dev *pdev)
-{
- struct hisi_rde *hisi_rde = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_rde->qm;
- int ret;
-
- hisi_rde_set_hw_error(hisi_rde, true);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(&pdev->dev, "Start QM failed!\n");
- goto flr_done;
- }
-
- hisi_rde_set_user_domain_and_cache(hisi_rde);
-
-flr_done:
- hisi_rde_flr_reset_complete(pdev, hisi_rde);
- dev_info(&pdev->dev, "FLR reset complete.\n");
-}
-
static const struct pci_error_handlers hisi_rde_err_handler = {
- .reset_prepare = hisi_rde_reset_prepare,
- .reset_done = hisi_rde_reset_done,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_rde_pci_driver = {
@@ -1223,7 +823,7 @@ static void hisi_rde_reset_done(struct pci_dev *pdev)
.probe = hisi_rde_probe,
.remove = hisi_rde_remove,
.err_handler = &hisi_rde_err_handler,
- .shutdown = hisi_rde_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hisi_rde_register_debugfs(void)
@@ -1245,6 +845,9 @@ static int __init hisi_rde_init(void)
{
int ret;
+ INIT_LIST_HEAD(&rde_devices.list);
+ mutex_init(&rde_devices.lock);
+ rde_devices.check = NULL;
hisi_rde_register_debugfs();
ret = pci_register_driver(&hisi_rde_pci_driver);
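The driver-private device list, NUMA-distance sort and q_ref accounting are all replaced here by the shared struct hisi_qm_list: probe adds the qm, remove deletes it, and hisi_qm_alloc_qps_node() picks a device near the caller's node. A toy registry with the same lock-protected add/pick shape (userspace sketch; the real code sorts candidates by node_distance() and checks queue budgets):

#include <pthread.h>
#include <stdio.h>

/* Toy device registry mirroring the struct hisi_qm_list usage. */
struct node { struct node *next; const char *name; };

struct dev_list {
        struct node *head;
        pthread_mutex_t lock;
};

static struct dev_list devices = { NULL, PTHREAD_MUTEX_INITIALIZER };

static void add_to_list(struct node *n)        /* probe */
{
        pthread_mutex_lock(&devices.lock);
        n->next = devices.head;
        devices.head = n;
        pthread_mutex_unlock(&devices.lock);
}

static struct node *pick_dev(void)             /* alloc_qps_node */
{
        struct node *n;

        pthread_mutex_lock(&devices.lock);
        n = devices.head;          /* NUMA-distance sort elided */
        pthread_mutex_unlock(&devices.lock);
        return n;
}

int main(void)
{
        struct node a = { .name = "rde0" };

        add_to_list(&a);
        printf("picked %s\n", pick_dev()->name);
        return 0;
}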
diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
index 0e16452..f85dd06 100644
--- a/drivers/crypto/hisilicon/sec2/sec.h
+++ b/drivers/crypto/hisilicon/sec2/sec.h
@@ -11,7 +11,6 @@
#undef pr_fmt
#define pr_fmt(fmt) "hisi_sec: " fmt
-#define CTX_Q_NUM_DEF 24
#define FUSION_LIMIT_DEF 1
#define FUSION_LIMIT_MAX 64
#define FUSION_TMOUT_NSEC_DEF (400 * 1000)
@@ -24,10 +23,6 @@ enum sec_endian {
struct hisi_sec_ctrl;
-enum hisi_sec_status {
- HISI_SEC_RESET,
-};
-
struct hisi_sec_dfx {
u64 send_cnt;
u64 send_by_tmout;
@@ -39,21 +34,19 @@ struct hisi_sec_dfx {
u64 thread_cnt;
u64 fake_busy_cnt;
u64 busy_comp_cnt;
- u64 sec_ctrl;
};
struct hisi_sec {
struct hisi_qm qm;
- struct list_head list;
struct hisi_sec_dfx sec_dfx;
struct hisi_sec_ctrl *ctrl;
- struct mutex *hisi_sec_list_lock;
- int q_ref;
int ctx_q_num;
int fusion_limit;
int fusion_tmout_nsec;
- unsigned long status;
};
+void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
+struct hisi_qp **sec_create_qps(void);
struct hisi_sec *find_sec_device(int node);
+
#endif
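sec_create_qps()/sec_destroy_qps() declared above allocate all of a context's queue pairs up front and free them as one unit, which is what lets the per-qp release calls disappear from sec_crypto.c below. A compilable toy of that allocate-array-or-unwind pattern (stand-in alloc_qp()/free_qp(), not the QM API):

#include <stdio.h>
#include <stdlib.h>

static void *alloc_qp(void)    { return malloc(8); }
static void  free_qp(void *qp) { free(qp); }

static void destroy_qps(void **qps, int num)
{
        for (int i = 0; i < num; i++)
                free_qp(qps[i]);
        free(qps);
}

static void **create_qps(int num)
{
        void **qps = calloc(num, sizeof(*qps));

        if (!qps)
                return NULL;

        for (int i = 0; i < num; i++) {
                qps[i] = alloc_qp();
                if (!qps[i]) {
                        destroy_qps(qps, i);   /* unwind what we got */
                        return NULL;
                }
        }
        return qps;
}

int main(void)
{
        void **qps = create_qps(4);

        if (!qps)
                return 1;
        printf("4 qps ready\n");
        destroy_qps(qps, 4);
        return 0;
}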
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 3a362ce..0643955 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -16,6 +16,8 @@
#include "sec.h"
#include "sec_crypto.h"
+static atomic_t sec_active_devs;
+
#define SEC_ASYNC
#define SEC_INVLD_REQ_ID (-1)
@@ -179,6 +181,7 @@ struct hisi_sec_ctx {
struct hisi_sec *sec;
struct device *dev;
struct hisi_sec_req_op *req_op;
+ struct hisi_qp **qps;
struct hrtimer timer;
struct work_struct work;
atomic_t thread_cnt;
@@ -200,11 +203,6 @@ struct hisi_sec_ctx {
u64 des_weak_key[DES_WEAK_KEY_NUM] = {0x0101010101010101, 0xFEFEFEFEFEFEFEFE,
0xE0E0E0E0F1F1F1F1, 0x1F1F1F1F0E0E0E0E};
-static void sec_update_iv(struct hisi_sec_req *req, u8 *iv)
-{
- // todo: update iv by cbc/ctr mode
-}
-
static void hisi_sec_req_cb(struct hisi_qp *qp, void *);
static int hisi_sec_alloc_req_id(struct hisi_sec_req *req,
@@ -324,19 +322,16 @@ static enum hrtimer_restart hrtimer_handler(struct hrtimer *timer)
return HRTIMER_RESTART;
}
-static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
- int qp_ctx_id, int alg_type, int req_type)
+static int hisi_sec_create_qp_ctx(struct hisi_sec_ctx *ctx,
+ int qp_ctx_id, int req_type)
{
- struct hisi_qp *qp;
struct hisi_sec_qp_ctx *qp_ctx;
struct device *dev = ctx->dev;
+ struct hisi_qp *qp;
int ret;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
-
qp_ctx = &ctx->qp_ctx[qp_ctx_id];
+ qp = ctx->qps[qp_ctx_id];
qp->req_type = req_type;
qp->qp_ctx = qp_ctx;
#ifdef SEC_ASYNC
@@ -353,10 +348,8 @@ static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
qp_ctx->req_bitmap = kcalloc(BITS_TO_LONGS(QM_Q_DEPTH), sizeof(long),
GFP_ATOMIC);
- if (!qp_ctx->req_bitmap) {
- ret = -ENOMEM;
- goto err_qm_release_qp;
- }
+ if (!qp_ctx->req_bitmap)
+ return -ENOMEM;
qp_ctx->req_list = kcalloc(QM_Q_DEPTH, sizeof(void *), GFP_ATOMIC);
if (!qp_ctx->req_list) {
@@ -407,8 +400,7 @@ static int hisi_sec_create_qp_ctx(struct hisi_qm *qm, struct hisi_sec_ctx *ctx,
kfree(qp_ctx->req_list);
err_free_req_bitmap:
kfree(qp_ctx->req_bitmap);
-err_qm_release_qp:
- hisi_qm_release_qp(qp);
+
return ret;
}
@@ -424,7 +416,6 @@ static void hisi_sec_release_qp_ctx(struct hisi_sec_ctx *ctx,
kfree(qp_ctx->req_bitmap);
kfree(qp_ctx->req_list);
kfree(qp_ctx->sqe_list);
- hisi_qm_release_qp(qp_ctx->qp);
}
static int __hisi_sec_ctx_init(struct hisi_sec_ctx *ctx, int qlen)
@@ -465,22 +456,22 @@ static void hisi_sec_get_fusion_param(struct hisi_sec_ctx *ctx,
static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
{
struct hisi_sec_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct hisi_qm *qm;
struct hisi_sec_cipher_ctx *c_ctx;
struct hisi_sec *sec;
int i, ret;
crypto_skcipher_set_reqsize(tfm, sizeof(struct hisi_sec_req));
- sec = find_sec_device(cpu_to_node(smp_processor_id()));
- if (!sec) {
- pr_err("failed to find a proper sec device!\n");
+ ctx->qps = sec_create_qps();
+ if (!ctx->qps) {
+ pr_err("Can not create sec qps!\n");
return -ENODEV;
}
+
+ sec = container_of(ctx->qps[0]->qm, struct hisi_sec, qm);
ctx->sec = sec;
- qm = &sec->qm;
- ctx->dev = &qm->pdev->dev;
+ ctx->dev = &sec->qm.pdev->dev;
ctx->q_num = sec->ctx_q_num;
@@ -495,7 +486,7 @@ static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
hisi_sec_get_fusion_param(ctx, sec);
for (i = 0; i < ctx->q_num; i++) {
- ret = hisi_sec_create_qp_ctx(qm, ctx, i, 0, 0);
+ ret = hisi_sec_create_qp_ctx(ctx, i, 0);
if (ret)
goto err_sec_release_qp_ctx;
}
@@ -515,6 +506,7 @@ static int hisi_sec_cipher_ctx_init(struct crypto_skcipher *tfm)
for (i = i - 1; i >= 0; i--)
hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_destroy_qps(ctx->qps, sec->ctx_q_num);
kfree(ctx->qp_ctx);
return ret;
}
@@ -540,11 +532,8 @@ static void hisi_sec_cipher_ctx_exit(struct crypto_skcipher *tfm)
for (i = 0; i < ctx->q_num; i++)
hisi_sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
+ sec_destroy_qps(ctx->qps, ctx->q_num);
kfree(ctx->qp_ctx);
-
- mutex_lock(ctx->sec->hisi_sec_list_lock);
- ctx->sec->q_ref -= ctx->sec->ctx_q_num;
- mutex_unlock(ctx->sec->hisi_sec_list_lock);
}
static int hisi_sec_skcipher_get_res(struct hisi_sec_ctx *ctx,
@@ -658,8 +647,6 @@ static void hisi_sec_req_cb(struct hisi_qp *qp, void *resp)
dfx = &req->ctx->sec->sec_dfx;
- sec_update_iv(req, req->c_req.sk_req->iv);
-
req->ctx->req_op->buf_unmap(req->ctx, req);
req->ctx->req_op->callback(req->ctx, req);
@@ -1497,20 +1484,28 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
int hisi_sec_register_to_crypto(int fusion_limit)
{
- if (fusion_limit == 1)
- return crypto_register_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- return crypto_register_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
+ /* avoid repeated registration */
+ if (atomic_add_return(1, &sec_active_devs) == 1) {
+ if (fusion_limit == 1)
+ return crypto_register_skciphers(sec_normal_algs,
+ ARRAY_SIZE(sec_normal_algs));
+ else
+ return crypto_register_skciphers(sec_fusion_algs,
+ ARRAY_SIZE(sec_fusion_algs));
+ }
+
+ return 0;
}
void hisi_sec_unregister_from_crypto(int fusion_limit)
{
- if (fusion_limit == 1)
- crypto_unregister_skciphers(sec_normal_algs,
- ARRAY_SIZE(sec_normal_algs));
- else
- crypto_unregister_skciphers(sec_fusion_algs,
- ARRAY_SIZE(sec_fusion_algs));
+ if (atomic_sub_return(1, &sec_active_devs) == 0) {
+ if (fusion_limit == 1)
+ crypto_unregister_skciphers(sec_normal_algs,
+ ARRAY_SIZE(sec_normal_algs));
+ else
+ crypto_unregister_skciphers(sec_fusion_algs,
+ ARRAY_SIZE(sec_fusion_algs));
+ }
}
+
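The sec_active_devs counter above makes registration first-device-in / last-device-out, so probing a second SEC device no longer double-registers the skciphers. The same guard, demonstrated with C11 atomics in a standalone program (atomic_fetch_add() returns the prior value, so the +1 mirrors the kernel's atomic_add_return()):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int active_devs;

static void register_algs(void)   { puts("register algs"); }
static void unregister_algs(void) { puts("unregister algs"); }

static void dev_up(void)
{
        /* only the first device registers */
        if (atomic_fetch_add(&active_devs, 1) + 1 == 1)
                register_algs();
}

static void dev_down(void)
{
        /* only the last device unregisters */
        if (atomic_fetch_sub(&active_devs, 1) - 1 == 0)
                unregister_algs();
}

int main(void)
{
        dev_up();    /* registers   */
        dev_up();    /* no-op       */
        dev_down();  /* no-op       */
        dev_down();  /* unregisters */
        return 0;
}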
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index ba5c478..b4e5d57f 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -23,21 +23,24 @@
#include "sec.h"
#include "sec_crypto.h"
-#define SEC_VF_NUM 63
#define SEC_QUEUE_NUM_V1 4096
#define SEC_QUEUE_NUM_V2 1024
-#define SEC_PCI_DEVICE_ID_PF 0xa255
-#define SEC_PCI_DEVICE_ID_VF 0xa256
+#define SEC_PF_PCI_DEVICE_ID 0xa255
+#define SEC_VF_PCI_DEVICE_ID 0xa256
-#define SEC_COMMON_REG_OFF 0x1000
+#define SEC_SQE_SIZE 128
+#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
+#define SEC_PF_DEF_Q_NUM 64
+#define SEC_PF_DEF_Q_BASE 0
+#define SEC_CTX_Q_NUM_DEF 24
+#define SEC_CTX_Q_NUM_MAX 32
-#define SEC_MASTER_GLOBAL_CTRL 0x300000
-#define SEC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
-#define SEC_MASTER_TRANS_RETURN 0x300150
-#define SEC_MASTER_TRANS_RETURN_RW 0x3
#define SEC_AM_CFG_SIG_PORT_MAX_TRANS 0x300014
#define SEC_SINGLE_PORT_MAX_TRANS 0x2060
-
+#define SEC_CTRL_CNT_CLR_CE 0x301120
+#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
+#define SEC_ENGINE_PF_CFG_OFF 0x300000
+#define SEC_ACC_COMMON_REG_OFF 0x1000
#define SEC_CORE_INT_SOURCE 0x301010
#define SEC_CORE_INT_MASK 0x301000
#define SEC_CORE_INT_STATUS 0x301008
@@ -45,41 +48,17 @@
#define SEC_CORE_ECC_INFO 0x301C14
#define SEC_ECC_NUM(err_val) (((err_val) >> 16) & 0xFFFF)
#define SEC_ECC_ADDR(err_val) ((err_val) & 0xFFFF)
-
#define SEC_CORE_INT_DISABLE 0x0
#define SEC_CORE_INT_ENABLE 0x1ff
-#define SEC_HW_ERROR_IRQ_ENABLE 1
-#define SEC_HW_ERROR_IRQ_DISABLE 0
-
-#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
-#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD
-#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF
-#define SEC_BD_ERR_CHK_EN_REG0 0x0380
-#define SEC_BD_ERR_CHK_EN_REG1 0x0384
-#define SEC_BD_ERR_CHK_EN_REG3 0x038c
-
-#define SEC_SQE_SIZE 128
-#define SEC_SQ_SIZE (SEC_SQE_SIZE * QM_Q_DEPTH)
-#define SEC_PF_DEF_Q_NUM 64
-#define SEC_PF_DEF_Q_BASE 0
-
-#define SEC_CTRL_CNT_CLR_CE 0x301120
-#define SEC_CTRL_CNT_CLR_CE_BIT BIT(0)
-
-#define SEC_ENGINE_PF_CFG_OFF 0x300000
-#define SEC_ACC_COMMON_REG_OFF 0x1000
+#define SEC_CORE_INT_CLEAR 0x1ff
-#define SEC_RAS_CE_REG 0x50
-#define SEC_RAS_FE_REG 0x54
-#define SEC_RAS_NFE_REG 0x58
+#define SEC_RAS_CE_REG 0x301050
+#define SEC_RAS_FE_REG 0x301054
+#define SEC_RAS_NFE_REG 0x301058
#define SEC_RAS_CE_ENB_MSK 0x88
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_RAS_NFE_ENB_MSK 0x177
#define SEC_RAS_DISABLE 0x0
-
-#define SEC_SAA_EN_REG 0x270
-#define SEC_SAA_EN 0x17F
-
#define SEC_MEM_START_INIT_REG 0x0100
#define SEC_MEM_INIT_DONE_REG 0x0104
@@ -88,114 +67,39 @@
#define SEC_CLK_GATE_DISABLE (~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF
-#define SEC_WR_MSI_PORT 0xFFFE
+#define SEC_WR_MSI_PORT BIT(0)
#define SEC_INTERFACE_USER_CTRL0_REG 0x0220
#define SEC_INTERFACE_USER_CTRL1_REG 0x0224
+#define SEC_SAA_EN_REG 0x270
+#define SEC_SAA_EN 0x17F
+#define SEC_BD_ERR_CHK_EN_REG0 0x0380
+#define SEC_BD_ERR_CHK_EN_REG1 0x0384
+#define SEC_BD_ERR_CHK_EN_REG3 0x038c
+#define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF
+#define SEC_BD_ERR_CHK_EN1 0x7FFFF7FD
+#define SEC_BD_ERR_CHK_EN3 0xFFFFBFFF
#define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
-#define SEC_WAIT_DELAY 1000
-
#define SEC_DBGFS_VAL_MAX_LEN 20
-#define SEC_CHAIN_ABN_LEN 128UL
-#define SEC_ENABLE 1
-#define SEC_DISABLE 0
-#define SEC_RESET_WAIT_TIMEOUT 400
-#define SEC_PCI_COMMAND_INVALID 0xFFFFFFFF
-
-#define FORMAT_DECIMAL 10
-#define FROZEN_RANGE_MIN 10
-#define FROZEN_RANGE_MAX 20
-
-static const char sec_name[] = "hisi_sec2";
-static struct dentry *sec_debugfs_root;
-static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
-static struct workqueue_struct *sec_wq;
-
-LIST_HEAD(hisi_sec_list);
-DEFINE_MUTEX(hisi_sec_list_lock);
-
-struct hisi_sec_resource {
- struct hisi_sec *sec;
- int distance;
- struct list_head list;
-};
-
-static void free_list(struct list_head *head)
-{
- struct hisi_sec_resource *res, *tmp;
-
- list_for_each_entry_safe(res, tmp, head, list) {
- list_del(&res->list);
- kfree(res);
- }
-}
-
-struct hisi_sec *find_sec_device(int node)
-{
- struct hisi_sec *ret = NULL;
-#ifdef CONFIG_NUMA
- struct hisi_sec_resource *res, *tmp;
- struct hisi_sec *hisi_sec;
- struct list_head *n;
- struct device *dev;
- LIST_HEAD(head);
-
- mutex_lock(&hisi_sec_list_lock);
-
- list_for_each_entry(hisi_sec, &hisi_sec_list, list) {
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err;
-
- dev = &hisi_sec->qm.pdev->dev;
- res->sec = hisi_sec;
- res->distance = node_distance(dev->numa_node, node);
-
- n = &head;
- list_for_each_entry(tmp, &head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
- }
-
- list_for_each_entry(tmp, &head, list) {
- if (tmp->sec->q_ref + tmp->sec->ctx_q_num <= pf_q_num) {
- tmp->sec->q_ref += tmp->sec->ctx_q_num;
- ret = tmp->sec;
- break;
- }
- }
-
- free_list(&head);
-#else
- mutex_lock(&hisi_sec_list_lock);
-
- ret = list_first_entry(&hisi_sec_list, struct hisi_sec, list);
-#endif
- mutex_unlock(&hisi_sec_list_lock);
-
- return ret;
-
-err:
- free_list(&head);
- mutex_unlock(&hisi_sec_list_lock);
- return NULL;
-}
+#define SEC_ADDR(qm, offset) ((qm)->io_base + (offset) + \
+ SEC_ENGINE_PF_CFG_OFF + SEC_ACC_COMMON_REG_OFF)
struct hisi_sec_hw_error {
u32 int_msk;
const char *msg;
};
+static const char sec_name[] = "hisi_sec2";
+static struct dentry *sec_debugfs_root;
+static struct hisi_qm_list sec_devices;
+static struct workqueue_struct *sec_wq;
+
static const struct hisi_sec_hw_error sec_hw_error[] = {
{.int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint"},
{.int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint"},
@@ -233,9 +137,7 @@ struct ctrl_debug_file {
* Just relevant for PF.
*/
struct hisi_sec_ctrl {
- u32 num_vfs;
struct hisi_sec *hisi_sec;
- struct dentry *debug_root;
struct ctrl_debug_file files[SEC_DEBUG_FILE_NUM];
};
@@ -263,94 +165,104 @@ struct hisi_sec_ctrl {
{"SEC_BD_SAA8 ", 0x301C40},
};
-static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- SEC_PCI_DEVICE_ID_PF, NULL);
- u32 n, q_num;
- u8 rev_id;
+ u32 ctx_q_num;
int ret;
if (!val)
return -EINVAL;
- if (unlikely(!pdev)) {
- q_num = min_t(u32, SEC_QUEUE_NUM_V1, SEC_QUEUE_NUM_V2);
- pr_info
- ("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = SEC_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = SEC_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
+ ret = kstrtou32(val, 10, &ctx_q_num);
+ if (ret)
+ return -EINVAL;
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n > q_num)
+ if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
+ pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
return -EINVAL;
+ }
return param_set_int(val, kp);
}
-static const struct kernel_param_ops pf_q_num_ops = {
- .set = pf_q_num_set,
+static const struct kernel_param_ops sec_ctx_q_num_ops = {
+ .set = sec_ctx_q_num_set,
.get = param_get_int,
};
+static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
+module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
+MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (24 default, 2, 4, ..., 32)");
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
+void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
+{
+ hisi_qm_free_qps(qps, qp_num);
+ kfree(qps);
+}
+
+struct hisi_qp **sec_create_qps(void)
{
- u32 n;
+ int node = cpu_to_node(smp_processor_id());
+ u32 ctx_num = ctx_q_num;
+ struct hisi_qp **qps;
int ret;
- if (!val)
- return -EINVAL;
+ qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
+ if (!qps)
+ return NULL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+ ret = hisi_qm_alloc_qps_node(node, &sec_devices, qps, ctx_num, 0);
+ if (!ret)
+ return qps;
- return param_set_int(val, kp);
+ kfree(qps);
+ return NULL;
+}
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
+{
+ return mode_set(val, kp);
}
-static const struct kernel_param_ops uacce_mode_ops = {
+static const struct kernel_param_ops sec_uacce_mode_ops = {
.set = uacce_mode_set,
.get = param_get_int,
};
-static int ctx_q_num_set(const char *val, const struct kernel_param *kp)
-{
- u32 ctx_q_num;
- int ret;
+static u32 uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+#endif
- if (!val)
- return -EINVAL;
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
+}
- ret = kstrtou32(val, FORMAT_DECIMAL, &ctx_q_num);
- if (ret)
- return -EINVAL;
+static const struct kernel_param_ops sec_pf_q_num_ops = {
+ .set = pf_q_num_set,
+ .get = param_get_int,
+};
- if (ctx_q_num == 0 || ctx_q_num > QM_Q_DEPTH || ctx_q_num % 2 == 1) {
- pr_err("ctx_q_num[%u] is invalid\n", ctx_q_num);
- return -EINVAL;
- }
+static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
- return param_set_int(val, kp);
+static int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ return vf_num_set(val, kp);
}
-static const struct kernel_param_ops ctx_q_num_ops = {
- .set = ctx_q_num_set,
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
.get = param_get_int,
};
-static int fusion_limit_set(const char *val, const struct kernel_param *kp)
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
+
+static int sec_fusion_limit_set(const char *val, const struct kernel_param *kp)
{
u32 fusion_limit;
int ret;
@@ -358,11 +270,11 @@ static int fusion_limit_set(const char *val, const struct kernel_param *kp)
if (!val)
return -EINVAL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &fusion_limit);
+ ret = kstrtou32(val, 10, &fusion_limit);
if (ret)
return ret;
- if (fusion_limit == 0 || fusion_limit > FUSION_LIMIT_MAX) {
+ if (!fusion_limit || fusion_limit > FUSION_LIMIT_MAX) {
pr_err("fusion_limit[%u] is't at range(0, %d)", fusion_limit,
FUSION_LIMIT_MAX);
return -EINVAL;
@@ -371,12 +283,17 @@ static int fusion_limit_set(const char *val, const struct kernel_param *kp)
return param_set_int(val, kp);
}
-static const struct kernel_param_ops fusion_limit_ops = {
- .set = fusion_limit_set,
+static const struct kernel_param_ops sec_fusion_limit_ops = {
+ .set = sec_fusion_limit_set,
.get = param_get_int,
};
+static u32 fusion_limit = FUSION_LIMIT_DEF;
-static int fusion_tmout_nsec_set(const char *val, const struct kernel_param *kp)
+module_param_cb(fusion_limit, &sec_fusion_limit_ops, &fusion_limit, 0444);
+MODULE_PARM_DESC(fusion_limit, "(1, acc_sgl_sge_nr of hisilicon QM)");
+
+static int sec_fusion_tmout_ns_set(const char *val,
+ const struct kernel_param *kp)
{
u32 fusion_tmout_nsec;
int ret;
@@ -384,7 +301,7 @@ static int fusion_tmout_nsec_set(const char *val, const struct kernel_param *kp)
if (!val)
return -EINVAL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &fusion_tmout_nsec);
+ ret = kstrtou32(val, 10, &fusion_tmout_nsec);
if (ret)
return ret;
@@ -396,53 +313,22 @@ static int fusion_tmout_nsec_set(const char *val, const struct kernel_param *kp)
return param_set_int(val, kp);
}
-static const struct kernel_param_ops fusion_tmout_nsec_ops = {
- .set = fusion_tmout_nsec_set,
+static const struct kernel_param_ops sec_fusion_time_ops = {
+ .set = sec_fusion_tmout_ns_set,
.get = param_get_int,
};
-
-module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 0-4096, v2 0-1024)");
-
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
-
-static int ctx_q_num = CTX_Q_NUM_DEF;
-module_param_cb(ctx_q_num, &ctx_q_num_ops, &ctx_q_num, 0444);
-MODULE_PARM_DESC(ctx_q_num, "Number of queue in ctx (2, 4, 6, ..., 1024)");
-
-static int fusion_limit = FUSION_LIMIT_DEF;
-module_param_cb(fusion_limit, &fusion_limit_ops, &fusion_limit, 0444);
-MODULE_PARM_DESC(fusion_limit, "(1, acc_sgl_sge_nr)");
-
-static int fusion_tmout_nsec = FUSION_TMOUT_NSEC_DEF;
-module_param_cb(fusion_tmout_nsec, &fusion_tmout_nsec_ops, &fusion_tmout_nsec,
- 0444);
-MODULE_PARM_DESC(fusion_tmout_nsec, "(0, NSEC_PER_SEC)");
+static u32 fusion_time = FUSION_TMOUT_NSEC_DEF; /* ns */
+module_param_cb(fusion_time, &sec_fusion_time_ops, &fusion_time, 0444);
+MODULE_PARM_DESC(fusion_time, "(0, NSEC_PER_SEC)");
static const struct pci_device_id hisi_sec_dev_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PCI_DEVICE_ID_PF) },
- { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PCI_DEVICE_ID_VF) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, hisi_sec_dev_ids);
-static inline void hisi_sec_add_to_list(struct hisi_sec *hisi_sec)
-{
- mutex_lock(&hisi_sec_list_lock);
- list_add_tail(&hisi_sec->list, &hisi_sec_list);
- mutex_unlock(&hisi_sec_list_lock);
-}
-
-static inline void hisi_sec_remove_from_list(struct hisi_sec *hisi_sec)
-{
- mutex_lock(&hisi_sec_list_lock);
- list_del(&hisi_sec->list);
- mutex_unlock(&hisi_sec_list_lock);
-}
-
-u8 sec_get_endian(struct hisi_sec *hisi_sec)
+static u8 sec_get_endian(struct hisi_qm *qm)
{
u32 reg;
@@ -450,83 +336,83 @@ u8 sec_get_endian(struct hisi_sec *hisi_sec)
* As for VF, it is a wrong way to get endian setting by
* reading a register of the engine
*/
- if (hisi_sec->qm.pdev->is_virtfn) {
- dev_err_ratelimited(&hisi_sec->qm.pdev->dev,
- "error! shouldn't access a register in VF\n");
+ if (qm->pdev->is_virtfn) {
+ dev_err_ratelimited(&qm->pdev->dev,
+ "cannot access a register in VF!\n");
return SEC_LE;
}
- reg = readl_relaxed(hisi_sec->qm.io_base + SEC_ENGINE_PF_CFG_OFF +
+ reg = readl_relaxed(qm->io_base + SEC_ENGINE_PF_CFG_OFF +
SEC_ACC_COMMON_REG_OFF + SEC_CONTROL_REG);
+
/* BD little endian mode */
if (!(reg & BIT(0)))
return SEC_LE;
+
/* BD 32-bits big endian mode */
else if (!(reg & BIT(1)))
return SEC_32BE;
+
/* BD 64-bits big endian mode */
else
return SEC_64BE;
}
-static int sec_engine_init(struct hisi_sec *hisi_sec)
+static int sec_engine_init(struct hisi_qm *qm)
{
int ret;
u32 reg;
- struct hisi_qm *qm = &hisi_sec->qm;
- void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF;
-
- /* config sec single port max outstanding */
- writel(SEC_SINGLE_PORT_MAX_TRANS,
- qm->io_base + SEC_AM_CFG_SIG_PORT_MAX_TRANS);
-
- /* config sec saa enable */
- writel(SEC_SAA_EN, base + SEC_SAA_EN_REG);
/* disable clock gate control */
- reg = readl_relaxed(base + SEC_CONTROL_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
reg &= SEC_CLK_GATE_DISABLE;
- writel(reg, base + SEC_CONTROL_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
- writel(0x1, base + SEC_MEM_START_INIT_REG);
- ret = readl_relaxed_poll_timeout(base +
- SEC_MEM_INIT_DONE_REG, reg, reg & 0x1,
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ writel_relaxed(0x1, SEC_ADDR(qm, SEC_MEM_START_INIT_REG));
+
+ ret = readl_relaxed_poll_timeout(SEC_ADDR(qm, SEC_MEM_INIT_DONE_REG),
+ reg, reg & 0x1, SEC_DELAY_10_US,
+ SEC_POLL_TIMEOUT_US);
if (ret) {
- dev_err(&qm->pdev->dev, "fail to init sec mem\n");
+ pci_err(qm->pdev, "fail to init sec mem\n");
return ret;
}
- reg = readl_relaxed(base + SEC_CONTROL_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
reg |= (0x1 << SEC_TRNG_EN_SHIFT);
- writel(reg, base + SEC_CONTROL_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
- reg = readl_relaxed(base + SEC_INTERFACE_USER_CTRL0_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
reg |= SEC_USER0_SMMU_NORMAL;
- writel(reg, base + SEC_INTERFACE_USER_CTRL0_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL0_REG));
- reg = readl_relaxed(base + SEC_INTERFACE_USER_CTRL1_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
reg |= SEC_USER1_SMMU_NORMAL;
- writel(reg, base + SEC_INTERFACE_USER_CTRL1_REG);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_INTERFACE_USER_CTRL1_REG));
+
+ writel(SEC_SINGLE_PORT_MAX_TRANS,
+ qm->io_base + SEC_AM_CFG_SIG_PORT_MAX_TRANS);
+
+ writel(SEC_SAA_EN, SEC_ADDR(qm, SEC_SAA_EN_REG));
/* Enable sm4 extra mode, as ctr/ecb */
- writel(SEC_BD_ERR_CHK_EN0, base + SEC_BD_ERR_CHK_EN_REG0);
+ writel_relaxed(SEC_BD_ERR_CHK_EN0,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG0));
/* Enable sm4 xts mode multiple iv */
- writel(SEC_BD_ERR_CHK_EN1, base + SEC_BD_ERR_CHK_EN_REG1);
- writel(SEC_BD_ERR_CHK_EN3, base + SEC_BD_ERR_CHK_EN_REG3);
+ writel_relaxed(SEC_BD_ERR_CHK_EN1,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG1));
+ writel_relaxed(SEC_BD_ERR_CHK_EN3,
+ SEC_ADDR(qm, SEC_BD_ERR_CHK_EN_REG3));
/* config endian */
- reg = readl_relaxed(base + SEC_CONTROL_REG);
- reg |= sec_get_endian(hisi_sec);
- writel(reg, base + SEC_CONTROL_REG);
+ reg = readl_relaxed(SEC_ADDR(qm, SEC_CONTROL_REG));
+ reg |= sec_get_endian(qm);
+ writel_relaxed(reg, SEC_ADDR(qm, SEC_CONTROL_REG));
return 0;
}
-static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec)
+static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
-
/* qm user domain */
writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
@@ -540,22 +426,18 @@ static void hisi_sec_set_user_domain_and_cache(struct hisi_sec *hisi_sec)
/* disable FLR triggered by BME(bus master enable) */
writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
- writel(PEH_AXUSER_CFG_ENABLE, qm->io_base +
- QM_PEH_AXUSER_CFG_ENABLE);
+ writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
/* enable sqc,cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);
- if (sec_engine_init(hisi_sec))
- dev_err(&qm->pdev->dev, "sec_engine_init failed");
+ return sec_engine_init(qm);
}
-static void hisi_sec_debug_regs_clear(struct hisi_sec *hisi_sec)
+static void sec_debug_regs_clear(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
-
/* clear current_qm */
writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
@@ -566,50 +448,53 @@ static void hisi_sec_debug_regs_clear(struct hisi_sec *hisi_sec)
hisi_qm_debug_regs_clear(qm);
}
-static void hisi_sec_hw_error_set_state(struct hisi_sec *hisi_sec, bool state)
+static void sec_hw_error_enable(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
- void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF;
u32 val;
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
- dev_info(&qm->pdev->dev, "v%d don't support hw error handle\n",
- qm->ver);
+ pci_info(qm->pdev, "V1 not support hw error handle\n");
return;
}
- val = readl(base + SEC_CONTROL_REG);
- if (state) {
- /* clear SEC hw error source if having */
- writel(SEC_CORE_INT_ENABLE,
- hisi_sec->qm.io_base + SEC_CORE_INT_SOURCE);
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
- /* enable SEC hw error interrupts */
- writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
+ /* clear SEC hw error source if any */
+ writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);
- /* enable RAS int */
- writel(SEC_RAS_CE_ENB_MSK, base + SEC_RAS_CE_REG);
- writel(SEC_RAS_FE_ENB_MSK, base + SEC_RAS_FE_REG);
- writel(SEC_RAS_NFE_ENB_MSK, base + SEC_RAS_NFE_REG);
+ /* enable SEC hw error interrupts */
+ writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
- /* enable SEC block master OOO when m-bit error occur */
- val = val | SEC_AXI_SHUTDOWN_ENABLE;
- } else {
- /* disable RAS int */
- writel(SEC_RAS_DISABLE, base + SEC_RAS_CE_REG);
- writel(SEC_RAS_DISABLE, base + SEC_RAS_FE_REG);
- writel(SEC_RAS_DISABLE, base + SEC_RAS_NFE_REG);
+ /* enable RAS int */
+ writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);
- /* disable SEC hw error interrupts */
- writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+ /* enable SEC block master OOO when m-bit error occur */
+ val = val | SEC_AXI_SHUTDOWN_ENABLE;
- /* disable SEC block master OOO when m-bit error occur */
- val = val & SEC_AXI_SHUTDOWN_DISABLE;
- }
+ writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
+}
- writel(val, base + SEC_CONTROL_REG);
+static void sec_hw_error_disable(struct hisi_qm *qm)
+{
+ u32 val;
+
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+
+ /* disable RAS int */
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
+ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
+
+ /* disable SEC hw error interrupts */
+ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
+
+ /* disable SEC block master OOO when m-bit error occur */
+ val = val & SEC_AXI_SHUTDOWN_DISABLE;
+
+ writel(val, SEC_ADDR(qm, SEC_CONTROL_REG));
}
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
@@ -629,21 +514,21 @@ static u32 current_qm_read(struct ctrl_debug_file *file)
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
struct hisi_qm *qm = file_to_qm(file);
- struct hisi_sec_ctrl *ctrl = file->ctrl;
- u32 tmp, vfq_num;
+ u32 vfq_num;
+ u32 tmp;
- if (val > ctrl->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* Calculate curr_qm_qp_num according to the PF or VF device ID, then store it */
if (val == 0) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_q_num - qm->qp_num) / ctrl->num_vfs;
- if (val == ctrl->num_vfs) {
+ vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num;
+ if (val == qm->vfs_num) {
qm->debug.curr_qm_qp_num =
qm->ctrl_q_num - qm->qp_num -
- (ctrl->num_vfs - 1) * vfq_num;
+ (qm->vfs_num - 1) * vfq_num;
} else {
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -668,7 +553,7 @@ static u32 clear_enable_read(struct ctrl_debug_file *file)
struct hisi_qm *qm = file_to_qm(file);
return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
- SEC_CTRL_CNT_CLR_CE_BIT;
+ SEC_CTRL_CNT_CLR_CE_BIT;
}
static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
@@ -676,11 +561,11 @@ static int clear_enable_write(struct ctrl_debug_file *file, u32 val)
struct hisi_qm *qm = file_to_qm(file);
u32 tmp;
- if (val != 1 && val != 0)
+ if (val != 1 && val)
return -EINVAL;
tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
- ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
+ ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);
return 0;
@@ -695,6 +580,7 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
int ret;
spin_lock_irq(&file->lock);
+
switch (file->index) {
case SEC_CURRENT_QM:
val = current_qm_read(file);
@@ -706,8 +592,10 @@ static ssize_t ctrl_debug_read(struct file *filp, char __user *buf,
spin_unlock_irq(&file->lock);
return -EINVAL;
}
+
spin_unlock_irq(&file->lock);
ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
+
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
@@ -726,7 +614,7 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
return -ENOSPC;
len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
- pos, buf, count);
+ pos, buf, count);
if (len < 0)
return len;
@@ -735,6 +623,7 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
return -EFAULT;
spin_lock_irq(&file->lock);
+
switch (file->index) {
case SEC_CURRENT_QM:
ret = current_qm_write(file, val);
@@ -750,6 +639,7 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
ret = -EINVAL;
goto err_input;
}
+
spin_unlock_irq(&file->lock);
return count;
@@ -766,12 +656,11 @@ static ssize_t ctrl_debug_write(struct file *filp, const char __user *buf,
.write = ctrl_debug_write,
};
-static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
+static int hisi_sec_core_debug_init(struct hisi_qm *qm)
{
- struct hisi_sec *hisi_sec = ctrl->hisi_sec;
- struct hisi_qm *qm = &hisi_sec->qm;
+ struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
struct device *dev = &qm->pdev->dev;
- struct hisi_sec_dfx *dfx = &hisi_sec->sec_dfx;
+ struct hisi_sec_dfx *dfx = &sec->sec_dfx;
struct debugfs_regset32 *regset;
struct dentry *tmp_d, *tmp;
char buf[SEC_DBGFS_VAL_MAX_LEN];
@@ -781,7 +670,7 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
if (ret < 0)
return -ENOENT;
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -847,29 +736,30 @@ static int hisi_sec_core_debug_init(struct hisi_sec_ctrl *ctrl)
return 0;
}
-static int hisi_sec_ctrl_debug_init(struct hisi_sec_ctrl *ctrl)
+static int hisi_sec_ctrl_debug_init(struct hisi_qm *qm)
{
+ struct hisi_sec *sec = container_of(qm, struct hisi_sec, qm);
struct dentry *tmp;
int i;
for (i = SEC_CURRENT_QM; i < SEC_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&ctrl->files[i].lock);
- ctrl->files[i].ctrl = ctrl;
- ctrl->files[i].index = i;
+ spin_lock_init(&sec->ctrl->files[i].lock);
+ sec->ctrl->files[i].ctrl = sec->ctrl;
+ sec->ctrl->files[i].index = i;
tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
+ qm->debug.debug_root,
+ sec->ctrl->files + i,
&ctrl_debug_fops);
if (!tmp)
return -ENOENT;
}
- return hisi_sec_core_debug_init(ctrl);
+ return hisi_sec_core_debug_init(qm);
}
-static int hisi_sec_debugfs_init(struct hisi_sec *hisi_sec)
+static int hisi_sec_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
struct device *dev = &qm->pdev->dev;
struct dentry *dev_d;
int ret;
@@ -883,9 +773,8 @@ static int hisi_sec_debugfs_init(struct hisi_sec *hisi_sec)
if (ret)
goto failed_to_create;
- if (qm->pdev->device == SEC_PCI_DEVICE_ID_PF) {
- hisi_sec->ctrl->debug_root = dev_d;
- ret = hisi_sec_ctrl_debug_init(hisi_sec->ctrl);
+ if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
+ ret = hisi_sec_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
@@ -897,71 +786,62 @@ static int hisi_sec_debugfs_init(struct hisi_sec *hisi_sec)
return ret;
}
-static void hisi_sec_debugfs_exit(struct hisi_sec *hisi_sec)
+static void hisi_sec_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
+
if (qm->fun_type == QM_HW_PF) {
- hisi_sec_debug_regs_clear(hisi_sec);
+ sec_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
}
-static void hisi_sec_hw_error_init(struct hisi_sec *hisi_sec)
+static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
- hisi_qm_hw_error_init(&hisi_sec->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT
- | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
- QM_DB_RANDOM_INVALID);
- hisi_sec_hw_error_set_state(hisi_sec, true);
-}
+ const struct hisi_sec_hw_error *errs = sec_hw_error;
+ struct device *dev = &qm->pdev->dev;
+ u32 err_val;
-static void hisi_sec_open_master_ooo(struct hisi_qm *qm)
-{
- u32 val;
- void *base = qm->io_base + SEC_ENGINE_PF_CFG_OFF +
- SEC_ACC_COMMON_REG_OFF;
+ while (errs->msg) {
+ if (errs->int_msk & err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ errs->msg, errs->int_msk);
- val = readl(base + SEC_CONTROL_REG);
- writel(val & SEC_AXI_SHUTDOWN_DISABLE, base + SEC_CONTROL_REG);
- writel(val | SEC_AXI_SHUTDOWN_ENABLE, base + SEC_CONTROL_REG);
+ if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
+ err_val = readl(qm->io_base +
+ SEC_CORE_ECC_INFO);
+ dev_err(dev, "multi ecc sram num=0x%x\n",
+ SEC_ECC_NUM(err_val));
+ }
+ }
+ errs++;
+ }
}
-static u32 hisi_sec_get_hw_err_status(struct hisi_qm *qm)
+static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
return readl(qm->io_base + SEC_CORE_INT_STATUS);
}
-static void hisi_sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}
-static void hisi_sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
+static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
- const struct hisi_sec_hw_error *err = sec_hw_error;
- struct device *dev = &qm->pdev->dev;
- u32 err_val;
-
- while (err->msg) {
- if (err->int_msk & err_sts)
- dev_err(dev, "%s [error status=0x%x] found\n",
- err->msg, err->int_msk);
- err++;
- }
+ u32 val;
- if (SEC_CORE_INT_STATUS_M_ECC & err_sts) {
- err_val = readl(qm->io_base + SEC_CORE_ECC_INFO);
- dev_err(dev, "hisi-sec multi ecc sram num=0x%x\n",
- SEC_ECC_NUM(err_val));
- }
+ val = readl(SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val & SEC_AXI_SHUTDOWN_DISABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
+ writel(val | SEC_AXI_SHUTDOWN_ENABLE, SEC_ADDR(qm, SEC_CONTROL_REG));
}
-static int hisi_sec_pf_probe_init(struct hisi_sec *hisi_sec)
+static int hisi_sec_pf_probe_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_sec->qm;
+ struct hisi_sec *hisi_sec = container_of(qm, struct hisi_sec, qm);
struct hisi_sec_ctrl *ctrl;
+ int ret;
ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -983,59 +863,57 @@ static int hisi_sec_pf_probe_init(struct hisi_sec *hisi_sec)
return -EINVAL;
}
- qm->err_ini.qm_wr_port = SEC_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- qm->err_ini.open_axi_master_ooo = hisi_sec_open_master_ooo;
- qm->err_ini.get_dev_hw_err_status = hisi_sec_get_hw_err_status;
- qm->err_ini.clear_dev_hw_err_status = hisi_sec_clear_hw_err_status;
- qm->err_ini.log_dev_hw_err = hisi_sec_log_hw_error;
- hisi_sec_set_user_domain_and_cache(hisi_sec);
- hisi_sec_hw_error_init(hisi_sec);
+ qm->err_ini.get_dev_hw_err_status = sec_get_hw_err_status;
+ qm->err_ini.clear_dev_hw_err_status = sec_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
+ QM_ACC_WB_NOT_READY_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
+ qm->err_ini.err_info.acpi_rst = "SRST";
+ qm->err_ini.hw_err_disable = sec_hw_error_disable;
+ qm->err_ini.hw_err_enable = sec_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = sec_set_user_domain_and_cache;
+ qm->err_ini.log_dev_hw_err = sec_log_hw_error;
+ qm->err_ini.open_axi_master_ooo = sec_open_axi_master_ooo;
+ qm->err_ini.err_info.msi_wr_port = SEC_WR_MSI_PORT;
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_dev_err_init(qm);
qm->err_ini.open_axi_master_ooo(qm);
- hisi_sec_debug_regs_clear(hisi_sec);
+ sec_debug_regs_clear(qm);
return 0;
}
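
PF probe now publishes all device-specific error handling through qm->err_ini (callbacks plus the static err_info masks) and then defers to the QM core. The resulting initialization order, sketched with the calls this hunk makes (the wrapper name is hypothetical):

static int acc_pf_err_init_sketch(struct hisi_qm *qm)
{
	int ret;

	/* 1. fill qm->err_ini: hw_err_enable/disable, log_dev_hw_err,
	 *    open_axi_master_ooo and the err_info masks
	 */

	/* 2. program user domain and cache through the new hook */
	ret = qm->err_ini.set_usr_domain_cache(qm);
	if (ret)
		return ret;

	/* 3. common error init, replacing hisi_sec_hw_error_init() */
	hisi_qm_dev_err_init(qm);

	return 0;
}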
-static int hisi_sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
+static int hisi_sec_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
- enum qm_hw_ver rev_id;
-
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -ENODEV;
+ int ret;
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "sec\ncipher\ndigest\n";
+ qm->uacce_mode = uacce_mode;
+#endif
qm->pdev = pdev;
- qm->ver = rev_id;
-
+ ret = hisi_qm_pre_init(qm, pf_q_num, SEC_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
qm->sqe_size = SEC_SQE_SIZE;
qm->dev_name = sec_name;
- qm->fun_type = (pdev->device == SEC_PCI_DEVICE_ID_PF) ?
- QM_HW_PF : QM_HW_VF;
- qm->algs = "sec\ncipher\ndigest\n";
+ qm->qm_list = &sec_devices;
qm->wq = sec_wq;
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- return -EINVAL;
- }
-
- return hisi_qm_init(qm);
+ return 0;
}
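
hisi_sec_qm_init() becomes hisi_sec_qm_pre_init(): hardware-revision probing and PF queue setup move into the common hisi_qm_pre_init(), and the uacce-mode switch collapses into a plain qm->uacce_mode assignment. An illustrative outline of what the common helper is assumed to do, reconstructed from the code this patch deletes:

static int hisi_qm_pre_init_outline(struct hisi_qm *qm, u32 pf_q_num,
				    u32 def_q_base)
{
	/* assumption: mirrors the removed per-driver code */
	qm->ver = hisi_qm_get_hw_version(qm->pdev);
	if (qm->ver == QM_HW_UNKNOWN)
		return -ENODEV;

	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = def_q_base;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
	}

	return 0;
}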
-static int hisi_sec_probe_init(struct hisi_qm *qm, struct hisi_sec *hisi_sec)
+static int hisi_sec_probe_init(struct hisi_qm *qm)
{
if (qm->fun_type == QM_HW_PF) {
- qm->qp_base = SEC_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
- return hisi_sec_pf_probe_init(hisi_sec);
+ return hisi_sec_pf_probe_init(qm);
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
@@ -1066,660 +944,104 @@ static int hisi_sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!hisi_sec)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_sec);
-
- hisi_sec_add_to_list(hisi_sec);
+ qm = &hisi_sec->qm;
+ qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
- hisi_sec->hisi_sec_list_lock = &hisi_sec_list_lock;
+ ret = hisi_sec_qm_pre_init(qm, pdev);
+ if (ret)
+ return ret;
hisi_sec->ctx_q_num = ctx_q_num;
hisi_sec->fusion_limit = fusion_limit;
+ hisi_sec->fusion_tmout_nsec = fusion_time;
- hisi_sec->fusion_tmout_nsec = fusion_tmout_nsec;
-
- qm = &hisi_sec->qm;
-
- ret = hisi_sec_qm_init(qm, pdev);
+ ret = hisi_qm_init(qm);
if (ret) {
- dev_err(&pdev->dev, "Failed to pre init qm!\n");
- goto err_remove_from_list;
+ pci_err(pdev, "Failed to init qm (%d)!\n", ret);
+ return ret;
}
- ret = hisi_sec_probe_init(qm, hisi_sec);
+ ret = hisi_sec_probe_init(qm);
if (ret) {
- dev_err(&pdev->dev, "Failed to probe!\n");
+ pci_err(pdev, "Failed to probe init (%d)!\n", ret);
goto err_qm_uninit;
}
ret = hisi_qm_start(qm);
- if (ret)
+ if (ret) {
+ pci_err(pdev, "Failed to start qm (%d)!\n", ret);
goto err_qm_uninit;
+ }
- ret = hisi_sec_debugfs_init(hisi_sec);
+ ret = hisi_sec_debugfs_init(qm);
if (ret)
- dev_err(&pdev->dev, "Failed to init debugfs (%d)!\n", ret);
-
- return 0;
-
- err_qm_uninit:
- hisi_qm_uninit(qm);
- err_remove_from_list:
- hisi_sec_remove_from_list(hisi_sec);
- return ret;
-}
-
-/* now we only support equal assignment */
-static int hisi_sec_vf_q_assign(struct hisi_sec *hisi_sec, u32 num_vfs)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num, i;
- int ret;
+ pci_warn(pdev, "Failed to init debugfs (%d)!\n", ret);
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_q_num - qp_num;
- q_num = remain_q_num / num_vfs;
+ hisi_qm_add_to_list(qm, &sec_devices);
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret)
- return ret;
- q_base += q_num;
+ ret = hisi_sec_register_to_crypto(fusion_limit);
+ if (ret < 0) {
+ pci_err(pdev, "Failed to register driver to crypto!\n");
+ goto err_remove_from_list;
}
- return 0;
-}
-
-static int hisi_sec_clear_vft_config(struct hisi_sec *hisi_sec)
-{
- struct hisi_sec_ctrl *ctrl = hisi_sec->ctrl;
- struct hisi_qm *qm = &hisi_sec->qm;
- u32 num_vfs = ctrl->num_vfs;
- int ret;
- u32 i;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_crypto_unregister;
}
- ctrl->num_vfs = 0;
-
return 0;
-}
-
-static int hisi_sec_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
-#ifdef CONFIG_PCI_IOV
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- u32 num_vfs;
- int pre_existing_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
-
- if (pre_existing_vfs) {
- dev_err(&pdev->dev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(u32, max_vfs, SEC_VF_NUM);
-
- ret = hisi_sec_vf_q_assign(hisi_sec, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't assign queues for VF!\n");
- return ret;
- }
- hisi_sec->ctrl->num_vfs = num_vfs;
+err_crypto_unregister:
+ hisi_sec_unregister_from_crypto(fusion_limit);
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't enable VF!\n");
- hisi_sec_clear_vft_config(hisi_sec);
- return ret;
- }
+err_remove_from_list:
+ hisi_qm_del_from_list(qm, &sec_devices);
+ hisi_sec_debugfs_exit(qm);
+ hisi_qm_stop(qm, QM_NORMAL);
- return num_vfs;
-#else
- return 0;
-#endif
-}
-
-static int hisi_sec_try_frozen_vfs(struct pci_dev *pdev)
-{
- struct hisi_sec *sec, *vf_sec;
- struct pci_dev *dev;
- int ret = 0;
-
- /* Try to frozen all the VFs as disable SRIOV */
- mutex_lock(&hisi_sec_list_lock);
- list_for_each_entry(sec, &hisi_sec_list, list) {
- dev = sec->qm.pdev;
- if (dev == pdev)
- continue;
- if (pci_physfn(dev) == pdev) {
- vf_sec = pci_get_drvdata(dev);
- ret = hisi_qm_frozen(&vf_sec->qm);
- if (ret)
- goto frozen_fail;
- }
- }
+err_qm_uninit:
+ hisi_qm_uninit(qm);
-frozen_fail:
- mutex_unlock(&hisi_sec_list_lock);
return ret;
}
-static int hisi_sec_sriov_disable(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- dev_err(&pdev->dev,
- "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- if (hisi_sec_try_frozen_vfs(pdev)) {
- dev_err(&pdev->dev, "try frozen VFs failed!\n");
- return -EBUSY;
- }
-
- /* remove in hisi_sec_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
- return hisi_sec_clear_vft_config(hisi_sec);
-}
-
static int hisi_sec_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
- return hisi_sec_sriov_disable(pdev);
+ return hisi_qm_sriov_disable(pdev, &sec_devices);
else
- return hisi_sec_sriov_enable(pdev, num_vfs);
-}
-
-static void hisi_sec_remove_wait_delay(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
-
- while (hisi_qm_frozen(qm) || ((qm->fun_type == QM_HW_PF) &&
- hisi_sec_try_frozen_vfs(qm->pdev)))
- usleep_range(FROZEN_RANGE_MIN, FROZEN_RANGE_MAX);
-
- udelay(SEC_WAIT_DELAY);
+ return hisi_qm_sriov_enable(pdev, num_vfs);
}
static void hisi_sec_remove(struct pci_dev *pdev)
{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_sec->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
if (uacce_mode != UACCE_MODE_NOUACCE)
- hisi_sec_remove_wait_delay(hisi_sec);
+ hisi_qm_remove_wait_delay(qm, &sec_devices);
+
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ (void)hisi_qm_sriov_disable(pdev, NULL);
- if (qm->fun_type == QM_HW_PF && hisi_sec->ctrl->num_vfs != 0)
- (void)hisi_sec_sriov_disable(pdev);
+ hisi_sec_unregister_from_crypto(fusion_limit);
- hisi_sec_debugfs_exit(hisi_sec);
+ hisi_qm_del_from_list(qm, &sec_devices);
+ hisi_sec_debugfs_exit(qm);
(void)hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hisi_sec_hw_error_set_state(hisi_sec, false);
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
- hisi_sec_remove_from_list(hisi_sec);
-}
-
-static void hisi_sec_shutdown(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hisi_sec->qm, QM_NORMAL);
-}
-
-static pci_ers_result_t hisi_sec_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hisi_qm_process_dev_error(pdev);
-}
-
-static int hisi_sec_reset_prepare_ready(struct hisi_sec *hisi_sec)
-{
- struct pci_dev *pdev = hisi_sec->qm.pdev;
- struct hisi_sec *sec = pci_get_drvdata(pci_physfn(pdev));
- int delay = 0;
-
- while (test_and_set_bit(HISI_SEC_RESET, &sec->status)) {
- msleep(++delay);
- if (delay > SEC_RESET_WAIT_TIMEOUT)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hisi_sec_vf_reset_prepare(struct pci_dev *pdev,
- enum qm_stop_reason stop_reason)
-{
- struct hisi_sec *hisi_sec;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_sec_list_lock);
- if (pdev->is_physfn) {
- list_for_each_entry(hisi_sec, &hisi_sec_list, list) {
- dev = hisi_sec->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_sec->qm;
-
- ret = hisi_qm_stop(qm, stop_reason);
- if (ret)
- goto prepare_fail;
- }
- }
- }
-
-prepare_fail:
- mutex_unlock(&hisi_sec_list_lock);
- return ret;
-}
-
-static int hisi_sec_controller_reset_prepare(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- struct pci_dev *pdev = qm->pdev;
- int ret;
-
- ret = hisi_sec_reset_prepare_ready(hisi_sec);
- if (ret) {
- dev_err(&pdev->dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hisi_sec_vf_reset_prepare(pdev, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop VFs!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(&pdev->dev, "Fails to stop QM!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(&pdev->dev, "Fails to isolate hw err!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hisi_sec_soft_reset(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value;
- int ret;
- u32 val;
-
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- ret = hisi_qm_set_vf_mse(qm, SEC_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable vf mse bit.\n");
- return ret;
- }
-
- ret = hisi_qm_set_msi(qm, SEC_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable peh msi bit.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(SEC_MASTER_GLOBAL_CTRL_SHUTDOWN,
- hisi_sec->qm.io_base + SEC_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hisi_sec->qm.io_base +
- SEC_MASTER_TRANS_RETURN,
- val,
- (val == SEC_MASTER_TRANS_RETURN_RW),
- SEC_DELAY_10_US,
- SEC_POLL_TIMEOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, SEC_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable pf mse bit.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "SRST",
- NULL, &value);
- if (ACPI_FAILURE(s) || value) {
- dev_err(dev, "Controller reset fails %lld\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hisi_sec_vf_reset_done(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_sec_list_lock);
- list_for_each_entry(hisi_sec, &hisi_sec_list, list) {
- dev = hisi_sec->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_sec->qm;
-
- ret = hisi_qm_restart(qm);
- if (ret)
- goto reset_fail;
- }
- }
-
-reset_fail:
- mutex_unlock(&hisi_sec_list_lock);
- return ret;
-}
-
-static int hisi_sec_controller_reset_done(struct hisi_sec *hisi_sec)
-{
- struct hisi_qm *qm = &hisi_sec->qm;
- struct pci_dev *pdev = qm->pdev;
- struct device *dev = &pdev->dev;
- int ret;
-
- ret = hisi_qm_set_msi(qm, SEC_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable peh msi bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, SEC_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable pf mse bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_vf_mse(qm, SEC_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable vf mse bit!\n");
- return ret;
- }
-
- hisi_sec_set_user_domain_and_cache(hisi_sec);
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- return -EPERM;
- }
-
- if (hisi_sec->ctrl->num_vfs) {
- ret = hisi_sec_vf_q_assign(hisi_sec, hisi_sec->ctrl->num_vfs);
- if (ret) {
- dev_err(dev, "Failed to assign vf queues!\n");
- return ret;
- }
- }
-
- ret = hisi_sec_vf_reset_done(pdev);
- if (ret) {
- dev_err(dev, "Failed to start VFs!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hisi_sec_hw_error_init(hisi_sec);
-
- return 0;
-}
-
-static int hisi_sec_controller_reset(struct hisi_sec *hisi_sec)
-{
- struct device *dev = &hisi_sec->qm.pdev->dev;
- int ret;
-
- dev_info(dev, "Controller resetting...\n");
-
- ret = hisi_sec_controller_reset_prepare(hisi_sec);
- if (ret)
- return ret;
-
- ret = hisi_sec_soft_reset(hisi_sec);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d)\n", ret);
- return ret;
- }
-
- ret = hisi_sec_controller_reset_done(hisi_sec);
- if (ret)
- return ret;
-
- clear_bit(HISI_SEC_RESET, &hisi_sec->status);
- dev_info(dev, "Controller reset complete\n");
-
- return 0;
-}
-
-static pci_ers_result_t hisi_sec_slot_reset(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- int ret;
-
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_RECOVERED;
-
- dev_info(&pdev->dev, "Requesting reset due to PCI error\n");
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- /* reset sec controller */
- ret = hisi_sec_controller_reset(hisi_sec);
- if (ret) {
- dev_warn(&pdev->dev, "hisi_sec controller reset failed (%d)\n",
- ret);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void hisi_sec_set_hw_error(struct hisi_sec *hisi_sec, bool state)
-{
- struct pci_dev *pdev = hisi_sec->qm.pdev;
- struct hisi_sec *sec = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &sec->qm;
-
- if (qm->fun_type == QM_HW_VF)
- return;
-
- if (state)
- hisi_qm_hw_error_init(qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- else
- hisi_qm_hw_error_uninit(qm);
-
- hisi_sec_hw_error_set_state(sec, state);
-}
-
-static int hisi_sec_get_hw_error_status(struct hisi_sec *hisi_sec)
-{
- u32 err_sts;
-
- err_sts = readl(hisi_sec->qm.io_base + SEC_CORE_INT_STATUS) &
- SEC_CORE_INT_STATUS_M_ECC;
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-static int hisi_sec_check_hw_error(struct hisi_sec *hisi_sec)
-{
- struct pci_dev *pdev = hisi_sec->qm.pdev;
- struct hisi_sec *sec = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &sec->qm;
- int ret;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- ret = hisi_qm_get_hw_error_status(qm);
- if (ret)
- return ret;
-
- return hisi_sec_get_hw_error_status(sec);
-}
-
-static void hisi_sec_reset_prepare(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_sec->qm;
- struct device *dev = &pdev->dev;
- u32 delay = 0;
- int ret;
-
- hisi_sec_set_hw_error(hisi_sec, SEC_HW_ERROR_IRQ_DISABLE);
-
- while (hisi_sec_check_hw_error(hisi_sec)) {
- msleep(++delay);
- if (delay > SEC_RESET_WAIT_TIMEOUT)
- return;
- }
-
- ret = hisi_sec_reset_prepare_ready(hisi_sec);
- if (ret) {
- dev_err(dev, "FLR not ready!\n");
- return;
- }
-
- ret = hisi_sec_vf_reset_prepare(pdev, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to prepare reset!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to stop QM!\n");
- return;
- }
-
- dev_info(dev, "FLR resetting...\n");
-}
-
-static void hisi_sec_flr_reset_complete(struct pci_dev *pdev)
-{
- struct pci_dev *pf_pdev = pci_physfn(pdev);
- struct hisi_sec *hisi_sec = pci_get_drvdata(pf_pdev);
- struct device *dev = &hisi_sec->qm.pdev->dev;
- u32 id;
-
- pci_read_config_dword(hisi_sec->qm.pdev, PCI_COMMAND, &id);
- if (id == SEC_PCI_COMMAND_INVALID)
- dev_err(dev, "Device can not be used!\n");
-
- clear_bit(HISI_SEC_RESET, &hisi_sec->status);
-}
-
-static void hisi_sec_reset_done(struct pci_dev *pdev)
-{
- struct hisi_sec *hisi_sec = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_sec->qm;
- struct device *dev = &pdev->dev;
- int ret;
-
- hisi_sec_set_hw_error(hisi_sec, SEC_HW_ERROR_IRQ_ENABLE);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- goto flr_done;
- }
-
- if (pdev->is_physfn) {
- hisi_sec_set_user_domain_and_cache(hisi_sec);
- if (hisi_sec->ctrl->num_vfs) {
- ret = hisi_sec_vf_q_assign(hisi_sec,
- hisi_sec->ctrl->num_vfs);
- if (ret) {
- dev_err(dev, "Failed to assign vf queue\n");
- goto flr_done;
- }
- }
-
- ret = hisi_sec_vf_reset_done(pdev);
- if (ret) {
- dev_err(dev, "Failed to reset vf\n");
- goto flr_done;
- }
- }
-
-flr_done:
- hisi_sec_flr_reset_complete(pdev);
-
- dev_info(dev, "FLR reset complete\n");
}
static const struct pci_error_handlers hisi_sec_err_handler = {
- .error_detected = hisi_sec_error_detected,
- .slot_reset = hisi_sec_slot_reset,
- .reset_prepare = hisi_sec_reset_prepare,
- .reset_done = hisi_sec_reset_done,
+ .error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_sec_pci_driver = {
@@ -1729,7 +1051,7 @@ static void hisi_sec_reset_done(struct pci_dev *pdev)
.remove = hisi_sec_remove,
.sriov_configure = hisi_sec_sriov_configure,
.err_handler = &hisi_sec_err_handler,
- .shutdown = hisi_sec_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hisi_sec_register_debugfs(void)
@@ -1759,35 +1081,25 @@ static int __init hisi_sec_init(void)
return -ENOMEM;
}
+ INIT_LIST_HEAD(&sec_devices.list);
+ mutex_init(&sec_devices.lock);
+ sec_devices.check = NULL;
+
hisi_sec_register_debugfs();
ret = pci_register_driver(&hisi_sec_pci_driver);
if (ret < 0) {
+ hisi_sec_unregister_debugfs();
+ if (sec_wq)
+ destroy_workqueue(sec_wq);
pr_err("Failed to register pci driver.\n");
- goto err_pci;
- }
-
- pr_info("hisi_sec: register to crypto\n");
- ret = hisi_sec_register_to_crypto(fusion_limit);
- if (ret < 0) {
- pr_err("Failed to register driver to crypto.\n");
- goto err_probe_device;
}
- return 0;
-
- err_probe_device:
- pci_unregister_driver(&hisi_sec_pci_driver);
- err_pci:
- hisi_sec_unregister_debugfs();
- if (sec_wq)
- destroy_workqueue(sec_wq);
return ret;
}
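
The per-driver device list (hisi_sec_list plus its mutex) is replaced by the shared struct hisi_qm_list sec_devices, which the QM core now walks for SR-IOV disable, remove-wait and qp allocation. An illustrative lifecycle, using only the calls this patch introduces (the wrapper is hypothetical):

static void sec_qm_list_lifecycle_sketch(struct hisi_qm *qm)
{
	hisi_qm_add_to_list(qm, &sec_devices);		/* at probe */

	/* ... qps are handed out from this list while the device lives ... */

	hisi_qm_del_from_list(qm, &sec_devices);	/* at remove */
}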
static void __exit hisi_sec_exit(void)
{
- hisi_sec_unregister_from_crypto(fusion_limit);
pci_unregister_driver(&hisi_sec_pci_driver);
hisi_sec_unregister_debugfs();
if (sec_wq)
diff --git a/drivers/crypto/hisilicon/zip/zip.h b/drivers/crypto/hisilicon/zip/zip.h
index 560751a..ddd5924 100644
--- a/drivers/crypto/hisilicon/zip/zip.h
+++ b/drivers/crypto/hisilicon/zip/zip.h
@@ -18,19 +18,12 @@ enum hisi_zip_error_type {
};
struct hisi_zip_ctrl;
-
-enum hisi_zip_status {
- HISI_ZIP_RESET,
-};
-
struct hisi_zip {
struct hisi_qm qm;
- struct list_head list;
struct hisi_zip_ctrl *ctrl;
- unsigned long status;
};
-struct hisi_zip *find_zip_device(int node);
+int zip_create_qps(struct hisi_qp **qps, int ctx_num);
int hisi_zip_register_to_crypto(void);
void hisi_zip_unregister_from_crypto(void);
#endif
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index b2965ba..b247021 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -153,26 +153,19 @@ static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
sqe->dest_addr_h = upper_32_bits(d_addr);
}
-static int hisi_zip_create_qp(struct hisi_qm *qm, struct hisi_zip_qp_ctx *ctx,
+static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx,
int alg_type, int req_type)
{
- struct device *dev = &qm->pdev->dev;
- struct hisi_qp *qp;
+ struct device *dev = &qp->qm->pdev->dev;
int ret;
- qp = hisi_qm_create_qp(qm, alg_type);
- if (IS_ERR(qp)) {
- dev_err(dev, "create qp failed!\n");
- return PTR_ERR(qp);
- }
-
qp->req_type = req_type;
+ qp->alg_type = alg_type;
qp->qp_ctx = ctx;
ret = hisi_qm_start_qp(qp, 0);
if (ret < 0) {
dev_err(dev, "start qp failed!\n");
- hisi_qm_release_qp(qp);
return ret;
}
@@ -188,26 +181,27 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx)
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type)
{
+ struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
struct hisi_zip *hisi_zip;
- struct hisi_qm *qm;
int ret, i, j;
- /* find the proper zip device */
- hisi_zip = find_zip_device(cpu_to_node(smp_processor_id()));
- if (!hisi_zip) {
- pr_err("Failed to find a proper ZIP device!\n");
+ ret = zip_create_qps(qps, HZIP_CTX_Q_NUM);
+ if (ret) {
+ pr_err("Can not create zip qps!\n");
return -ENODEV;
}
- qm = &hisi_zip->qm;
+
+ hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);
for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
/* alg_type = 0 for compress, 1 for decompress in hw sqe */
- ret = hisi_zip_create_qp(qm, &hisi_zip_ctx->qp_ctx[i], i,
+ ret = hisi_zip_start_qp(qps[i], &hisi_zip_ctx->qp_ctx[i], i,
req_type);
if (ret) {
for (j = i - 1; j >= 0; j--)
- hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[j]);
+ hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);
+ hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
return ret;
}
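
hisi_zip_create_qp() is reduced to hisi_zip_start_qp(): all HZIP_CTX_Q_NUM qps are now allocated as one batch by zip_create_qps() and only started per context, so the failure path stops the qps already started and then frees the whole batch at once. A sketch of that unwind contract (the helper below is hypothetical; the calls come from this hunk):

static void zip_ctx_unwind_sketch(struct hisi_zip_ctx *ctx,
				  struct hisi_qp **qps, int started)
{
	int j;

	for (j = started - 1; j >= 0; j--)
		hisi_qm_stop_qp(ctx->qp_ctx[j].qp);

	hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
}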
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 5e40fbf..54681dc 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -15,7 +15,6 @@
#include <linux/uacce.h>
#include "zip.h"
-#define HZIP_VF_NUM 63
#define HZIP_QUEUE_NUM_V1 4096
#define HZIP_QUEUE_NUM_V2 1024
@@ -75,7 +74,6 @@
#define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148
#define HZIP_CORE_INT_RAS_CE_ENB 0x301160
#define HZIP_CORE_INT_RAS_NFE_ENB 0x301164
-#define HZIP_RAS_NFE_MBIT_DISABLE ~HZIP_CORE_INT_STATUS_M_ECC
#define HZIP_CORE_INT_RAS_FE_ENB 0x301168
#define HZIP_CORE_INT_RAS_NFE_ENABLE 0x7FE
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
@@ -95,95 +93,14 @@
#define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C
#define HZIP_AXI_SHUTDOWN_ENABLE BIT(14)
#define HZIP_AXI_SHUTDOWN_DISABLE 0xFFFFBFFF
-#define HZIP_WR_MSI_PORT 0xF7FF
+#define HZIP_WR_PORT BIT(11)
-#define HZIP_ENABLE 1
-#define HZIP_DISABLE 0
-#define HZIP_NUMA_DISTANCE 100
#define HZIP_BUF_SIZE 22
#define FORMAT_DECIMAL 10
-#define HZIP_REG_RD_INTVRL_US 10
-#define HZIP_REG_RD_TMOUT_US 1000
-#define HZIP_RESET_WAIT_TIMEOUT 400
-#define HZIP_PCI_COMMAND_INVALID 0xFFFFFFFF
-
-#define FROZEN_RANGE_MIN 10
-#define FROZEN_RANGE_MAX 20
static const char hisi_zip_name[] = "hisi_zip";
static struct dentry *hzip_debugfs_root;
-static LIST_HEAD(hisi_zip_list);
-static DEFINE_MUTEX(hisi_zip_list_lock);
-
-struct hisi_zip_resource {
- struct hisi_zip *hzip;
- int distance;
- struct list_head list;
-};
-
-static void free_list(struct list_head *head)
-{
- struct hisi_zip_resource *res, *tmp;
-
- list_for_each_entry_safe(res, tmp, head, list) {
- list_del(&res->list);
- kfree(res);
- }
-}
-
-struct hisi_zip *find_zip_device(int node)
-{
- struct hisi_zip *ret = NULL;
-#ifdef CONFIG_NUMA
- struct hisi_zip_resource *res, *tmp;
- struct hisi_zip *hisi_zip;
- struct list_head *n;
- struct device *dev;
- LIST_HEAD(head);
-
- mutex_lock(&hisi_zip_list_lock);
-
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto err;
-
- dev = &hisi_zip->qm.pdev->dev;
- res->hzip = hisi_zip;
- res->distance = node_distance(dev->numa_node, node);
-
- n = &head;
- list_for_each_entry(tmp, &head, list) {
- if (res->distance < tmp->distance) {
- n = &tmp->list;
- break;
- }
- }
- list_add_tail(&res->list, n);
- }
-
- list_for_each_entry(tmp, &head, list) {
- if (hisi_qm_get_free_qp_num(&tmp->hzip->qm)) {
- ret = tmp->hzip;
- break;
- }
- }
-
- free_list(&head);
-#else
- mutex_lock(&hisi_zip_list_lock);
-
- ret = list_first_entry(&hisi_zip_list, struct hisi_zip, list);
-#endif
- mutex_unlock(&hisi_zip_list_lock);
-
- return ret;
-
-err:
- free_list(&head);
- mutex_unlock(&hisi_zip_list_lock);
- return NULL;
-}
+static struct hisi_qm_list zip_devices;
struct hisi_zip_hw_error {
u32 int_msk;
@@ -229,9 +146,7 @@ struct ctrl_debug_file {
* Just relevant for PF.
*/
struct hisi_zip_ctrl {
- u32 num_vfs;
struct hisi_zip *hisi_zip;
- struct dentry *debug_root;
struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM];
};
@@ -282,73 +197,49 @@ enum {
{"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
};
-static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+#ifdef CONFIG_CRYPTO_QM_UACCE
+static int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
- struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI,
- PCI_DEVICE_ID_ZIP_PF, NULL);
- u32 n, q_num;
- u8 rev_id;
- int ret;
-
- if (!val)
- return -EINVAL;
+ return mode_set(val, kp);
+}
- if (!pdev) {
- q_num = min_t(u32, HZIP_QUEUE_NUM_V1, HZIP_QUEUE_NUM_V2);
- pr_info("No device found currently, suppose queue number is %d\n",
- q_num);
- } else {
- rev_id = pdev->revision;
- switch (rev_id) {
- case QM_HW_V1:
- q_num = HZIP_QUEUE_NUM_V1;
- break;
- case QM_HW_V2:
- q_num = HZIP_QUEUE_NUM_V2;
- break;
- default:
- return -EINVAL;
- }
- }
+static const struct kernel_param_ops uacce_mode_ops = {
+ .set = uacce_mode_set,
+ .get = param_get_int,
+};
- ret = kstrtou32(val, 10, &n);
- if (ret != 0 || n == 0 || n > q_num)
- return -EINVAL;
+static int uacce_mode = UACCE_MODE_NOUACCE;
+module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
+MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+#endif
- return param_set_int(val, kp);
+static int pf_q_num_set(const char *val, const struct kernel_param *kp)
+{
+ return q_num_set(val, kp, PCI_DEVICE_ID_ZIP_PF);
}
static const struct kernel_param_ops pf_q_num_ops = {
.set = pf_q_num_set,
.get = param_get_int,
};
-static int uacce_mode_set(const char *val, const struct kernel_param *kp)
-{
- u32 n;
- int ret;
-
- if (!val)
- return -EINVAL;
- ret = kstrtou32(val, FORMAT_DECIMAL, &n);
- if (ret != 0 || (n != UACCE_MODE_NOIOMMU && n != UACCE_MODE_NOUACCE))
- return -EINVAL;
+static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
+module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
+MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
- return param_set_int(val, kp);
+static int vfs_num_set(const char *val, const struct kernel_param *kp)
+{
+ return vf_num_set(val, kp);
}
-static const struct kernel_param_ops uacce_mode_ops = {
- .set = uacce_mode_set,
+static const struct kernel_param_ops vfs_num_ops = {
+ .set = vfs_num_set,
.get = param_get_int,
};
-static u32 pf_q_num = HZIP_PF_DEF_Q_NUM;
-module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444);
-MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 1-4096, v2 1-1024)");
-
-static int uacce_mode = UACCE_MODE_NOUACCE;
-module_param_cb(uacce_mode, &uacce_mode_ops, &uacce_mode, 0444);
-MODULE_PARM_DESC(uacce_mode, "Mode of UACCE can be 0(default), 2");
+static u32 vfs_num;
+module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
+MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
static const struct pci_device_id hisi_zip_dev_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_ZIP_PF) },
@@ -357,81 +248,67 @@ static int uacce_mode_set(const char *val, const struct kernel_param *kp)
};
MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids);
-static inline void hisi_zip_add_to_list(struct hisi_zip *hisi_zip)
+int zip_create_qps(struct hisi_qp **qps, int ctx_num)
{
- mutex_lock(&hisi_zip_list_lock);
- list_add_tail(&hisi_zip->list, &hisi_zip_list);
- mutex_unlock(&hisi_zip_list_lock);
-}
+ int node = cpu_to_node(smp_processor_id());
-static inline void hisi_zip_remove_from_list(struct hisi_zip *hisi_zip)
-{
- mutex_lock(&hisi_zip_list_lock);
- list_del(&hisi_zip->list);
- mutex_unlock(&hisi_zip_list_lock);
+ return hisi_qm_alloc_qps_node(node, &zip_devices,
+ qps, ctx_num, 0);
}
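
zip_create_qps() keys the batch allocation on the caller's current NUMA node; hisi_qm_alloc_qps_node() is assumed to reproduce the selection that the removed find_zip_device() open-coded. A condensed, illustrative reimplementation of that selection (the qm->list member is an assumption; the rest comes from the deleted code):

static struct hisi_qm *pick_nearest_qm_sketch(struct list_head *qm_list,
					      int node)
{
	struct hisi_qm *qm, *best = NULL;
	int d, best_d = INT_MAX;

	list_for_each_entry(qm, qm_list, list) {
		if (!hisi_qm_get_free_qp_num(qm))
			continue;	/* no free qps on this device */

		d = node_distance(qm->pdev->dev.numa_node, node);
		if (d < best_d) {
			best_d = d;
			best = qm;
		}
	}

	return best;
}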
-static void hisi_zip_set_user_domain_and_cache(struct hisi_zip *hisi_zip)
+static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
+ void __iomem *base = qm->io_base;
/* qm user domain */
- writel(AXUSER_BASE, hisi_zip->qm.io_base + QM_ARUSER_M_CFG_1);
- writel(ARUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_ARUSER_M_CFG_ENABLE);
- writel(AXUSER_BASE, hisi_zip->qm.io_base + QM_AWUSER_M_CFG_1);
- writel(AWUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_AWUSER_M_CFG_ENABLE);
- writel(WUSER_M_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_WUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
+ writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE);
+ writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1);
+ writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE);
+ writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE);
/* qm cache */
- writel(AXI_M_CFG, hisi_zip->qm.io_base + QM_AXI_M_CFG);
- writel(AXI_M_CFG_ENABLE, hisi_zip->qm.io_base + QM_AXI_M_CFG_ENABLE);
+ writel(AXI_M_CFG, base + QM_AXI_M_CFG);
+ writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE);
+
/* disable FLR triggered by BME(bus master enable) */
- writel(PEH_AXUSER_CFG, hisi_zip->qm.io_base + QM_PEH_AXUSER_CFG);
- writel(PEH_AXUSER_CFG_ENABLE, hisi_zip->qm.io_base +
- QM_PEH_AXUSER_CFG_ENABLE);
+ writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG);
+ writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE);
/* cache */
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_ARCA_CHE_0);
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_ARCA_CHE_1);
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_AWCA_CHE_0);
- writel(HZIP_CACHE_ALL_EN, hisi_zip->qm.io_base + HZIP_PORT_AWCA_CHE_1);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0);
+ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1);
/* user domain configurations */
- writel(AXUSER_BASE, hisi_zip->qm.io_base + HZIP_BD_RUSER_32_63);
- writel(AXUSER_BASE, hisi_zip->qm.io_base + HZIP_SGL_RUSER_32_63);
- writel(AXUSER_BASE, hisi_zip->qm.io_base + HZIP_BD_WUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63);
if (qm->use_sva) {
- writel(AXUSER_BASE | AXUSER_SSV, hisi_zip->qm.io_base +
- HZIP_DATA_RUSER_32_63);
- writel(AXUSER_BASE | AXUSER_SSV, hisi_zip->qm.io_base +
- HZIP_DATA_WUSER_32_63);
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63);
+ writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63);
} else {
- writel(AXUSER_BASE, hisi_zip->qm.io_base +
- HZIP_DATA_RUSER_32_63);
- writel(AXUSER_BASE, hisi_zip->qm.io_base +
- HZIP_DATA_WUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63);
+ writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
}
/* let's open all compression/decompression cores */
writel(HZIP_DECOMP_CHECK_ENABLE | HZIP_ALL_COMP_DECOMP_EN,
- hisi_zip->qm.io_base + HZIP_CLOCK_GATE_CTRL);
+ base + HZIP_CLOCK_GATE_CTRL);
/* enable sqc,cqc writeback */
writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
- FIELD_PREP(CQC_CACHE_WB_THRD, 1),
- hisi_zip->qm.io_base + QM_CACHE_CTL);
+ FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+
+ return 0;
}
/* hisi_zip_debug_regs_clear() - clear the zip debug regs */
-static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
+static void hisi_zip_debug_regs_clear(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
-
/* clear current_qm */
writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
@@ -442,52 +319,70 @@ static void hisi_zip_debug_regs_clear(struct hisi_zip *hisi_zip)
hisi_qm_debug_regs_clear(qm);
}
-
-static void hisi_zip_hw_error_set_state(struct hisi_zip *hisi_zip, bool state)
+static int hisi_zip_hw_err_pre_set(struct hisi_qm *qm, u32 *val)
{
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 val;
-
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
pci_info(qm->pdev, "ZIP v%d cannot support hw error handle!\n",
qm->ver);
- return;
+ return -EINVAL;
}
/* configure error type */
- writel(0x1, hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_CE_ENB);
- writel(0x0, hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(0x1, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(0x0, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
writel(HZIP_CORE_INT_RAS_NFE_ENABLE,
- hisi_zip->qm.io_base + HZIP_CORE_INT_RAS_NFE_ENB);
-
- val = readl(hisi_zip->qm.io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
- if (state) {
- /* clear ZIP hw error source if having */
- writel(HZIP_CORE_INT_DISABLE, hisi_zip->qm.io_base +
- HZIP_CORE_INT_SOURCE);
- /* enable ZIP hw error interrupts */
- writel(0, hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
-
- /* enable ZIP block master OOO when m-bit error occur */
- val = val | HZIP_AXI_SHUTDOWN_ENABLE;
- } else {
- /* disable ZIP hw error interrupts */
- writel(HZIP_CORE_INT_DISABLE,
- hisi_zip->qm.io_base + HZIP_CORE_INT_MASK);
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- /* disable ZIP block master OOO when m-bit error occur */
- val = val & HZIP_AXI_SHUTDOWN_DISABLE;
- }
+ *val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ return 0;
+}
+
+static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ ret = hisi_zip_hw_err_pre_set(qm, &val);
+ if (ret)
+ return;
+
+ /* clear ZIP hw error source if there is one */
+ writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_SOURCE);
+
+ /* enable ZIP hw error interrupts */
+ writel(0, qm->io_base + HZIP_CORE_INT_MASK);
+
+ /* enable ZIP block master OOO when an m-bit error occurs */
+ val = val | HZIP_AXI_SHUTDOWN_ENABLE;
- writel(val, hisi_zip->qm.io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+}
+
+static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ ret = hisi_zip_hw_err_pre_set(qm, &val);
+ if (ret)
+ return;
+
+ /* disable ZIP hw error interrupts */
+ writel(HZIP_CORE_INT_DISABLE, qm->io_base + HZIP_CORE_INT_MASK);
+
+ /* disable ZIP block master OOO when an m-bit error occurs */
+ val = val & HZIP_AXI_SHUTDOWN_DISABLE;
+
+ writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}
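
Unlike the SEC variant above, ZIP factors the shared V1 check and RAS error-type programming into hisi_zip_hw_err_pre_set(), which also returns the current control word; each callback then only applies its own OOO bit. The resulting shape, sketched with a hypothetical wrapper:

static void zip_axi_shutdown_set_sketch(struct hisi_qm *qm, bool enable)
{
	u32 val;

	if (hisi_zip_hw_err_pre_set(qm, &val))
		return;	/* V1 hardware: nothing more to program */

	if (enable)
		val |= HZIP_AXI_SHUTDOWN_ENABLE;
	else
		val &= HZIP_AXI_SHUTDOWN_DISABLE;

	writel(val, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
}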
static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file)
{
- struct hisi_zip *hisi_zip = file->ctrl->hisi_zip;
+ struct hisi_zip *zip = file->ctrl->hisi_zip;
- return &hisi_zip->qm;
+ return &zip->qm;
}
static u32 current_qm_read(struct ctrl_debug_file *file)
@@ -500,22 +395,21 @@ static u32 current_qm_read(struct ctrl_debug_file *file)
static int current_qm_write(struct ctrl_debug_file *file, u32 val)
{
struct hisi_qm *qm = file_to_qm(file);
- struct hisi_zip_ctrl *ctrl = file->ctrl;
u32 vfq_num;
u32 tmp;
- if (val > ctrl->num_vfs)
+ if (val > qm->vfs_num)
return -EINVAL;
/* Calculate curr_qm_qp_num according to the PF or VF device ID, then store it */
if (val == 0) {
qm->debug.curr_qm_qp_num = qm->qp_num;
} else {
- vfq_num = (qm->ctrl_q_num - qm->qp_num) / ctrl->num_vfs;
- if (val == ctrl->num_vfs) {
+ vfq_num = (qm->ctrl_q_num - qm->qp_num) / qm->vfs_num;
+ if (val == qm->vfs_num) {
qm->debug.curr_qm_qp_num =
qm->ctrl_q_num - qm->qp_num -
- (ctrl->num_vfs - 1) * vfq_num;
+ (qm->vfs_num - 1) * vfq_num;
} else {
qm->debug.curr_qm_qp_num = vfq_num;
}
@@ -638,10 +532,8 @@ static ssize_t hisi_zip_ctrl_debug_write(struct file *filp,
.write = hisi_zip_ctrl_debug_write,
};
-static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
+static int hisi_zip_core_debug_init(struct hisi_qm *qm)
{
- struct hisi_zip *hisi_zip = ctrl->hisi_zip;
- struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct debugfs_regset32 *regset;
struct dentry *tmp_d, *tmp;
@@ -657,7 +549,7 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
if (ret < 0)
return -EINVAL;
- tmp_d = debugfs_create_dir(buf, ctrl->debug_root);
+ tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
if (!tmp_d)
return -ENOENT;
@@ -677,29 +569,29 @@ static int hisi_zip_core_debug_init(struct hisi_zip_ctrl *ctrl)
return 0;
}
-static int hisi_zip_ctrl_debug_init(struct hisi_zip_ctrl *ctrl)
+static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm)
{
+ struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
struct dentry *tmp;
int i;
for (i = HZIP_CURRENT_QM; i < HZIP_DEBUG_FILE_NUM; i++) {
- spin_lock_init(&ctrl->files[i].lock);
- ctrl->files[i].ctrl = ctrl;
- ctrl->files[i].index = i;
+ spin_lock_init(&zip->ctrl->files[i].lock);
+ zip->ctrl->files[i].ctrl = zip->ctrl;
+ zip->ctrl->files[i].index = i;
tmp = debugfs_create_file(ctrl_debug_file_name[i], 0600,
- ctrl->debug_root, ctrl->files + i,
+ qm->debug.debug_root, zip->ctrl->files + i,
&ctrl_debug_fops);
if (!tmp)
return -ENOENT;
}
- return hisi_zip_core_debug_init(ctrl);
+ return hisi_zip_core_debug_init(qm);
}
-static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
+static int hisi_zip_debugfs_init(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
struct device *dev = &qm->pdev->dev;
struct dentry *dev_d;
int ret;
@@ -714,8 +606,7 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
goto failed_to_create;
if (qm->fun_type == QM_HW_PF) {
- hisi_zip->ctrl->debug_root = dev_d;
- ret = hisi_zip_ctrl_debug_init(hisi_zip->ctrl);
+ ret = hisi_zip_ctrl_debug_init(qm);
if (ret)
goto failed_to_create;
}
@@ -727,47 +618,16 @@ static int hisi_zip_debugfs_init(struct hisi_zip *hisi_zip)
return ret;
}
-static void hisi_zip_debugfs_exit(struct hisi_zip *hisi_zip)
+static void hisi_zip_debugfs_exit(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
-
debugfs_remove_recursive(qm->debug.debug_root);
if (qm->fun_type == QM_HW_PF) {
- hisi_zip_debug_regs_clear(hisi_zip);
+ hisi_zip_debug_regs_clear(qm);
qm->debug.curr_qm_qp_num = 0;
}
}
-static void hisi_zip_hw_error_init(struct hisi_zip *hisi_zip)
-{
- hisi_qm_hw_error_init(&hisi_zip->qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT, 0,
- QM_DB_RANDOM_INVALID);
- hisi_zip_hw_error_set_state(hisi_zip, true);
-}
-
-static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
-{
- return readl(qm->io_base + HZIP_CORE_INT_STATUS);
-}
-
-static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
-{
- writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
-}
-
-static void hisi_zip_set_ecc(struct hisi_qm *qm)
-{
- u32 nfe_enb;
-
- nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- writel(nfe_enb & HZIP_RAS_NFE_MBIT_DISABLE,
- qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
- writel(HZIP_CORE_INT_STATUS_M_ECC, qm->io_base + HZIP_CORE_INT_SET);
- qm->err_ini.is_dev_ecc_mbit = 1;
-}
-
static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
const struct hisi_zip_hw_error *err = zip_hw_error;
@@ -792,17 +652,53 @@ static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts)
}
}
-static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
+static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm)
+{
+ return readl(qm->io_base + HZIP_CORE_INT_STATUS);
+}
+
+static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
+{
+ writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE);
+}
+
+static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
- struct hisi_qm *qm = &hisi_zip->qm;
+ u32 val;
+
+ val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ writel(val & HZIP_AXI_SHUTDOWN_DISABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+
+ writel(val | HZIP_AXI_SHUTDOWN_ENABLE,
+ qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
+}
+
+static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm)
+{
+ u32 nfe_enb;
+
+ nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+
+ writel(HZIP_CORE_INT_STATUS_M_ECC,
+ qm->io_base + HZIP_CORE_INT_SET);
+}
+
+static int hisi_zip_pf_probe_init(struct hisi_qm *qm)
+{
+ struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm);
struct hisi_zip_ctrl *ctrl;
+ int ret;
ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
- hisi_zip->ctrl = ctrl;
- ctrl->hisi_zip = hisi_zip;
+ zip->ctrl = ctrl;
+ ctrl->hisi_zip = zip;
switch (qm->ver) {
case QM_HW_V1:
@@ -817,61 +713,71 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
return -EINVAL;
}
- qm->err_ini.qm_wr_port = HZIP_WR_MSI_PORT;
- qm->err_ini.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
qm->err_ini.get_dev_hw_err_status = hisi_zip_get_hw_err_status;
qm->err_ini.clear_dev_hw_err_status = hisi_zip_clear_hw_err_status;
+ qm->err_ini.err_info.ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+ qm->err_ini.err_info.ce = QM_BASE_CE;
+ qm->err_ini.err_info.nfe = QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT;
+ qm->err_ini.err_info.fe = 0;
+ qm->err_ini.err_info.msi = QM_DB_RANDOM_INVALID;
+ qm->err_ini.err_info.acpi_rst = "ZRST";
+ qm->err_ini.hw_err_disable = hisi_zip_hw_error_disable;
+ qm->err_ini.hw_err_enable = hisi_zip_hw_error_enable;
+ qm->err_ini.set_usr_domain_cache = hisi_zip_set_user_domain_and_cache;
qm->err_ini.log_dev_hw_err = hisi_zip_log_hw_error;
- qm->err_ini.inject_dev_hw_err = hisi_zip_set_ecc;
+ qm->err_ini.open_axi_master_ooo = hisi_zip_open_axi_master_ooo;
+ qm->err_ini.close_axi_master_ooo = hisi_zip_close_axi_master_ooo;
- hisi_zip_set_user_domain_and_cache(hisi_zip);
- hisi_zip_hw_error_init(hisi_zip);
- hisi_zip_debug_regs_clear(hisi_zip);
+ qm->err_ini.err_info.msi_wr_port = HZIP_WR_PORT;
+
+ ret = qm->err_ini.set_usr_domain_cache(qm);
+ if (ret)
+ return ret;
+
+ hisi_qm_dev_err_init(qm);
+
+ hisi_zip_debug_regs_clear(qm);
+
+ return 0;
+}
+
+static int hisi_zip_qm_pre_init(struct hisi_qm *qm, struct pci_dev *pdev)
+{
+ int ret;
+
+#ifdef CONFIG_CRYPTO_QM_UACCE
+ qm->algs = "zlib\ngzip\nxts(sm4)\nxts(aes)\n";
+ qm->uacce_mode = uacce_mode;
+#endif
+ qm->pdev = pdev;
+ ret = hisi_qm_pre_init(qm, pf_q_num, HZIP_PF_DEF_Q_BASE);
+ if (ret)
+ return ret;
+ qm->sqe_size = HZIP_SQE_SIZE;
+ qm->dev_name = hisi_zip_name;
+ qm->qm_list = &zip_devices;
return 0;
}
static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- struct hisi_zip *hisi_zip;
- enum qm_hw_ver rev_id;
+ struct hisi_zip *zip;
struct hisi_qm *qm;
int ret;
- rev_id = hisi_qm_get_hw_version(pdev);
- if (rev_id == QM_HW_UNKNOWN)
- return -EINVAL;
-
- hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL);
- if (!hisi_zip)
+ zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);
+ if (!zip)
return -ENOMEM;
- pci_set_drvdata(pdev, hisi_zip);
-
- hisi_zip_add_to_list(hisi_zip);
+ qm = &zip->qm;
+ qm->fun_type = pdev->is_physfn ? QM_HW_PF : QM_HW_VF;
- hisi_zip->status = 0;
- qm = &hisi_zip->qm;
- qm->pdev = pdev;
- qm->ver = rev_id;
+ ret = hisi_zip_qm_pre_init(qm, pdev);
+ if (ret)
+ return ret;
- qm->sqe_size = HZIP_SQE_SIZE;
- qm->dev_name = hisi_zip_name;
- qm->fun_type = (pdev->device == PCI_DEVICE_ID_ZIP_PF) ? QM_HW_PF :
- QM_HW_VF;
- qm->algs = "zlib\ngzip\nxts(sm4)\nxts(aes)\n";
-
- switch (uacce_mode) {
- case UACCE_MODE_NOUACCE:
- qm->use_uacce = false;
- break;
- case UACCE_MODE_NOIOMMU:
- qm->use_uacce = true;
- break;
- default:
- ret = -EINVAL;
- goto err_remove_from_list;
- }
+ hisi_qm_add_to_list(qm, &zip_devices);
ret = hisi_qm_init(qm);
if (ret) {
@@ -880,15 +786,11 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
if (qm->fun_type == QM_HW_PF) {
- ret = hisi_zip_pf_probe_init(hisi_zip);
+ ret = hisi_zip_pf_probe_init(qm);
if (ret) {
pci_err(pdev, "Failed to init pf probe (%d)!\n", ret);
goto err_remove_from_list;
}
-
- qm->qp_base = HZIP_PF_DEF_Q_BASE;
- qm->qp_num = pf_q_num;
- qm->debug.curr_qm_qp_num = pf_q_num;
} else if (qm->fun_type == QM_HW_VF) {
/*
* have no way to get qm configure in VM in v1 hardware,
@@ -914,7 +816,7 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_qm_uninit;
}
- ret = hisi_zip_debugfs_init(hisi_zip);
+ ret = hisi_zip_debugfs_init(qm);
if (ret)
pci_err(pdev, "Failed to init debugfs (%d)!\n", ret);
@@ -923,630 +825,62 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_err(pdev, "Failed to register driver to crypto!\n");
goto err_qm_stop;
}
+
+ if (qm->fun_type == QM_HW_PF && vfs_num > 0) {
+ ret = hisi_qm_sriov_enable(pdev, vfs_num);
+ if (ret < 0)
+ goto err_crypto_unregister;
+ }
+
return 0;
+err_crypto_unregister:
+ hisi_zip_unregister_from_crypto();
err_qm_stop:
- hisi_zip_debugfs_exit(hisi_zip);
+ hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
err_qm_uninit:
hisi_qm_uninit(qm);
err_remove_from_list:
- hisi_zip_remove_from_list(hisi_zip);
- return ret;
-}
-
-/* now we only support equal assignment */
-static int hisi_zip_vf_q_assign(struct hisi_zip *hisi_zip, u32 num_vfs)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 qp_num = qm->qp_num;
- u32 q_base = qp_num;
- u32 q_num, remain_q_num, i;
- int ret;
-
- if (!num_vfs)
- return -EINVAL;
-
- remain_q_num = qm->ctrl_q_num - qp_num;
- /* If remain queues not enough, return error. */
- if (remain_q_num < num_vfs)
- return -EINVAL;
-
- q_num = remain_q_num / num_vfs;
- for (i = 1; i <= num_vfs; i++) {
- if (i == num_vfs)
- q_num += remain_q_num % num_vfs;
- ret = hisi_qm_set_vft(qm, i, q_base, q_num);
- if (ret)
- return ret;
- q_base += q_num;
- }
-
- return 0;
-}
-
-static int hisi_zip_clear_vft_config(struct hisi_zip *hisi_zip)
-{
- struct hisi_zip_ctrl *ctrl = hisi_zip->ctrl;
- struct hisi_qm *qm = &hisi_zip->qm;
- u32 i, num_vfs = ctrl->num_vfs;
- int ret;
-
- for (i = 1; i <= num_vfs; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
-
- ctrl->num_vfs = 0;
-
- return 0;
-}
-
-static int hisi_zip_sriov_enable(struct pci_dev *pdev, int max_vfs)
-{
-#ifdef CONFIG_PCI_IOV
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- int pre_existing_vfs, num_vfs, ret;
-
- pre_existing_vfs = pci_num_vf(pdev);
- if (pre_existing_vfs) {
- dev_err(&pdev->dev,
- "Can't enable VF. Please disable pre-enabled VFs!\n");
- return 0;
- }
-
- num_vfs = min_t(int, max_vfs, HZIP_VF_NUM);
-
- ret = hisi_zip_vf_q_assign(hisi_zip, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't assign queues for VF!\n");
- return ret;
- }
-
- hisi_zip->ctrl->num_vfs = num_vfs;
-
- ret = pci_enable_sriov(pdev, num_vfs);
- if (ret) {
- dev_err(&pdev->dev, "Can't enable VF!\n");
- hisi_zip_clear_vft_config(hisi_zip);
- return ret;
- }
-
- return num_vfs;
-#else
- return 0;
-#endif
-}
-
-static int hisi_zip_try_frozen_vfs(struct pci_dev *pdev)
-{
- struct hisi_zip *zip, *vf_zip;
- struct pci_dev *dev;
- int ret = 0;
-
- /* Try to frozen all the VFs as disable SRIOV */
- mutex_lock(&hisi_zip_list_lock);
- list_for_each_entry(zip, &hisi_zip_list, list) {
- dev = zip->qm.pdev;
- if (dev == pdev)
- continue;
- if (pci_physfn(dev) == pdev) {
- vf_zip = pci_get_drvdata(dev);
- ret = hisi_qm_frozen(&vf_zip->qm);
- if (ret)
- goto frozen_fail;
- }
- }
-
-frozen_fail:
- mutex_unlock(&hisi_zip_list_lock);
+ hisi_qm_del_from_list(qm, &zip_devices);
return ret;
}
-static int hisi_zip_sriov_disable(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
-
- if (pci_vfs_assigned(pdev)) {
- dev_err(&pdev->dev,
- "Can't disable VFs while VFs are assigned!\n");
- return -EPERM;
- }
-
- if (hisi_zip_try_frozen_vfs(pdev)) {
- dev_err(&pdev->dev, "try frozen VFs failed!\n");
- return -EBUSY;
- }
-
- /* remove in hisi_zip_pci_driver will be called to free VF resources */
- pci_disable_sriov(pdev);
-
- return hisi_zip_clear_vft_config(hisi_zip);
-}
-
static int hisi_zip_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
- return hisi_zip_sriov_disable(pdev);
+ return hisi_qm_sriov_disable(pdev, &zip_devices);
else
- return hisi_zip_sriov_enable(pdev, num_vfs);
-}
-
-static void hisi_zip_remove_wait_delay(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
-
- while (hisi_qm_frozen(qm) || ((qm->fun_type == QM_HW_PF) &&
- hisi_zip_try_frozen_vfs(qm->pdev)))
- usleep_range(FROZEN_RANGE_MIN, FROZEN_RANGE_MAX);
-
- udelay(ZIP_WAIT_DELAY);
+ return hisi_qm_sriov_enable(pdev, num_vfs);
}
static void hisi_zip_remove(struct pci_dev *pdev)
{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_zip->qm;
+ struct hisi_qm *qm = pci_get_drvdata(pdev);
if (uacce_mode != UACCE_MODE_NOUACCE)
- hisi_zip_remove_wait_delay(hisi_zip);
+ hisi_qm_remove_wait_delay(qm, &zip_devices);
- if (qm->fun_type == QM_HW_PF && hisi_zip->ctrl->num_vfs != 0)
- (void)hisi_zip_sriov_disable(pdev);
+ if (qm->fun_type == QM_HW_PF && qm->vfs_num)
+ hisi_qm_sriov_disable(pdev, NULL);
hisi_zip_unregister_from_crypto();
- hisi_zip_debugfs_exit(hisi_zip);
+
+ hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
if (qm->fun_type == QM_HW_PF)
- hisi_zip_hw_error_set_state(hisi_zip, false);
+ hisi_qm_dev_err_uninit(qm);
hisi_qm_uninit(qm);
- hisi_zip_remove_from_list(hisi_zip);
-}
-
-static void hisi_zip_shutdown(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
-
- hisi_qm_stop(&hisi_zip->qm, QM_NORMAL);
-}
-
-static pci_ers_result_t hisi_zip_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_NONE;
-
- dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- return hisi_qm_process_dev_error(pdev);
-}
-
-static int hisi_zip_reset_prepare_ready(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- int delay = 0;
-
- while (test_and_set_bit(HISI_ZIP_RESET, &zip->status)) {
- msleep(++delay);
- if (delay > HZIP_RESET_WAIT_TIMEOUT)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hisi_zip_vf_reset_prepare(struct hisi_zip *hisi_zip,
- enum qm_stop_reason stop_reason)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_zip_list_lock);
- if (pdev->is_physfn) {
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- dev = hisi_zip->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_zip->qm;
-
- ret = hisi_qm_stop(qm, stop_reason);
- if (ret)
- goto prepare_fail;
- }
- }
- }
-
-prepare_fail:
- mutex_unlock(&hisi_zip_list_lock);
- return ret;
-}
-
-static int hisi_zip_controller_reset_prepare(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- int ret;
-
- ret = hisi_zip_reset_prepare_ready(hisi_zip);
- if (ret) {
- dev_err(dev, "Controller reset not ready!\n");
- return ret;
- }
-
- ret = hisi_zip_vf_reset_prepare(hisi_zip, QM_SOFT_RESET);
- if (ret) {
- dev_err(dev, "Fails to stop VFs!\n");
- return ret;
- }
-
- ret = hisi_qm_stop(qm, QM_SOFT_RESET);
- if (ret) {
- dev_err(dev, "Fails to stop QM!\n");
- return ret;
- }
-
-#ifdef CONFIG_CRYPTO_QM_UACCE
- if (qm->use_uacce) {
- ret = uacce_hw_err_isolate(&qm->uacce);
- if (ret) {
- dev_err(dev, "Fails to isolate hw err!\n");
- return ret;
- }
- }
-#endif
-
- return 0;
-}
-
-static int hisi_zip_soft_reset(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- unsigned long long value;
- int ret;
- u32 val;
-
- ret = hisi_qm_reg_test(qm);
- if (ret)
- return ret;
-
- ret = hisi_qm_set_vf_mse(qm, HZIP_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable vf mse bit.\n");
- return ret;
- }
-
- ret = hisi_qm_set_msi(qm, HZIP_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable peh msi bit.\n");
- return ret;
- }
-
- /* Set qm ecc if dev ecc happened to hold on ooo */
- hisi_qm_set_ecc(qm);
-
- /* OOO register set and check */
- writel(HZIP_MASTER_GLOBAL_CTRL_SHUTDOWN,
- hisi_zip->qm.io_base + HZIP_MASTER_GLOBAL_CTRL);
-
- /* If bus lock, reset chip */
- ret = readl_relaxed_poll_timeout(hisi_zip->qm.io_base +
- HZIP_MASTER_TRANS_RETURN, val,
- (val == HZIP_MASTER_TRANS_RETURN_RW),
- HZIP_REG_RD_INTVRL_US,
- HZIP_REG_RD_TMOUT_US);
- if (ret) {
- dev_emerg(dev, "Bus lock! Please reset system.\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HZIP_DISABLE);
- if (ret) {
- dev_err(dev, "Fails to disable pf mse bit.\n");
- return ret;
- }
-
- /* The reset related sub-control registers are not in PCI BAR */
- if (ACPI_HANDLE(dev)) {
- acpi_status s;
-
- s = acpi_evaluate_integer(ACPI_HANDLE(dev), "ZRST",
- NULL, &value);
- if (ACPI_FAILURE(s)) {
- dev_err(dev, "NO controller reset method!\n");
- return -EIO;
- }
-
- if (value) {
- dev_err(dev, "Reset step %llu failed!\n", value);
- return -EIO;
- }
- } else {
- dev_err(dev, "No reset method!\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int hisi_zip_vf_reset_done(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct pci_dev *dev;
- struct hisi_qm *qm;
- int ret = 0;
-
- mutex_lock(&hisi_zip_list_lock);
- list_for_each_entry(hisi_zip, &hisi_zip_list, list) {
- dev = hisi_zip->qm.pdev;
- if (dev == pdev)
- continue;
-
- if (pci_physfn(dev) == pdev) {
- qm = &hisi_zip->qm;
-
- ret = hisi_qm_restart(qm);
- if (ret)
- goto reset_fail;
- }
- }
-
-reset_fail:
- mutex_unlock(&hisi_zip_list_lock);
- return ret;
-}
-
-static int hisi_zip_controller_reset_done(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- int ret;
-
- ret = hisi_qm_set_msi(qm, HZIP_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable peh msi bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_pf_mse(qm, HZIP_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable pf mse bit!\n");
- return ret;
- }
-
- ret = hisi_qm_set_vf_mse(qm, HZIP_ENABLE);
- if (ret) {
- dev_err(dev, "Fails to enable vf mse bit!\n");
- return ret;
- }
-
- hisi_zip_set_user_domain_and_cache(hisi_zip);
- hisi_qm_restart_prepare(qm);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- return -EPERM;
- }
-
- if (hisi_zip->ctrl->num_vfs) {
- ret = hisi_zip_vf_q_assign(hisi_zip, hisi_zip->ctrl->num_vfs);
- if (ret) {
- dev_err(dev, "Failed to assign vf queues!\n");
- return ret;
- }
- }
-
- ret = hisi_zip_vf_reset_done(hisi_zip);
- if (ret) {
- dev_err(dev, "Failed to start VFs!\n");
- return -EPERM;
- }
-
- hisi_qm_restart_done(qm);
- hisi_zip_hw_error_init(hisi_zip);
-
- return 0;
-}
-
-static int hisi_zip_controller_reset(struct hisi_zip *hisi_zip)
-{
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &qm->pdev->dev;
- int ret;
-
- dev_info(dev, "Controller resetting...\n");
-
- ret = hisi_zip_controller_reset_prepare(hisi_zip);
- if (ret)
- return ret;
-
- ret = hisi_zip_soft_reset(hisi_zip);
- if (ret) {
- dev_err(dev, "Controller reset failed (%d)\n", ret);
- return ret;
- }
-
- ret = hisi_zip_controller_reset_done(hisi_zip);
- if (ret)
- return ret;
-
- clear_bit(HISI_ZIP_RESET, &hisi_zip->status);
-
- dev_info(dev, "Controller reset complete\n");
-
- return ret;
-}
-
-static pci_ers_result_t hisi_zip_slot_reset(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- int ret;
-
- if (pdev->is_virtfn)
- return PCI_ERS_RESULT_RECOVERED;
-
- dev_info(&pdev->dev, "Requesting reset due to PCI error\n");
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
- ret = hisi_zip_controller_reset(hisi_zip);
- if (ret) {
- dev_err(&pdev->dev, "hisi_zip controller reset failed (%d)\n",
- ret);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void hisi_zip_set_hw_error(struct hisi_zip *hisi_zip, bool state)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &zip->qm;
-
- if (qm->fun_type == QM_HW_VF)
- return;
-
- if (state)
- hisi_qm_hw_error_init(qm, QM_BASE_CE,
- QM_BASE_NFE | QM_ACC_WB_NOT_READY_TIMEOUT,
- 0, QM_DB_RANDOM_INVALID);
- else
- hisi_qm_hw_error_uninit(qm);
-
- hisi_zip_hw_error_set_state(zip, state);
-}
-
-static int hisi_zip_get_hw_error_status(struct hisi_zip *hisi_zip)
-{
- u32 err_sts;
-
- err_sts = readl(hisi_zip->qm.io_base + HZIP_CORE_INT_STATUS) &
- HZIP_CORE_INT_STATUS_M_ECC;
- if (err_sts)
- return err_sts;
-
- return 0;
-}
-
-static int hisi_zip_check_hw_error(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- struct hisi_qm *qm = &zip->qm;
- int ret;
-
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
- ret = hisi_qm_get_hw_error_status(qm);
- if (ret)
- return ret;
-
- return hisi_zip_get_hw_error_status(zip);
-}
-
-static void hisi_zip_reset_prepare(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &pdev->dev;
- u32 delay = 0;
- int ret;
-
- hisi_zip_set_hw_error(hisi_zip, HZIP_HW_ERROR_IRQ_DISABLE);
-
- while (hisi_zip_check_hw_error(hisi_zip)) {
- msleep(++delay);
- if (delay > HZIP_RESET_WAIT_TIMEOUT)
- return;
- }
-
- ret = hisi_zip_reset_prepare_ready(hisi_zip);
- if (ret) {
- dev_err(dev, "FLR not ready!\n");
- return;
- }
-
- ret = hisi_zip_vf_reset_prepare(hisi_zip, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to prepare reset!\n");
- return;
- }
-
- ret = hisi_qm_stop(qm, QM_FLR);
- if (ret) {
- dev_err(dev, "Fails to stop QM!\n");
- return;
- }
-
- dev_info(dev, "FLR resetting...\n");
-}
-
-static void hisi_zip_flr_reset_complete(struct hisi_zip *hisi_zip)
-{
- struct pci_dev *pdev = hisi_zip->qm.pdev;
- struct hisi_zip *zip = pci_get_drvdata(pci_physfn(pdev));
- struct device *dev = &zip->qm.pdev->dev;
- u32 id;
-
- pci_read_config_dword(zip->qm.pdev, PCI_COMMAND, &id);
- if (id == HZIP_PCI_COMMAND_INVALID)
- dev_err(dev, "Device can not be used!\n");
-
- clear_bit(HISI_ZIP_RESET, &zip->status);
-}
-
-static void hisi_zip_reset_done(struct pci_dev *pdev)
-{
- struct hisi_zip *hisi_zip = pci_get_drvdata(pdev);
- struct hisi_qm *qm = &hisi_zip->qm;
- struct device *dev = &pdev->dev;
- int ret;
-
- hisi_zip_set_hw_error(hisi_zip, HZIP_HW_ERROR_IRQ_ENABLE);
-
- ret = hisi_qm_restart(qm);
- if (ret) {
- dev_err(dev, "Failed to start QM!\n");
- goto flr_done;
- }
-
- if (pdev->is_physfn) {
- hisi_zip_set_user_domain_and_cache(hisi_zip);
- if (hisi_zip->ctrl->num_vfs)
- hisi_zip_vf_q_assign(hisi_zip,
- hisi_zip->ctrl->num_vfs);
- ret = hisi_zip_vf_reset_done(hisi_zip);
- if (ret) {
- dev_err(dev, "Failed to start VFs!\n");
- goto flr_done;
- }
- }
-
-flr_done:
- hisi_zip_flr_reset_complete(hisi_zip);
-
- dev_info(dev, "FLR reset complete\n");
+ hisi_qm_del_from_list(qm, &zip_devices);
}
static const struct pci_error_handlers hisi_zip_err_handler = {
- .error_detected = hisi_zip_error_detected,
- .slot_reset = hisi_zip_slot_reset,
- .reset_prepare = hisi_zip_reset_prepare,
- .reset_done = hisi_zip_reset_done,
+ .error_detected = hisi_qm_dev_err_detected,
+ .slot_reset = hisi_qm_dev_slot_reset,
+ .reset_prepare = hisi_qm_reset_prepare,
+ .reset_done = hisi_qm_reset_done,
};
static struct pci_driver hisi_zip_pci_driver = {
@@ -1556,7 +890,7 @@ static void hisi_zip_reset_done(struct pci_dev *pdev)
.remove = hisi_zip_remove,
.sriov_configure = hisi_zip_sriov_configure,
.err_handler = &hisi_zip_err_handler,
- .shutdown = hisi_zip_shutdown,
+ .shutdown = hisi_qm_dev_shutdown,
};
static void hisi_zip_register_debugfs(void)
@@ -1578,6 +912,10 @@ static int __init hisi_zip_init(void)
{
int ret;
+ INIT_LIST_HEAD(&zip_devices.list);
+ mutex_init(&zip_devices.lock);
+ zip_devices.check = NULL;
+
hisi_zip_register_debugfs();
ret = pci_register_driver(&hisi_zip_pci_driver);
--
1.8.3
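For readers tracking the refactor: the policy implemented by the removed hisi_zip_vf_q_assign() above ("we only support equal assignment") hands each VF an equal share of the queues left over after the PF's allocation, with the division remainder absorbed by the last VF. Below is a minimal stand-alone sketch of just that arithmetic; the queue counts are illustrative and none of the names are kernel API.

/* equal_assign.c - sketch of the equal-assignment policy used when
 * distributing leftover queue pairs among SR-IOV VFs. ctrl_q_num,
 * qp_num and num_vfs are made-up example inputs.
 */
#include <stdio.h>

int main(void)
{
	unsigned int ctrl_q_num = 1024;	/* queues the controller owns */
	unsigned int qp_num = 256;	/* queues already taken by the PF */
	unsigned int num_vfs = 3;
	unsigned int remain = ctrl_q_num - qp_num;
	unsigned int q_base = qp_num;	/* VF queues start after the PF's */
	unsigned int q_num = remain / num_vfs;
	unsigned int i;

	if (!num_vfs || remain < num_vfs)
		return 1;	/* mirrors the -EINVAL checks in the driver */

	for (i = 1; i <= num_vfs; i++) {
		if (i == num_vfs)	/* last VF absorbs the remainder */
			q_num += remain % num_vfs;
		printf("VF%u: base=%u num=%u\n", i, q_base, q_num);
		q_base += q_num;
	}
	return 0;
}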

16 Apr '20
From: Eric Auger <eric.auger(a)redhat.com>
mainline inclusion
from mainline-5.3
commit b9a7f9816483b193
category: bugfix
bugzilla: 17401
CVE: NA
-------------------------------------------------
Several call sites are about to check whether a device belongs
to the PCI sub-hierarchy of a candidate PCI-PCI bridge.
Introduce a helper to perform that check.
Signed-off-by: Eric Auger <eric.auger(a)redhat.com>
Reviewed-by: Lu Baolu <baolu.lu(a)linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel(a)suse.de>
Signed-off-by: Xiongfeng Wang <wangxiongfeng2(a)huawei.com>
Reviewed-by: Hanjun Guo <guohanjun(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/iommu/intel-iommu.c | 37 +++++++++++++++++++++++++++++--------
1 file changed, 29 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f6d7955..2f52ea8 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -826,12 +826,39 @@ static int iommu_dummy(struct device *dev)
return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
+/**
+ * is_downstream_to_pci_bridge - test if a device belongs to the PCI
+ * sub-hierarchy of a candidate PCI-PCI bridge
+ * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
+ * @bridge: the candidate PCI-PCI bridge
+ *
+ * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
+ */
+static bool
+is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
+{
+ struct pci_dev *pdev, *pbridge;
+
+ if (!dev_is_pci(dev) || !dev_is_pci(bridge))
+ return false;
+
+ pdev = to_pci_dev(dev);
+ pbridge = to_pci_dev(bridge);
+
+ if (pbridge->subordinate &&
+ pbridge->subordinate->number <= pdev->bus->number &&
+ pbridge->subordinate->busn_res.end >= pdev->bus->number)
+ return true;
+
+ return false;
+}
+
static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
struct intel_iommu *iommu;
struct device *tmp;
- struct pci_dev *ptmp, *pdev = NULL;
+ struct pci_dev *pdev = NULL;
u16 segment = 0;
int i;
@@ -877,13 +904,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
goto out;
}
- if (!pdev || !dev_is_pci(tmp))
- continue;
-
- ptmp = to_pci_dev(tmp);
- if (ptmp->subordinate &&
- ptmp->subordinate->number <= pdev->bus->number &&
- ptmp->subordinate->busn_res.end >= pdev->bus->number)
+ if (is_downstream_to_pci_bridge(dev, tmp))
goto got_pdev;
}
--
1.8.3
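To make the predicate concrete: is_downstream_to_pci_bridge() reduces to a bus-number interval test — @dev sits below @bridge exactly when the bridge has a subordinate bus and the device's bus number lies in [subordinate->number, subordinate->busn_res.end]. A small stand-alone model of that test with plain integers follows; the function and values are illustrative, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Plain-integer model of is_downstream_to_pci_bridge(): a device is in
 * a bridge's PCI sub-hierarchy when its bus number lies inside the
 * bridge's subordinate bus range. has_subordinate models the
 * pbridge->subordinate != NULL check.
 */
static bool downstream_of(bool has_subordinate,
			  unsigned int sub_start,	/* subordinate->number */
			  unsigned int sub_end,		/* subordinate->busn_res.end */
			  unsigned int dev_bus)		/* pdev->bus->number */
{
	return has_subordinate && sub_start <= dev_bus && sub_end >= dev_bus;
}

int main(void)
{
	/* bridge covering buses 2..4: a device on bus 3 is downstream */
	printf("%d\n", downstream_of(true, 2, 4, 3));	/* prints 1 */
	printf("%d\n", downstream_of(true, 2, 4, 5));	/* prints 0 */
	return 0;
}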
From: fengsheng <fengsheng5(a)huawei.com>
driver inclusion
category: cleanup
bugzilla: NA
CVE: NA
1. sfc cleancode: coding-style and macro-formatting fixes, plus a driver version bump
Signed-off-by: fengsheng <fengsheng5(a)huawei.com>
Reviewed-by: zhangmu <zhangmu1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/mtd/hisilicon/sfc/hrd_common.h | 54 +++++++++++++--------------
drivers/mtd/hisilicon/sfc/hrd_sfc_driver.c | 4 +-
drivers/mtd/hisilicon/sfc/hrd_sflash_core.c | 12 ++----
drivers/mtd/hisilicon/sfc/hrd_sflash_core.h | 2 +
drivers/mtd/hisilicon/sfc/hrd_sflash_driver.h | 6 +--
drivers/mtd/hisilicon/sfc/hrd_sflash_hal.c | 5 +--
drivers/mtd/hisilicon/sfc/hrd_sflash_hal.h | 1 +
drivers/mtd/hisilicon/sfc/hrd_sflash_spec.h | 4 +-
8 files changed, 42 insertions(+), 46 deletions(-)
diff --git a/drivers/mtd/hisilicon/sfc/hrd_common.h b/drivers/mtd/hisilicon/sfc/hrd_common.h
index d36a7a3..71dcaa9 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_common.h
+++ b/drivers/mtd/hisilicon/sfc/hrd_common.h
@@ -40,39 +40,39 @@
#define HRD_COMMON_ERR_RES_NOT_EXIST (int)(HRD_COMMON_ERR_BASE - 13)
/* 16 bit nibble swap. example 0x1234 -> 0x2143 */
-#define HRD_NIBBLE_SWAP_16BIT(X) ((((X)&0xf) << 4) | \
- (((X)&0xF0) >> 4) | \
- (((X)&0xF00) << 4) | \
- (((X)&0xF000) >> 4))
+#define HRD_NIBBLE_SWAP_16BIT(X) ((((X) & 0xf) << 4) | \
+ (((X) & 0xF0) >> 4) | \
+ (((X) & 0xF00) << 4) | \
+ (((X) & 0xF000) >> 4))
/* 32 bit nibble swap. example 0x12345678 -> 0x21436587 */
-#define HRD_NIBBLE_SWAP_32BIT(X) (((X&0xF) << 4) | \
- (((X)&0xF0) >> 4) | \
- (((X)&0xF00) << 4) | \
- (((X)&0xF000) >> 4) | \
- (((X)&0xF0000) << 4) | \
- (((X)&0xF00000) >> 4) | \
- (((X)&0xF000000) << 4) | \
- (((X)&0xF0000000) >> 4))
+#define HRD_NIBBLE_SWAP_32BIT(X) ((((X) & 0xF) << 4) | \
+ (((X) & 0xF0) >> 4) | \
+ (((X) & 0xF00) << 4) | \
+ (((X) & 0xF000) >> 4) | \
+ (((X) & 0xF0000) << 4) | \
+ (((X) & 0xF00000) >> 4) | \
+ (((X) & 0xF000000) << 4) | \
+ (((X) & 0xF0000000) >> 4))
/* 16 bit byte swap. example 0x1234->0x3412 */
-#define HRD_BYTE_SWAP_16BIT(X) ((((X)&0xFF)<<8) | (((X)&0xFF00)>>8))
+#define HRD_BYTE_SWAP_16BIT(X) ((((X) & 0xFF) << 8) | (((X) & 0xFF00) >> 8))
/* 32 bit byte swap. example 0x12345678->0x78563412 */
-#define HRD_BYTE_SWAP_32BIT(X) ((((X)&0xFF)<<24) | \
- (((X)&0xFF00)<<8) | \
- (((X)&0xFF0000)>>8) | \
- (((X)&0xFF000000)>>24))
+#define HRD_BYTE_SWAP_32BIT(X) ((((X) & 0xFF) << 24) | \
+ (((X) & 0xFF00) << 8) | \
+ (((X) & 0xFF0000) >> 8) | \
+ (((X) & 0xFF000000) >> 24))
/* 64 bit byte swap. example 0x11223344.55667788 -> 0x88776655.44332211 */
-#define HRD_BYTE_SWAP_64BIT(X) ((l64) ((((X)&0xFFULL)<<56) | \
- (((X)&0xFF00ULL)<<40) | \
- (((X)&0xFF0000ULL)<<24) | \
- (((X)&0xFF000000ULL)<<8) | \
- (((X)&0xFF00000000ULL)>>8) | \
- (((X)&0xFF0000000000ULL)>>24) | \
- (((X)&0xFF000000000000ULL)>>40) | \
- (((X)&0xFF00000000000000ULL)>>56)))
+#define HRD_BYTE_SWAP_64BIT(X) ((l64) ((((X) & 0xFFULL) << 56) | \
+ (((X) & 0xFF00ULL) << 40) | \
+ (((X) & 0xFF0000ULL) << 24) | \
+ (((X) & 0xFF000000ULL) << 8) | \
+ (((X) & 0xFF00000000ULL) >> 8) | \
+ (((X) & 0xFF0000000000ULL) >> 24) | \
+ (((X) & 0xFF000000000000ULL) >> 40) | \
+ (((X) & 0xFF00000000000000ULL) >> 56)))
/* -- Endianess macros. */
#ifdef HRD_ENDNESS_BIGEND
@@ -91,10 +91,8 @@
#define HRD_64BIT_BE(X) HRD_BYTE_SWAP_64BIT(X)
#endif
-#define VOID void
-
#ifndef NULL
-#define NULL ((VOID *)0)
+#define NULL ((void *)0)
#endif
#define MTD_FLASH_MAP_DEBUG
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sfc_driver.c b/drivers/mtd/hisilicon/sfc/hrd_sfc_driver.c
index 6a10106..8935263 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sfc_driver.c
+++ b/drivers/mtd/hisilicon/sfc/hrd_sfc_driver.c
@@ -26,9 +26,9 @@
#include "hrd_common.h"
#include "hrd_sflash_driver.h"
-#define SFC_DRIVER_VERSION "1.8.15.0"
+#define SFC_DRIVER_VERSION "1.9.39.0"
-static const char *g_sflashMtdList[] = { "sflash", NULL };
+static const char *g_sflashMtdList[] = {"sflash", NULL};
static unsigned int hrd_flash_info_fill(struct maps_init_info *maps,
struct resource *flash_iores, struct platform_device *pdev)
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_core.c b/drivers/mtd/hisilicon/sfc/hrd_sflash_core.c
index 7341f9e..68547d8 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_core.c
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_core.c
@@ -28,7 +28,6 @@
#include <linux/signal.h>
#include <linux/types.h>
#include "hrd_common.h"
-#include "hrd_sflash_driver.h"
#include "hrd_sflash_spec.h"
#include "hrd_sflash_core.h"
@@ -205,7 +204,6 @@ s32 SFC_ClearStatus(struct SFC_SFLASH_INFO *sflash)
(void)SFC_ClearInt(sflash->sfc_reg_base);
if (sflash->manufacturerId == HISI_SPANSION_MANF_ID) {
-
/* 30 for spansion , clear status */
SFC_RegisterWrite(sflash->sfc_reg_base + CMD_INS, 0x30);
@@ -234,7 +232,6 @@ void SFC_CheckErr(struct SFC_SFLASH_INFO *sflash)
unsigned long delay_us = 50; /* delay 50us */
if (sflash->manufacturerId == HISI_SPANSION_MANF_ID) {
-
ulRegValue = SFC_ReadStatus(sflash);
if (ulRegValue == WAIT_TIME_OUT) {
pr_err("[SFC] [%s %d]: SFC_ReadStatus time out\n",
@@ -362,7 +359,7 @@ s32 SFC_RegWordAlignRead(struct SFC_SFLASH_INFO *sflash,
u32 ulRegValue;
s32 ulRet;
- if (!ulReadLen || ulReadLen > SFC_HARD_BUF_LEN || (ulReadLen&0x3)) {
+ if (!ulReadLen || ulReadLen > SFC_HARD_BUF_LEN || (ulReadLen & 0x3)) {
pr_err("[SFC] [%s %d]: len=%u err\n", __func__, __LINE__, ulReadLen);
return HRD_ERR;
}
@@ -379,7 +376,7 @@ s32 SFC_RegWordAlignRead(struct SFC_SFLASH_INFO *sflash,
ulRegValue = SFC_RegisterRead(sflash->sfc_reg_base + CMD_CONFIG);
ulRegValue &= (~(0xff << DATA_CNT) & (~(1 << SEL_CS)));
ulRegValue |=
- ((ulReadLen-1) << DATA_CNT) | (1 << ADDR_EN) | (1 << DATA_EN) | (1 << RW_DATA)
+ ((ulReadLen - 1) << DATA_CNT) | (1 << ADDR_EN) | (1 << DATA_EN) | (1 << RW_DATA)
| (SFC_CHIP_CS << SEL_CS) | (0x1 << START);
wmb();
@@ -398,7 +395,6 @@ s32 SFC_RegWordAlignRead(struct SFC_SFLASH_INFO *sflash,
pulData[i] = SFC_RegisterRead(sflash->sfc_reg_base + DATABUFFER1 + (u32)(0x4 * i));
return ulRet;
-
}
s32 SFC_RegByteRead(struct SFC_SFLASH_INFO *sflash,
@@ -448,7 +444,7 @@ s32 SFC_RegWordAlignWrite(struct SFC_SFLASH_INFO *sflash,
s32 ulRet;
ulRet = SFC_WriteEnable(sflash);
- if (!ulWriteLen || ulWriteLen > SFC_HARD_BUF_LEN || (ulWriteLen&0x3)) {
+ if ((!ulWriteLen) || (ulWriteLen > SFC_HARD_BUF_LEN) || (ulWriteLen & 0x3)) {
pr_err("[SFC] [%s %d]: len=%u err\n", __func__, __LINE__, ulWriteLen);
ulRet = HRD_ERR;
goto rel;
@@ -471,7 +467,7 @@ s32 SFC_RegWordAlignWrite(struct SFC_SFLASH_INFO *sflash,
ulRegValue = SFC_RegisterRead(sflash->sfc_reg_base + CMD_CONFIG);
ulRegValue &=
(~(0xff << DATA_CNT)) & (~(1 << RW_DATA) & (~(1 << SEL_CS)));
- ulRegValue |= ((ulWriteLen-1) << DATA_CNT) | (1 << ADDR_EN) | (1 << DATA_EN)
+ ulRegValue |= ((ulWriteLen - 1) << DATA_CNT) | (1 << ADDR_EN) | (1 << DATA_EN)
| (SFC_CHIP_CS << SEL_CS) | (0x1 << START);
wmb();
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_core.h b/drivers/mtd/hisilicon/sfc/hrd_sflash_core.h
index 9002c3e..56c4417 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_core.h
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_core.h
@@ -17,6 +17,8 @@
#ifndef __HRD_SFLASH_CORE_H__
#define __HRD_SFLASH_CORE_H__
+#include "hrd_sflash_driver.h"
+
#define SFC_HARD_BUF_LEN (256)
#define SPI_CMD_SR_WIP 1 /* Write in Progress bit in status register position */
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_driver.h b/drivers/mtd/hisilicon/sfc/hrd_sflash_driver.h
index 3494787..f659758 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_driver.h
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_driver.h
@@ -14,8 +14,8 @@
*
*/
-#ifndef _HRD_SLASH_DRIVER_H
-#define _HRD_SLASH_DRIVER_H
+#ifndef _HRD_SFLASH_DRIVER_H
+#define _HRD_SFLASH_DRIVER_H
#include <linux/mtd/map.h>
@@ -102,4 +102,4 @@ struct SFC_SFLASH_INFO {
extern struct mtd_info *sflash_probe(struct map_info *map, struct resource *sfc_regres);
extern void sflash_destroy(struct mtd_info *mtd);
-#endif /* _HRD_SLASH_DRIVER_H */
+#endif /* _HRD_SFLASH_DRIVER_H */
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.c b/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.c
index 8f5b387..ec9887a7 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.c
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.c
@@ -700,7 +700,6 @@ s32 SFC_BlockErase(struct SFC_SFLASH_INFO *sflash, u32 ulAddr, u32 ErCmd)
rel:
SFC_FlashUnlock(sflash);
return ulRet;
-
}
static s32 _SFC_RegModeWrite(struct SFC_SFLASH_INFO *sflash,
@@ -727,7 +726,7 @@ static s32 _SFC_RegModeWrite(struct SFC_SFLASH_INFO *sflash,
}
if (ulRemain >= 0x4) {
- slRet = SFC_RegWordAlignWrite(sflash, (const u32 *)(pucSrc + i), offset + i, ulRemain&(~0x3));
+ slRet = SFC_RegWordAlignWrite(sflash, (const u32 *)(pucSrc + i), offset + i, ulRemain & (~0x3));
if (slRet != HRD_OK) {
pr_err("[SFC] [%s %d]: SFC_RegWordAlignWrite fail\n", __func__, __LINE__);
return slRet;
@@ -805,7 +804,7 @@ s32 SFC_RegModeRead(struct SFC_SFLASH_INFO *sflash,
}
if (ulRemain >= 0x4) {
- ret = SFC_RegWordAlignRead(sflash, offset + i, (u32 *) (pucDest + i), ulRemain&(~0x3));
+ ret = SFC_RegWordAlignRead(sflash, offset + i, (u32 *) (pucDest + i), ulRemain & (~0x3));
if (ret != HRD_OK) {
pr_err("[SFC] [%s %d]: SFC_RegWordAlignRead fail\n", __func__, __LINE__);
return ret;
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.h b/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.h
index 78c921c..f612731 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.h
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_hal.h
@@ -16,6 +16,7 @@
#ifndef __HRD_SFLASH_HAL_H__
#define __HRD_SFLASH_HAL_H__
+#include "hrd_sflash_driver.h"
extern void SFC_CheckErr(struct SFC_SFLASH_INFO *sflash);
extern s32 SFC_RegModeRead(struct SFC_SFLASH_INFO *sflash, u32 offset,
diff --git a/drivers/mtd/hisilicon/sfc/hrd_sflash_spec.h b/drivers/mtd/hisilicon/sfc/hrd_sflash_spec.h
index a59965b..151957d 100644
--- a/drivers/mtd/hisilicon/sfc/hrd_sflash_spec.h
+++ b/drivers/mtd/hisilicon/sfc/hrd_sflash_spec.h
@@ -14,8 +14,8 @@
*
*/
-#ifndef __SPI_FLASH_SPEC_H__
-#define __SPI_FLASH_SPEC_H__
+#ifndef __HRD_SFLASH_SPEC_H__
+#define __HRD_SFLASH_SPEC_H__
#define SFLASH_DEFAULT_RDID_OPCD 0x9F /* Default Read ID */
#define SFLASH_DEFAULT_WREN_OPCD 0x06 /* Default Write Enable */
--
1.8.3
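Since the reworked swap macros are pure bit arithmetic, they are easy to sanity-check outside the kernel. The self-contained test below exercises three of them with the example values from their own comments; the macro bodies are copied from the patch, while the test harness itself is illustrative.

#include <assert.h>
#include <stdio.h>

/* Swap macros as they read after this cleanup. */
#define HRD_BYTE_SWAP_16BIT(X) ((((X) & 0xFF) << 8) | (((X) & 0xFF00) >> 8))
#define HRD_BYTE_SWAP_32BIT(X) ((((X) & 0xFF) << 24) | \
				(((X) & 0xFF00) << 8) | \
				(((X) & 0xFF0000) >> 8) | \
				(((X) & 0xFF000000) >> 24))
#define HRD_NIBBLE_SWAP_16BIT(X) ((((X) & 0xf) << 4) | \
				  (((X) & 0xF0) >> 4) | \
				  (((X) & 0xF00) << 4) | \
				  (((X) & 0xF000) >> 4))

int main(void)
{
	assert(HRD_BYTE_SWAP_16BIT(0x1234u) == 0x3412u);	/* 0x1234 -> 0x3412 */
	assert(HRD_BYTE_SWAP_32BIT(0x12345678u) == 0x78563412u);
	assert(HRD_NIBBLE_SWAP_16BIT(0x1234u) == 0x2143u);
	puts("swap macros behave as their comments promise");
	return 0;
}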

16 Apr '20
From: Lyude Paul <lyude(a)redhat.com>
commit bf502391353b928e63096127e5fd8482080203f5 upstream.
This supports RMI4 and everything seems to work, including the touchpad
buttons. So, let's enable this by default.
Signed-off-by: Lyude Paul <lyude(a)redhat.com>
Cc: stable(a)vger.kernel.org
Link: https://lore.kernel.org/r/20200204194322.112638-1-lyude@redhat.com
Signed-off-by: Dmitry Torokhov <dmitry.torokhov(a)gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/input/mouse/synaptics.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index e8d1134..064be84 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -172,6 +172,7 @@ void synaptics_reset(struct psmouse *psmouse)
"LEN004a", /* W541 */
"LEN005b", /* P50 */
"LEN005e", /* T560 */
+ "LEN006c", /* T470s */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
--
1.8.3

16 Apr '20
From: Ard Biesheuvel <ard.biesheuvel(a)linaro.org>
mainline inclusion
from mainline-v4.20-rc1
commit ab8085c130edd65be0d95cc95c28b51c4c6faf9d
category: bugfix
bugzilla: NA
CVE: NA
-----------------------------------------------
As it turns out, the AVX2 multibuffer SHA routines are currently
broken [0], in a way that would have likely been noticed if this
code were in wide use. Since the code is too complicated to be
maintained by anyone except the original authors, and since the
performance benefits for real-world use cases are debatable to
begin with, it is better to drop it entirely for the moment.
[0] https://marc.info/?l=linux-crypto-vger&m=153476243825350&w=2
Suggested-by: Eric Biggers <ebiggers(a)google.com>
Cc: Megha Dey <megha.dey(a)linux.intel.com>
Cc: Tim Chen <tim.c.chen(a)linux.intel.com>
Cc: Geert Uytterhoeven <geert(a)linux-m68k.org>
Cc: Martin Schwidefsky <schwidefsky(a)de.ibm.com>
Cc: Heiko Carstens <heiko.carstens(a)de.ibm.com>
Cc: Thomas Gleixner <tglx(a)linutronix.de>
Cc: Ingo Molnar <mingo(a)redhat.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel(a)linaro.org>
Signed-off-by: Herbert Xu <herbert(a)gondor.apana.org.au>
Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
MAINTAINERS | 8 -
arch/m68k/configs/amiga_defconfig | 1 -
arch/m68k/configs/apollo_defconfig | 1 -
arch/m68k/configs/atari_defconfig | 1 -
arch/m68k/configs/bvme6000_defconfig | 1 -
arch/m68k/configs/hp300_defconfig | 1 -
arch/m68k/configs/mac_defconfig | 1 -
arch/m68k/configs/multi_defconfig | 1 -
arch/m68k/configs/mvme147_defconfig | 1 -
arch/m68k/configs/mvme16x_defconfig | 1 -
arch/m68k/configs/q40_defconfig | 1 -
arch/m68k/configs/sun3_defconfig | 1 -
arch/m68k/configs/sun3x_defconfig | 1 -
arch/s390/configs/debug_defconfig | 1 -
arch/s390/configs/performance_defconfig | 1 -
arch/x86/crypto/Makefile | 3 -
arch/x86/crypto/sha1-mb/Makefile | 14 -
arch/x86/crypto/sha1-mb/sha1_mb.c | 1011 -------------------
arch/x86/crypto/sha1-mb/sha1_mb_ctx.h | 134 ---
arch/x86/crypto/sha1-mb/sha1_mb_mgr.h | 110 --
arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S | 287 ------
arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S | 304 ------
arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c | 64 --
arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S | 209 ----
arch/x86/crypto/sha1-mb/sha1_x8_avx2.S | 492 ---------
arch/x86/crypto/sha256-mb/Makefile | 14 -
arch/x86/crypto/sha256-mb/sha256_mb.c | 1013 -------------------
arch/x86/crypto/sha256-mb/sha256_mb_ctx.h | 134 ---
arch/x86/crypto/sha256-mb/sha256_mb_mgr.h | 108 --
.../crypto/sha256-mb/sha256_mb_mgr_datastruct.S | 304 ------
.../crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S | 307 ------
.../x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c | 65 --
.../crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S | 214 ----
arch/x86/crypto/sha256-mb/sha256_x8_avx2.S | 598 -----------
arch/x86/crypto/sha512-mb/Makefile | 12 -
arch/x86/crypto/sha512-mb/sha512_mb.c | 1047 --------------------
arch/x86/crypto/sha512-mb/sha512_mb_ctx.h | 128 ---
arch/x86/crypto/sha512-mb/sha512_mb_mgr.h | 104 --
.../crypto/sha512-mb/sha512_mb_mgr_datastruct.S | 281 ------
.../crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S | 297 ------
.../x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c | 69 --
.../crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S | 224 -----
arch/x86/crypto/sha512-mb/sha512_x4_avx2.S | 531 ----------
crypto/Kconfig | 62 --
crypto/Makefile | 1 -
crypto/mcryptd.c | 675 -------------
include/crypto/mcryptd.h | 114 ---
47 files changed, 8952 deletions(-)
delete mode 100644 arch/x86/crypto/sha1-mb/Makefile
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb.c
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
delete mode 100644 arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
delete mode 100644 arch/x86/crypto/sha256-mb/Makefile
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb.c
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
delete mode 100644 arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
delete mode 100644 arch/x86/crypto/sha512-mb/Makefile
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb.c
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
delete mode 100644 arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
delete mode 100644 crypto/mcryptd.c
delete mode 100644 include/crypto/mcryptd.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 588fc68..b143d31 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7565,14 +7565,6 @@ S: Supported
F: drivers/infiniband/hw/i40iw/
F: include/uapi/rdma/i40iw-abi.h
-INTEL SHA MULTIBUFFER DRIVER
-M: Megha Dey <megha.dey(a)linux.intel.com>
-R: Tim Chen <tim.c.chen(a)linux.intel.com>
-L: linux-crypto(a)vger.kernel.org
-S: Supported
-F: arch/x86/crypto/sha*-mb/
-F: crypto/mcryptd.c
-
INTEL TELEMETRY DRIVER
M: Souvik Kumar Chakravarty <souvik.k.chakravarty(a)intel.com>
L: platform-driver-x86(a)vger.kernel.org
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 93a3c3c..85904b7 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -621,7 +621,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index e3d0efd..9b3818b 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -578,7 +578,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 75ac0c7..7696778 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -599,7 +599,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index c6e4927..7dd264d 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index b00d1c4..515f743 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -580,7 +580,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 85cac37..8e1038c 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -602,7 +602,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index b3a5d1e..62c8aaa 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -684,7 +684,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 0ca2260..733973f 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 8e3d10d..fee30cc 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index ff7e653..eebf9c9 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -593,7 +593,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 612cf46..dabc543 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -571,7 +571,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index a6a7bb6..0d9a5c2 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -572,7 +572,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 941d8cc..259d169 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -668,7 +668,6 @@ CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index eb6f75f..37fd60c 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -610,7 +610,6 @@ CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index a450ad5..9edfa54 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -60,9 +60,6 @@ endif
ifeq ($(avx2_supported),yes)
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
- obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb/
- obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb/
- obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb/
obj-$(CONFIG_CRYPTO_MORUS1280_AVX2) += morus1280-avx2.o
endif
diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
deleted file mode 100644
index 815ded3..00000000
--- a/arch/x86/crypto/sha1-mb/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-OBJECT_FILES_NON_STANDARD := y
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb.o
- sha1-mb-y := sha1_mb.o sha1_mb_mgr_flush_avx2.o \
- sha1_mb_mgr_init_avx2.o sha1_mb_mgr_submit_avx2.o sha1_x8_avx2.o
-endif
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
deleted file mode 100644
index b938056..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ /dev/null
@@ -1,1011 +0,0 @@
-/*
- * Multi buffer SHA1 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha1_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha1_mb_alg_state;
-
-struct sha1_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
- (struct sha1_mb_mgr *state, struct job_sha1 *job);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
- (struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
- (struct sha1_mb_mgr *state);
-
-static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA1_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA1_PADLENGTHFIELD_SIZE;
-
-#if SHA1_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA1_LOG2_BLOCK_SIZE;
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
- struct sha1_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA1_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA1_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA1_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr,
- &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha1_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha1_hash_ctx *)
- sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha1_hash_ctx
- *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user.
- * If it is not ready, resubmit the job to finish processing.
- * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
- * Otherwise, all jobs currently being managed by the hash_ctx_mgr
- * still need processing.
- */
- struct sha1_hash_ctx *ctx;
-
- ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
- return sha1_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
-{
- sha1_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
- struct sha1_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- return ctx;
- }
-
- /*
- * If we made it here, there were no errors during this call to
- * submit
- */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently
- * being processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
- /*
- * Compute how many bytes to copy from user buffer into
- * extra block
- */
- uint32_t copy_len = SHA1_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /*
- * The extra block should never contain more than 1 block
- * here
- */
- assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);
-
- /*
- * If the extra block buffer contains exactly 1 block, it can
- * be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha1_hash_ctx *)
- sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- return sha1_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
-{
- struct sha1_hash_ctx *ctx;
-
- while (1) {
- ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned 0, there are no more jobs in flight. */
- if (!ctx)
- return NULL;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha1_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
- * returned. Otherwise, all jobs currently being managed by the
- * sha1_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- return ctx;
- }
-}
-
-static int sha1_mb_init(struct ahash_request *areq)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA1_H0;
- sctx->job.result_digest[1] = SHA1_H1;
- sctx->job.result_digest[2] = SHA1_H2;
- sctx->job.result_digest[3] = SHA1_H3;
- sctx->job.result_digest[4] = SHA1_H4;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be32 *dst = (__be32 *) rctx->out;
-
- for (i = 0; i < 5; ++i)
- dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha1_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha1_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha1_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
-
- /* remove from work list */
- spin_lock(&cstate->work_lock);
- list_del(&rctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock(&cstate->work_lock);
- list_del(&req_ctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
- }
-
- return 0;
-}
-
-static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock(&cstate->work_lock);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock(&cstate->work_lock);
-
- mcryptd_arm_flusher(cstate, delay);
-}
-
-static int sha1_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- sha1_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha1_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- sha1_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha1_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha1_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
- HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha1_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha1_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha1_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha1_hash_ctx));
-
- return 0;
-}
-
-static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha1_mb_areq_alg = {
- .init = sha1_mb_init,
- .update = sha1_mb_update,
- .final = sha1_mb_final,
- .finup = sha1_mb_finup,
- .export = sha1_mb_export,
- .import = sha1_mb_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_hash_ctx),
- .base = {
- .cra_name = "__sha1-mb",
- .cra_driver_name = "__intel_sha1-mb",
- .cra_priority = 100,
- /*
-		 * use the ASYNC flag, as some buffers in the multi-buffer
-		 * algorithm may not have completed before the hashing
-		 * thread sleeps
- */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha1_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha1_mb_areq_init_tfm,
- .cra_exit = sha1_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha1_hash_ctx),
- }
- }
-};
-
-static int sha1_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha1_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha1_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha1_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha1_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha1_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha1_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha1_mb_async_alg = {
- .init = sha1_mb_async_init,
- .update = sha1_mb_async_update,
- .final = sha1_mb_async_final,
- .finup = sha1_mb_async_finup,
- .digest = sha1_mb_async_digest,
- .export = sha1_mb_async_export,
- .import = sha1_mb_async_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_hash_ctx),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
- .cra_init = sha1_mb_async_init_tfm,
- .cra_exit = sha1_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha1_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
-
-static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha1_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha1_hash_ctx *)
- sha1_ctx_mgr_flush(cstate->mgr);
- kernel_fpu_end();
- if (!sha_ctx) {
- pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- /* get the hash context and then flush time */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha1_mb_mod_init(void)
-{
-
- int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);
-
- sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
- sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
- sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
- sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;
-
- if (!sha1_mb_alg_state.alg_cstate)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha1_mb_alg_state;
- cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
- GFP_KERNEL);
- if (!cpu_state->mgr)
- goto err2;
- sha1_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha1_mb_alg_state.flusher = &sha1_mb_flusher;
-
- err = crypto_register_ahash(&sha1_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha1_mb_async_alg);
- if (err)
- goto err1;
-
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha1_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha1_mb_alg_state.alg_cstate);
- return -ENODEV;
-}
-
-static void __exit sha1_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha1_mb_async_alg);
- crypto_unregister_ahash(&sha1_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha1_mb_alg_state.alg_cstate);
-}
-
-module_init(sha1_mb_mod_init);
-module_exit(sha1_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS_CRYPTO("sha1");
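The glue code removed above follows one mcryptd pattern throughout: sha1_mb_update(), sha1_mb_finup() and sha1_mb_final() each queue the request into a per-CPU lane manager, return -EINPROGRESS while lanes are still filling, and rely on a later submit or the timer-driven flusher to retire the job. A minimal C sketch of that control flow, with hypothetical simplified types standing in for sha1_hash_ctx and sha1_ctx_mgr:

    /* Sketch only: "job" and "lane_mgr" are hypothetical stand-ins
     * for the sha1_hash_ctx / sha1_ctx_mgr types deleted above. */
    struct job { int error; };
    struct lane_mgr { struct job *lane[8]; int used; };

    /* Like sha1_ctx_mgr_submit(): park the job; a non-NULL return
     * means some job (not necessarily this one) just completed. */
    static struct job *submit(struct lane_mgr *m, struct job *j)
    {
        struct job *done = NULL;

        if (m->used == 8)              /* lanes full: run and retire */
            done = m->lane[--m->used];
        m->lane[m->used++] = j;        /* park the new job in a lane */
        return done;                   /* NULL means -EINPROGRESS */
    }

    /* Like sha1_ctx_mgr_flush(): force progress when the flusher
     * timer fires, so a lone request is not stuck waiting forever. */
    static struct job *flush(struct lane_mgr *m)
    {
        return m->used ? m->lane[--m->used] : NULL;
    }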
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
deleted file mode 100644
index 9454bd1..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Header file for multi buffer SHA context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha1_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-
-#ifdef HASH_CTX_DEBUG
- HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
-#endif
-};
-
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
-
-
-/* Hash Constants and Typedefs */
-#define SHA1_DIGEST_LENGTH 5
-#define SHA1_LOG2_BLOCK_SIZE 6
-
-#define SHA1_PADLENGTHFIELD_SIZE 8
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha1_ctx_mgr {
- struct sha1_mb_mgr mgr;
-};
-
-/* typedef struct sha1_ctx_mgr sha1_ctx_mgr; */
-
-struct sha1_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha1 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA1_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
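The HASH_* flags in the header above form a small per-request state machine; the update/finup/final glue paths deleted earlier combine them the same way. A self-contained sketch of that combination (the two boolean inputs are hypothetical stand-ins for crypto_ahash_walk_last() and the entry point taken):

    #define HASH_UPDATE 0x00
    #define HASH_LAST   0x01
    #define HASH_DONE   0x02
    #define HASH_FINAL  0x04

    /* Returns the manager op and updates the request's flag word. */
    static int pick_manager_op(int last_block, int is_finup_or_final,
                               int *rctx_flag)
    {
        int flag = HASH_UPDATE;

        if (last_block) {
            *rctx_flag |= HASH_DONE;   /* input exhausted after walk */
            flag = HASH_LAST;          /* manager pads and finalizes */
        }
        if (is_finup_or_final)
            *rctx_flag |= HASH_FINAL;  /* copy digest out at the end */
        return flag;
    }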
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
deleted file mode 100644
index 08ad1a9..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Header file for multi buffer SHA1 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford(a)intel.com>
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-
-#include <linux/types.h>
-
-#define NUM_SHA1_DIGEST_WORDS 5
-
-enum job_sts { STS_UNKNOWN = 0,
- STS_BEING_PROCESSED = 1,
- STS_COMPLETED = 2,
- STS_INTERNAL_ERROR = 3,
- STS_ERROR = 4
-};
-
-struct job_sha1 {
- u8 *buffer;
- u32 len;
- u32 result_digest[NUM_SHA1_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-/* SHA1 out-of-order scheduler */
-
-/* typedef uint32_t sha1_digest_array[5][8]; */
-
-struct sha1_args_x8 {
- uint32_t digest[5][8];
- uint8_t *data_ptr[8];
-};
-
-struct sha1_lane_data {
- struct job_sha1 *job_in_lane;
-};
-
-struct sha1_mb_mgr {
- struct sha1_args_x8 args;
-
- uint32_t lens[8];
-
-	/* each nibble is the index (0...7) of an unused lane */
-	uint64_t unused_lanes;
-	/* nibble 8 is set to 0xF as an "all lanes free" sentinel */
- struct sha1_lane_data ldata[8];
-};
-
-
-#define SHA1_MB_MGR_NUM_LANES_AVX2 8
-
-void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state);
-struct job_sha1 *sha1_mb_mgr_submit_avx2(struct sha1_mb_mgr *state,
- struct job_sha1 *job);
-struct job_sha1 *sha1_mb_mgr_flush_avx2(struct sha1_mb_mgr *state);
-struct job_sha1 *sha1_mb_mgr_get_comp_job_avx2(struct sha1_mb_mgr *state);
-
-#endif
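The unused_lanes field above is best read as a stack of 4-bit lane indices: the submit path pops the low nibble to claim a lane, and completion paths push the index back. With all eight lanes free, the 0xF sentinel occupies bits 32..35, which is why the flush routine further below can detect an empty manager with a single bit test (bt $(32+3)). A self-contained sketch of the pop/push that the shl/shr/or sequences in the .S files implement:

    #include <stdint.h>

    /* Claim a lane: pop the low nibble (submit path). */
    static unsigned int pop_lane(uint64_t *unused_lanes)
    {
        unsigned int lane = *unused_lanes & 0xF;

        *unused_lanes >>= 4;
        return lane;
    }

    /* Retire a lane: push its index back (flush/completion paths). */
    static void push_lane(uint64_t *unused_lanes, unsigned int lane)
    {
        *unused_lanes = (*unused_lanes << 4) | lane;
    }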
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
deleted file mode 100644
index 86688c6..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Header file for multi buffer SHA1 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford(a)intel.com>
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count is in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. Its arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# over-ridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
-
-#########################################################################
-
-#ifndef _SHA1_MB_MGR_DATASTRUCT_ASM_
-#define _SHA1_MB_MGR_DATASTRUCT_ASM_
-
-## START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-## FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-## END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
-
-########################################################################
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
- tmp = (_FIELD_OFFSET - %%tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endif
-.endstruc
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
- align = .\p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm name size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-
-#endif
-
-########################################################################
-#### Define constants
-########################################################################
-
-########################################################################
-#### Define SHA1 Out Of Order Data Structures
-########################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
-_LANE_DATA_size = _FIELD_OFFSET
-_LANE_DATA_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # SHA1_ARGS_X8
-### name size align
-FIELD _digest, 4*5*8, 16 # transposed digest
-FIELD _data_ptr, 8*8, 8 # array of pointers to data
-END_FIELDS
-
-_SHA1_ARGS_X4_size = _FIELD_OFFSET
-_SHA1_ARGS_X4_align = _STRUCT_ALIGN
-_SHA1_ARGS_X8_size = _FIELD_OFFSET
-_SHA1_ARGS_X8_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA1_ARGS_X4_size, _SHA1_ARGS_X4_align
-FIELD _lens, 4*8, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
-END_FIELDS
-
-_MB_MGR_size = _FIELD_OFFSET
-_MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
-
-
-########################################################################
-#### Define constants
-########################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-########################################################################
-#### Define JOB_SHA1 structure
-########################################################################
-
-START_FIELDS # JOB_SHA1
-
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 4, 4 # length in bytes
-FIELD _result_digest, 5*4, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
-_JOB_SHA1_size = _FIELD_OFFSET
-_JOB_SHA1_align = _STRUCT_ALIGN
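The START_FIELDS/FIELD/END_FIELDS machinery above is plain offset arithmetic: each field's offset is rounded up to its alignment, and the struct size is padded to the largest alignment seen. The same computation in C, as a sketch for readers who prefer it over assembler macros:

    #include <stddef.h>

    /* Mirror of the FIELD macro: returns the offset assigned to a
     * field of the given size/alignment and advances the cursor;
     * END_FIELDS then pads *off up to *struct_align. */
    static size_t field(size_t *off, size_t *struct_align,
                        size_t size, size_t align)
    {
        size_t o = (*off + align - 1) & ~(align - 1);

        *off = o + size;
        if (align > *struct_align)
            *struct_align = align;
        return o;
    }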
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
deleted file mode 100644
index 7cfba73..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Flush routine for SHA1 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford(a)intel.com>
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-
-.extern sha1_x8_avx2
-
-# LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-# idx must be a register not clobbered by sha1_x8_avx2
-#define idx %r8
-#define DWORD_idx %r8d
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-#define tmp2_w %ebx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 %arg1
-
-#define extra_blocks %arg2
-#define p %arg2
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rdi : state
-ENTRY(sha1_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
- # If bit (32+3) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+3, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
- offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne four(%rip), idx
- offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne five(%rip), idx
- offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne six(%rip), idx
- offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne seven(%rip), idx
-
- # copy idx to empty lanes
-copy_lane_data:
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 8
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 4*I)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- vmovdqu _lens+0*16(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqu %xmm0, _lens+0*16(state)
- vmovdqu %xmm1, _lens+1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha1_x8_avx2
- # state and idx are intact
-
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state , idx, 4) , %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), tmp2_w
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- mov tmp2_w, offset(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha1_mb_mgr_flush_avx2)
-
-
-#################################################################
-
-.align 16
-ENTRY(sha1_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- ## if bit 32+3 is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $(32+3), unused_lanes
- jc .return_null
-
- # Find min length
- vmovdqu _lens(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- test $~0xF, idx
- jnz .return_null
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), tmp2_w
-
- vmovdqu %xmm0, _result_digest(job_rax)
- movl tmp2_w, _result_digest+1*16(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha1_mb_mgr_get_comp_job_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
-.octa 0x000000000000000000000000FFFFFFF0
-
-.section .rodata.cst8, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-two:
-.quad 2
-three:
-.quad 3
-four:
-.quad 4
-five:
-.quad 5
-six:
-.quad 6
-seven:
-.quad 7
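The vpminud/vpalignr ladder used twice above computes a single minimum over the eight packed lane lengths. Because each lens[] entry packs (blocks << 4) | lane, the numeric minimum identifies both the shortest job and its lane in one scan, and idle lanes parked at 0xFFFFFFFF never win. The scalar equivalent, as a sketch:

    #include <stdint.h>

    static void find_min_lane(const uint32_t lens[8],
                              uint32_t *lane, uint32_t *blocks)
    {
        uint32_t m = lens[0];
        int i;

        for (i = 1; i < 8; i++)
            if (lens[i] < m)
                m = lens[i];
        *lane = m & 0xF;     /* low nibble: lane index */
        *blocks = m >> 4;    /* blocks left in the shortest job */
    }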
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
deleted file mode 100644
index d2add0d..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Initialization code for multi buffer SHA1 algorithm for AVX2
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "sha1_mb_mgr.h"
-
-void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state)
-{
- unsigned int j;
- state->unused_lanes = 0xF76543210ULL;
- for (j = 0; j < 8; j++) {
- state->lens[j] = 0xFFFFFFFF;
- state->ldata[j].job_in_lane = NULL;
- }
-}
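As a usage note on the 0xF76543210ULL initializer above: popping nibbles hands out lanes in order 0, 1, ..., 7 and leaves exactly 0xF, the value the submit path compares against before kicking off sha1_x8_avx2, while the 0xFFFFFFFF lens keep idle lanes from winning the min-length scan. A tiny check of that invariant:

    #include <stdint.h>

    int main(void)
    {
        uint64_t ul = 0xF76543210ULL;
        int i;

        for (i = 0; i < 8; i++)
            ul >>= 4;               /* claim lanes 0..7 in order */
        return ul == 0xF ? 0 : 1;   /* 0xF == "all lanes busy" */
    }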
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
deleted file mode 100644
index 7a93b1c..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA1 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford(a)intel.com>
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-
-.extern sha1_x8_avx2
-
-# LINUX register definitions
-arg1 = %rdi
-arg2 = %rsi
-size_offset = %rcx
-tmp2 = %rcx
-extra_blocks = %rdx
-
-# Common definitions
-#define state arg1
-#define job %rsi
-#define len2 arg2
-#define p2 arg2
-
-# idx must be a register not clobbered by sha1_x8_avx2
-idx = %r8
-DWORD_idx = %r8d
-last_len = %r8
-
-p = %r11
-start_offset = %r11
-
-unused_lanes = %rbx
-BYTE_unused_lanes = %bl
-
-job_rax = %rax
-len = %rax
-DWORD_len = %eax
-
-lane = %r12
-tmp3 = %r12
-
-tmp = %r9
-DWORD_tmp = %r9d
-
-lane_data = %r10
-
-# JOB* sha1_mb_mgr_submit_avx2(MB_MGR *state, job_sha1 *job)
-# arg 1 : rdi : state
-# arg 2 : rsi : job
-ENTRY(sha1_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
- mov unused_lanes, lane
- and $0xF, lane
- shr $4, unused_lanes
- imul $_LANE_DATA_size, lane, lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- shl $4, len
- or lane, len
-
- movl DWORD_len, _lens(state , lane, 4)
-
- # Load digest words from result_digest
- vmovdqu _result_digest(job), %xmm0
- mov _result_digest+1*16(job), DWORD_tmp
- vmovd %xmm0, _args_digest(state, lane, 4)
- vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4)
- vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4)
- vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4)
- movl DWORD_tmp, _args_digest+4*32(state , lane, 4)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xF, unused_lanes
- jne return_null
-
-start_loop:
- # Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqa %xmm0, _lens + 0*16(state)
- vmovdqa %xmm1, _lens + 1*16(state)
-
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha1_x8_avx2
-
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), DWORD_tmp
-
- vmovdqu %xmm0, _result_digest(job_rax)
- movl DWORD_tmp, _result_digest+1*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-
-ENDPROC(sha1_mb_mgr_submit_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
- .octa 0x000000000000000000000000FFFFFFF0
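Restating the gate at the top of sha1_mb_mgr_submit_avx2 above: hashing only starts once the lane pop leaves nothing but the sentinel nibble, i.e. all eight lanes hold jobs; otherwise the routine returns NULL and the caller keeps batching. A sketch of that check (a simplification, not the kernel helper):

    #include <stdint.h>

    /* Returns nonzero when, after claiming *lane for a new job, every
     * lane is occupied and sha1_x8_avx2 should be started. */
    static int claim_lane_should_run(uint64_t *unused_lanes,
                                     unsigned int *lane)
    {
        *lane = *unused_lanes & 0xF;
        *unused_lanes >>= 4;
        return *unused_lanes == 0xF;
    }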
diff --git a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
deleted file mode 100644
index 20f77aa..00000000
--- a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
+++ /dev/null
@@ -1,492 +0,0 @@
-/*
- * Multi-buffer SHA1 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford(a)intel.com>
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-## code to compute oct (8-lane) SHA1 using AVX2
-## outer calling routine takes care of save and restore of XMM registers
-
-## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15# ymm0-15
-##
-## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
-## Linux preserves: rdi rbp r8
-##
-## clobbers ymm0-15
-
-
-# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
-# "transpose" data in {r0...r7} using temps {t0...t1}
-# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
-# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
-# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
-# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
-#
-# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
-# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
-# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
-# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
-# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
-# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
-# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
-# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
-#
-
-.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
- # process top half (r0..r3) {a...d}
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
- vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
- vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
- vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
- vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
-
- # use r2 in place of t0
- # process bottom half (r4..r7) {e...h}
- vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
- vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
- vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
- vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
- vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
- vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
- vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
- vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
-
- vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
- vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
- vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
- vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
- vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
- vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
- vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
- vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
-
-.endm
-##
-## Magic functions defined in FIPS 180-1
-##
-# macro MAGIC_F0 F,B,C,D,T ## F = (D ^ (B & (C ^ D)))
-.macro MAGIC_F0 regF regB regC regD regT
- vpxor \regD, \regC, \regF
- vpand \regB, \regF, \regF
- vpxor \regD, \regF, \regF
-.endm
-
-# macro MAGIC_F1 F,B,C,D,T ## F = (B ^ C ^ D)
-.macro MAGIC_F1 regF regB regC regD regT
- vpxor \regC, \regD, \regF
- vpxor \regB, \regF, \regF
-.endm
-
-# macro MAGIC_F2 F,B,C,D,T ## F = ((B & C) | (B & D) | (C & D))
-.macro MAGIC_F2 regF regB regC regD regT
- vpor \regC, \regB, \regF
- vpand \regC, \regB, \regT
- vpand \regD, \regF, \regF
- vpor \regT, \regF, \regF
-.endm
-
-# macro MAGIC_F3 F,B,C,D,T ## F = (B ^ C ^ D)
-.macro MAGIC_F3 regF regB regC regD regT
- MAGIC_F1 \regF,\regB,\regC,\regD,\regT
-.endm
-
-# PROLD reg, imm, tmp
-.macro PROLD reg imm tmp
- vpsrld $(32-\imm), \reg, \tmp
- vpslld $\imm, \reg, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-.macro PROLD_nd reg imm tmp src
- vpsrld $(32-\imm), \src, \tmp
- vpslld $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-.macro SHA1_STEP_00_15 regA regB regC regD regE regT regF memW immCNT MAGIC
- vpaddd \immCNT, \regE, \regE
- vpaddd \memW*32(%rsp), \regE, \regE
- PROLD_nd \regT, 5, \regF, \regA
- vpaddd \regT, \regE, \regE
- \MAGIC \regF, \regB, \regC, \regD, \regT
- PROLD \regB, 30, \regT
- vpaddd \regF, \regE, \regE
-.endm
-
-.macro SHA1_STEP_16_79 regA regB regC regD regE regT regF memW immCNT MAGIC
- vpaddd \immCNT, \regE, \regE
- offset = ((\memW - 14) & 15) * 32
- vmovdqu offset(%rsp), W14
- vpxor W14, W16, W16
- offset = ((\memW - 8) & 15) * 32
- vpxor offset(%rsp), W16, W16
- offset = ((\memW - 3) & 15) * 32
- vpxor offset(%rsp), W16, W16
- vpsrld $(32-1), W16, \regF
- vpslld $1, W16, W16
- vpor W16, \regF, \regF
-
- ROTATE_W
-
- offset = ((\memW - 0) & 15) * 32
- vmovdqu \regF, offset(%rsp)
- vpaddd \regF, \regE, \regE
- PROLD_nd \regT, 5, \regF, \regA
- vpaddd \regT, \regE, \regE
- \MAGIC \regF,\regB,\regC,\regD,\regT ## FUN = MAGIC_Fi(B,C,D)
- PROLD \regB,30, \regT
- vpaddd \regF, \regE, \regE
-.endm
-
-########################################################################
-########################################################################
-########################################################################
-
-## FRAMESZ plus pushes must be an odd multiple of 8
-YMM_SAVE = (15-15)*32
-FRAMESZ = 32*16 + YMM_SAVE
-_YMM = FRAMESZ - YMM_SAVE
-
-#define VMOVPS vmovups
-
-IDX = %rax
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-inp4 = %r13
-inp5 = %r14
-inp6 = %r15
-inp7 = %rcx
-arg1 = %rdi
-arg2 = %rsi
-RSP_SAVE = %rdx
-
-# ymm0 A
-# ymm1 B
-# ymm2 C
-# ymm3 D
-# ymm4 E
-# ymm5 F AA
-# ymm6 T0 BB
-# ymm7 T1 CC
-# ymm8 T2 DD
-# ymm9 T3 EE
-# ymm10 T4 TMP
-# ymm11 T5 FUN
-# ymm12 T6 K
-# ymm13 T7 W14
-# ymm14 T8 W15
-# ymm15 T9 W16
-
-
-A = %ymm0
-B = %ymm1
-C = %ymm2
-D = %ymm3
-E = %ymm4
-F = %ymm5
-T0 = %ymm6
-T1 = %ymm7
-T2 = %ymm8
-T3 = %ymm9
-T4 = %ymm10
-T5 = %ymm11
-T6 = %ymm12
-T7 = %ymm13
-T8 = %ymm14
-T9 = %ymm15
-
-AA = %ymm5
-BB = %ymm6
-CC = %ymm7
-DD = %ymm8
-EE = %ymm9
-TMP = %ymm10
-FUN = %ymm11
-K = %ymm12
-W14 = %ymm13
-W15 = %ymm14
-W16 = %ymm15
-
-.macro ROTATE_ARGS
- TMP_ = E
- E = D
- D = C
- C = B
- B = A
- A = TMP_
-.endm
-
-.macro ROTATE_W
-TMP_ = W16
-W16 = W15
-W15 = W14
-W14 = TMP_
-.endm
-
-# 8 streams x 5 32bit words per digest x 4 bytes per word
-#define DIGEST_SIZE (8*5*4)
-
-.align 32
-
-# void sha1_x8_avx2(void **input_data, UINT128 *digest, UINT32 size)
-# arg 1 : pointer to array[8] of pointers to input data
-# arg 2 : size (in blocks) ;; assumed to be >= 1
-#
-ENTRY(sha1_x8_avx2)
-
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
- #save rsp
- mov %rsp, RSP_SAVE
- sub $FRAMESZ, %rsp
-
- #align rsp to 32 Bytes
- and $~0x1F, %rsp
-
- ## Initialize digests
- vmovdqu 0*32(arg1), A
- vmovdqu 1*32(arg1), B
- vmovdqu 2*32(arg1), C
- vmovdqu 3*32(arg1), D
- vmovdqu 4*32(arg1), E
-
- ## transpose input onto stack
- mov _data_ptr+0*8(arg1),inp0
- mov _data_ptr+1*8(arg1),inp1
- mov _data_ptr+2*8(arg1),inp2
- mov _data_ptr+3*8(arg1),inp3
- mov _data_ptr+4*8(arg1),inp4
- mov _data_ptr+5*8(arg1),inp5
- mov _data_ptr+6*8(arg1),inp6
- mov _data_ptr+7*8(arg1),inp7
-
- xor IDX, IDX
-lloop:
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), F
- I=0
-.rep 2
- VMOVPS (inp0, IDX), T0
- VMOVPS (inp1, IDX), T1
- VMOVPS (inp2, IDX), T2
- VMOVPS (inp3, IDX), T3
- VMOVPS (inp4, IDX), T4
- VMOVPS (inp5, IDX), T5
- VMOVPS (inp6, IDX), T6
- VMOVPS (inp7, IDX), T7
-
- TRANSPOSE8 T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
- vpshufb F, T0, T0
- vmovdqu T0, (I*8)*32(%rsp)
- vpshufb F, T1, T1
- vmovdqu T1, (I*8+1)*32(%rsp)
- vpshufb F, T2, T2
- vmovdqu T2, (I*8+2)*32(%rsp)
- vpshufb F, T3, T3
- vmovdqu T3, (I*8+3)*32(%rsp)
- vpshufb F, T4, T4
- vmovdqu T4, (I*8+4)*32(%rsp)
- vpshufb F, T5, T5
- vmovdqu T5, (I*8+5)*32(%rsp)
- vpshufb F, T6, T6
- vmovdqu T6, (I*8+6)*32(%rsp)
- vpshufb F, T7, T7
- vmovdqu T7, (I*8+7)*32(%rsp)
- add $32, IDX
- I = (I+1)
-.endr
- # save old digests
- vmovdqu A,AA
- vmovdqu B,BB
- vmovdqu C,CC
- vmovdqu D,DD
- vmovdqu E,EE
-
-##
-## perform 0-79 steps
-##
- vmovdqu K00_19(%rip), K
-## do rounds 0...15
- I = 0
-.rep 16
- SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 16...19
- vmovdqu ((16 - 16) & 15) * 32 (%rsp), W16
- vmovdqu ((16 - 15) & 15) * 32 (%rsp), W15
-.rep 4
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 20...39
- vmovdqu K20_39(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 40...59
- vmovdqu K40_59(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 60...79
- vmovdqu K60_79(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3
- ROTATE_ARGS
- I = (I+1)
-.endr
-
- vpaddd AA,A,A
- vpaddd BB,B,B
- vpaddd CC,C,C
- vpaddd DD,D,D
- vpaddd EE,E,E
-
- sub $1, arg2
- jne lloop
-
- # write out digests
- vmovdqu A, 0*32(arg1)
- vmovdqu B, 1*32(arg1)
- vmovdqu C, 2*32(arg1)
- vmovdqu D, 3*32(arg1)
- vmovdqu E, 4*32(arg1)
-
- # update input pointers
- add IDX, inp0
- add IDX, inp1
- add IDX, inp2
- add IDX, inp3
- add IDX, inp4
- add IDX, inp5
- add IDX, inp6
- add IDX, inp7
- mov inp0, _data_ptr (arg1)
- mov inp1, _data_ptr + 1*8(arg1)
- mov inp2, _data_ptr + 2*8(arg1)
- mov inp3, _data_ptr + 3*8(arg1)
- mov inp4, _data_ptr + 4*8(arg1)
- mov inp5, _data_ptr + 5*8(arg1)
- mov inp6, _data_ptr + 6*8(arg1)
- mov inp7, _data_ptr + 7*8(arg1)
-
- ################
- ## Postamble
-
- mov RSP_SAVE, %rsp
-
- # restore callee-saved clobbered registers
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- ret
-ENDPROC(sha1_x8_avx2)
-
-
-.section .rodata.cst32.K00_19, "aM", @progbits, 32
-.align 32
-K00_19:
-.octa 0x5A8279995A8279995A8279995A827999
-.octa 0x5A8279995A8279995A8279995A827999
-
-.section .rodata.cst32.K20_39, "aM", @progbits, 32
-.align 32
-K20_39:
-.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
-.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
-
-.section .rodata.cst32.K40_59, "aM", @progbits, 32
-.align 32
-K40_59:
-.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
-.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
-
-.section .rodata.cst32.K60_79, "aM", @progbits, 32
-.align 32
-K60_79:
-.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
-.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
-
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK:
-.octa 0x0c0d0e0f08090a0b0405060700010203
-.octa 0x0c0d0e0f08090a0b0405060700010203
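
A note for readers following the deleted SHA-1 x8 code above: the five working
variables live one 32-bit lane per stream in ymm registers, and ROTATE_ARGS
renames the register aliases each round instead of moving data. As a scalar
reference, here is a minimal C sketch of one round for a single lane (rol32,
sha1_round and the f/k arguments are illustrative names, not identifiers from
this patch):

#include <stdint.h>

static inline uint32_t rol32(uint32_t x, int r)
{
	return (x << r) | (x >> (32 - r));
}

/* One SHA-1 round for one lane; the x8 kernel performs this for all
 * eight lanes at once using ymm registers. */
static void sha1_round(uint32_t s[5], uint32_t f, uint32_t k, uint32_t w)
{
	uint32_t t = rol32(s[0], 5) + f + s[4] + k + w;

	s[4] = s[3];			/* E <- D */
	s[3] = s[2];			/* D <- C */
	s[2] = rol32(s[1], 30);		/* C <- rol30(B) */
	s[1] = s[0];			/* B <- A */
	s[0] = t;			/* A <- new value */
}

The assembly gets the variable shuffle for free by reassigning the A..E
symbols, which is all ROTATE_ARGS does.
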
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
deleted file mode 100644
index 53ad6e7..00000000
--- a/arch/x86/crypto/sha256-mb/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-OBJECT_FILES_NON_STANDARD := y
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb.o
- sha256-mb-y := sha256_mb.o sha256_mb_mgr_flush_avx2.o \
- sha256_mb_mgr_init_avx2.o sha256_mb_mgr_submit_avx2.o sha256_x8_avx2.o
-endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
deleted file mode 100644
index 97c5fc43..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ /dev/null
@@ -1,1013 +0,0 @@
-/*
- * Multi buffer SHA256 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha256_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha256_mb_alg_state;
-
-struct sha256_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_submit)
- (struct sha256_mb_mgr *state, struct job_sha256 *job);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
- (struct sha256_mb_mgr *state);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
- (struct sha256_mb_mgr *state);
-
-inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA256_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA256_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA256_PADLENGTHFIELD_SIZE;
-
-#if SHA256_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA256_LOG2_BLOCK_SIZE;
-}
-
-static struct sha256_hash_ctx
- *sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr,
- struct sha256_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA256_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA256_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA256_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha256_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha256_hash_ctx
- *sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user. If it is not ready, resubmit the job to finish processing.
- * If sha256_ctx_mgr_resubmit returned a job, it is ready to be
- * returned. Otherwise, all jobs currently being managed by the
- * hash_ctx_mgr still need processing.
- */
- struct sha256_hash_ctx *ctx;
-
- ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr);
- return sha256_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr)
-{
- sha256_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
- struct sha256_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- return ctx;
- }
-
- /* If we made it here, there was no error during this call to submit */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently
- * being processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) {
- /*
- * Compute how many bytes to copy from user buffer into
- * extra block
- */
- uint32_t copy_len = SHA256_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy(
- &ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /* The extra block should never contain more than 1 block */
- assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);
-
- /*
- * If the extra block buffer contains exactly 1 block,
- * it can be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- return sha256_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr)
-{
- struct sha256_hash_ctx *ctx;
-
- while (1) {
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned 0, there are no more jobs in flight. */
- if (!ctx)
- return NULL;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha256_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha256_ctx_mgr_resubmit returned a job, it is ready to
- * be returned. Otherwise, all jobs currently being managed by
- * the sha256_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- return ctx;
- }
-}
-
-static int sha256_mb_init(struct ahash_request *areq)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA256_H0;
- sctx->job.result_digest[1] = SHA256_H1;
- sctx->job.result_digest[2] = SHA256_H2;
- sctx->job.result_digest[3] = SHA256_H3;
- sctx->job.result_digest[4] = SHA256_H4;
- sctx->job.result_digest[5] = SHA256_H5;
- sctx->job.result_digest[6] = SHA256_H6;
- sctx->job.result_digest[7] = SHA256_H7;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be32 *dst = (__be32 *) rctx->out;
-
- for (i = 0; i < 8; ++i)
- dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha256_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha256_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha256_ctx_mgr_flush(cstate->mgr);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha256_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
-
- /* remove from work list */
- spin_lock(&cstate->work_lock);
- list_del(&rctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock(&cstate->work_lock);
- list_del(&req_ctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
- }
-
- return 0;
-}
-
-static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock(&cstate->work_lock);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock(&cstate->work_lock);
-
- mcryptd_arm_flusher(cstate, delay);
-}
-
-static int sha256_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- sha256_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- sha256_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha256_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
- HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha256_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha256_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha256_hash_ctx));
-
- return 0;
-}
-
-static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha256_mb_areq_alg = {
- .init = sha256_mb_init,
- .update = sha256_mb_update,
- .final = sha256_mb_final,
- .finup = sha256_mb_finup,
- .export = sha256_mb_export,
- .import = sha256_mb_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_hash_ctx),
- .base = {
- .cra_name = "__sha256-mb",
- .cra_driver_name = "__intel_sha256-mb",
- .cra_priority = 100,
-			/*
-			 * use ASYNC flag as some buffers in the multi-buffer
-			 * algorithm may not have completed before the hashing
-			 * thread sleeps
-			 */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha256_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha256_mb_areq_init_tfm,
- .cra_exit = sha256_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha256_hash_ctx),
- }
- }
-};
-
-static int sha256_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha256_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha256_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha256_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha256_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha256_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha256_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha256_mb_async_alg = {
- .init = sha256_mb_async_init,
- .update = sha256_mb_async_update,
- .final = sha256_mb_async_final,
- .finup = sha256_mb_async_finup,
- .export = sha256_mb_async_export,
- .import = sha256_mb_async_import,
- .digest = sha256_mb_async_digest,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_hash_ctx),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha256_mb_async_alg.halg.base.cra_list),
- .cra_init = sha256_mb_async_init_tfm,
- .cra_exit = sha256_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha256_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
-
-static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha256_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha256_hash_ctx *)
- sha256_ctx_mgr_flush(cstate->mgr);
- kernel_fpu_end();
- if (!sha_ctx) {
- pr_err("sha256_mb error: nothing got"
- " flushed for non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- /* get the hash context and then flush time */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha256_mb_mod_init(void)
-{
-
- int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha256_mb_alg_state.alg_cstate = alloc_percpu
- (struct mcryptd_alg_cstate);
-
- sha256_job_mgr_init = sha256_mb_mgr_init_avx2;
- sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2;
- sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2;
- sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2;
-
- if (!sha256_mb_alg_state.alg_cstate)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha256_mb_alg_state;
- cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr),
- GFP_KERNEL);
- if (!cpu_state->mgr)
- goto err2;
- sha256_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha256_mb_alg_state.flusher = &sha256_mb_flusher;
-
- err = crypto_register_ahash(&sha256_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha256_mb_async_alg);
- if (err)
- goto err1;
-
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha256_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha256_mb_alg_state.alg_cstate);
- return -ENODEV;
-}
-
-static void __exit sha256_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha256_mb_async_alg);
- crypto_unregister_ahash(&sha256_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha256_mb_alg_state.alg_cstate);
-}
-
-module_init(sha256_mb_mod_init);
-module_exit(sha256_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS_CRYPTO("sha256");
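
To make the glue code above easier to follow, here is a rough sketch of the
ctx-manager calling convention it implements. The functions are static to this
file, so this is illustrative only (example_digest is a hypothetical caller,
and error/status checks on ctx->error are omitted):

static void example_digest(struct sha256_ctx_mgr *mgr,
			   struct sha256_hash_ctx *ctx,
			   const void *data, uint32_t len)
{
	struct sha256_hash_ctx *done;

	kernel_fpu_begin();
	/* Feed intermediate chunks with HASH_UPDATE and the final one
	 * with HASH_LAST; a NULL return means the job is parked in a
	 * lane, waiting for enough peers to fill the SIMD width. */
	done = sha256_ctx_mgr_submit(mgr, ctx, data, len, HASH_LAST);

	/* Force the remaining lanes through; flush returns completed
	 * jobs one at a time and NULL once nothing is in flight. */
	while (!done)
		done = sha256_ctx_mgr_flush(mgr);
	kernel_fpu_end();
}
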
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
deleted file mode 100644
index 7c43254..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Header file for multi buffer SHA256 context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha256_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-
-#ifdef HASH_CTX_DEBUG
- HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
-#endif
-};
-
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
-
-
-/* Hash Constants and Typedefs */
-#define SHA256_DIGEST_LENGTH 8
-#define SHA256_LOG2_BLOCK_SIZE 6
-
-#define SHA256_PADLENGTHFIELD_SIZE 8
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha256_ctx_mgr {
- struct sha256_mb_mgr mgr;
-};
-
-/* typedef struct sha256_ctx_mgr sha256_ctx_mgr; */
-
-struct sha256_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha256 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
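
The SHA256_PADLENGTHFIELD_SIZE and SHA256_LOG2_BLOCK_SIZE constants above
drive the pad-length arithmetic in sha256_pad() from sha256_mb.c. A
stand-alone C check of that arithmetic, assuming the 64-byte block and 8-byte
length field defined here (pad_blocks is an illustrative name, not part of
the patch):

#include <assert.h>
#include <stdint.h>

static uint32_t pad_blocks(uint64_t total_len)
{
	uint32_t i = total_len & (64 - 1);	/* bytes in final partial block */

	/* 0x80 marker, zero fill to the length field, 8-byte length */
	i += ((64 - 1) & (0 - (total_len + 8 + 1))) + 1 + 8;
	return i >> 6;				/* SHA256_LOG2_BLOCK_SIZE */
}

int main(void)
{
	assert(pad_blocks(0) == 1);
	assert(pad_blocks(55) == 1);
	assert(pad_blocks(56) == 2);	/* length field no longer fits */
	assert(pad_blocks(64) == 1);
	return 0;
}

A partial block of up to 55 bytes pads out in one extra block; beyond that
the 0x80 marker plus the 8-byte length spill into a second block.
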
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
deleted file mode 100644
index b01ae40..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Header file for multi buffer SHA256 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-#include <linux/types.h>
-
-#define NUM_SHA256_DIGEST_WORDS 8
-
-enum job_sts { STS_UNKNOWN = 0,
- STS_BEING_PROCESSED = 1,
- STS_COMPLETED = 2,
- STS_INTERNAL_ERROR = 3,
- STS_ERROR = 4
-};
-
-struct job_sha256 {
- u8 *buffer;
- u32 len;
- u32 result_digest[NUM_SHA256_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-/* SHA256 out-of-order scheduler */
-
-/* typedef uint32_t sha8_digest_array[8][8]; */
-
-struct sha256_args_x8 {
- uint32_t digest[8][8];
- uint8_t *data_ptr[8];
-};
-
-struct sha256_lane_data {
- struct job_sha256 *job_in_lane;
-};
-
-struct sha256_mb_mgr {
- struct sha256_args_x8 args;
-
- uint32_t lens[8];
-
-	/* each nibble is the index (0...7) of an unused lane */
-	uint64_t unused_lanes;
-	/* the ninth (top) nibble is set to 0xF as a sentinel */
- struct sha256_lane_data ldata[8];
-};
-
-
-#define SHA256_MB_MGR_NUM_LANES_AVX2 8
-
-void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state);
-struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state,
- struct job_sha256 *job);
-struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state);
-struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state);
-
-#endif
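
The unused_lanes word above is best read as a stack of 4-bit lane indices,
with 0xF as the top-of-stack sentinel (0xF76543210 when all eight lanes are
free). A small C sketch of the push/pop discipline the AVX2 manager code
implements with shifts (helper names are illustrative):

#include <stdint.h>

static uint64_t lane_pop(uint64_t *lanes)
{
	uint64_t idx = *lanes & 0xF;	/* low nibble: next free lane */

	*lanes >>= 4;
	return idx;
}

static void lane_push(uint64_t *lanes, uint64_t idx)
{
	/* matches the "shl $4; or idx" sequence in the assembly */
	*lanes = (*lanes << 4) | idx;
}
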
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
deleted file mode 100644
index 5c377ba..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Header file for multi buffer SHA256 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count is in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. Its arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# over-ridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
-
-#########################################################################
-
-#ifndef _DATASTRUCT_ASM_
-#define _DATASTRUCT_ASM_
-
-#define SZ8 8*SHA256_DIGEST_WORD_SIZE
-#define ROUNDS 64*SZ8
-#define PTR_SZ 8
-#define SHA256_DIGEST_WORD_SIZE 4
-#define MAX_SHA256_LANES 8
-#define SHA256_DIGEST_WORDS 8
-#define SHA256_DIGEST_ROW_SIZE (MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE)
-#define SHA256_DIGEST_SIZE (SHA256_DIGEST_ROW_SIZE * SHA256_DIGEST_WORDS)
-#define SHA256_BLK_SZ 64
-
-# START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-# FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-# END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
-
-########################################################################
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
- tmp = (_FIELD_OFFSET - %%tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endif
-.endstruc
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
- align = .\p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm name size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-#endif
-
-
-########################################################################
-#### Define SHA256 Out Of Order Data Structures
-########################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
- _LANE_DATA_size = _FIELD_OFFSET
- _LANE_DATA_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # SHA256_ARGS_X4
-### name size align
-FIELD _digest, 4*8*8, 4 # transposed digest
-FIELD _data_ptr, 8*8, 8 # array of pointers to data
-END_FIELDS
-
- _SHA256_ARGS_X4_size = _FIELD_OFFSET
- _SHA256_ARGS_X4_align = _STRUCT_ALIGN
- _SHA256_ARGS_X8_size = _FIELD_OFFSET
- _SHA256_ARGS_X8_align = _STRUCT_ALIGN
-
-#######################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA256_ARGS_X4_size, _SHA256_ARGS_X4_align
-FIELD _lens, 4*8, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
-END_FIELDS
-
- _MB_MGR_size = _FIELD_OFFSET
- _MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
-
-#######################################################################
-
-START_FIELDS #STACK_FRAME
-### name size align
-FIELD _data, 16*SZ8, 1 # transposed digest
-FIELD _digest, 8*SZ8, 1 # array of pointers to data
-FIELD _ytmp, 4*SZ8, 1
-FIELD _rsp, 8, 1
-END_FIELDS
-
- _STACK_FRAME_size = _FIELD_OFFSET
- _STACK_FRAME_align = _STRUCT_ALIGN
-
-#######################################################################
-
-########################################################################
-#### Define constants
-########################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-########################################################################
-#### Define JOB_SHA256 structure
-########################################################################
-
-START_FIELDS # JOB_SHA256
-
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 8, 8 # length in bytes
-FIELD _result_digest, 8*4, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
- _JOB_SHA256_size = _FIELD_OFFSET
- _JOB_SHA256_align = _STRUCT_ALIGN
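
The START_FIELDS/FIELD/END_FIELDS machinery above is just an align-and-advance
offset calculator. The same computation expressed in C, for reference
(illustrative, not part of the patch):

#include <stddef.h>

static size_t field(size_t *off, size_t *struct_align,
		    size_t size, size_t align)
{
	/* round the running offset up to this field's alignment */
	size_t this_off = (*off + align - 1) & ~(align - 1);

	*off = this_off + size;		/* advance past the field */
	if (align > *struct_align)
		*struct_align = align;	/* struct inherits max alignment */
	return this_off;
}

END_FIELDS then rounds the final offset up to _STRUCT_ALIGN, so nested
structures keep their padding, the same way a C compiler pads a struct to
its overall alignment.
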
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
deleted file mode 100644
index d2364c5..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * Flush routine for SHA256 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-.extern sha256_x8_avx2
-
-#LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# Common register definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-# idx must be a register not clobbered by sha256_x8_avx2
-#define idx %r8
-#define DWORD_idx %r8d
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-#define tmp2_w %ebx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 %arg1
-
-#define extra_blocks %arg2
-#define p %arg2
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rcx : state
-ENTRY(sha256_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
- # If bit (32+3) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+3, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
- offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne four(%rip), idx
- offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne five(%rip), idx
- offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne six(%rip), idx
- offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne seven(%rip), idx
-
- # copy idx to empty lanes
-copy_lane_data:
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 8
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 4*I)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- vmovdqu _lens+0*16(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqu %xmm0, _lens+0*16(state)
- vmovdqu %xmm1, _lens+1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha256_x8_avx2
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
-
- mov unused_lanes, _unused_lanes(state)
- movl $0xFFFFFFFF, _lens(state,idx,4)
-
- vmovd _args_digest(state , idx, 4) , %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
- vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- vmovdqu %xmm1, offset(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha256_mb_mgr_flush_avx2)
-
-##############################################################################
-
-.align 16
-ENTRY(sha256_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- ## if bit 32+3 is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $(32+3), unused_lanes
- jc .return_null
-
- # Find min length
- vmovdqu _lens(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- test $~0xF, idx
- jnz .return_null
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
- vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- vmovdqu %xmm1, offset(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha256_mb_mgr_get_comp_job_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
-.octa 0x000000000000000000000000FFFFFFF0
-
-.section .rodata.cst8, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-two:
-.quad 2
-three:
-.quad 3
-four:
-.quad 4
-five:
-.quad 5
-six:
-.quad 6
-seven:
-.quad 7
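
The min-length search above is the heart of both flush and get_comp_job: each 32-bit lens[] entry packs (outstanding blocks << 4) | lane, so the vpminud/vpalignr reduction finds the shortest lane's length and its index at the same time. A scalar C sketch of the same idea (illustrative names, not from the removed file):

    #include <stdint.h>

    /* Scalar rendering of the vpminud/vpalignr tree: each entry packs
     * (block_count << 4) | lane_index, so one unsigned minimum yields
     * both the shortest length and the lane that owns it. */
    static uint32_t find_min_lane(const uint32_t lens[8],
                                  unsigned int *lane, uint32_t *blocks)
    {
            uint32_t min = lens[0];
            int i;

            for (i = 1; i < 8; i++)
                    if (lens[i] < min)
                            min = lens[i];

            *lane = min & 0xF;      /* lane index in the low nibble */
            *blocks = min >> 4;     /* outstanding length in blocks */
            return min;
    }

Packing the lane index into the low bits the comparison resolves last is what lets a plain unsigned minimum double as an argmin.
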
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
deleted file mode 100644
index b0c4983..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Initialization code for multi buffer SHA256 algorithm for AVX2
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "sha256_mb_mgr.h"
-
-void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state)
-{
- unsigned int j;
-
- state->unused_lanes = 0xF76543210ULL;
- for (j = 0; j < 8; j++) {
- state->lens[j] = 0xFFFFFFFF;
- state->ldata[j].job_in_lane = NULL;
- }
-}
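
For reference, the 0xF76543210ULL written here is a stack of free lane numbers, one per 4-bit nibble, with 0xF as a sentinel on top; the shl/shr-by-4 sequences in the flush and submit paths pop and push it. A hedged C sketch (the helper names are hypothetical, not the kernel's):

    #include <stdint.h>

    /* unused_lanes as a nibble stack: 0xF76543210 means lanes 0..7 are
     * free; popping all eight leaves only the 0xF sentinel. */
    static unsigned int pop_lane(uint64_t *unused_lanes)
    {
            unsigned int lane = *unused_lanes & 0xF; /* bottom nibble */

            *unused_lanes >>= 4;                     /* shr $4 above  */
            return lane;
    }

    static void push_lane(uint64_t *unused_lanes, unsigned int lane)
    {
            *unused_lanes = (*unused_lanes << 4) | lane; /* shl $4; or */
    }
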
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
deleted file mode 100644
index b36ae74..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA256 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-.extern sha256_x8_avx2
-
-# LINUX register definitions
-arg1 = %rdi
-arg2 = %rsi
-size_offset = %rcx
-tmp2 = %rcx
-extra_blocks = %rdx
-
-# Common definitions
-#define state arg1
-#define job %rsi
-#define len2 arg2
-#define p2 arg2
-
-# idx must be a register not clobbered by sha256_x8_avx2
-idx = %r8
-DWORD_idx = %r8d
-last_len = %r8
-
-p = %r11
-start_offset = %r11
-
-unused_lanes = %rbx
-BYTE_unused_lanes = %bl
-
-job_rax = %rax
-len = %rax
-DWORD_len = %eax
-
-lane = %r12
-tmp3 = %r12
-
-tmp = %r9
-DWORD_tmp = %r9d
-
-lane_data = %r10
-
-# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job)
-# arg 1 : rcx : state
-# arg 2 : rdx : job
-ENTRY(sha256_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
- mov unused_lanes, lane
- and $0xF, lane
- shr $4, unused_lanes
- imul $_LANE_DATA_size, lane, lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- shl $4, len
- or lane, len
-
- movl DWORD_len, _lens(state , lane, 4)
-
- # Load digest words from result_digest
- vmovdqu _result_digest(job), %xmm0
- vmovdqu _result_digest+1*16(job), %xmm1
- vmovd %xmm0, _args_digest(state, lane, 4)
- vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4)
- vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4)
- vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4)
- vmovd %xmm1, _args_digest+4*32(state , lane, 4)
-
- vpextrd $1, %xmm1, _args_digest+5*32(state , lane, 4)
- vpextrd $2, %xmm1, _args_digest+6*32(state , lane, 4)
- vpextrd $3, %xmm1, _args_digest+7*32(state , lane, 4)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xF, unused_lanes
- jne return_null
-
-start_loop:
- # Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqa %xmm0, _lens + 0*16(state)
- vmovdqa %xmm1, _lens + 1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha256_x8_avx2
-
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state,idx,4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
-
- vpinsrd $1, _args_digest+5*32(state , idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state , idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state , idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-
-ENDPROC(sha256_mb_mgr_submit_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
- .octa 0x000000000000000000000000FFFFFFF0
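
Once start_loop has found the minimum, clear_low_nibble strips the lane index, vpshufd broadcasts the common length, and the two vpsubd instructions subtract it from every lane while preserving each lane's own index nibble. The same step in scalar C, as a sketch only:

    #include <stdint.h>

    /* Subtract the just-hashed block count from every lane's packed
     * length; masking with ~0xF keeps each lane's index nibble. */
    static void consume_min(uint32_t lens[8], uint32_t min_packed)
    {
            uint32_t common = min_packed & ~0xFu; /* clear_low_nibble */
            int i;

            for (i = 0; i < 8; i++)
                    lens[i] -= common;
    }
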
diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
deleted file mode 100644
index 1687c80..00000000
--- a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * Multi-buffer SHA256 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-## code to compute eight-wide (oct) SHA256 using 256-bit AVX2 registers
-## outer calling routine takes care of save and restore of XMM registers
-## Logic designed/laid out by JDG
-
-## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; %ymm0-15
-## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
-## Linux preserves: rdi rbp r8
-##
-## clobbers %ymm0-15
-
-arg1 = %rdi
-arg2 = %rsi
-reg3 = %rcx
-reg4 = %rdx
-
-# Common definitions
-STATE = arg1
-INP_SIZE = arg2
-
-IDX = %rax
-ROUND = %rbx
-TBL = reg3
-
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-inp4 = %r13
-inp5 = %r14
-inp6 = %r15
-inp7 = reg4
-
-a = %ymm0
-b = %ymm1
-c = %ymm2
-d = %ymm3
-e = %ymm4
-f = %ymm5
-g = %ymm6
-h = %ymm7
-
-T1 = %ymm8
-
-a0 = %ymm12
-a1 = %ymm13
-a2 = %ymm14
-TMP = %ymm15
-TMP0 = %ymm6
-TMP1 = %ymm7
-
-TT0 = %ymm8
-TT1 = %ymm9
-TT2 = %ymm10
-TT3 = %ymm11
-TT4 = %ymm12
-TT5 = %ymm13
-TT6 = %ymm14
-TT7 = %ymm15
-
-# Define stack usage
-
-# %rsp is realigned to 32 bytes explicitly below, so FRAMESZ only
-# needs to be large enough for the locals
-
-#define FRAMESZ 0x388
-
-#define VMOVPS vmovups
-
-# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
-# "transpose" data in {r0...r7} using temps {t0...t1}
-# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
-# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
-# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
-# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
-#
-# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
-# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
-# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
-# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
-# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
-# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
-# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
-# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
-#
-
-.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
- # process top half (r0..r3) {a...d}
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
- vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
- vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
- vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
- vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
-
- # use r2 in place of t0
- # process bottom half (r4..r7) {e...h}
- vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
- vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
- vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
- vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
- vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
- vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
- vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
- vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
-
- vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
- vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
- vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
- vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
- vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
- vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
- vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
- vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
-
-.endm
-
-.macro ROTATE_ARGS
-TMP_ = h
-h = g
-g = f
-f = e
-e = d
-d = c
-c = b
-b = a
-a = TMP_
-.endm
-
-.macro _PRORD reg imm tmp
- vpslld $(32-\imm),\reg,\tmp
- vpsrld $\imm,\reg, \reg
- vpor \tmp,\reg, \reg
-.endm
-
-# PRORD_nd reg, imm, tmp, src
-.macro _PRORD_nd reg imm tmp src
- vpslld $(32-\imm), \src, \tmp
- vpsrld $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-# PRORD dst/src, amt
-.macro PRORD reg imm
- _PRORD \reg,\imm,TMP
-.endm
-
-# PRORD_nd dst, src, amt
-.macro PRORD_nd reg tmp imm
- _PRORD_nd \reg, \imm, TMP, \tmp
-.endm
-
-# arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_00_15 _T1 i
- PRORD_nd a0,e,5 # sig1: a0 = (e >> 5)
-
- vpxor g, f, a2 # ch: a2 = f^g
- vpand e,a2, a2 # ch: a2 = (f^g)&e
- vpxor g, a2, a2 # a2 = ch
-
- PRORD_nd a1,e,25 # sig1: a1 = (e >> 25)
-
- vmovdqu \_T1,(SZ8*(\i & 0xf))(%rsp)
- vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
- vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5)
- PRORD a0, 6 # sig1: a0 = (e >> 6) ^ (e >> 11)
- vpaddd a2, h, h # h = h + ch
- PRORD_nd a2,a,11 # sig0: a2 = (a >> 11)
- vpaddd \_T1,h, h # h = h + ch + W + K
- vpxor a1, a0, a0 # a0 = sigma1
- PRORD_nd a1,a,22 # sig0: a1 = (a >> 22)
- vpxor c, a, \_T1 # maj: T1 = a^c
- add $SZ8, ROUND # ROUND++
- vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
- vpaddd a0, h, h
- vpaddd h, d, d
- vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11)
- PRORD a2,2 # sig0: a2 = (a >> 2) ^ (a >> 13)
- vpxor a1, a2, a2 # a2 = sig0
- vpand c, a, a1 # maj: a1 = a&c
- vpor \_T1, a1, a1 # a1 = maj
- vpaddd a1, h, h # h = h + ch + W + K + maj
- vpaddd a2, h, h # h = h + ch + W + K + maj + sigma0
- ROTATE_ARGS
-.endm
-
-# arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_16_XX _T1 i
- vmovdqu (SZ8*((\i-15)&0xf))(%rsp), \_T1
- vmovdqu (SZ8*((\i-2)&0xf))(%rsp), a1
- vmovdqu \_T1, a0
- PRORD \_T1,11
- vmovdqu a1, a2
- PRORD a1,2
- vpxor a0, \_T1, \_T1
- PRORD \_T1, 7
- vpxor a2, a1, a1
- PRORD a1, 17
- vpsrld $3, a0, a0
- vpxor a0, \_T1, \_T1
- vpsrld $10, a2, a2
- vpxor a2, a1, a1
- vpaddd (SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1
- vpaddd (SZ8*((\i-7)&0xf))(%rsp), a1, a1
- vpaddd a1, \_T1, \_T1
-
- ROUND_00_15 \_T1,\i
-.endm
-
-# SHA256_ARGS:
-# UINT128 digest[8]; // transposed digests
-# UINT8 *data_ptr[4];
-
-# void sha256_x8_avx2(SHA256_ARGS *args, UINT64 bytes);
-# arg 1 : STATE : pointer to array of pointers to input data
-# arg 2 : INP_SIZE : size of input in blocks
- # general registers preserved in outer calling routine
- # outer calling routine saves all the XMM registers
- # save rsp, allocate 32-byte aligned for local variables
-ENTRY(sha256_x8_avx2)
-
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
- mov %rsp, IDX
- sub $FRAMESZ, %rsp
- and $~0x1F, %rsp
- mov IDX, _rsp(%rsp)
-
- # Load the pre-transposed incoming digest.
- vmovdqu 0*SHA256_DIGEST_ROW_SIZE(STATE),a
- vmovdqu 1*SHA256_DIGEST_ROW_SIZE(STATE),b
- vmovdqu 2*SHA256_DIGEST_ROW_SIZE(STATE),c
- vmovdqu 3*SHA256_DIGEST_ROW_SIZE(STATE),d
- vmovdqu 4*SHA256_DIGEST_ROW_SIZE(STATE),e
- vmovdqu 5*SHA256_DIGEST_ROW_SIZE(STATE),f
- vmovdqu 6*SHA256_DIGEST_ROW_SIZE(STATE),g
- vmovdqu 7*SHA256_DIGEST_ROW_SIZE(STATE),h
-
- lea K256_8(%rip),TBL
-
- # load the address of each of the 4 message lanes
- # getting ready to transpose input onto stack
- mov _args_data_ptr+0*PTR_SZ(STATE),inp0
- mov _args_data_ptr+1*PTR_SZ(STATE),inp1
- mov _args_data_ptr+2*PTR_SZ(STATE),inp2
- mov _args_data_ptr+3*PTR_SZ(STATE),inp3
- mov _args_data_ptr+4*PTR_SZ(STATE),inp4
- mov _args_data_ptr+5*PTR_SZ(STATE),inp5
- mov _args_data_ptr+6*PTR_SZ(STATE),inp6
- mov _args_data_ptr+7*PTR_SZ(STATE),inp7
-
- xor IDX, IDX
-lloop:
- xor ROUND, ROUND
-
- # save old digest
- vmovdqu a, _digest(%rsp)
- vmovdqu b, _digest+1*SZ8(%rsp)
- vmovdqu c, _digest+2*SZ8(%rsp)
- vmovdqu d, _digest+3*SZ8(%rsp)
- vmovdqu e, _digest+4*SZ8(%rsp)
- vmovdqu f, _digest+5*SZ8(%rsp)
- vmovdqu g, _digest+6*SZ8(%rsp)
- vmovdqu h, _digest+7*SZ8(%rsp)
- i = 0
-.rep 2
- VMOVPS i*32(inp0, IDX), TT0
- VMOVPS i*32(inp1, IDX), TT1
- VMOVPS i*32(inp2, IDX), TT2
- VMOVPS i*32(inp3, IDX), TT3
- VMOVPS i*32(inp4, IDX), TT4
- VMOVPS i*32(inp5, IDX), TT5
- VMOVPS i*32(inp6, IDX), TT6
- VMOVPS i*32(inp7, IDX), TT7
- vmovdqu g, _ytmp(%rsp)
- vmovdqu h, _ytmp+1*SZ8(%rsp)
- TRANSPOSE8 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, TMP0, TMP1
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP1
- vmovdqu _ytmp(%rsp), g
- vpshufb TMP1, TT0, TT0
- vpshufb TMP1, TT1, TT1
- vpshufb TMP1, TT2, TT2
- vpshufb TMP1, TT3, TT3
- vpshufb TMP1, TT4, TT4
- vpshufb TMP1, TT5, TT5
- vpshufb TMP1, TT6, TT6
- vpshufb TMP1, TT7, TT7
- vmovdqu _ytmp+1*SZ8(%rsp), h
- vmovdqu TT4, _ytmp(%rsp)
- vmovdqu TT5, _ytmp+1*SZ8(%rsp)
- vmovdqu TT6, _ytmp+2*SZ8(%rsp)
- vmovdqu TT7, _ytmp+3*SZ8(%rsp)
- ROUND_00_15 TT0,(i*8+0)
- vmovdqu _ytmp(%rsp), TT0
- ROUND_00_15 TT1,(i*8+1)
- vmovdqu _ytmp+1*SZ8(%rsp), TT1
- ROUND_00_15 TT2,(i*8+2)
- vmovdqu _ytmp+2*SZ8(%rsp), TT2
- ROUND_00_15 TT3,(i*8+3)
- vmovdqu _ytmp+3*SZ8(%rsp), TT3
- ROUND_00_15 TT0,(i*8+4)
- ROUND_00_15 TT1,(i*8+5)
- ROUND_00_15 TT2,(i*8+6)
- ROUND_00_15 TT3,(i*8+7)
- i = (i+1)
-.endr
- add $64, IDX
- i = (i*8)
-
- jmp Lrounds_16_xx
-.align 16
-Lrounds_16_xx:
-.rep 16
- ROUND_16_XX T1, i
- i = (i+1)
-.endr
-
- cmp $ROUNDS,ROUND
- jb Lrounds_16_xx
-
- # add old digest
- vpaddd _digest+0*SZ8(%rsp), a, a
- vpaddd _digest+1*SZ8(%rsp), b, b
- vpaddd _digest+2*SZ8(%rsp), c, c
- vpaddd _digest+3*SZ8(%rsp), d, d
- vpaddd _digest+4*SZ8(%rsp), e, e
- vpaddd _digest+5*SZ8(%rsp), f, f
- vpaddd _digest+6*SZ8(%rsp), g, g
- vpaddd _digest+7*SZ8(%rsp), h, h
-
- sub $1, INP_SIZE # unit is blocks
- jne lloop
-
- # write back to memory (state object) the transposed digest
- vmovdqu a, 0*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu b, 1*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu c, 2*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu d, 3*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu e, 4*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu f, 5*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu g, 6*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu h, 7*SHA256_DIGEST_ROW_SIZE(STATE)
-
- # update input pointers
- add IDX, inp0
- mov inp0, _args_data_ptr+0*8(STATE)
- add IDX, inp1
- mov inp1, _args_data_ptr+1*8(STATE)
- add IDX, inp2
- mov inp2, _args_data_ptr+2*8(STATE)
- add IDX, inp3
- mov inp3, _args_data_ptr+3*8(STATE)
- add IDX, inp4
- mov inp4, _args_data_ptr+4*8(STATE)
- add IDX, inp5
- mov inp5, _args_data_ptr+5*8(STATE)
- add IDX, inp6
- mov inp6, _args_data_ptr+6*8(STATE)
- add IDX, inp7
- mov inp7, _args_data_ptr+7*8(STATE)
-
- # Postamble
- mov _rsp(%rsp), %rsp
-
- # restore callee-saved clobbered registers
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- ret
-ENDPROC(sha256_x8_avx2)
-
-.section .rodata.K256_8, "a", @progbits
-.align 64
-K256_8:
- .octa 0x428a2f98428a2f98428a2f98428a2f98
- .octa 0x428a2f98428a2f98428a2f98428a2f98
- .octa 0x71374491713744917137449171374491
- .octa 0x71374491713744917137449171374491
- .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
- .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
- .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
- .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
- .octa 0x3956c25b3956c25b3956c25b3956c25b
- .octa 0x3956c25b3956c25b3956c25b3956c25b
- .octa 0x59f111f159f111f159f111f159f111f1
- .octa 0x59f111f159f111f159f111f159f111f1
- .octa 0x923f82a4923f82a4923f82a4923f82a4
- .octa 0x923f82a4923f82a4923f82a4923f82a4
- .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
- .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
- .octa 0xd807aa98d807aa98d807aa98d807aa98
- .octa 0xd807aa98d807aa98d807aa98d807aa98
- .octa 0x12835b0112835b0112835b0112835b01
- .octa 0x12835b0112835b0112835b0112835b01
- .octa 0x243185be243185be243185be243185be
- .octa 0x243185be243185be243185be243185be
- .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
- .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
- .octa 0x72be5d7472be5d7472be5d7472be5d74
- .octa 0x72be5d7472be5d7472be5d7472be5d74
- .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
- .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
- .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
- .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
- .octa 0xc19bf174c19bf174c19bf174c19bf174
- .octa 0xc19bf174c19bf174c19bf174c19bf174
- .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
- .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
- .octa 0xefbe4786efbe4786efbe4786efbe4786
- .octa 0xefbe4786efbe4786efbe4786efbe4786
- .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
- .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
- .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
- .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
- .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
- .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
- .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
- .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
- .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
- .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
- .octa 0x76f988da76f988da76f988da76f988da
- .octa 0x76f988da76f988da76f988da76f988da
- .octa 0x983e5152983e5152983e5152983e5152
- .octa 0x983e5152983e5152983e5152983e5152
- .octa 0xa831c66da831c66da831c66da831c66d
- .octa 0xa831c66da831c66da831c66da831c66d
- .octa 0xb00327c8b00327c8b00327c8b00327c8
- .octa 0xb00327c8b00327c8b00327c8b00327c8
- .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
- .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
- .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
- .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
- .octa 0xd5a79147d5a79147d5a79147d5a79147
- .octa 0xd5a79147d5a79147d5a79147d5a79147
- .octa 0x06ca635106ca635106ca635106ca6351
- .octa 0x06ca635106ca635106ca635106ca6351
- .octa 0x14292967142929671429296714292967
- .octa 0x14292967142929671429296714292967
- .octa 0x27b70a8527b70a8527b70a8527b70a85
- .octa 0x27b70a8527b70a8527b70a8527b70a85
- .octa 0x2e1b21382e1b21382e1b21382e1b2138
- .octa 0x2e1b21382e1b21382e1b21382e1b2138
- .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
- .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
- .octa 0x53380d1353380d1353380d1353380d13
- .octa 0x53380d1353380d1353380d1353380d13
- .octa 0x650a7354650a7354650a7354650a7354
- .octa 0x650a7354650a7354650a7354650a7354
- .octa 0x766a0abb766a0abb766a0abb766a0abb
- .octa 0x766a0abb766a0abb766a0abb766a0abb
- .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
- .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
- .octa 0x92722c8592722c8592722c8592722c85
- .octa 0x92722c8592722c8592722c8592722c85
- .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
- .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
- .octa 0xa81a664ba81a664ba81a664ba81a664b
- .octa 0xa81a664ba81a664ba81a664ba81a664b
- .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
- .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
- .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
- .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
- .octa 0xd192e819d192e819d192e819d192e819
- .octa 0xd192e819d192e819d192e819d192e819
- .octa 0xd6990624d6990624d6990624d6990624
- .octa 0xd6990624d6990624d6990624d6990624
- .octa 0xf40e3585f40e3585f40e3585f40e3585
- .octa 0xf40e3585f40e3585f40e3585f40e3585
- .octa 0x106aa070106aa070106aa070106aa070
- .octa 0x106aa070106aa070106aa070106aa070
- .octa 0x19a4c11619a4c11619a4c11619a4c116
- .octa 0x19a4c11619a4c11619a4c11619a4c116
- .octa 0x1e376c081e376c081e376c081e376c08
- .octa 0x1e376c081e376c081e376c081e376c08
- .octa 0x2748774c2748774c2748774c2748774c
- .octa 0x2748774c2748774c2748774c2748774c
- .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
- .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
- .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
- .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
- .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
- .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
- .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
- .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
- .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
- .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
- .octa 0x748f82ee748f82ee748f82ee748f82ee
- .octa 0x748f82ee748f82ee748f82ee748f82ee
- .octa 0x78a5636f78a5636f78a5636f78a5636f
- .octa 0x78a5636f78a5636f78a5636f78a5636f
- .octa 0x84c8781484c8781484c8781484c87814
- .octa 0x84c8781484c8781484c8781484c87814
- .octa 0x8cc702088cc702088cc702088cc70208
- .octa 0x8cc702088cc702088cc702088cc70208
- .octa 0x90befffa90befffa90befffa90befffa
- .octa 0x90befffa90befffa90befffa90befffa
- .octa 0xa4506ceba4506ceba4506ceba4506ceb
- .octa 0xa4506ceba4506ceba4506ceba4506ceb
- .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
- .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
- .octa 0xc67178f2c67178f2c67178f2c67178f2
- .octa 0xc67178f2c67178f2c67178f2c67178f2
-
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK:
-.octa 0x0c0d0e0f08090a0b0405060700010203
-.octa 0x0c0d0e0f08090a0b0405060700010203
-
-.section .rodata.cst256.K256, "aM", @progbits, 256
-.align 64
-.global K256
-K256:
- .int 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
- .int 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
- .int 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
- .int 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
- .int 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
- .int 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
- .int 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
- .int 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
- .int 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
- .int 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
- .int 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
- .int 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
- .int 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
- .int 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
- .int 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
- .int 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
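
K256_8 above stores every round constant eight times because the digests are kept transposed: each ymm register holds one working variable for all eight lanes, so W[i] differs per lane while K[i] must be broadcast across a full 32-byte row for a single vpaddd. A plain-C rendering of that one step (hypothetical names):

    #include <stdint.h>

    /* One "vpaddd (TBL,ROUND,1)" adds W[i]+K[i] for all eight lanes at
     * once; k_row points at a 32-byte K256_8 row holding K[i] x 8. */
    static void add_w_plus_k(uint32_t t1[8], const uint32_t k_row[8])
    {
            int lane;

            for (lane = 0; lane < 8; lane++)
                    t1[lane] += k_row[lane];
    }
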
diff --git a/arch/x86/crypto/sha512-mb/Makefile b/arch/x86/crypto/sha512-mb/Makefile
deleted file mode 100644
index 90f1ef6..00000000
--- a/arch/x86/crypto/sha512-mb/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb.o
- sha512-mb-y := sha512_mb.o sha512_mb_mgr_flush_avx2.o \
- sha512_mb_mgr_init_avx2.o sha512_mb_mgr_submit_avx2.o sha512_x4_avx2.o
-endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
deleted file mode 100644
index 26b8567..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * Multi buffer SHA512 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha512_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha512_mb_alg_state;
-
-struct sha512_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
- (struct sha512_mb_mgr *state,
- struct job_sha512 *job);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
- (struct sha512_mb_mgr *state);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
- (struct sha512_mb_mgr *state);
-
-inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA512_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA512_PADLENGTHFIELD_SIZE;
-
-#if SHA512_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA512_LOG2_BLOCK_SIZE;
-}
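
A standalone sketch of the arithmetic in sha512_pad() above, assuming SHA512_BLOCK_SIZE = 128, SHA512_PADLENGTHFIELD_SIZE = 16 and SHA512_LOG2_BLOCK_SIZE = 7; it returns how many extra blocks the 0x80 marker and the 16-byte length field force:

    #include <stdint.h>

    static uint32_t pad_blocks(uint64_t total_len)
    {
            uint32_t i = total_len & 127; /* bytes already in last block */

            i += ((127 & (0 - (total_len + 16 + 1))) + 1 + 16);
            return i >> 7;                /* extra blocks to hash */
    }

    /* pad_blocks(200) == 1; pad_blocks(120) == 2, since only 8 bytes
     * remain in the current block and marker + length need 17. */
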
-
-static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
- (struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA512_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA512_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA512_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr,
- &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha512_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- if (ctx)
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha512_hash_ctx
- *sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user.
- * If it is not ready, resubmit the job to finish processing.
- * If sha512_ctx_mgr_resubmit returned a job, it is ready to be
- * returned.
- * Otherwise, all jobs currently being managed by the hash_ctx_mgr
- * still need processing.
- */
- struct sha512_ctx_mgr *mgr;
- struct sha512_hash_ctx *ctx;
- unsigned long flags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, flags);
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_get_comp_job(&mgr->mgr);
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
- return ctx;
-}
-
-static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
-{
- sha512_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha512_hash_ctx
- *sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
- struct sha512_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- struct sha512_ctx_mgr *mgr;
- unsigned long irqflags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, irqflags);
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- goto unlock;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- goto unlock;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- goto unlock;
- }
-
- /*
- * If we made it here, there were no errors during this call to
- * submit
- */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently being
- * processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
- /* Compute how many bytes to copy from user buffer into extra
- * block
- */
- uint32_t copy_len = SHA512_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy
- (&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /* The extra block should never contain more than 1 block
- * here
- */
- assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);
-
- /* If the extra block buffer contains exactly 1 block, it can
- * be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
-unlock:
- spin_unlock_irqrestore(&cstate->work_lock, irqflags);
- return ctx;
-}
-
-static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
-{
- struct sha512_ctx_mgr *mgr;
- struct sha512_hash_ctx *ctx;
- unsigned long flags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, flags);
- while (1) {
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned 0, there are no more jobs in flight. */
- if (!ctx)
- break;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha512_ctx_mgr_resubmit returned a job, it is ready to
- * be returned. Otherwise, all jobs currently being managed by
- * the sha512_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- break;
- }
- spin_unlock_irqrestore(&cstate->work_lock, flags);
- return ctx;
-}
-
-static int sha512_mb_init(struct ahash_request *areq)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA512_H0;
- sctx->job.result_digest[1] = SHA512_H1;
- sctx->job.result_digest[2] = SHA512_H2;
- sctx->job.result_digest[3] = SHA512_H3;
- sctx->job.result_digest[4] = SHA512_H4;
- sctx->job.result_digest[5] = SHA512_H5;
- sctx->job.result_digest[6] = SHA512_H6;
- sctx->job.result_digest[7] = SHA512_H7;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be64 *dst = (__be64 *) rctx->out;
-
- for (i = 0; i < 8; ++i)
- dst[i] = cpu_to_be64(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha512_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha512_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha512_ctx_mgr_flush(cstate);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha512_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
- unsigned long flags;
-
- /* remove from work list */
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_del(&rctx->waiter);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_del(&req_ctx->waiter);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
- }
-
- return 0;
-}
-
-static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
- unsigned long flags;
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- mcryptd_arm_flusher(cstate, delay);
-}
-
-static int sha512_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- sha512_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- sha512_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha512_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha512_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha512_hash_ctx));
-
- return 0;
-}
-
-static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha512_mb_areq_alg = {
- .init = sha512_mb_init,
- .update = sha512_mb_update,
- .final = sha512_mb_final,
- .finup = sha512_mb_finup,
- .export = sha512_mb_export,
- .import = sha512_mb_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct sha512_hash_ctx),
- .base = {
- .cra_name = "__sha512-mb",
- .cra_driver_name = "__intel_sha512-mb",
- .cra_priority = 100,
-			/*
-			 * Use the ASYNC flag: in the multi-buffer
-			 * algorithm, some buffers may not have completed
-			 * before the hashing thread sleeps.
-			 */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha512_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha512_mb_areq_init_tfm,
- .cra_exit = sha512_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha512_hash_ctx),
- }
- }
-};
-
-static int sha512_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha512_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha512_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha512_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha512_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha512_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha512_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
-
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha512_mb_async_alg = {
- .init = sha512_mb_async_init,
- .update = sha512_mb_async_update,
- .final = sha512_mb_async_final,
- .finup = sha512_mb_async_finup,
- .digest = sha512_mb_async_digest,
- .export = sha512_mb_async_export,
- .import = sha512_mb_async_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct sha512_hash_ctx),
- .base = {
- .cra_name = "sha512",
- .cra_driver_name = "sha512_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha512_mb_async_alg.halg.base.cra_list),
- .cra_init = sha512_mb_async_init_tfm,
- .cra_exit = sha512_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha512_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
-
-static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha512_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
-		if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha512_hash_ctx *)
- sha512_ctx_mgr_flush(cstate);
- kernel_fpu_end();
- if (!sha_ctx) {
-			pr_err("sha512_mb error: nothing got flushed for non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- /* get the hash context and then flush time */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha512_mb_mod_init(void)
-{
-
- int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha512_mb_alg_state.alg_cstate =
- alloc_percpu(struct mcryptd_alg_cstate);
-
- sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
- sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
- sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
- sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;
-
- if (!sha512_mb_alg_state.alg_cstate)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha512_mb_alg_state;
- cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
- GFP_KERNEL);
- if (!cpu_state->mgr)
- goto err2;
- sha512_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha512_mb_alg_state.flusher = &sha512_mb_flusher;
-
- err = crypto_register_ahash(&sha512_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha512_mb_async_alg);
- if (err)
- goto err1;
-
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha512_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha512_mb_alg_state.alg_cstate);
- return -ENODEV;
-}
-
-static void __exit sha512_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha512_mb_async_alg);
- crypto_unregister_ahash(&sha512_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha512_mb_alg_state.alg_cstate);
-}
-
-module_init(sha512_mb_mod_init);
-module_exit(sha512_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS("sha512");
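The flusher above implements a lazy completion policy: each queued job carries an expiry tick, the flusher completes every job whose deadline has passed, stops at the first job that is still fresh, and re-arms itself for that job's deadline. A minimal user-space sketch of the same policy, assuming illustrative names (struct mb_job, now, flush_one) rather than kernel API:

#include <stdbool.h>
#include <stddef.h>

/* One queued multi-buffer job with its flush deadline (in ticks). */
struct mb_job {
	unsigned long expire;
	struct mb_job *next;
};

static unsigned long now;	/* stand-in for jiffies */

/* Wraparound-safe "a is before b", like time_before(). */
static bool before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

static void flush_one(struct mb_job *job)
{
	(void)job;		/* complete the hash job here */
}

/*
 * Complete every job whose deadline has passed, in arrival order,
 * and return the deadline of the first still-fresh job so the
 * caller can re-arm the flush timer (0 means the list drained).
 */
static unsigned long flusher(struct mb_job **list)
{
	while (*list && !before(now, (*list)->expire)) {
		struct mb_job *job = *list;

		*list = job->next;
		flush_one(job);
	}
	return *list ? (*list)->expire : 0;
}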
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
deleted file mode 100644
index e5c465b..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Header file for multi buffer SHA512 context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha512_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-};
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
-
-/* Hash Constants and Typedefs */
-#define SHA512_DIGEST_LENGTH 8
-#define SHA512_LOG2_BLOCK_SIZE 7
-
-#define SHA512_PADLENGTHFIELD_SIZE 16
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha512_ctx_mgr {
- struct sha512_mb_mgr mgr;
-};
-
-/* typedef struct sha512_ctx_mgr sha512_ctx_mgr; */
-
-struct sha512_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha512 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
deleted file mode 100644
index 178f17e..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Header file for multi buffer SHA512 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-#include <linux/types.h>
-
-#define NUM_SHA512_DIGEST_WORDS 8
-
-enum job_sts {STS_UNKNOWN = 0,
- STS_BEING_PROCESSED = 1,
- STS_COMPLETED = 2,
- STS_INTERNAL_ERROR = 3,
- STS_ERROR = 4
-};
-
-struct job_sha512 {
- u8 *buffer;
- u64 len;
- u64 result_digest[NUM_SHA512_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-struct sha512_args_x4 {
- uint64_t digest[8][4];
- uint8_t *data_ptr[4];
-};
-
-struct sha512_lane_data {
- struct job_sha512 *job_in_lane;
-};
-
-struct sha512_mb_mgr {
- struct sha512_args_x4 args;
-
- uint64_t lens[4];
-
- /* each byte is index (0...7) of unused lanes */
- uint64_t unused_lanes;
- /* byte 4 is set to FF as a flag */
- struct sha512_lane_data ldata[4];
-};
-
-#define SHA512_MB_MGR_NUM_LANES_AVX2 4
-
-void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state);
-struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state,
- struct job_sha512 *job);
-struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state);
-struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state);
-
-#endif
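The unused_lanes field above behaves as a byte-wide stack: each free lane index occupies one byte, and the 0xFF in byte 4 marks the stack bottom, which is why the manager is initialised to 0xFF03020100 and why the assembly tests bit 32+7 to detect "all lanes free". A standalone C sketch of the same push/pop arithmetic (illustrative, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Pop the next free lane off the low byte of the stack. */
static unsigned int lane_pop(uint64_t *unused_lanes)
{
	unsigned int lane = *unused_lanes & 0xff;

	*unused_lanes >>= 8;
	return lane;
}

/* Push a now-free lane back on top (shl $8 / or idx in the asm). */
static void lane_push(uint64_t *unused_lanes, unsigned int lane)
{
	*unused_lanes = (*unused_lanes << 8) | lane;
}

int main(void)
{
	uint64_t unused_lanes = 0xFF03020100; /* all 4 lanes free */

	assert(lane_pop(&unused_lanes) == 0);
	assert(lane_pop(&unused_lanes) == 1);
	lane_push(&unused_lanes, 0);
	assert(unused_lanes == 0xFF030200);
	/* Bit 39 (32+7) is set again only once every lane is free. */
	return 0;
}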
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
deleted file mode 100644
index cf2636d..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Header file for multi buffer SHA512 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count is in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. Its arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# overridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
-
-#########################################################################
-
-#ifndef _DATASTRUCT_ASM_
-#define _DATASTRUCT_ASM_
-
-#define PTR_SZ 8
-#define SHA512_DIGEST_WORD_SIZE 8
-#define SHA512_MB_MGR_NUM_LANES_AVX2 4
-#define NUM_SHA512_DIGEST_WORDS 8
-#define SZ4 4*SHA512_DIGEST_WORD_SIZE
-#define ROUNDS 80*SZ4
-#define SHA512_DIGEST_ROW_SIZE (SHA512_MB_MGR_NUM_LANES_AVX2 * 8)
-
-# START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-# FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-# END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
- tmp = (_FIELD_OFFSET - ##tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
- align = .\p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm name size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-#endif
-
-###################################################################
-### Define SHA512 Out Of Order Data Structures
-###################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
- _LANE_DATA_size = _FIELD_OFFSET
- _LANE_DATA_align = _STRUCT_ALIGN
-
-####################################################################
-
-START_FIELDS # SHA512_ARGS_X4
-### name size align
-FIELD _digest, 8*8*4, 4 # transposed digest
-FIELD _data_ptr, 8*4, 8 # array of pointers to data
-END_FIELDS
-
- _SHA512_ARGS_X4_size = _FIELD_OFFSET
- _SHA512_ARGS_X4_align = _STRUCT_ALIGN
-
-#####################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA512_ARGS_X4_size, _SHA512_ARGS_X4_align
-FIELD _lens, 8*4, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*4, _LANE_DATA_align
-END_FIELDS
-
- _MB_MGR_size = _FIELD_OFFSET
- _MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
-
-#######################################################################
-
-#######################################################################
-#### Define constants
-#######################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-#######################################################################
-#### Define JOB_SHA512 structure
-#######################################################################
-
-START_FIELDS # JOB_SHA512
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 8, 8 # length in bytes
-FIELD _result_digest, 8*8, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
- _JOB_SHA512_size = _FIELD_OFFSET
- _JOB_SHA512_align = _STRUCT_ALIGN
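FIELD rounds _FIELD_OFFSET up to each field's alignment before assigning it, and END_FIELDS pads the total size to the largest alignment seen. A small standalone C check of that arithmetic against the JOB_SHA512 layout just above (offsets 0, 8, 32, 96, 104; total size 128, alignment 32):

#include <assert.h>

static unsigned int off, align_max;

/* Mirror of the FIELD macro: align the cursor, record the field
 * offset, then advance past the field.
 */
static unsigned int field(unsigned int size, unsigned int align)
{
	unsigned int o;

	off = (off + align - 1) & ~(align - 1);
	o = off;
	off += size;
	if (align > align_max)
		align_max = align;
	return o;
}

int main(void)
{
	assert(field(8, 8) == 0);	/* _buffer        */
	assert(field(8, 8) == 8);	/* _len           */
	assert(field(8 * 8, 32) == 32);	/* _result_digest */
	assert(field(4, 4) == 96);	/* _status        */
	assert(field(8, 8) == 104);	/* _user_data     */

	/* END_FIELDS: pad the total size to the struct alignment. */
	off = (off + align_max - 1) & ~(align_max - 1);
	assert(off == 128 && align_max == 32);
	return 0;
}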
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
deleted file mode 100644
index 7c629ca..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Flush routine for SHA512 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-.extern sha512_x4_avx2
-
-# LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# idx needs to be other than arg1, arg2, rbx, r12
-#define idx %rdx
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 arg1
-
-#define extra_blocks arg2
-#define p arg2
-
-#define tmp4 %r8
-#define lens0 %r8
-
-#define lens1 %r9
-#define lens2 %r10
-#define lens3 %r11
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rcx : state
-ENTRY(sha512_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
-	# If bit (32+7) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+7, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
-
- # copy idx to empty lanes
-copy_lane_data:
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 4
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 8*I +4)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- mov _lens + 0*8(state),lens0
- mov lens0,idx
- mov _lens + 1*8(state),lens1
- cmp idx,lens1
- cmovb lens1,idx
- mov _lens + 2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens + 3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- mov idx,len2
- and $0xF,idx
- and $~0xFF,len2
- jz len_is_0
-
- sub len2, lens0
- sub len2, lens1
- sub len2, lens2
- sub len2, lens3
- shr $32,len2
- mov lens0, _lens + 0*8(state)
- mov lens1, _lens + 1*8(state)
- mov lens2, _lens + 2*8(state)
- mov lens3, _lens + 3*8(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha512_x4_avx2
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens+4(state, idx, 8)
-
- vmovq _args_digest+0*32(state, idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state, idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state, idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state, idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
- vmovdqu %xmm2, _result_digest+2*16(job_rax)
- vmovdqu %xmm3, _result_digest+3*16(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha512_mb_mgr_flush_avx2)
-.align 16
-
-ENTRY(sha512_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- mov _unused_lanes(state), unused_lanes
- bt $(32+7), unused_lanes
- jc .return_null
-
- # Find min length
- mov _lens(state),lens0
- mov lens0,idx
- mov _lens+1*8(state),lens1
- cmp idx,lens1
- cmovb lens1,idx
- mov _lens+2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens+3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- test $~0xF,idx
- jnz .return_null
- and $0xF,idx
-
- #process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens+4(state, idx, 8)
-
- vmovq _args_digest(state, idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state, idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state, idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state, idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest+0*16(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
- vmovdqu %xmm2, _result_digest+2*16(job_rax)
- vmovdqu %xmm3, _result_digest+3*16(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha512_mb_mgr_get_comp_job_avx2)
-
-.section .rodata.cst8.one, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-
-.section .rodata.cst8.two, "aM", @progbits, 8
-.align 8
-two:
-.quad 2
-
-.section .rodata.cst8.three, "aM", @progbits, 8
-.align 8
-three:
-.quad 3
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
deleted file mode 100644
index d088050..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Initialization code for multi buffer SHA512 algorithm for AVX2
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "sha512_mb_mgr.h"
-
-void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
-{
- unsigned int j;
-
- /* initially all lanes are unused */
- state->lens[0] = 0xFFFFFFFF00000000;
- state->lens[1] = 0xFFFFFFFF00000001;
- state->lens[2] = 0xFFFFFFFF00000002;
- state->lens[3] = 0xFFFFFFFF00000003;
-
- state->unused_lanes = 0xFF03020100;
- for (j = 0; j < 4; j++)
- state->ldata[j].job_in_lane = NULL;
-}
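These initial values encode two things per lane: the remaining length in the upper 32 bits (0xFFFFFFFF for an unused lane) and the lane index in the low bits. That is what lets the "Find min length" sequences in the flush and submit paths pick the lane with the least work using nothing but an unsigned minimum. A C sketch of the same selection (illustrative only):

#include <assert.h>
#include <stdint.h>

/*
 * Pick the lane with the shortest remaining length. Because the
 * length sits in the upper 32 bits and the lane index in the low
 * nibble, the numeric minimum carries both at once; unused lanes
 * hold length 0xFFFFFFFF and therefore never win.
 */
static void find_min(const uint64_t lens[4],
		     unsigned int *lane, uint32_t *blocks)
{
	uint64_t min = lens[0];
	int i;

	for (i = 1; i < 4; i++)
		if (lens[i] < min)
			min = lens[i];

	*lane = min & 0xf;	/* and $0xF, idx in the asm */
	*blocks = min >> 32;	/* shr $32, len2 in the asm */
}

int main(void)
{
	const uint64_t lens[4] = {
		0x0000000500000000,	/* lane 0: 5 blocks left */
		0x0000000200000001,	/* lane 1: 2 blocks left */
		0xFFFFFFFF00000002,	/* lane 2: unused        */
		0x0000000700000003,	/* lane 3: 7 blocks left */
	};
	unsigned int lane;
	uint32_t blocks;

	find_min(lens, &lane, &blocks);
	assert(lane == 1 && blocks == 2);
	return 0;
}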
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
deleted file mode 100644
index 4ba709b..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA512 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-.extern sha512_x4_avx2
-
-#define arg1 %rdi
-#define arg2 %rsi
-
-#define idx %rdx
-#define last_len %rdx
-
-#define size_offset %rcx
-#define tmp2 %rcx
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-#define p2 arg2
-
-#define p %r11
-#define start_offset %r11
-
-#define unused_lanes %rbx
-
-#define job_rax %rax
-#define len %rax
-
-#define lane %r12
-#define tmp3 %r12
-#define lens3 %r12
-
-#define extra_blocks %r8
-#define lens0 %r8
-
-#define tmp %r9
-#define lens1 %r9
-
-#define lane_data %r10
-#define lens2 %r10
-
-#define DWORD_len %eax
-
-# JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job)
-# arg 1 : rcx : state
-# arg 2 : rdx : job
-ENTRY(sha512_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
- movzb %bl,lane
- shr $8, unused_lanes
- imul $_LANE_DATA_size, lane,lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- movl DWORD_len,_lens+4(state , lane, 8)
-
- # Load digest words from result_digest
- vmovdqu _result_digest+0*16(job), %xmm0
- vmovdqu _result_digest+1*16(job), %xmm1
- vmovdqu _result_digest+2*16(job), %xmm2
- vmovdqu _result_digest+3*16(job), %xmm3
-
- vmovq %xmm0, _args_digest(state, lane, 8)
- vpextrq $1, %xmm0, _args_digest+1*32(state , lane, 8)
- vmovq %xmm1, _args_digest+2*32(state , lane, 8)
- vpextrq $1, %xmm1, _args_digest+3*32(state , lane, 8)
- vmovq %xmm2, _args_digest+4*32(state , lane, 8)
- vpextrq $1, %xmm2, _args_digest+5*32(state , lane, 8)
- vmovq %xmm3, _args_digest+6*32(state , lane, 8)
- vpextrq $1, %xmm3, _args_digest+7*32(state , lane, 8)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xFF, unused_lanes
- jne return_null
-
-start_loop:
-
- # Find min length
- mov _lens+0*8(state),lens0
- mov lens0,idx
- mov _lens+1*8(state),lens1
- cmp idx,lens1
- cmovb lens1, idx
- mov _lens+2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens+3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- mov idx,len2
- and $0xF,idx
- and $~0xFF,len2
- jz len_is_0
-
- sub len2,lens0
- sub len2,lens1
- sub len2,lens2
- sub len2,lens3
- shr $32,len2
- mov lens0, _lens + 0*8(state)
- mov lens1, _lens + 1*8(state)
- mov lens2, _lens + 2*8(state)
- mov lens3, _lens + 3*8(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha512_x4_avx2
- # state and idx are intact
-
-len_is_0:
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF,_lens+4(state,idx,8)
- vmovq _args_digest+0*32(state , idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state , idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state , idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state , idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state , idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state , idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state , idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state , idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest + 0*16(job_rax)
- vmovdqu %xmm1, _result_digest + 1*16(job_rax)
- vmovdqu %xmm2, _result_digest + 2*16(job_rax)
- vmovdqu %xmm3, _result_digest + 3*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha512_mb_mgr_submit_avx2)
-
-/* UNUSED?
-.section .rodata.cst16, "aM", @progbits, 16
-.align 16
-H0: .int 0x6a09e667
-H1: .int 0xbb67ae85
-H2: .int 0x3c6ef372
-H3: .int 0xa54ff53a
-H4: .int 0x510e527f
-H5: .int 0x9b05688c
-H6: .int 0x1f83d9ab
-H7: .int 0x5be0cd19
-*/
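The cmp $0xFF, unused_lanes / jne return_null pair above is the scheduling heart of submit: a job triggers actual hashing only once all four lanes are occupied (only the bare 0xFF flag remains on the lane stack); otherwise it just parks in its lane and NULL is returned, leaving completion to a later submit or to the flusher. A condensed C sketch of that decision, with run_sha512_x4() as an assumed stand-in for the real AVX2 hash step:

#include <stddef.h>
#include <stdint.h>

struct job;

struct mgr {
	uint64_t unused_lanes;	/* byte stack, 0xFF flag at bottom */
	struct job *ldata[4];
};

/* Stand-in for the 4-lane AVX2 hash plus min-length completion. */
static struct job *run_sha512_x4(struct mgr *m)
{
	(void)m;
	return NULL;
}

static struct job *submit(struct mgr *m, struct job *job)
{
	unsigned int lane = m->unused_lanes & 0xff;

	m->unused_lanes >>= 8;
	m->ldata[lane] = job;

	/*
	 * Only the bare 0xFF flag left means all four lanes are now
	 * busy: hash until the shortest lane finishes and return it.
	 */
	if (m->unused_lanes == 0xff)
		return run_sha512_x4(m);

	return NULL;	/* job parked; completed by a later call */
}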
diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
deleted file mode 100644
index e22e907..00000000
--- a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Multi-buffer SHA512 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey(a)linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# code to compute quad SHA512 using AVX2
-# use YMMs to tackle the larger digest size
-# outer calling routine takes care of save and restore of XMM registers
-# Logic designed/laid out by JDG
-
-# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
-# Stack must be aligned to 32 bytes before call
-# Linux clobbers: rax rbx rcx rsi r8 r9 r10 r11 r12
-# Linux preserves: rcx rdx rdi rbp r13 r14 r15
-# clobbers ymm0-15
-
-#include <linux/linkage.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-arg1 = %rdi
-arg2 = %rsi
-
-# Common definitions
-STATE = arg1
-INP_SIZE = arg2
-
-IDX = %rax
-ROUND = %rbx
-TBL = %r8
-
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-
-a = %ymm0
-b = %ymm1
-c = %ymm2
-d = %ymm3
-e = %ymm4
-f = %ymm5
-g = %ymm6
-h = %ymm7
-
-a0 = %ymm8
-a1 = %ymm9
-a2 = %ymm10
-
-TT0 = %ymm14
-TT1 = %ymm13
-TT2 = %ymm12
-TT3 = %ymm11
-TT4 = %ymm10
-TT5 = %ymm9
-
-T1 = %ymm14
-TMP = %ymm15
-
-# Define stack usage
-STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24
-
-#define VMOVPD vmovupd
-_digest = SZ4*16
-
-# transpose r0, r1, r2, r3, t0, t1
-# "transpose" data in {r0..r3} using temps {t0..t3}
-# Input looks like: {r0 r1 r2 r3}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-#
-# output looks like: {t0 r1 r0 r3}
-# t0 = {d1 d0 c1 c0 b1 b0 a1 a0}
-# r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
-# r0 = {d5 d4 c5 c4 b5 b4 a5 a4}
-# r3 = {d7 d6 c7 c6 b7 b6 a7 a6}
-
-.macro TRANSPOSE r0 r1 r2 r3 t0 t1
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
-
- vperm2f128 $0x20, \r2, \r0, \r1 # h6...a6
- vperm2f128 $0x31, \r2, \r0, \r3 # h2...a2
- vperm2f128 $0x31, \t1, \t0, \r0 # h5...a5
- vperm2f128 $0x20, \t1, \t0, \t0 # h1...a1
-.endm
-
-.macro ROTATE_ARGS
-TMP_ = h
-h = g
-g = f
-f = e
-e = d
-d = c
-c = b
-b = a
-a = TMP_
-.endm
-
-# PRORQ reg, imm, tmp
-# packed-rotate-right-double
-# does a rotate by doing two shifts and an or
-.macro _PRORQ reg imm tmp
- vpsllq $(64-\imm),\reg,\tmp
- vpsrlq $\imm,\reg, \reg
- vpor \tmp,\reg, \reg
-.endm
-
-# non-destructive
-# PRORQ_nd reg, imm, tmp, src
-.macro _PRORQ_nd reg imm tmp src
- vpsllq $(64-\imm), \src, \tmp
- vpsrlq $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-# PRORQ dst/src, amt
-.macro PRORQ reg imm
- _PRORQ \reg, \imm, TMP
-.endm
-
-# PRORQ_nd dst, src, amt
-.macro PRORQ_nd reg tmp imm
- _PRORQ_nd \reg, \imm, TMP, \tmp
-.endm
-
-#; arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_00_15 _T1 i
- PRORQ_nd a0, e, (18-14) # sig1: a0 = (e >> 4)
-
- vpxor g, f, a2 # ch: a2 = f^g
- vpand e,a2, a2 # ch: a2 = (f^g)&e
- vpxor g, a2, a2 # a2 = ch
-
- PRORQ_nd a1,e,41 # sig1: a1 = (e >> 25)
-
- offset = SZ4*(\i & 0xf)
- vmovdqu \_T1,offset(%rsp)
- vpaddq (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
- vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5)
- PRORQ a0, 14 # sig1: a0 = (e >> 6) ^ (e >> 11)
- vpaddq a2, h, h # h = h + ch
- PRORQ_nd a2,a,6 # sig0: a2 = (a >> 11)
- vpaddq \_T1,h, h # h = h + ch + W + K
- vpxor a1, a0, a0 # a0 = sigma1
- vmovdqu a,\_T1
- PRORQ_nd a1,a,39 # sig0: a1 = (a >> 22)
- vpxor c, \_T1, \_T1 # maj: T1 = a^c
- add $SZ4, ROUND # ROUND++
- vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
- vpaddq a0, h, h
- vpaddq h, d, d
- vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11)
- PRORQ a2,28 # sig0: a2 = (a >> 2) ^ (a >> 13)
- vpxor a1, a2, a2 # a2 = sig0
- vpand c, a, a1 # maj: a1 = a&c
- vpor \_T1, a1, a1 # a1 = maj
- vpaddq a1, h, h # h = h + ch + W + K + maj
- vpaddq a2, h, h # h = h + ch + W + K + maj + sigma0
- ROTATE_ARGS
-.endm
-
-
-#; arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_16_XX _T1 i
- vmovdqu SZ4*((\i-15)&0xf)(%rsp), \_T1
- vmovdqu SZ4*((\i-2)&0xf)(%rsp), a1
- vmovdqu \_T1, a0
- PRORQ \_T1,7
- vmovdqu a1, a2
- PRORQ a1,42
- vpxor a0, \_T1, \_T1
- PRORQ \_T1, 1
- vpxor a2, a1, a1
- PRORQ a1, 19
- vpsrlq $7, a0, a0
- vpxor a0, \_T1, \_T1
- vpsrlq $6, a2, a2
- vpxor a2, a1, a1
- vpaddq SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1
- vpaddq SZ4*((\i-7)&0xf)(%rsp), a1, a1
- vpaddq a1, \_T1, \_T1
-
- ROUND_00_15 \_T1,\i
-.endm
-
-
-# void sha512_x4_avx2(void *STATE, const int INP_SIZE)
-# arg 1 : STATE : pointer to input data
-# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
-ENTRY(sha512_x4_avx2)
- # general registers preserved in outer calling routine
- # outer calling routine saves all the XMM registers
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
- sub $STACK_SPACE1, %rsp
-
- # Load the pre-transposed incoming digest.
- vmovdqu 0*SHA512_DIGEST_ROW_SIZE(STATE),a
- vmovdqu 1*SHA512_DIGEST_ROW_SIZE(STATE),b
- vmovdqu 2*SHA512_DIGEST_ROW_SIZE(STATE),c
- vmovdqu 3*SHA512_DIGEST_ROW_SIZE(STATE),d
- vmovdqu 4*SHA512_DIGEST_ROW_SIZE(STATE),e
- vmovdqu 5*SHA512_DIGEST_ROW_SIZE(STATE),f
- vmovdqu 6*SHA512_DIGEST_ROW_SIZE(STATE),g
- vmovdqu 7*SHA512_DIGEST_ROW_SIZE(STATE),h
-
- lea K512_4(%rip),TBL
-
- # load the address of each of the 4 message lanes
- # getting ready to transpose input onto stack
- mov _data_ptr+0*PTR_SZ(STATE),inp0
- mov _data_ptr+1*PTR_SZ(STATE),inp1
- mov _data_ptr+2*PTR_SZ(STATE),inp2
- mov _data_ptr+3*PTR_SZ(STATE),inp3
-
- xor IDX, IDX
-lloop:
- xor ROUND, ROUND
-
- # save old digest
- vmovdqu a, _digest(%rsp)
- vmovdqu b, _digest+1*SZ4(%rsp)
- vmovdqu c, _digest+2*SZ4(%rsp)
- vmovdqu d, _digest+3*SZ4(%rsp)
- vmovdqu e, _digest+4*SZ4(%rsp)
- vmovdqu f, _digest+5*SZ4(%rsp)
- vmovdqu g, _digest+6*SZ4(%rsp)
- vmovdqu h, _digest+7*SZ4(%rsp)
- i = 0
-.rep 4
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP
- VMOVPD i*32(inp0, IDX), TT2
- VMOVPD i*32(inp1, IDX), TT1
- VMOVPD i*32(inp2, IDX), TT4
- VMOVPD i*32(inp3, IDX), TT3
- TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5
- vpshufb TMP, TT0, TT0
- vpshufb TMP, TT1, TT1
- vpshufb TMP, TT2, TT2
- vpshufb TMP, TT3, TT3
- ROUND_00_15 TT0,(i*4+0)
- ROUND_00_15 TT1,(i*4+1)
- ROUND_00_15 TT2,(i*4+2)
- ROUND_00_15 TT3,(i*4+3)
- i = (i+1)
-.endr
- add $128, IDX
-
- i = (i*4)
-
- jmp Lrounds_16_xx
-.align 16
-Lrounds_16_xx:
-.rep 16
- ROUND_16_XX T1, i
- i = (i+1)
-.endr
- cmp $0xa00,ROUND
- jb Lrounds_16_xx
-
- # add old digest
- vpaddq _digest(%rsp), a, a
- vpaddq _digest+1*SZ4(%rsp), b, b
- vpaddq _digest+2*SZ4(%rsp), c, c
- vpaddq _digest+3*SZ4(%rsp), d, d
- vpaddq _digest+4*SZ4(%rsp), e, e
- vpaddq _digest+5*SZ4(%rsp), f, f
- vpaddq _digest+6*SZ4(%rsp), g, g
- vpaddq _digest+7*SZ4(%rsp), h, h
-
- sub $1, INP_SIZE # unit is blocks
- jne lloop
-
- # write back to memory (state object) the transposed digest
- vmovdqu a, 0*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu b, 1*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu c, 2*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu d, 3*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu e, 4*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu f, 5*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu g, 6*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu h, 7*SHA512_DIGEST_ROW_SIZE(STATE)
-
- # update input data pointers
- add IDX, inp0
- mov inp0, _data_ptr+0*PTR_SZ(STATE)
- add IDX, inp1
- mov inp1, _data_ptr+1*PTR_SZ(STATE)
- add IDX, inp2
- mov inp2, _data_ptr+2*PTR_SZ(STATE)
- add IDX, inp3
- mov inp3, _data_ptr+3*PTR_SZ(STATE)
-
- #;;;;;;;;;;;;;;;
- #; Postamble
- add $STACK_SPACE1, %rsp
- # restore callee-saved clobbered registers
-
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- # outer calling routine restores XMM and other GP registers
- ret
-ENDPROC(sha512_x4_avx2)
-
-.section .rodata.K512_4, "a", @progbits
-.align 64
-K512_4:
- .octa 0x428a2f98d728ae22428a2f98d728ae22,\
- 0x428a2f98d728ae22428a2f98d728ae22
- .octa 0x7137449123ef65cd7137449123ef65cd,\
- 0x7137449123ef65cd7137449123ef65cd
- .octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f,\
- 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f
- .octa 0xe9b5dba58189dbbce9b5dba58189dbbc,\
- 0xe9b5dba58189dbbce9b5dba58189dbbc
- .octa 0x3956c25bf348b5383956c25bf348b538,\
- 0x3956c25bf348b5383956c25bf348b538
- .octa 0x59f111f1b605d01959f111f1b605d019,\
- 0x59f111f1b605d01959f111f1b605d019
- .octa 0x923f82a4af194f9b923f82a4af194f9b,\
- 0x923f82a4af194f9b923f82a4af194f9b
- .octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118,\
- 0xab1c5ed5da6d8118ab1c5ed5da6d8118
- .octa 0xd807aa98a3030242d807aa98a3030242,\
- 0xd807aa98a3030242d807aa98a3030242
- .octa 0x12835b0145706fbe12835b0145706fbe,\
- 0x12835b0145706fbe12835b0145706fbe
- .octa 0x243185be4ee4b28c243185be4ee4b28c,\
- 0x243185be4ee4b28c243185be4ee4b28c
- .octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2,\
- 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2
- .octa 0x72be5d74f27b896f72be5d74f27b896f,\
- 0x72be5d74f27b896f72be5d74f27b896f
- .octa 0x80deb1fe3b1696b180deb1fe3b1696b1,\
- 0x80deb1fe3b1696b180deb1fe3b1696b1
- .octa 0x9bdc06a725c712359bdc06a725c71235,\
- 0x9bdc06a725c712359bdc06a725c71235
- .octa 0xc19bf174cf692694c19bf174cf692694,\
- 0xc19bf174cf692694c19bf174cf692694
- .octa 0xe49b69c19ef14ad2e49b69c19ef14ad2,\
- 0xe49b69c19ef14ad2e49b69c19ef14ad2
- .octa 0xefbe4786384f25e3efbe4786384f25e3,\
- 0xefbe4786384f25e3efbe4786384f25e3
- .octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5,\
- 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5
- .octa 0x240ca1cc77ac9c65240ca1cc77ac9c65,\
- 0x240ca1cc77ac9c65240ca1cc77ac9c65
- .octa 0x2de92c6f592b02752de92c6f592b0275,\
- 0x2de92c6f592b02752de92c6f592b0275
- .octa 0x4a7484aa6ea6e4834a7484aa6ea6e483,\
- 0x4a7484aa6ea6e4834a7484aa6ea6e483
- .octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4,\
- 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4
- .octa 0x76f988da831153b576f988da831153b5,\
- 0x76f988da831153b576f988da831153b5
- .octa 0x983e5152ee66dfab983e5152ee66dfab,\
- 0x983e5152ee66dfab983e5152ee66dfab
- .octa 0xa831c66d2db43210a831c66d2db43210,\
- 0xa831c66d2db43210a831c66d2db43210
- .octa 0xb00327c898fb213fb00327c898fb213f,\
- 0xb00327c898fb213fb00327c898fb213f
- .octa 0xbf597fc7beef0ee4bf597fc7beef0ee4,\
- 0xbf597fc7beef0ee4bf597fc7beef0ee4
- .octa 0xc6e00bf33da88fc2c6e00bf33da88fc2,\
- 0xc6e00bf33da88fc2c6e00bf33da88fc2
- .octa 0xd5a79147930aa725d5a79147930aa725,\
- 0xd5a79147930aa725d5a79147930aa725
- .octa 0x06ca6351e003826f06ca6351e003826f,\
- 0x06ca6351e003826f06ca6351e003826f
- .octa 0x142929670a0e6e70142929670a0e6e70,\
- 0x142929670a0e6e70142929670a0e6e70
- .octa 0x27b70a8546d22ffc27b70a8546d22ffc,\
- 0x27b70a8546d22ffc27b70a8546d22ffc
- .octa 0x2e1b21385c26c9262e1b21385c26c926,\
- 0x2e1b21385c26c9262e1b21385c26c926
- .octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed,\
- 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed
- .octa 0x53380d139d95b3df53380d139d95b3df,\
- 0x53380d139d95b3df53380d139d95b3df
- .octa 0x650a73548baf63de650a73548baf63de,\
- 0x650a73548baf63de650a73548baf63de
- .octa 0x766a0abb3c77b2a8766a0abb3c77b2a8,\
- 0x766a0abb3c77b2a8766a0abb3c77b2a8
- .octa 0x81c2c92e47edaee681c2c92e47edaee6,\
- 0x81c2c92e47edaee681c2c92e47edaee6
- .octa 0x92722c851482353b92722c851482353b,\
- 0x92722c851482353b92722c851482353b
- .octa 0xa2bfe8a14cf10364a2bfe8a14cf10364,\
- 0xa2bfe8a14cf10364a2bfe8a14cf10364
- .octa 0xa81a664bbc423001a81a664bbc423001,\
- 0xa81a664bbc423001a81a664bbc423001
- .octa 0xc24b8b70d0f89791c24b8b70d0f89791,\
- 0xc24b8b70d0f89791c24b8b70d0f89791
- .octa 0xc76c51a30654be30c76c51a30654be30,\
- 0xc76c51a30654be30c76c51a30654be30
- .octa 0xd192e819d6ef5218d192e819d6ef5218,\
- 0xd192e819d6ef5218d192e819d6ef5218
- .octa 0xd69906245565a910d69906245565a910,\
- 0xd69906245565a910d69906245565a910
- .octa 0xf40e35855771202af40e35855771202a,\
- 0xf40e35855771202af40e35855771202a
- .octa 0x106aa07032bbd1b8106aa07032bbd1b8,\
- 0x106aa07032bbd1b8106aa07032bbd1b8
- .octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8,\
- 0x19a4c116b8d2d0c819a4c116b8d2d0c8
- .octa 0x1e376c085141ab531e376c085141ab53,\
- 0x1e376c085141ab531e376c085141ab53
- .octa 0x2748774cdf8eeb992748774cdf8eeb99,\
- 0x2748774cdf8eeb992748774cdf8eeb99
- .octa 0x34b0bcb5e19b48a834b0bcb5e19b48a8,\
- 0x34b0bcb5e19b48a834b0bcb5e19b48a8
- .octa 0x391c0cb3c5c95a63391c0cb3c5c95a63,\
- 0x391c0cb3c5c95a63391c0cb3c5c95a63
- .octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb,\
- 0x4ed8aa4ae3418acb4ed8aa4ae3418acb
- .octa 0x5b9cca4f7763e3735b9cca4f7763e373,\
- 0x5b9cca4f7763e3735b9cca4f7763e373
- .octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3,\
- 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3
- .octa 0x748f82ee5defb2fc748f82ee5defb2fc,\
- 0x748f82ee5defb2fc748f82ee5defb2fc
- .octa 0x78a5636f43172f6078a5636f43172f60,\
- 0x78a5636f43172f6078a5636f43172f60
- .octa 0x84c87814a1f0ab7284c87814a1f0ab72,\
- 0x84c87814a1f0ab7284c87814a1f0ab72
- .octa 0x8cc702081a6439ec8cc702081a6439ec,\
- 0x8cc702081a6439ec8cc702081a6439ec
- .octa 0x90befffa23631e2890befffa23631e28,\
- 0x90befffa23631e2890befffa23631e28
- .octa 0xa4506cebde82bde9a4506cebde82bde9,\
- 0xa4506cebde82bde9a4506cebde82bde9
- .octa 0xbef9a3f7b2c67915bef9a3f7b2c67915,\
- 0xbef9a3f7b2c67915bef9a3f7b2c67915
- .octa 0xc67178f2e372532bc67178f2e372532b,\
- 0xc67178f2e372532bc67178f2e372532b
- .octa 0xca273eceea26619cca273eceea26619c,\
- 0xca273eceea26619cca273eceea26619c
- .octa 0xd186b8c721c0c207d186b8c721c0c207,\
- 0xd186b8c721c0c207d186b8c721c0c207
- .octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e,\
- 0xeada7dd6cde0eb1eeada7dd6cde0eb1e
- .octa 0xf57d4f7fee6ed178f57d4f7fee6ed178,\
- 0xf57d4f7fee6ed178f57d4f7fee6ed178
- .octa 0x06f067aa72176fba06f067aa72176fba,\
- 0x06f067aa72176fba06f067aa72176fba
- .octa 0x0a637dc5a2c898a60a637dc5a2c898a6,\
- 0x0a637dc5a2c898a60a637dc5a2c898a6
- .octa 0x113f9804bef90dae113f9804bef90dae,\
- 0x113f9804bef90dae113f9804bef90dae
- .octa 0x1b710b35131c471b1b710b35131c471b,\
- 0x1b710b35131c471b1b710b35131c471b
- .octa 0x28db77f523047d8428db77f523047d84,\
- 0x28db77f523047d8428db77f523047d84
- .octa 0x32caab7b40c7249332caab7b40c72493,\
- 0x32caab7b40c7249332caab7b40c72493
- .octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc,\
- 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc
- .octa 0x431d67c49c100d4c431d67c49c100d4c,\
- 0x431d67c49c100d4c431d67c49c100d4c
- .octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6,\
- 0x4cc5d4becb3e42b64cc5d4becb3e42b6
- .octa 0x597f299cfc657e2a597f299cfc657e2a,\
- 0x597f299cfc657e2a597f299cfc657e2a
- .octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec,\
- 0x5fcb6fab3ad6faec5fcb6fab3ad6faec
- .octa 0x6c44198c4a4758176c44198c4a475817,\
- 0x6c44198c4a4758176c44198c4a475817
-
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
- .octa 0x18191a1b1c1d1e1f1011121314151617
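sha512_x4_avx2 keeps the four lanes' digests transposed: row r of the digest area holds digest word r of all four lanes (hence SHA512_DIGEST_ROW_SIZE = 4 lanes * 8 bytes), so one YMM load fetches the same word across every lane. A scalar C sketch of the layout conversion only (the SIMD rounds themselves are what the assembly above implements):

#include <stdint.h>

#define LANES 4
#define WORDS 8			/* SHA-512 digest words per lane */

/* Gather per-lane digests into the transposed [word][lane] layout
 * the manager stores; one row equals one vmovdqu-sized YMM load in
 * the assembly above.
 */
static void digests_transpose_in(uint64_t t[WORDS][LANES],
				 const uint64_t d[LANES][WORDS])
{
	for (int w = 0; w < WORDS; w++)
		for (int l = 0; l < LANES; l++)
			t[w][l] = d[l][w];
}

/* Scatter the transposed digest back out to per-lane order. */
static void digests_transpose_out(uint64_t d[LANES][WORDS],
				  const uint64_t t[WORDS][LANES])
{
	for (int l = 0; l < LANES; l++)
		for (int w = 0; w < WORDS; w++)
			d[l][w] = t[w][l];
}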
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 0fb9586..0ec4767 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -213,20 +213,6 @@ config CRYPTO_CRYPTD
converts an arbitrary synchronous software crypto algorithm
into an asynchronous algorithm that executes in a kernel thread.
-config CRYPTO_MCRYPTD
- tristate "Software async multi-buffer crypto daemon"
- select CRYPTO_BLKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MANAGER
- select CRYPTO_WORKQUEUE
- help
- This is a generic software asynchronous crypto daemon that
- provides the kernel thread to assist multi-buffer crypto
- algorithms for submitting jobs and flushing jobs in multi-buffer
- crypto algorithms. Multi-buffer crypto algorithms are executed
- in the context of this kernel thread and drivers can post
- their crypto request asynchronously to be processed by this daemon.
-
config CRYPTO_AUTHENC
tristate "Authenc support"
select CRYPTO_AEAD
@@ -848,54 +834,6 @@ config CRYPTO_SHA1_PPC_SPE
SHA-1 secure hash standard (DFIPS 180-4) implemented
using powerpc SPE SIMD instruction set.
-config CRYPTO_SHA1_MB
- tristate "SHA1 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA1
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
-	  used when there is a significant amount of work to keep the
-	  data lanes filled to get a performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
-config CRYPTO_SHA256_MB
- tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA256
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-256 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
-	  used when there is a significant amount of work to keep the
-	  data lanes filled to get a performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
-config CRYPTO_SHA512_MB
- tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA512
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-512 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
- used when there is a significant amount of work to keep the
- data lanes filled to get a performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
config CRYPTO_SHA256
tristate "SHA224 and SHA256 digest algorithm"
select CRYPTO_HASH
diff --git a/crypto/Makefile b/crypto/Makefile
index f6a234d..d719843 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -93,7 +93,6 @@ obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o
obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o
obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
-obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
deleted file mode 100644
index f141521..00000000
--- a/crypto/mcryptd.c
+++ /dev/null
@@ -1,675 +0,0 @@
-/*
- * Software multibuffer async crypto daemon.
- *
- * Copyright (c) 2014 Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * Adapted from crypto daemon.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/aead.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <linux/sched/stat.h>
-#include <linux/slab.h>
-
-#define MCRYPTD_MAX_CPU_QLEN 100
-#define MCRYPTD_BATCH 9
-
-static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
- unsigned int tail);
-
-struct mcryptd_flush_list {
- struct list_head list;
- struct mutex lock;
-};
-
-static struct mcryptd_flush_list __percpu *mcryptd_flist;
-
-struct hashd_instance_ctx {
- struct crypto_ahash_spawn spawn;
- struct mcryptd_queue *queue;
-};
-
-static void mcryptd_queue_worker(struct work_struct *work);
-
-void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
-{
- struct mcryptd_flush_list *flist;
-
- if (!cstate->flusher_engaged) {
- /* put the flusher on the flush list */
- flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
- mutex_lock(&flist->lock);
- list_add_tail(&cstate->flush_list, &flist->list);
- cstate->flusher_engaged = true;
- cstate->next_flush = jiffies + delay;
- queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
- &cstate->flush, delay);
- mutex_unlock(&flist->lock);
- }
-}
-EXPORT_SYMBOL(mcryptd_arm_flusher);
-
-static int mcryptd_init_queue(struct mcryptd_queue *queue,
- unsigned int max_cpu_qlen)
-{
- int cpu;
- struct mcryptd_cpu_queue *cpu_queue;
-
- queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
- pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
- if (!queue->cpu_queue)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
- pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
- crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
- INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
- spin_lock_init(&cpu_queue->q_lock);
- }
- return 0;
-}
-
-static void mcryptd_fini_queue(struct mcryptd_queue *queue)
-{
- int cpu;
- struct mcryptd_cpu_queue *cpu_queue;
-
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
- BUG_ON(cpu_queue->queue.qlen);
- }
- free_percpu(queue->cpu_queue);
-}
-
-static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
- struct crypto_async_request *request,
- struct mcryptd_hash_request_ctx *rctx)
-{
- int cpu, err;
- struct mcryptd_cpu_queue *cpu_queue;
-
- cpu_queue = raw_cpu_ptr(queue->cpu_queue);
- spin_lock(&cpu_queue->q_lock);
- cpu = smp_processor_id();
- rctx->tag.cpu = smp_processor_id();
-
- err = crypto_enqueue_request(&cpu_queue->queue, request);
- pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
- cpu, cpu_queue, request);
- spin_unlock(&cpu_queue->q_lock);
- queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-
- return err;
-}
-
-/*
- * Try to opportunistically flush the partially completed jobs if
- * the crypto daemon is the only task running.
- */
-static void mcryptd_opportunistic_flush(void)
-{
- struct mcryptd_flush_list *flist;
- struct mcryptd_alg_cstate *cstate;
-
- flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
- while (single_task_running()) {
- mutex_lock(&flist->lock);
- cstate = list_first_entry_or_null(&flist->list,
- struct mcryptd_alg_cstate, flush_list);
- if (!cstate || !cstate->flusher_engaged) {
- mutex_unlock(&flist->lock);
- return;
- }
- list_del(&cstate->flush_list);
- cstate->flusher_engaged = false;
- mutex_unlock(&flist->lock);
- cstate->alg_state->flusher(cstate);
- }
-}
-
-/*
- * Called in workqueue context; does one unit of real crypto work
- * (via req->complete) and reschedules itself if there is more
- * work to do.
- */
-static void mcryptd_queue_worker(struct work_struct *work)
-{
- struct mcryptd_cpu_queue *cpu_queue;
- struct crypto_async_request *req, *backlog;
- int i;
-
- /*
- * Need to loop through more than once for multi-buffer to
- * be effective.
- */
-
- cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
- i = 0;
- while (i < MCRYPTD_BATCH || single_task_running()) {
-
- spin_lock_bh(&cpu_queue->q_lock);
- backlog = crypto_get_backlog(&cpu_queue->queue);
- req = crypto_dequeue_request(&cpu_queue->queue);
- spin_unlock_bh(&cpu_queue->q_lock);
-
- if (!req) {
- mcryptd_opportunistic_flush();
- return;
- }
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
- req->complete(req, 0);
- if (!cpu_queue->queue.qlen)
- return;
- ++i;
- }
- if (cpu_queue->queue.qlen)
- queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
-}
-
-void mcryptd_flusher(struct work_struct *__work)
-{
- struct mcryptd_alg_cstate *alg_cpu_state;
- struct mcryptd_alg_state *alg_state;
- struct mcryptd_flush_list *flist;
- int cpu;
-
- cpu = smp_processor_id();
- alg_cpu_state = container_of(to_delayed_work(__work),
- struct mcryptd_alg_cstate, flush);
- alg_state = alg_cpu_state->alg_state;
- if (alg_cpu_state->cpu != cpu)
- pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
- cpu, alg_cpu_state->cpu);
-
- if (alg_cpu_state->flusher_engaged) {
- flist = per_cpu_ptr(mcryptd_flist, cpu);
- mutex_lock(&flist->lock);
- list_del(&alg_cpu_state->flush_list);
- alg_cpu_state->flusher_engaged = false;
- mutex_unlock(&flist->lock);
- alg_state->flusher(alg_cpu_state);
- }
-}
-EXPORT_SYMBOL_GPL(mcryptd_flusher);
-
-static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-
- return ictx->queue;
-}
-
-static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
- unsigned int tail)
-{
- char *p;
- struct crypto_instance *inst;
- int err;
-
- p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- inst = (void *)(p + head);
-
- err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_inst;
-
- memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
-
- inst->alg.cra_priority = alg->cra_priority + 50;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
-
-out:
- return p;
-
-out_free_inst:
- kfree(p);
- p = ERR_PTR(err);
- goto out;
-}
-
-static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
- u32 *mask)
-{
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return false;
-
- *type |= algt->type & CRYPTO_ALG_INTERNAL;
- *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
-
- if (*type & *mask & CRYPTO_ALG_INTERNAL)
- return true;
- else
- return false;
-}
-
-static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_ahash_spawn *spawn = &ictx->spawn;
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_ahash *hash;
-
- hash = crypto_spawn_ahash(spawn);
- if (IS_ERR(hash))
- return PTR_ERR(hash);
-
- ctx->child = hash;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct mcryptd_hash_request_ctx) +
- crypto_ahash_reqsize(hash));
- return 0;
-}
-
-static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_ahash(ctx->child);
-}
-
-static int mcryptd_hash_setkey(struct crypto_ahash *parent,
- const u8 *key, unsigned int keylen)
-{
- struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
- struct crypto_ahash *child = ctx->child;
- int err;
-
- crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(child, key, keylen);
- crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
-static int mcryptd_hash_enqueue(struct ahash_request *req,
- crypto_completion_t complete)
-{
- int ret;
-
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct mcryptd_queue *queue =
- mcryptd_get_queue(crypto_ahash_tfm(tfm));
-
- rctx->complete = req->base.complete;
- req->base.complete = complete;
-
- ret = mcryptd_enqueue_request(queue, &req->base, rctx);
-
- return ret;
-}
-
-static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_ahash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct ahash_request *desc = &rctx->areq;
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- ahash_request_set_tfm(desc, child);
- ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req_async);
-
- rctx->out = req->result;
- err = crypto_ahash_init(desc);
-
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_init_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_init);
-}
-
-static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- rctx->out = req->result;
- err = crypto_ahash_update(&rctx->areq);
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_update_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_update);
-}
-
-static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- rctx->out = req->result;
- err = crypto_ahash_final(&rctx->areq);
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_final_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_final);
-}
-
-static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
- rctx->out = req->result;
- err = crypto_ahash_finup(&rctx->areq);
-
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
-}
-
-static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_ahash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct ahash_request *desc = &rctx->areq;
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- ahash_request_set_tfm(desc, child);
- ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req_async);
-
- rctx->out = req->result;
- err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc);
-
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
-}
-
-static int mcryptd_hash_export(struct ahash_request *req, void *out)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- return crypto_ahash_export(&rctx->areq, out);
-}
-
-static int mcryptd_hash_import(struct ahash_request *req, const void *in)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- return crypto_ahash_import(&rctx->areq, in);
-}
-
-static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
- struct mcryptd_queue *queue)
-{
- struct hashd_instance_ctx *ctx;
- struct ahash_instance *inst;
- struct hash_alg_common *halg;
- struct crypto_alg *alg;
- u32 type = 0;
- u32 mask = 0;
- int err;
-
- if (!mcryptd_check_internal(tb, &type, &mask))
- return -EINVAL;
-
- halg = ahash_attr_alg(tb[1], type, mask);
- if (IS_ERR(halg))
- return PTR_ERR(halg);
-
- alg = &halg->base;
- pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
- inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
- sizeof(*ctx));
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
-
- ctx = ahash_instance_ctx(inst);
- ctx->queue = queue;
-
- err = crypto_init_ahash_spawn(&ctx->spawn, halg,
- ahash_crypto_instance(inst));
- if (err)
- goto out_free_inst;
-
- inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_OPTIONAL_KEY));
-
- inst->alg.halg.digestsize = halg->digestsize;
- inst->alg.halg.statesize = halg->statesize;
- inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
-
- inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
- inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;
-
- inst->alg.init = mcryptd_hash_init_enqueue;
- inst->alg.update = mcryptd_hash_update_enqueue;
- inst->alg.final = mcryptd_hash_final_enqueue;
- inst->alg.finup = mcryptd_hash_finup_enqueue;
- inst->alg.export = mcryptd_hash_export;
- inst->alg.import = mcryptd_hash_import;
- if (crypto_hash_alg_has_setkey(halg))
- inst->alg.setkey = mcryptd_hash_setkey;
- inst->alg.digest = mcryptd_hash_digest_enqueue;
-
- err = ahash_register_instance(tmpl, inst);
- if (err) {
- crypto_drop_ahash(&ctx->spawn);
-out_free_inst:
- kfree(inst);
- }
-
-out_put_alg:
- crypto_mod_put(alg);
- return err;
-}
-
-static struct mcryptd_queue mqueue;
-
-static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_DIGEST:
- return mcryptd_create_hash(tmpl, tb, &mqueue);
- break;
- }
-
- return -EINVAL;
-}
-
-static void mcryptd_free(struct crypto_instance *inst)
-{
- struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
- struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
-
- switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_drop_ahash(&hctx->spawn);
- kfree(ahash_instance(inst));
- return;
- default:
- crypto_drop_spawn(&ctx->spawn);
- kfree(inst);
- }
-}
-
-static struct crypto_template mcryptd_tmpl = {
- .name = "mcryptd",
- .create = mcryptd_create,
- .free = mcryptd_free,
- .module = THIS_MODULE,
-};
-
-struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask)
-{
- char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
- struct crypto_ahash *tfm;
-
- if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
- "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-EINVAL);
- tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
- if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
- crypto_free_ahash(tfm);
- return ERR_PTR(-EINVAL);
- }
-
- return __mcryptd_ahash_cast(tfm);
-}
-EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
-
-struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
-{
- struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
-
- return ctx->child;
-}
-EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
-
-struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- return &rctx->areq;
-}
-EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);
-
-void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
-{
- crypto_free_ahash(&tfm->base);
-}
-EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
-
-static int __init mcryptd_init(void)
-{
- int err, cpu;
- struct mcryptd_flush_list *flist;
-
- mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
- for_each_possible_cpu(cpu) {
- flist = per_cpu_ptr(mcryptd_flist, cpu);
- INIT_LIST_HEAD(&flist->list);
- mutex_init(&flist->lock);
- }
-
- err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
- if (err) {
- free_percpu(mcryptd_flist);
- return err;
- }
-
- err = crypto_register_template(&mcryptd_tmpl);
- if (err) {
- mcryptd_fini_queue(&mqueue);
- free_percpu(mcryptd_flist);
- }
-
- return err;
-}
-
-static void __exit mcryptd_exit(void)
-{
- mcryptd_fini_queue(&mqueue);
- crypto_unregister_template(&mcryptd_tmpl);
- free_percpu(mcryptd_flist);
-}
-
-subsys_initcall(mcryptd_init);
-module_exit(mcryptd_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
-MODULE_ALIAS_CRYPTO("mcryptd");
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
deleted file mode 100644
index b67404f..00000000
--- a/include/crypto/mcryptd.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Software async multibuffer crypto daemon headers
- *
- * Author:
- * Tim Chen <tim.c.chen(a)linux.intel.com>
- *
- * Copyright (c) 2014, Intel Corporation.
- */
-
-#ifndef _CRYPTO_MCRYPT_H
-#define _CRYPTO_MCRYPT_H
-
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <crypto/hash.h>
-
-struct mcryptd_ahash {
- struct crypto_ahash base;
-};
-
-static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
- struct crypto_ahash *tfm)
-{
- return (struct mcryptd_ahash *)tfm;
-}
-
-struct mcryptd_cpu_queue {
- struct crypto_queue queue;
- spinlock_t q_lock;
- struct work_struct work;
-};
-
-struct mcryptd_queue {
- struct mcryptd_cpu_queue __percpu *cpu_queue;
-};
-
-struct mcryptd_instance_ctx {
- struct crypto_spawn spawn;
- struct mcryptd_queue *queue;
-};
-
-struct mcryptd_hash_ctx {
- struct crypto_ahash *child;
- struct mcryptd_alg_state *alg_state;
-};
-
-struct mcryptd_tag {
- /* seq number of request */
- unsigned seq_num;
- /* arrival time of request */
- unsigned long arrival;
- unsigned long expire;
- int cpu;
-};
-
-struct mcryptd_hash_request_ctx {
- struct list_head waiter;
- crypto_completion_t complete;
- struct mcryptd_tag tag;
- struct crypto_hash_walk walk;
- u8 *out;
- int flag;
- struct ahash_request areq;
-};
-
-struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
-struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req);
-void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
-void mcryptd_flusher(struct work_struct *work);
-
-enum mcryptd_req_type {
- MCRYPTD_NONE,
- MCRYPTD_UPDATE,
- MCRYPTD_FINUP,
- MCRYPTD_DIGEST,
- MCRYPTD_FINAL
-};
-
-struct mcryptd_alg_cstate {
- unsigned long next_flush;
- unsigned next_seq_num;
- bool flusher_engaged;
- struct delayed_work flush;
- int cpu;
- struct mcryptd_alg_state *alg_state;
- void *mgr;
- spinlock_t work_lock;
- struct list_head work_list;
- struct list_head flush_list;
-};
-
-struct mcryptd_alg_state {
- struct mcryptd_alg_cstate __percpu *alg_cstate;
- unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
-};
-
-/* return delay in jiffies from current time */
-static inline unsigned long get_delay(unsigned long t)
-{
- long delay;
-
- delay = (long) t - (long) jiffies;
- if (delay <= 0)
- return 0;
- else
- return (unsigned long) delay;
-}
-
-void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay);
-
-#endif
--
1.8.3
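
For context on the pattern this patch removes: mcryptd kept a per-CPU request queue whose worker drained at most MCRYPTD_BATCH requests per pass before re-queueing itself, and, when a queue ran dry, flushed partially filled multi-buffer SIMD lanes so stalled jobs still completed. Below is a minimal userspace sketch of that batch-and-flush loop — not kernel code and not the removed implementation; the names enqueue, dequeue, worker_pass, and flush_partial_lanes are illustrative only, and the list is LIFO for brevity where the kernel's crypto_queue is FIFO.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH 9 /* mirrors MCRYPTD_BATCH in the removed driver */

struct job {
	int id;
	struct job *next;
};

static struct job *queue_head;
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;

static void enqueue(int id)
{
	struct job *j = malloc(sizeof(*j));

	if (!j)
		return;
	j->id = id;
	pthread_mutex_lock(&qlock);
	j->next = queue_head;
	queue_head = j;
	pthread_mutex_unlock(&qlock);
}

static struct job *dequeue(void)
{
	struct job *j;

	pthread_mutex_lock(&qlock);
	j = queue_head;
	if (j)
		queue_head = j->next;
	pthread_mutex_unlock(&qlock);
	return j;
}

/* stand-in for cstate->alg_state->flusher(cstate) in the removed code */
static void flush_partial_lanes(void)
{
	printf("queue empty: flushing partially filled lanes\n");
}

static void worker_pass(void)
{
	int i;

	for (i = 0; i < BATCH; i++) {
		struct job *j = dequeue();

		if (!j) {
			/* nothing queued: complete any stalled partial jobs */
			flush_partial_lanes();
			return;
		}
		printf("processing job %d\n", j->id);
		free(j);
	}
	/*
	 * Batch budget spent. The kernel worker re-queued itself here
	 * (queue_work_on()) so other work on the CPU was not starved.
	 */
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		enqueue(i);
	worker_pass();
	return 0;
}

Build with "cc -pthread sketch.c". The batch budget bounds how long one worker pass can monopolize a CPU, while the flush step keeps partially filled lanes from waiting indefinitely for more requests — the latency/throughput trade-off the removed Kconfig help text describes.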