Kernel
December 2022
- 10 participants
- 47 discussions

30 Dec '22
From: Zhou Guanghui <zhouguanghui1(a)huawei.com>
ascend inclusion
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I67NC1
CVE: NA
------------------------------------------
Charge buddy hugepages to memcg when kmemcg is disabled. If kmemcg
is enabled, we can also use kmemcg to charge buddy hugepages.
Signed-off-by: Zhou Guanghui <zhouguanghui1(a)huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang(a)huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13(a)huawei.com>
---
include/linux/share_pool.h | 30 ++++++++++++++++++++++++++++++
mm/hugetlb.c | 3 +++
mm/share_pool.c | 28 +++++++++++++++++++++++-----
3 files changed, 56 insertions(+), 5 deletions(-)
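For illustration, a user-space sketch of the uncharge dispatch the patch introduces: hugepages charged through kmemcg take the kmem uncharge path, all others the plain memcg path. The page flag and helpers below are stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

struct page { bool kmemcg; };	/* stand-in for PageKmemcg() */

static bool memcg_kmem_enabled(void) { return true; }
static void kmem_uncharge(struct page *p)  { printf("kmem uncharge %p\n", (void *)p); }
static void memcg_uncharge(struct page *p) { printf("memcg uncharge %p\n", (void *)p); }

static void uncharge_hpage(struct page *p)
{
	if (memcg_kmem_enabled() && p->kmemcg)
		kmem_uncharge(p);	/* was charged via kmemcg at allocation */
	else
		memcg_uncharge(p);	/* was charged via the plain memcg path */
}

int main(void)
{
	struct page a = { .kmemcg = true }, b = { .kmemcg = false };

	uncharge_hpage(&a);
	uncharge_hpage(&b);
	return 0;
}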
diff --git a/include/linux/share_pool.h b/include/linux/share_pool.h
index c58f0dce0b20..2ebfb3d5be13 100644
--- a/include/linux/share_pool.h
+++ b/include/linux/share_pool.h
@@ -8,6 +8,8 @@
#include <linux/printk.h>
#include <linux/hashtable.h>
#include <linux/numa.h>
+#include <linux/hugetlb.h>
+#include <linux/memcontrol.h>
#define SP_HUGEPAGE (1 << 0)
#define SP_HUGEPAGE_ONLY (1 << 1)
@@ -411,6 +413,26 @@ extern bool sp_check_mmap_addr(unsigned long addr, unsigned long flags);
extern int sp_id_of_current(void);
extern int mg_sp_id_of_current(void);
+
+static inline void sp_kmemcg_uncharge_hpage(struct page *page)
+{
+ if (!sp_is_enabled())
+ return;
+
+ if (memcg_kmem_enabled() && PageKmemcg(page)) {
+ int order = huge_page_order(page_hstate(page));
+
+ __memcg_kmem_uncharge(page, order);
+ }
+}
+
+static inline void sp_memcg_uncharge_hpage(struct page *page)
+{
+ if (!sp_is_enabled())
+ return;
+
+ mem_cgroup_uncharge(page);
+}
#else
static inline int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id)
@@ -684,6 +706,14 @@ static inline int mg_sp_id_of_current(void)
return -EPERM;
}
+static inline void sp_kmemcg_uncharge_hpage(struct page *page)
+{
+}
+
+static inline void sp_memcg_uncharge_hpage(struct page *page)
+{
+}
+
#endif
#endif /* LINUX_SHARE_POOL_H */
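The #else branch above supplies empty static inline stubs so call sites such as free_huge_page() compile unchanged when the share pool is configured out. A stand-alone sketch of that pattern (names are illustrative, not from the patch):

#include <stdio.h>

#define CONFIG_SHARE_POOL 1	/* flip to 0 to build the empty stub instead */

#if CONFIG_SHARE_POOL
static inline void sp_uncharge_hpage(void *page)
{
	printf("uncharging page %p\n", page);
}
#else
static inline void sp_uncharge_hpage(void *page) { }	/* compiles to nothing */
#endif

int main(void)
{
	int dummy;

	sp_uncharge_hpage(&dummy);	/* caller never needs an #ifdef */
	return 0;
}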
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index fb1f90eed7fc..495d8b5b38fc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -38,6 +38,7 @@
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
+#include <linux/share_pool.h>
#include "internal.h"
int hugetlb_max_hstate __read_mostly;
@@ -1311,6 +1312,7 @@ void free_huge_page(struct page *page)
(struct hugepage_subpool *)page_private(page);
bool restore_reserve;
+ sp_kmemcg_uncharge_hpage(page);
set_page_private(page, 0);
page->mapping = NULL;
VM_BUG_ON_PAGE(page_count(page), page);
@@ -1345,6 +1347,7 @@ void free_huge_page(struct page *page)
h->resv_huge_pages++;
if (PageHugeTemporary(page)) {
+ sp_memcg_uncharge_hpage(page);
list_del(&page->lru);
ClearPageHugeTemporary(page);
update_and_free_page(h, page);
diff --git a/mm/share_pool.c b/mm/share_pool.c
index d2591c73a503..5d8344fe805a 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -4555,6 +4555,8 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
int err;
int node_id;
struct sp_area *spa;
+ bool charge_hpage = false;
+ struct mem_cgroup *memcg;
spa = vma->vm_private_data;
if (!spa) {
@@ -4572,10 +4574,11 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
page = alloc_huge_page(vma, haddr, 0);
if (IS_ERR(page)) {
- page = alloc_huge_page_node(hstate_file(vma->vm_file),
- node_id);
+ page = hugetlb_alloc_hugepage(node_id, HUGETLB_ALLOC_BUDDY);
if (!page)
page = ERR_PTR(-ENOMEM);
+ else if (!PageKmemcg(page))
+ charge_hpage = true;
}
if (IS_ERR(page)) {
ptl = huge_pte_lock(h, mm, ptep);
@@ -4588,12 +4591,24 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
ret = vmf_error(PTR_ERR(page));
goto out;
}
+
+ if (charge_hpage &&
+ mem_cgroup_try_charge_delay(page, vma->vm_mm, GFP_KERNEL, &memcg, true)) {
+ put_page(page);
+ ret = vmf_error(-ENOMEM);
+ goto out;
+ }
+
__SetPageUptodate(page);
new_page = true;
/* sharepool pages are all shared */
err = huge_add_to_page_cache(page, mapping, idx);
if (err) {
+ if (charge_hpage) {
+ mem_cgroup_cancel_charge(page, memcg, true);
+ charge_hpage = false;
+ }
put_page(page);
if (err == -EEXIST)
goto retry;
@@ -4601,7 +4616,6 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
}
}
-
ptl = huge_pte_lock(h, mm, ptep);
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (idx >= size)
@@ -4618,11 +4632,13 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
hugetlb_count_add(pages_per_huge_page(h), mm);
+ if (charge_hpage)
+ mem_cgroup_commit_charge(page, memcg, false, true);
+
spin_unlock(ptl);
- if (new_page) {
+ if (new_page)
SetPagePrivate(&page[1]);
- }
unlock_page(page);
out:
@@ -4631,6 +4647,8 @@ vm_fault_t sharepool_no_page(struct mm_struct *mm,
backout:
spin_unlock(ptl);
unlock_page(page);
+ if (charge_hpage)
+ mem_cgroup_cancel_charge(page, memcg, true);
put_page(page);
goto out;
}
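The fault path above follows memcg's try/commit/cancel protocol: reserve the charge before inserting the page into the page cache, cancel it if the insertion fails, and commit only once the mapping is live. A user-space sketch of that control flow (the three helpers are hypothetical stand-ins for the mem_cgroup_* calls):

#include <stdbool.h>
#include <stdio.h>

static bool try_charge(void)    { return true; }	/* reserve against the cgroup */
static void commit_charge(void) { puts("committed"); }	/* page visible, charge final */
static void cancel_charge(void) { puts("cancelled"); }	/* undo the reservation */

static int fault_path(bool insert_fails)
{
	if (!try_charge())
		return -1;		/* cgroup limit hit, fail the fault */

	if (insert_fails) {		/* e.g. huge_add_to_page_cache() error */
		cancel_charge();	/* must undo before dropping the page */
		return -1;
	}

	commit_charge();		/* only after the page is in place */
	return 0;
}

int main(void)
{
	fault_path(false);
	fault_path(true);
	return 0;
}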
--
2.25.1
Hello!
The Kernel SIG invites you to a Zoom meeting (auto-recorded) at 14:00 on 2022-12-30.
Subject: openEuler Kernel SIG biweekly meeting
Agenda:
1. Progress update
2. Topic collection is open; propose topics by replying to this mail or adding them to the meeting board.
Meeting link: https://us06web.zoom.us/j/84061666239?pwd=ZGNrZUo0RW5DWWhOQTRaVW53Y0gyUT09
Meeting minutes: https://etherpad.openeuler.org/p/Kernel-meetings
Note: After joining the meeting, please update your participant name; you may also use your gitee.com ID.
More information: https://openeuler.org/zh/ (Chinese) and https://openeuler.org/en/ (English)

30 Dec '22
3SNIC inclusion
category: features
bugzilla: https://gitee.com/openeuler/kernel/issues/I64M90
CVE: NA
v1->v2:
1. Update openEuler/MAINTAINERS
This commit adds support for 3SNIC 3S5XX RAID/HBA controllers.
RAID controllers support RAID 0/1/5/6/10/50/60 modes;
HBA controllers support RAID 0/1/10 modes.
Both RAID and HBA controllers support SAS/SATA HDDs and SSDs.
Signed-off-by: liangry <liangry1(a)3snic.com>
Reviewed-by: Jiang Yu <yujiang(a)3snic.com>
Reviewed-by: Steven Song <steven.song(a)3snic.com>
---
Documentation/scsi/sssraid.rst | 83 +
MAINTAINERS | 7 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/x86/configs/openeuler_defconfig | 1 +
drivers/scsi/Kconfig | 1 +
drivers/scsi/Makefile | 1 +
drivers/scsi/sssraid/Kconfig | 14 +
drivers/scsi/sssraid/Makefile | 7 +
drivers/scsi/sssraid/sssraid.h | 971 ++++++++++
drivers/scsi/sssraid/sssraid_debug.h | 34 +
drivers/scsi/sssraid/sssraid_fw.c | 1724 +++++++++++++++++
drivers/scsi/sssraid/sssraid_os.c | 2337 ++++++++++++++++++++++++
openEuler/MAINTAINERS | 6 +
13 files changed, 5187 insertions(+)
create mode 100644 Documentation/scsi/sssraid.rst
create mode 100644 drivers/scsi/sssraid/Kconfig
create mode 100644 drivers/scsi/sssraid/Makefile
create mode 100644 drivers/scsi/sssraid/sssraid.h
create mode 100644 drivers/scsi/sssraid/sssraid_debug.h
create mode 100644 drivers/scsi/sssraid/sssraid_fw.c
create mode 100644 drivers/scsi/sssraid/sssraid_os.c
diff --git a/Documentation/scsi/sssraid.rst b/Documentation/scsi/sssraid.rst
new file mode 100644
index 000000000000..d56e5f7c478c
--- /dev/null
+++ b/Documentation/scsi/sssraid.rst
@@ -0,0 +1,83 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================================
+SSSRAID - 3SNIC SCSI RAID Controller driver
+==============================================
+
+This file describes the SSSRAID SCSI driver for 3SNIC
+(http://www.3snic.com) RAID controllers. The SSSRAID
+driver is the first generation RAID driver for 3SNIC Corp.
+
+For 3SNIC SSSRAID controller support, enable the SSSRAID driver
+when configuring the kernel.
+
+SSSRAID specific entries in /sys
+=================================
+
+SSSRAID host attributes
+------------------------
+ - /sys/class/scsi_host/host*/csts_pp
+ - /sys/class/scsi_host/host*/csts_shst
+ - /sys/class/scsi_host/host*/csts_cfs
+ - /sys/class/scsi_host/host*/csts_rdy
+ - /sys/class/scsi_host/host*/fw_version
+
+ The host csts_pp attribute is a read only attribute. This attribute
+ indicates whether the controller is processing commands. If this attribute
+ is set to ‘1’, then the controller is processing commands normally. If
+ this attribute is cleared to ‘0’, then the controller has temporarily stopped
+ processing commands in order to handle an event (e.g., firmware activation).
+
+ The host csts_shst attribute is a read only attribute. This attribute
+ indicates the status of shutdown processing. The shutdown status values are
+ defined as:
+ ====== ==============================
+ Value Definition
+ ====== ==============================
+ 00b Normal operation
+ 01b Shutdown processing occurring
+ 10b Shutdown processing complete
+ 11b Reserved
+ ====== ==============================
+ The host csts_cfs attribute is a read only attribute. This attribute is set to
+ ‘1’ when a fatal controller error has occurred that could not be communicated
+ in the appropriate Completion Queue. It is cleared to ‘0’ when a fatal
+ controller error has not occurred.
+
+ The host csts_rdy attribute is a read only attribute. This attribute is set to
+ ‘1’ when the controller is ready to process submission queue entries.
+
+ The fw_version attribute is read-only and will return the driver version and the
+ controller firmware version.
+
+SSSRAID scsi device attributes
+------------------------------
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_level
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_state
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_resync
+
+ The device raid_level attribute is a read only attribute. This attribute
+ indicates the RAID level of the scsi device (displays "NA" if the scsi device
+ is not a virtual disk).
+
+ The device raid_state attribute is read-only and indicates the RAID status of
+ the scsi device (displays "NA" if the scsi device is not a virtual disk).
+
+ The device raid_resync attribute is read-only and indicates the RAID rebuild
+ progress of the scsi device (displays "NA" if the scsi device is not a virtual
+ disk).
+
+Supported devices
+=================
+
+ =================== ======= =======================================
+ PCI ID (pci.ids) OEM Product
+ =================== ======= =======================================
+ 1F3F:2100 3SNIC 3S510(HBA:8Ports,1G DDR)
+ 1F3F:2100 3SNIC 3S520(HBA:16Ports,1G DDR)
+ 1F3F:2100 3SNIC 3S530(HBA:32Ports,1G DDR)
+ 1F3F:2100 3SNIC 3S540(HBA:40Ports,1G DDR)
+ 1F3F:2200 3SNIC 3S580(RAID:16Ports,2G cache)
+ 1F3F:2200 3SNIC 3S585(RAID:16Ports,4G cache)
+ 1F3F:2200 3SNIC 3S590(RAID:32Ports,4G cache)
+ 1F3F:2200 3SNIC 3S5A0(RAID:40Ports,2G cache)
+ 1F3F:2200 3SNIC 3S5A5(RAID:40Ports,4G cache)
+ =================== ======= =======================================
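As a usage illustration, a minimal user-space reader for the sysfs attributes documented above. The host number (host0) is an assumption; substitute the host bound to your controller.

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/scsi_host/host0/csts_pp";
	char buf[16];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("csts_pp = %s", buf);	/* '1' means commands are being processed */
	fclose(f);
	return 0;
}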
diff --git a/MAINTAINERS b/MAINTAINERS
index 9ae209633628..e6470640f764 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16669,6 +16669,13 @@ M: Jan-Benedict Glaw <jbglaw(a)lug-owl.de>
S: Maintained
F: arch/alpha/kernel/srm_env.c
+SSSRAID SCSI/Raid DRIVERS
+M: Steven Song <steven.song(a)3snic.com>
+L: linux-scsi(a)vger.kernel.org
+S: Maintained
+F: Documentation/scsi/sssraid.rst
+F: drivers/scsi/sssraid/
+
ST LSM6DSx IMU IIO DRIVER
M: Lorenzo Bianconi <lorenzo.bianconi83(a)gmail.com>
L: linux-iio(a)vger.kernel.org
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 30384eacad4f..dc4ba0d2b694 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2394,6 +2394,7 @@ CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE=y
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_3SNIC_SSSRAID=m
CONFIG_SCSI_MPT3SAS=m
CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 5ada612f1d75..df16f123515b 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2355,6 +2355,7 @@ CONFIG_SCSI_AACRAID=m
# CONFIG_MEGARAID_LEGACY is not set
CONFIG_MEGARAID_SAS=m
CONFIG_SCSI_MPT3SAS=m
+CONFIG_SCSI_3SNIC_SSSRAID=m
CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 0fbe4edeccd0..7339d6179e5a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -481,6 +481,7 @@ config SCSI_ARCMSR
source "drivers/scsi/esas2r/Kconfig"
source "drivers/scsi/megaraid/Kconfig.megaraid"
+source "drivers/scsi/sssraid/Kconfig"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/ufs/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 78a3c832394c..01bb908fb832 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -92,6 +92,7 @@ obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
obj-$(CONFIG_SCSI_HPSA) += hpsa.o
obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi/
+obj-$(CONFIG_SCSI_3SNIC_SSSRAID) += sssraid/
obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
diff --git a/drivers/scsi/sssraid/Kconfig b/drivers/scsi/sssraid/Kconfig
new file mode 100644
index 000000000000..769f68732bf3
--- /dev/null
+++ b/drivers/scsi/sssraid/Kconfig
@@ -0,0 +1,14 @@
+#
+# Kernel configuration file for the 3SNIC
+#
+
+config SCSI_3SNIC_SSSRAID
+ tristate "3SNIC sssraid Adapter"
+ depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
+ depends on ARM64 || X86_64
+ help
+ This driver supports the 3SNIC 3S5xx series RAID controllers, which have a
+ PCI Express Gen4 host interface and support SAS/SATA HDDs and SSDs.
+ To compile this driver as a module, choose M here: the module will
+ be called sssraid.
diff --git a/drivers/scsi/sssraid/Makefile b/drivers/scsi/sssraid/Makefile
new file mode 100644
index 000000000000..c80605866b42
--- /dev/null
+++ b/drivers/scsi/sssraid/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the 3SNIC sssraid drivers.
+#
+
+obj-$(CONFIG_SCSI_3SNIC_SSSRAID) += sssraid.o
+
+sssraid-objs := sssraid_os.o sssraid_fw.o
diff --git a/drivers/scsi/sssraid/sssraid.h b/drivers/scsi/sssraid/sssraid.h
new file mode 100644
index 000000000000..bae668ef9c98
--- /dev/null
+++ b/drivers/scsi/sssraid/sssraid.h
@@ -0,0 +1,971 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 3SNIC Information Technology, Ltd */
+
+/* 3SNIC RAID SSSXXX Series Linux Driver */
+
+#ifndef __SSSRAID_H_
+#define __SSSRAID_H_
+
+#define SSSRAID_DRIVER_VERSION "1.0.0.0"
+#define SSSRAID_DRIVER_RELDATE "12-December-2022"
+
+#define SSSRAID_DRIVER_NAME "sssraid"
+
+#define SSSRAID_NAME_LENGTH 32
+
+/*
+ * SSSRAID Vendor ID and Device IDs
+ */
+#define PCI_VENDOR_ID_3SNIC_LOGIC 0x1F3F
+
+#define SSSRAID_SERVER_DEVICE_HBA_DID 0x2100
+#define SSSRAID_SERVER_DEVICE_RAID_DID 0x2200
+
+#define SSSRAID_CAP_MQES(cap) ((cap) & 0xffff)
+#define SSSRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define SSSRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define SSSRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define SSSRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define SSSRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff)
+
+#define SSSRAID_DEFAULT_MAX_CHANNEL 4
+#define SSSRAID_DEFAULT_MAX_ID 240
+#define SSSRAID_DEFAULT_MAX_LUN_PER_HOST 8
+#define MAX_SECTORS 2048
+
+/*
+ * Time define
+ */
+#define SSSRAID_WATCHDOG_INTERVAL 1000 /* in milliseconds */
+#define SSSRAID_PORTENABLE_TIMEOUT 300
+
+/*
+ * SSSRAID queue and entry size for Admin and I/O type
+ */
+#define IO_SQE_SIZE sizeof(struct sssraid_ioq_command)
+#define ADMIN_SQE_SIZE sizeof(struct sssraid_admin_command)
+#define SQE_SIZE(qid) (((qid) > 0) ? IO_SQE_SIZE : ADMIN_SQE_SIZE)
+#define CQ_SIZE(depth) ((depth) * sizeof(struct sssraid_completion))
+#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid))
+
+#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE)
+
+#define SSSRAID_AQ_DEPTH 128
+#define SSSRAID_NR_AEN_COMMANDS 16
+#define SSSRAID_AQ_BLK_MQ_DEPTH (SSSRAID_AQ_DEPTH - SSSRAID_NR_AEN_COMMANDS)
+#define SSSRAID_AQ_MQ_TAG_DEPTH (SSSRAID_AQ_BLK_MQ_DEPTH - 1)
+
+#define SSSRAID_ADMIN_QUEUE_NUM 1
+#define SSSRAID_PTCMDS_PERQ 1
+#define SSSRAID_IO_BLK_MQ_DEPTH (hdev->scsi_qd)
+#define SSSRAID_NR_IOQ_PTCMDS (SSSRAID_PTCMDS_PERQ * hdev->shost->nr_hw_queues)
+
+#define FUA_MASK 0x08
+#define SSSRAID_MINORS BIT(MINORBITS)
+#define SSSRAID_RW_FUA BIT(14)
+
+#define COMMAND_IS_WRITE(cmd) ((cmd)->common.opcode & 1)
+
+#define SSSRAID_IO_IOSQES 7
+#define SSSRAID_IO_IOCQES 4
+#define PRP_ENTRY_SIZE 8
+
+#define SMALL_POOL_SIZE 256
+#define MAX_SMALL_POOL_NUM 16
+#define MAX_CMD_PER_DEV 64
+#define MAX_CDB_LEN 16
+
+#define SSSRAID_UP_TO_MULTY4(x) (((x) + 4) & (~0x03))
+
+#define CQE_STATUS_SUCCESS (0x0)
+
+#define IO_6_DEFAULT_TX_LEN 256
+
+#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct sssraid_sgl_desc))
+
+#define SSSRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2)
+
+extern u32 admin_tmout;
+#define ADMIN_TIMEOUT (admin_tmout * HZ)
+
+#define SSSRAID_WAIT_ABNL_CMD_TIMEOUT 6
+
+#define SSSRAID_DMA_MSK_BIT_MAX 64
+
+enum {
+ SSSRAID_SGL_FMT_DATA_DESC = 0x00,
+ SSSRAID_SGL_FMT_SEG_DESC = 0x02,
+ SSSRAID_SGL_FMT_LAST_SEG_DESC = 0x03,
+ SSSRAID_KEY_SGL_FMT_DATA_DESC = 0x04,
+ SSSRAID_TRANSPORT_SGL_DATA_DESC = 0x05
+};
+
+
+enum {
+ SSSRAID_REQ_CANCELLED = (1 << 0),
+ SSSRAID_REQ_USERCMD = (1 << 1),
+};
+
+enum {
+ SSSRAID_SC_SUCCESS = 0x0,
+ SSSRAID_SC_INVALID_OPCODE = 0x1,
+ SSSRAID_SC_INVALID_FIELD = 0x2,
+
+ SSSRAID_SC_ABORT_LIMIT = 0x103,
+ SSSRAID_SC_ABORT_MISSING = 0x104,
+ SSSRAID_SC_ASYNC_LIMIT = 0x105,
+
+ SSSRAID_SC_DNR = 0x4000,
+};
+
+enum {
+ SSSRAID_REG_CAP = 0x0000,
+ SSSRAID_REG_CC = 0x0014,
+ SSSRAID_REG_CSTS = 0x001c,
+ SSSRAID_REG_AQA = 0x0024,
+ SSSRAID_REG_ASQ = 0x0028,
+ SSSRAID_REG_ACQ = 0x0030,
+ SSSRAID_REG_DBS = 0x1000,
+};
+
+enum {
+ SSSRAID_CC_ENABLE = 1 << 0,
+ SSSRAID_CC_CSS_NVM = 0 << 4,
+ SSSRAID_CC_MPS_SHIFT = 7,
+ SSSRAID_CC_AMS_SHIFT = 11,
+ SSSRAID_CC_SHN_SHIFT = 14,
+ SSSRAID_CC_IOSQES_SHIFT = 16,
+ SSSRAID_CC_IOCQES_SHIFT = 20,
+ SSSRAID_CC_AMS_RR = 0 << SSSRAID_CC_AMS_SHIFT,
+ SSSRAID_CC_SHN_NONE = 0 << SSSRAID_CC_SHN_SHIFT,
+ SSSRAID_CC_IOSQES = SSSRAID_IO_IOSQES << SSSRAID_CC_IOSQES_SHIFT,
+ SSSRAID_CC_IOCQES = SSSRAID_IO_IOCQES << SSSRAID_CC_IOCQES_SHIFT,
+ SSSRAID_CC_SHN_NORMAL = 1 << SSSRAID_CC_SHN_SHIFT,
+ SSSRAID_CC_SHN_MASK = 3 << SSSRAID_CC_SHN_SHIFT,
+ SSSRAID_CSTS_CFS_SHIFT = 1,
+ SSSRAID_CSTS_SHST_SHIFT = 2,
+ SSSRAID_CSTS_PP_SHIFT = 5,
+ SSSRAID_CSTS_RDY = 1 << 0,
+ SSSRAID_CSTS_SHST_CMPLT = 2 << 2,
+ SSSRAID_CSTS_SHST_MASK = 3 << 2,
+ SSSRAID_CSTS_CFS_MASK = 1 << SSSRAID_CSTS_CFS_SHIFT,
+ SSSRAID_CSTS_PP_MASK = 1 << SSSRAID_CSTS_PP_SHIFT,
+};
+
+enum {
+ SSSRAID_ADMIN_DELETE_SQ = 0x00,
+ SSSRAID_ADMIN_CREATE_SQ = 0x01,
+ SSSRAID_ADMIN_DELETE_CQ = 0x04,
+ SSSRAID_ADMIN_CREATE_CQ = 0x05,
+ SSSRAID_ADMIN_ABORT_CMD = 0x08,
+ SSSRAID_ADMIN_SET_FEATURES = 0x09,
+ SSSRAID_ADMIN_ASYNC_EVENT = 0x0c,
+ SSSRAID_ADMIN_GET_INFO = 0xc6,
+ SSSRAID_ADMIN_RESET = 0xc8,
+};
+
+enum {
+ SSSRAID_GET_INFO_CTRL = 0,
+ SSSRAID_GET_INFO_DEV_LIST = 1,
+};
+
+enum sssraid_scsi_rst_type {
+ SSSRAID_RESET_TARGET = 0,
+ SSSRAID_RESET_BUS = 1,
+};
+
+enum {
+ SSSRAID_AEN_ERROR = 0,
+ SSSRAID_AEN_NOTICE = 2,
+ SSSRAID_AEN_VS = 7,
+};
+
+enum {
+ SSSRAID_AEN_DEV_CHANGED = 0x00,
+ SSSRAID_AEN_FW_ACT_START = 0x01,
+ SSSRAID_AEN_HOST_PROBING = 0x10,
+};
+
+enum {
+ SSSRAID_AEN_TIMESYN = 0x00,
+ SSSRAID_AEN_FW_ACT_FINISH = 0x02,
+ SSSRAID_AEN_EVENT_MIN = 0x80,
+ SSSRAID_AEN_EVENT_MAX = 0xff,
+};
+
+enum {
+ SSSRAID_CMD_WRITE = 0x01,
+ SSSRAID_CMD_READ = 0x02,
+
+ SSSRAID_CMD_NONIO_NONE = 0x80,
+ SSSRAID_CMD_NONIO_TODEV = 0x81,
+ SSSRAID_CMD_NONIO_FROMDEV = 0x82,
+};
+
+enum {
+ SSSRAID_QUEUE_PHYS_CONTIG = (1 << 0),
+ SSSRAID_CQ_IRQ_ENABLED = (1 << 1),
+
+ SSSRAID_FEAT_NUM_QUEUES = 0x07,
+ SSSRAID_FEAT_ASYNC_EVENT = 0x0b,
+ SSSRAID_FEAT_TIMESTAMP = 0x0e,
+};
+
+enum sssraid_state {
+ SSSRAID_NEW,
+ SSSRAID_LIVE,
+ SSSRAID_RESETTING,
+ SSSRAID_DELETING,
+ SSSRAID_DEAD,
+};
+
+enum {
+ SSSRAID_CARD_HBA,
+ SSSRAID_CARD_RAID,
+};
+
+enum sssraid_cmd_type {
+ SSSRAID_CMD_ADM,
+ SSSRAID_CMD_IOPT,
+};
+
+/*
+ * SSSRAID completion queue entry struct
+ */
+struct sssraid_completion {
+ __le32 result;
+ union {
+ struct {
+ __u8 sense_len;
+ __u8 resv[3];
+ };
+ __le32 result1;
+ };
+ __le16 sq_head;
+ __le16 sq_id;
+ __le16 cmd_id;
+ __le16 status;
+};
+
+/*
+ * SSSRAID firmware controller properties
+ */
+struct sssraid_ctrl_info {
+ __le32 nd;
+ __le16 max_cmds;
+ __le16 max_channel;
+ __le32 max_tgt_id;
+ __le16 max_lun;
+ __le16 max_num_sge;
+ __le16 lun_num_in_boot;
+ __u8 mdts;
+ __u8 acl;
+ __u8 aerl;
+ __u8 card_type;
+ __u16 rsvd;
+ __le32 rtd3e;
+ __u8 sn[32];
+ __u8 fr[16];
+ __u8 rsvd1[4020];
+};
+
+struct sssraid_intr_info {
+ struct sssraid_dev *hdev;
+ u16 msix_index;
+ struct sssraid_cqueue *cqinfo;
+ char name[SSSRAID_NAME_LENGTH];
+};
+
+struct sssraid_fwevt {
+ struct list_head list;
+ struct work_struct work;
+ struct sssraid_dev *hdev;
+ u16 event_id;
+ bool send_ack;
+ bool process_evt;
+ u32 evt_ctx;
+ struct kref ref_count;
+ char event_data[0] __aligned(4);
+};
+
+/*
+ * SSSRAID private device struct definition
+ */
+struct sssraid_dev {
+ struct pci_dev *pdev;
+ struct Scsi_Host *shost;
+ struct sssraid_squeue *sqinfo;
+ struct sssraid_cqueue *cqinfo;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool[MAX_SMALL_POOL_NUM];
+ void __iomem *bar;
+
+ u32 init_done_queue_cnt;
+ u32 ioq_depth;
+ u32 db_stride;
+ u32 __iomem *dbs;
+ struct rw_semaphore devices_rwsem;
+ int numa_node;
+ u32 page_size;
+ u32 ctrl_config;
+ u64 cap;
+ u32 instance;
+ u32 scsi_qd;
+ struct sssraid_ctrl_info *ctrl_info;
+ struct sssraid_dev_info *devices;
+
+ int logging_level;
+
+ char name[SSSRAID_NAME_LENGTH];
+ int cpu_count;
+ /*
+ * before_affinity_msix_cnt is
+ * min("FW support IO Queue count", num_online_cpus)+1
+ */
+ u16 before_affinity_msix_cnt;
+
+ struct sssraid_cmd *adm_cmds;
+ struct list_head adm_cmd_list;
+ spinlock_t adm_cmd_lock;
+
+ struct sssraid_cmd *ioq_ptcmds;
+ struct list_head ioq_pt_list;
+ spinlock_t ioq_pt_lock;
+
+ int reset_flag;
+
+ enum sssraid_state state;
+ spinlock_t state_lock;
+
+ struct request_queue *bsg_queue;
+
+ u8 intr_enabled;
+
+ struct sssraid_intr_info *intr_info;
+ u32 intr_info_count;
+
+ char watchdog_work_q_name[20];
+ struct workqueue_struct *watchdog_work_q;
+ struct delayed_work watchdog_work;
+ spinlock_t watchdog_lock;
+
+ char fwevt_worker_name[SSSRAID_NAME_LENGTH];
+ struct workqueue_struct *fwevt_worker_thread;
+ spinlock_t fwevt_lock;
+ struct list_head fwevt_list;
+
+ struct sssraid_fwevt *current_event;
+};
+
+/*
+ * SSSRAID scatter list descriptor
+ */
+struct sssraid_sgl_desc {
+ __le64 addr;
+ __le32 length;
+ __u8 rsvd[3];
+ __u8 type;
+};
+
+union sssraid_data_ptr {
+ struct {
+ __le64 prp1;
+ __le64 prp2;
+ };
+ struct sssraid_sgl_desc sgl;
+};
+
+/*
+ * SSSRAID general admin class command format struct
+ */
+struct sssraid_admin_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __le32 cdw2[4];
+ union sssraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+struct sssraid_features {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[2];
+ union sssraid_data_ptr dptr;
+ __le32 fid;
+ __le32 dword11;
+ __le32 dword12;
+ __le32 dword13;
+ __le32 dword14;
+ __le32 dword15;
+};
+
+/*
+ * SSSRAID create completion queue command struct
+ */
+struct sssraid_create_cq {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 cqid;
+ __le16 qsize;
+ __le16 cq_flags;
+ __le16 irq_vector;
+ __u32 rsvd12[4];
+};
+
+/*
+ * SSSRAID create submission queue command struct
+ */
+struct sssraid_create_sq {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 sqid;
+ __le16 qsize;
+ __le16 sq_flags;
+ __le16 cqid;
+ __u32 rsvd12[4];
+};
+
+/*
+ * SSSRAID delete submission queue command struct
+ */
+struct sssraid_delete_queue {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __u32 rsvd1[9];
+ __le16 qid;
+ __u16 rsvd10;
+ __u32 rsvd11[5];
+};
+
+/*
+ * SSSRAID access to information command struct
+ */
+struct sssraid_get_info {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __u32 rsvd2[4];
+ union sssraid_data_ptr dptr;
+ __u8 type;
+ __u8 rsvd10[3];
+ __le32 cdw11;
+ __u32 rsvd12[4];
+};
+
+/*
+ * User command struct
+ */
+struct sssraid_usr_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ union {
+ struct {
+ __le16 subopcode;
+ __le16 rsvd1;
+ } info_0;
+ __le32 cdw2;
+ };
+ union {
+ struct {
+ __le16 data_len;
+ __le16 param_len;
+ } info_1;
+ __le32 cdw3;
+ };
+ __u64 metadata;
+ union sssraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+enum {
+ SSSRAID_CMD_FLAG_SGL_METABUF = (1 << 6),
+ SSSRAID_CMD_FLAG_SGL_METASEG = (1 << 7),
+ SSSRAID_CMD_FLAG_SGL_ALL = SSSRAID_CMD_FLAG_SGL_METABUF | SSSRAID_CMD_FLAG_SGL_METASEG,
+};
+
+enum sssraid_cmd_state {
+ SSSRAID_CMD_IDLE = 0,
+ SSSRAID_CMD_IN_FLIGHT = 1,
+ SSSRAID_CMD_COMPLETE = 2,
+ SSSRAID_CMD_TIMEOUT = 3,
+ SSSRAID_CMD_TMO_COMPLETE = 4,
+};
+
+/*
+ * SSSRAID abort command struct
+ */
+struct sssraid_abort_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __le16 sqid;
+ __le16 cid;
+ __u32 rsvd11[5];
+};
+
+/*
+ * SSSRAID reset command struct
+ */
+struct sssraid_reset_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __u8 type;
+ __u8 rsvd10[3];
+ __u32 rsvd11[5];
+};
+
+/*
+ * SSSRAID admin class command set struct
+ */
+struct sssraid_admin_command {
+ union {
+ struct sssraid_admin_common_command common;
+ struct sssraid_features features;
+ struct sssraid_create_cq create_cq;
+ struct sssraid_create_sq create_sq;
+ struct sssraid_delete_queue delete_queue;
+ struct sssraid_get_info get_info;
+ struct sssraid_abort_cmd abort;
+ struct sssraid_reset_cmd reset;
+ struct sssraid_usr_cmd usr_cmd;
+ };
+};
+
+/*
+ * SSSRAID general IO class command format struct
+ */
+struct sssraid_ioq_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __le32 cdw3[3];
+ union sssraid_data_ptr dptr;
+ __le32 cdw10[6];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __le32 cdw26[6];
+};
+
+/*
+ * SSSRAID read or write command struct
+ */
+struct sssraid_rw_command {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union sssraid_data_ptr dptr;
+ __le64 slba;
+ __le16 nlb;
+ __le16 control;
+ __u32 rsvd13[3];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct sssraid_scsi_nonio {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_length;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union sssraid_data_ptr dptr;
+ __u32 rsvd10[5];
+ __le32 buffer_len;
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+/*
+ * SSSRAID IO class command struct
+ */
+struct sssraid_ioq_command {
+ union {
+ struct sssraid_ioq_common_command common;
+ struct sssraid_rw_command rw;
+ struct sssraid_scsi_nonio scsi_nonio;
+ };
+};
+
+/*
+ * SSSRAID passthru command struct
+ */
+struct sssraid_passthru_common_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 data_len;
+ __u16 param_len;
+ } info_1;
+ __u32 cdw3;
+ };
+ __u64 metadata;
+
+ __u64 addr;
+ __u64 prp2;
+
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct sssraid_ioq_passthru_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 res_sense_len;
+ __u8 cdb_len;
+ __u8 rsvd0;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_1;
+ __u32 cdw3;
+ };
+ union {
+ struct {
+ __u16 rsvd;
+ __u16 param_len;
+ } info_2;
+ __u32 cdw4;
+ };
+ __u32 cdw5;
+ __u64 addr;
+ __u64 prp2;
+ union {
+ struct {
+ __u16 eid;
+ __u16 sid;
+ } info_3;
+ __u32 cdw10;
+ };
+ union {
+ struct {
+ __u16 did;
+ __u8 did_flag;
+ __u8 rsvd2;
+ } info_4;
+ __u32 cdw11;
+ };
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 data_len;
+ __u32 cdw16;
+ __u32 cdw17;
+ __u32 cdw18;
+ __u32 cdw19;
+ __u32 cdw20;
+ __u32 cdw21;
+ __u32 cdw22;
+ __u32 cdw23;
+ __u64 sense_addr;
+ __u32 cdw26[4];
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct sssraid_bsg_request {
+ u32 msgcode;
+ u32 control;
+ union {
+ struct sssraid_passthru_common_cmd admcmd;
+ struct sssraid_ioq_passthru_cmd ioqcmd;
+ };
+};
+
+enum {
+ SSSRAID_BSG_ADM,
+ SSSRAID_BSG_IOQ,
+};
+
+/*
+ * define the transfer command struct
+ */
+struct sssraid_cmd {
+ u16 qid;
+ u16 cid;
+ u32 result0;
+ u32 result1;
+ u16 status;
+ void *priv;
+ enum sssraid_cmd_state state;
+ struct completion cmd_done;
+ struct list_head list;
+};
+
+/*
+ * define the SSSRAID physical queue struct
+ */
+struct sssraid_squeue {
+ struct sssraid_dev *hdev;
+ spinlock_t sq_lock; /* spinlock for lock handling */
+
+ void *sq_cmds;
+
+ dma_addr_t sq_dma_addr;
+ u32 __iomem *q_db;
+ u8 cq_phase;
+ u8 sqes;
+ u16 qidx;
+ u16 sq_tail;
+ u16 last_cq_head;
+ u16 q_depth;
+ void *sense;
+ dma_addr_t sense_dma_addr;
+ struct dma_pool *prp_small_pool;
+};
+
+struct sssraid_cqueue {
+ struct sssraid_dev *hdev;
+
+ spinlock_t cq_lock ____cacheline_aligned_in_smp; /* spinlock for lock handling */
+
+ struct sssraid_completion *cqes;
+
+ dma_addr_t cq_dma_addr;
+ u8 cq_phase;
+ u16 cq_head;
+ u16 last_cq_head;
+};
+
+/*
+ * define the SSSRAID IO queue descriptor struct
+ */
+struct sssraid_iod {
+ struct sssraid_squeue *sqinfo;
+ enum sssraid_cmd_state state;
+ int npages;
+ u32 nsge;
+ u32 length;
+ bool use_sgl;
+ dma_addr_t first_dma;
+ void *sense;
+ dma_addr_t sense_dma;
+ struct scatterlist *sg;
+ void *list[0];
+};
+
+/*
+ * define the SSSRAID scsi device attribution and information
+ */
+#define SSSRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
+#define SSSRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
+#define SSSRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
+#define SSSRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
+#define SSSRAID_DEV_DISK_TYPE(attr) ((attr) & 0x1e)
+
+#define SSSRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
+#define SSSRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+
+/*
+ * define the SSSRAID scsi device identifier
+ */
+enum {
+ SSSRAID_SAS_HDD_VD = 0x04,
+ SSSRAID_SATA_HDD_VD = 0x08,
+ SSSRAID_SAS_SSD_VD = 0x0c,
+ SSSRAID_SATA_SSD_VD = 0x10,
+ SSSRAID_NVME_SSD_VD = 0x14,
+ SSSRAID_SAS_HDD_PD = 0x06,
+ SSSRAID_SATA_HDD_PD = 0x0a,
+ SSSRAID_SAS_SSD_PD = 0x0e,
+ SSSRAID_SATA_SSD_PD = 0x12,
+ SSSRAID_NVME_SSD_PD = 0x16,
+};
+
+/*
+ * define the SSSRAID scsi device queue depth
+ */
+#define SSSRAID_HDD_PD_QD 64
+#define SSSRAID_HDD_VD_QD 256
+#define SSSRAID_SSD_PD_QD 64
+#define SSSRAID_SSD_VD_QD 256
+
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+/*
+ * SSSRAID virtual device information struct
+ */
+struct sssraid_vd_info {
+ __u8 name[32];
+ __le16 id;
+ __u8 rg_id;
+ __u8 rg_level;
+ __u8 sg_num;
+ __u8 sg_disk_num;
+ __u8 vd_status;
+ __u8 vd_type;
+ __u8 rsvd1[4056];
+};
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+struct bgtask_info {
+ __u8 type;
+ __u8 progress;
+ __u8 rate;
+ __u8 rsvd0;
+ __le16 vd_id;
+ __le16 time_left;
+ __u8 rsvd1[4];
+};
+
+struct sssraid_bgtask {
+ __u8 sw;
+ __u8 task_num;
+ __u8 rsvd[6];
+ struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
+/*
+ * SSSRAID scsi device information struct
+ */
+struct sssraid_dev_info {
+ __le32 hdid;
+ __le16 target;
+ __u8 channel;
+ __u8 lun;
+ __u8 attr;
+ __u8 flag;
+ __le16 max_io_kb;
+};
+
+#define IOQ_PT_DATA_LEN 4096
+#define MAX_DEV_ENTRY_PER_PAGE_4K 340
+struct sssraid_dev_list {
+ __le32 dev_num;
+ __u32 rsvd0[3];
+ struct sssraid_dev_info devices[MAX_DEV_ENTRY_PER_PAGE_4K];
+};
+
+/*
+ * SSSRAID scsi device host data struct
+ */
+struct sssraid_sdev_hostdata {
+ u32 hdid;
+ u16 max_io_kb;
+ u8 attr;
+ u8 flag;
+ u8 rg_id;
+ u8 rsvd[3];
+};
+
+extern unsigned char small_pool_num;
+extern u32 io_queue_depth;
+irqreturn_t sssraid_isr_poll(int irq, void *privdata);
+bool sssraid_poll_cq(struct sssraid_dev *hdev, u16 qidx, int cid);
+void sssraid_submit_cmd(struct sssraid_squeue *sqinfo, const void *cmd);
+int sssraid_get_dev_list(struct sssraid_dev *hdev, struct sssraid_dev_info *devices);
+int sssraid_submit_admin_sync_cmd(struct sssraid_dev *hdev, struct sssraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout);
+int sssraid_send_abort_cmd(struct sssraid_dev *hdev, u32 hdid, u16 qidx, u16 cid);
+int sssraid_send_reset_cmd(struct sssraid_dev *hdev, u8 type, u32 hdid);
+void sssraid_adm_timeout(struct sssraid_dev *hdev, struct sssraid_cmd *cmd);
+int sssraid_init_ioc(struct sssraid_dev *hdev, u8 re_init);
+void sssraid_cleanup_ioc(struct sssraid_dev *hdev, u8 re_init);
+int sssraid_soft_reset_handler(struct sssraid_dev *hdev);
+void sssraid_free_iod_res(struct sssraid_dev *hdev, struct sssraid_iod *iod);
+bool sssraid_change_host_state(struct sssraid_dev *hdev, enum sssraid_state newstate);
+int sssraid_configure_timestamp(struct sssraid_dev *hdev);
+int sssraid_init_ctrl_info(struct sssraid_dev *hdev);
+struct sssraid_cmd *sssraid_get_cmd(struct sssraid_dev *hdev, enum sssraid_cmd_type type);
+void sssraid_put_cmd(struct sssraid_dev *hdev, struct sssraid_cmd *cmd,
+ enum sssraid_cmd_type type);
+int sssraid_send_event_ack(struct sssraid_dev *hdev, u8 event,
+ u32 event_ctx, u16 cid);
+struct sssraid_fwevt *sssraid_alloc_fwevt(int len);
+void sssraid_fwevt_add_to_list(struct sssraid_dev *hdev,
+ struct sssraid_fwevt *fwevt);
+void sssraid_cleanup_fwevt_list(struct sssraid_dev *hdev);
+void sssraid_ioc_enable_intr(struct sssraid_dev *hdev);
+void sssraid_ioc_disable_intr(struct sssraid_dev *hdev);
+void sssraid_cleanup_resources(struct sssraid_dev *hdev);
+void sssraid_complete_cqes(struct sssraid_dev *hdev, u16 qidx, u16 start, u16 end);
+int sssraid_io_map_data(struct sssraid_dev *hdev, struct sssraid_iod *iod,
+ struct scsi_cmnd *scmd, struct sssraid_ioq_command *ioq_cmd);
+void sssraid_map_status(struct sssraid_iod *iod, struct scsi_cmnd *scmd,
+ struct sssraid_completion *cqe);
+void sssraid_scan_disk(struct sssraid_dev *hdev);
+void sssraid_complete_aen(struct sssraid_dev *hdev, struct sssraid_completion *cqe);
+void sssraid_back_all_io(struct sssraid_dev *hdev);
+
+static inline void **sssraid_iod_list(struct sssraid_iod *iod)
+{
+ return iod->list;
+}
+
+#endif /* __SSSRAID_H_ */
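For a sense of how the SSSRAID_CAP_* extractors above are used, a stand-alone sketch decoding a made-up capability-register value (the macros are copied from the header; the sample value is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define SSSRAID_CAP_MQES(cap)    ((cap) & 0xffff)
#define SSSRAID_CAP_STRIDE(cap)  (((cap) >> 32) & 0xf)
#define SSSRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)

int main(void)
{
	uint64_t cap = 0x000000203c0103ffULL;	/* hypothetical register value */

	/* MQES is zero-based, hence the +1 (cf. hdev->ioq_depth above) */
	printf("max queue entries: %llu\n",
	       (unsigned long long)SSSRAID_CAP_MQES(cap) + 1);
	printf("doorbell stride:   %llu u32 slots\n",
	       (unsigned long long)(1ULL << SSSRAID_CAP_STRIDE(cap)));
	printf("ready timeout:     %llu units\n",
	       (unsigned long long)SSSRAID_CAP_TIMEOUT(cap));
	return 0;
}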
diff --git a/drivers/scsi/sssraid/sssraid_debug.h b/drivers/scsi/sssraid/sssraid_debug.h
new file mode 100644
index 000000000000..d48c57e275f7
--- /dev/null
+++ b/drivers/scsi/sssraid/sssraid_debug.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 3SNIC Information Technology, Ltd */
+
+/* 3SNIC RAID SSSXXX Series Linux Driver */
+
+#ifndef SSSRAID_DEBUG_H_INCLUDED
+#define SSSRAID_DEBUG_H_INCLUDED
+
+/*
+ * debug levels
+ */
+#define SSSRAID_DEBUG 0x00000001
+
+/*
+ * debug macros
+ */
+
+#define ioc_err(ioc, fmt, ...) \
+ pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...) \
+ pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...) \
+ pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+
+
+#define dbgprint(IOC, FMT, ...) \
+ do { \
+ if (unlikely((IOC)->logging_level & SSSRAID_DEBUG)) \
+ pr_info("%s: " FMT, (IOC)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#endif /* SSSRAID_DEBUG_H_INCLUDED */
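The macros above only require a struct with name and logging_level fields. A user-space sketch of the dbgprint() gating, with pr_info and unlikely stubbed so it runs stand-alone:

#include <stdio.h>

#define pr_info(...)	printf(__VA_ARGS__)	/* stand-in for the kernel's pr_info */
#define unlikely(x)	(x)			/* stand-in for the branch hint */
#define SSSRAID_DEBUG	0x00000001

struct demo_ioc { const char *name; int logging_level; };

#define dbgprint(IOC, FMT, ...)						\
	do {								\
		if (unlikely((IOC)->logging_level & SSSRAID_DEBUG))	\
			pr_info("%s: " FMT, (IOC)->name, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	struct demo_ioc quiet = { "sssraid0", 0 };
	struct demo_ioc noisy = { "sssraid1", SSSRAID_DEBUG };

	dbgprint(&quiet, "dropped\n");			/* gated off: no output */
	dbgprint(&noisy, "cid[%d] submitted\n", 7);	/* printed */
	return 0;
}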
diff --git a/drivers/scsi/sssraid/sssraid_fw.c b/drivers/scsi/sssraid/sssraid_fw.c
new file mode 100644
index 000000000000..54287cf70558
--- /dev/null
+++ b/drivers/scsi/sssraid/sssraid_fw.c
@@ -0,0 +1,1724 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 3SNIC Information Technology, Ltd */
+
+/* 3SNIC RAID SSSXXX Series Linux Driver */
+
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <linux/sort.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/once.h>
+#include <linux/sched/signal.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "sssraid.h"
+#include "sssraid_debug.h"
+
+static int sssraid_wait_ready(struct sssraid_dev *hdev, u64 cap, bool enabled)
+{
+ unsigned long timeout =
+ ((SSSRAID_CAP_TIMEOUT(cap) + 1) * SSSRAID_CAP_TIMEOUT_UNIT_MS) + jiffies;
+ u32 bit = enabled ? SSSRAID_CSTS_RDY : 0;
+
+ while ((readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_RDY) != bit) {
+ usleep_range(1000, 2000);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ if (time_after(jiffies, timeout)) {
+ ioc_err(hdev, "Device not ready; aborting %s\n",
+ enabled ? "initialisation" : "reset");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int sssraid_enable_ctrl(struct sssraid_dev *hdev)
+{
+ u64 cap = hdev->cap;
+ u32 dev_page_min = SSSRAID_CAP_MPSMIN(cap) + 12;
+ u32 page_shift = PAGE_SHIFT;
+
+ if (page_shift < dev_page_min) {
+ ioc_err(hdev, "Minimum device page size[%u], too large for host[%u]\n",
+ 1U << dev_page_min, 1U << page_shift);
+ return -ENODEV;
+ }
+
+ page_shift = min_t(unsigned int, SSSRAID_CAP_MPSMAX(cap) + 12, PAGE_SHIFT);
+ hdev->page_size = 1U << page_shift;
+
+ hdev->ctrl_config = SSSRAID_CC_CSS_NVM;
+ hdev->ctrl_config |= (page_shift - 12) << SSSRAID_CC_MPS_SHIFT;
+ hdev->ctrl_config |= SSSRAID_CC_AMS_RR | SSSRAID_CC_SHN_NONE;
+ hdev->ctrl_config |= SSSRAID_CC_IOSQES | SSSRAID_CC_IOCQES;
+ hdev->ctrl_config |= SSSRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SSSRAID_REG_CC);
+
+ return sssraid_wait_ready(hdev, cap, true);
+}
+
+static int sssraid_disable_ctrl(struct sssraid_dev *hdev)
+{
+ hdev->ctrl_config &= ~SSSRAID_CC_SHN_MASK;
+ hdev->ctrl_config &= ~SSSRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SSSRAID_REG_CC);
+
+ return sssraid_wait_ready(hdev, hdev->cap, false);
+}
+
+static int sssraid_shutdown_ctrl(struct sssraid_dev *hdev)
+{
+ unsigned long timeout = le32_to_cpu(hdev->ctrl_info->rtd3e) / 1000000 * HZ + jiffies;
+
+ hdev->ctrl_config &= ~SSSRAID_CC_SHN_MASK;
+ hdev->ctrl_config |= SSSRAID_CC_SHN_NORMAL;
+ writel(hdev->ctrl_config, hdev->bar + SSSRAID_REG_CC);
+
+ while ((readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_SHST_MASK) !=
+ SSSRAID_CSTS_SHST_CMPLT) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ ioc_err(hdev, "Device shutdown incomplete; abort shutdown\n");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int sssraid_remap_bar(struct sssraid_dev *hdev, u32 size)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (size > pci_resource_len(pdev, 0)) {
+ ioc_err(hdev, "Input size[%u] exceed bar0 length[%llu]\n",
+ size, pci_resource_len(pdev, 0));
+ return -ENODEV;
+ }
+
+ if (hdev->bar)
+ iounmap(hdev->bar);
+
+ hdev->bar = ioremap(pci_resource_start(pdev, 0), size);
+ if (!hdev->bar) {
+ ioc_err(hdev, "ioremap for bar0 failed\n");
+ return -ENODEV;
+ }
+ hdev->dbs = hdev->bar + SSSRAID_REG_DBS;
+
+ return 0;
+}
+
+static int sssraid_create_dma_pools(struct sssraid_dev *hdev)
+{
+ int i;
+ char poolname[20] = { 0 };
+
+ hdev->prp_page_pool = dma_pool_create("prp list page", &hdev->pdev->dev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+
+ if (!hdev->prp_page_pool) {
+ ioc_err(hdev, "create prp_page_pool failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < small_pool_num; i++) {
+ sprintf(poolname, "prp_list_256_%d", i);
+ hdev->prp_small_pool[i] = dma_pool_create(poolname, &hdev->pdev->dev,
+ SMALL_POOL_SIZE,
+ SMALL_POOL_SIZE, 0);
+
+ if (!hdev->prp_small_pool[i]) {
+ ioc_err(hdev, "create prp_small_pool %d failed\n", i);
+ goto destroy_prp_small_pool;
+ }
+ }
+
+ return 0;
+
+destroy_prp_small_pool:
+ while (i > 0)
+ dma_pool_destroy(hdev->prp_small_pool[--i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+
+ return -ENOMEM;
+}
+
+static void sssraid_destroy_dma_pools(struct sssraid_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < small_pool_num; i++)
+ dma_pool_destroy(hdev->prp_small_pool[i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+}
+
+static int sssraid_alloc_resources(struct sssraid_dev *hdev)
+{
+ int retval, nqueue;
+
+ hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info), GFP_KERNEL, hdev->numa_node);
+ if (!hdev->ctrl_info)
+ return -ENOMEM;
+
+ retval = sssraid_create_dma_pools(hdev);
+ if (retval)
+ goto free_ctrl_info;
+ /* not num_online_cpus */
+ nqueue = num_possible_cpus() + 1;
+ hdev->cqinfo = kcalloc_node(nqueue, sizeof(struct sssraid_cqueue),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->cqinfo) {
+ retval = -ENOMEM;
+ goto destroy_dma_pools;
+ }
+
+ hdev->sqinfo = kcalloc_node(nqueue, sizeof(struct sssraid_squeue),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->sqinfo) {
+ retval = -ENOMEM;
+ goto free_cqueues;
+ }
+
+ /* sssraid_alloc_admin_cmds moved to sssraid_init_ioc */
+
+ ioc_info(hdev, "queues num: %d\n", nqueue);
+
+ return 0;
+
+free_cqueues:
+ kfree(hdev->cqinfo);
+destroy_dma_pools:
+ sssraid_destroy_dma_pools(hdev);
+free_ctrl_info:
+ kfree(hdev->ctrl_info);
+
+ return retval;
+}
+
+void sssraid_ioc_enable_intr(struct sssraid_dev *hdev)
+{
+ hdev->intr_enabled = 1;
+}
+
+void sssraid_ioc_disable_intr(struct sssraid_dev *hdev)
+{
+ u16 i, max_vectors;
+
+ hdev->intr_enabled = 0;
+ max_vectors = hdev->intr_info_count;
+
+ for (i = 0; i < max_vectors; i++)
+ synchronize_irq(pci_irq_vector(hdev->pdev, i));
+}
+
+static int sssraid_setup_resources(struct sssraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int retval = 0;
+ u64 maskbit = SSSRAID_DMA_MSK_BIT_MAX;
+
+ if (pci_enable_device_mem(pdev)) {
+ ioc_err(hdev, "pci_enable_device_mem: failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ retval = pci_request_mem_regions(pdev, SSSRAID_DRIVER_NAME);
+ if (retval) {
+ ioc_err(hdev, "fail to request memory regions\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ /* get cap value at first, so keep
+ * sssraid_remap_bar(hdev, SSSRAID_REG_DBS + 4096)
+ * ioremap(pci_resource_start(..)) still in sssraid_remap_bar
+ */
+ retval = sssraid_remap_bar(hdev, SSSRAID_REG_DBS + 4096);
+ if (retval) {
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ pci_set_master(pdev);
+
+ if (readl(hdev->bar + SSSRAID_REG_CSTS) == U32_MAX) {
+ retval = -ENODEV;
+ ioc_err(hdev, "Read csts register failed\n");
+ goto out_failed;
+ }
+
+ hdev->cap = lo_hi_readq(hdev->bar + SSSRAID_REG_CAP);
+ hdev->ioq_depth = min_t(u32, SSSRAID_CAP_MQES(hdev->cap) + 1, io_queue_depth);
+ hdev->scsi_qd = hdev->ioq_depth - SSSRAID_PTCMDS_PERQ;
+ hdev->db_stride = 1 << SSSRAID_CAP_STRIDE(hdev->cap);
+
+ maskbit = SSSRAID_CAP_DMAMASK(hdev->cap);
+ if (maskbit < 32 || maskbit > SSSRAID_DMA_MSK_BIT_MAX) {
+ ioc_err(hdev, "err, dma mask invalid[%llu], set to default\n", maskbit);
+ maskbit = SSSRAID_DMA_MSK_BIT_MAX;
+ }
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ ioc_err(hdev, "set dma mask[32] and coherent failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+ ioc_info(hdev, "set dma mask[32] success\n");
+ } else {
+ ioc_info(hdev, "set dma mask[%llu] success\n", maskbit);
+ }
+
+ /* pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES) moved to setup_isr */
+
+ pci_set_drvdata(pdev, hdev->shost);
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_save_state(pdev);
+
+ sssraid_ioc_disable_intr(hdev);
+
+ return retval;
+
+out_failed:
+ sssraid_cleanup_resources(hdev);
+ return retval;
+}
+
+static int sssraid_alloc_admin_cmds(struct sssraid_dev *hdev)
+{
+ u16 i;
+
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+ spin_lock_init(&hdev->adm_cmd_lock);
+
+ hdev->adm_cmds = kcalloc_node(SSSRAID_AQ_BLK_MQ_DEPTH, sizeof(struct sssraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->adm_cmds) {
+ ioc_err(hdev, "Alloc admin cmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SSSRAID_AQ_BLK_MQ_DEPTH; i++) {
+ hdev->adm_cmds[i].qid = 0;
+ hdev->adm_cmds[i].cid = i;
+ list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
+ }
+
+ ioc_info(hdev, "Alloc admin cmds success, num[%d]\n", SSSRAID_AQ_BLK_MQ_DEPTH);
+
+ return 0;
+}
+
+static int sssraid_alloc_qpair(struct sssraid_dev *hdev, u16 qidx, u16 depth)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+ int retval = 0;
+
+ if (hdev->init_done_queue_cnt > qidx) {
+ ioc_info(hdev, "warn: queue[%d] is exist\n", qidx);
+ return 0;
+ }
+
+ cqinfo->cqes = dma_alloc_coherent(&hdev->pdev->dev, CQ_SIZE(depth),
+ &cqinfo->cq_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!cqinfo->cqes)
+ return -ENOMEM;
+
+ sqinfo->sq_cmds = dma_alloc_coherent(&hdev->pdev->dev, SQ_SIZE(qidx, depth),
+ &sqinfo->sq_dma_addr, GFP_KERNEL);
+ if (!sqinfo->sq_cmds) {
+ retval = -ENOMEM;
+ goto free_cqes;
+ }
+
+ /* alloc sense buffer */
+ sqinfo->sense = dma_alloc_coherent(&hdev->pdev->dev, SENSE_SIZE(depth),
+ &sqinfo->sense_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!sqinfo->sense) {
+ retval = -ENOMEM;
+ goto free_sq_cmds;
+ }
+
+ spin_lock_init(&sqinfo->sq_lock);
+ spin_lock_init(&cqinfo->cq_lock);
+ cqinfo->hdev = hdev;
+ sqinfo->hdev = hdev;
+ sqinfo->q_depth = depth;
+ sqinfo->qidx = qidx;
+ /* cq_vector replaced by msix_index */
+
+ /*
+ * online_queues: completely initialized queue count: sssraid_init_queue
+ * queue_count: allocated but not completely initialized queue count: sssraid_alloc_queue
+ * online_queues/queue_count replaced by init_done_queue_cnt.
+ */
+ hdev->init_done_queue_cnt++;
+
+ return 0;
+
+free_sq_cmds:
+ dma_free_coherent(&hdev->pdev->dev, SQ_SIZE(qidx, depth), (void *)sqinfo->sq_cmds,
+ sqinfo->sq_dma_addr);
+free_cqes:
+ dma_free_coherent(&hdev->pdev->dev, CQ_SIZE(depth), (void *)cqinfo->cqes,
+ cqinfo->cq_dma_addr);
+ return retval;
+}
+
+static void sssraid_init_queue(struct sssraid_dev *hdev, u16 qidx)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+
+ memset((void *)cqinfo->cqes, 0, CQ_SIZE(sqinfo->q_depth));
+
+ sqinfo->sq_tail = 0;
+ cqinfo->cq_head = 0;
+ cqinfo->cq_phase = 1;
+ sqinfo->q_db = &hdev->dbs[qidx * 2 * hdev->db_stride];
+ sqinfo->prp_small_pool = hdev->prp_small_pool[qidx % small_pool_num];
+}
+
+static int sssraid_setup_admin_qpair(struct sssraid_dev *hdev)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[0];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[0];
+ u32 aqa;
+ int retval;
+
+ ioc_info(hdev, "start disable ctrl\n");
+
+ retval = sssraid_disable_ctrl(hdev);
+ if (retval)
+ return retval;
+
+ /* this function does not allocate the admin queue */
+
+ aqa = sqinfo->q_depth - 1;
+ aqa |= aqa << 16;
+ writel(aqa, hdev->bar + SSSRAID_REG_AQA);
+ lo_hi_writeq(sqinfo->sq_dma_addr, hdev->bar + SSSRAID_REG_ASQ);
+ lo_hi_writeq(cqinfo->cq_dma_addr, hdev->bar + SSSRAID_REG_ACQ);
+
+ ioc_info(hdev, "start enable ctrl\n");
+
+ retval = sssraid_enable_ctrl(hdev);
+ if (retval) {
+ retval = -ENODEV;
+ return retval;
+ }
+
+ /* interrupt registry not here */
+ /* cq_vector replaced by msix_index */
+
+ sssraid_init_queue(hdev, 0);
+
+ ioc_info(hdev, "success, queuecount:[%d], pagesize[%d]\n",
+ hdev->init_done_queue_cnt, hdev->page_size);
+
+ return 0;
+}
+
+static void sssraid_cleanup_isr(struct sssraid_dev *hdev)
+{
+ u16 i;
+
+ sssraid_ioc_disable_intr(hdev);
+
+ if (!hdev->intr_info)
+ return;
+
+ for (i = 0; i < hdev->intr_info_count; i++)
+ free_irq(pci_irq_vector(hdev->pdev, i),
+ (hdev->intr_info + i));
+
+ kfree(hdev->intr_info);
+ hdev->intr_info = NULL;
+ hdev->intr_info_count = 0;
+ pci_free_irq_vectors(hdev->pdev);
+}
+
+static void sssraid_complete_adminq_cmnd(struct sssraid_dev *hdev, u16 qidx,
+ struct sssraid_completion *cqe)
+{
+ struct sssraid_cmd *adm_cmd;
+
+ adm_cmd = hdev->adm_cmds + le16_to_cpu(cqe->cmd_id);
+ if (unlikely(adm_cmd->state == SSSRAID_CMD_IDLE)) {
+ ioc_warn(hdev, "Invalid id %d completed on queue %d\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+ adm_cmd->result0 = le32_to_cpu(cqe->result);
+ adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&adm_cmd->cmd_done);
+}
+
+static inline bool sssraid_cqe_pending(struct sssraid_cqueue *cqinfo)
+{
+ return (le16_to_cpu(cqinfo->cqes[cqinfo->cq_head].status) & 1) ==
+ cqinfo->cq_phase;
+}
+
+static inline void sssraid_update_cq_head(struct sssraid_dev *hdev, u16 qidx)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+
+ if (++cqinfo->cq_head == sqinfo->q_depth) {
+ cqinfo->cq_head = 0;
+ cqinfo->cq_phase = !cqinfo->cq_phase;
+ }
+}
+
+static inline bool sssraid_process_cq(struct sssraid_dev *hdev, u16 qidx, u16 *start,
+ u16 *end, int tag)
+{
+ bool found = false;
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+
+ *start = cqinfo->cq_head;
+ while (!found && sssraid_cqe_pending(cqinfo)) {
+ if (le16_to_cpu(cqinfo->cqes[cqinfo->cq_head].cmd_id) == tag)
+ found = true;
+ sssraid_update_cq_head(hdev, qidx);
+ }
+ *end = cqinfo->cq_head;
+
+ if (*start != *end)
+ writel(cqinfo->cq_head, sqinfo->q_db + sqinfo->hdev->db_stride);
+
+ return found;
+}
+
+static irqreturn_t sssraid_isr(int irq, void *privdata)
+{
+ struct sssraid_intr_info *intr_info = privdata;
+ struct sssraid_dev *hdev;
+ irqreturn_t ret = IRQ_NONE;
+ struct sssraid_cqueue *cqinfo;
+ u16 midx, start, end;
+
+ if (!intr_info)
+ return IRQ_NONE;
+ hdev = intr_info->hdev;
+ if (!hdev->intr_enabled)
+ return IRQ_NONE;
+
+ midx = intr_info->msix_index;
+ cqinfo = &hdev->cqinfo[midx];
+
+ spin_lock(&cqinfo->cq_lock);
+ if (cqinfo->cq_head != cqinfo->last_cq_head)
+ ret = IRQ_HANDLED;
+
+ sssraid_process_cq(hdev, midx, &start, &end, -1);
+ cqinfo->last_cq_head = cqinfo->cq_head;
+ spin_unlock(&cqinfo->cq_lock);
+
+ if (start != end) {
+ sssraid_complete_cqes(hdev, midx, start, end);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+irqreturn_t sssraid_isr_poll(int irq, void *privdata)
+{
+ return IRQ_NONE;
+}
+
+bool sssraid_poll_cq(struct sssraid_dev *hdev, u16 qidx, int cid)
+{
+ u16 start, end;
+ bool found;
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+
+ if (!sssraid_cqe_pending(cqinfo))
+ return 0;
+
+ spin_lock_irq(&cqinfo->cq_lock);
+ found = sssraid_process_cq(hdev, qidx, &start, &end, cid);
+ spin_unlock_irq(&cqinfo->cq_lock);
+
+ sssraid_complete_cqes(hdev, qidx, start, end);
+ return found;
+}
+
+static inline int sssraid_request_irq(struct sssraid_dev *hdev, u16 index)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct sssraid_intr_info *intr_info = hdev->intr_info + index;
+ int retval = 0;
+
+ intr_info->hdev = hdev;
+ intr_info->msix_index = index;
+ intr_info->cqinfo = NULL;
+
+ snprintf(intr_info->name, SSSRAID_NAME_LENGTH, "%s%d-msix%d",
+ SSSRAID_DRIVER_NAME, hdev->instance, index);
+
+ retval = request_threaded_irq(pci_irq_vector(pdev, index), sssraid_isr,
+ sssraid_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
+
+ if (retval) {
+ ioc_err(hdev, "%s: Unable to allocate interrupt %d!\n",
+ intr_info->name, pci_irq_vector(pdev, index));
+ return retval;
+ }
+
+ return retval;
+}
+
+static int sssraid_setup_isr(struct sssraid_dev *hdev, u8 setup_one)
+{
+ unsigned int irq_flags = PCI_IRQ_MSIX;
+ u16 max_vectors = 0, i;
+ int retval = 0;
+
+ struct irq_affinity desc = { .pre_vectors = 1};
+
+ sssraid_cleanup_isr(hdev);
+
+ if (setup_one)
+ max_vectors = 1;
+ else {
+ max_vectors = hdev->before_affinity_msix_cnt;
+
+ ioc_info(hdev, "MSI-x vectors requested: %d\n", max_vectors);
+ }
+
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
+
+ i = pci_alloc_irq_vectors_affinity(hdev->pdev,
+ 1, max_vectors, irq_flags, &desc);
+
+ if (i <= 0) {
+ ioc_err(hdev, "Cannot alloc irq vectors\n");
+ goto out_failed;
+ }
+ if (i != max_vectors) {
+ ioc_info(hdev,
+ "allocated vectors (%d) are less than configured (%d)\n",
+ i, max_vectors);
+
+ max_vectors = i;
+ }
+
+ hdev->intr_info = kzalloc(sizeof(struct sssraid_intr_info) * max_vectors,
+ GFP_KERNEL);
+ if (!hdev->intr_info) {
+ retval = -1;
+ pci_free_irq_vectors(hdev->pdev);
+ goto out_failed;
+ }
+
+ for (i = 0; i < max_vectors; i++) {
+ retval = sssraid_request_irq(hdev, i);
+ if (retval) {
+ hdev->intr_info_count = i; /* record how many IRQs were set up so cleanup frees only those */
+ goto out_failed;
+ }
+ }
+
+ /* intr_info_count replace max_qid */
+ hdev->intr_info_count = max_vectors;
+ sssraid_ioc_enable_intr(hdev);
+ return retval;
+out_failed:
+ sssraid_cleanup_isr(hdev);
+
+ return retval;
+}
+
+static bool sssraid_adm_need_reset(struct sssraid_admin_command *cmd)
+{
+ switch (cmd->common.opcode) {
+ case SSSRAID_ADMIN_DELETE_SQ:
+ case SSSRAID_ADMIN_CREATE_SQ:
+ case SSSRAID_ADMIN_DELETE_CQ:
+ case SSSRAID_ADMIN_CREATE_CQ:
+ case SSSRAID_ADMIN_SET_FEATURES:
+ return false;
+ default:
+ return true;
+ }
+}
+
+void sssraid_submit_cmd(struct sssraid_squeue *sqinfo, const void *cmd)
+{
+ u32 sqes = SQE_SIZE(sqinfo->qidx);
+ unsigned long flags;
+ struct sssraid_admin_common_command *acd = (struct sssraid_admin_common_command *)cmd;
+
+ spin_lock_irqsave(&sqinfo->sq_lock, flags);
+ memcpy((sqinfo->sq_cmds + sqes * sqinfo->sq_tail), cmd, sqes);
+ if (++sqinfo->sq_tail == sqinfo->q_depth)
+ sqinfo->sq_tail = 0;
+
+ writel(sqinfo->sq_tail, sqinfo->q_db);
+ spin_unlock_irqrestore(&sqinfo->sq_lock, flags);
+
+ dbgprint(sqinfo->hdev, "cid[%d] qidx[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n",
+ le16_to_cpu(acd->command_id), sqinfo->qidx, acd->opcode, acd->flags,
+ le32_to_cpu(acd->hdid));
+}
+
+int sssraid_submit_admin_sync_cmd(struct sssraid_dev *hdev, struct sssraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout)
+{
+ struct sssraid_cmd *adm_cmd = sssraid_get_cmd(hdev, SSSRAID_CMD_ADM);
+
+ if (!adm_cmd) {
+ ioc_err(hdev, "err, get admin cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ /*
+ * watch dog not as optimized as
+ * init_completion/complete
+ */
+ init_completion(&adm_cmd->cmd_done);
+
+ cmd->common.command_id = cpu_to_le16(adm_cmd->cid);
+ sssraid_submit_cmd(&hdev->sqinfo[0], cmd);
+
+ if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+ ioc_err(hdev, "cid[%d] qidx[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ adm_cmd->cid, adm_cmd->qid, cmd->usr_cmd.opcode,
+ cmd->usr_cmd.info_0.subopcode);
+
+ /* reset controller if admin timeout */
+ if (sssraid_adm_need_reset(cmd))
+ sssraid_adm_timeout(hdev, adm_cmd);
+
+ sssraid_put_cmd(hdev, adm_cmd, SSSRAID_CMD_ADM);
+ return -ETIME;
+ }
+
+ if (result0)
+ *result0 = adm_cmd->result0;
+ if (result1)
+ *result1 = adm_cmd->result1;
+
+ sssraid_put_cmd(hdev, adm_cmd, SSSRAID_CMD_ADM);
+
+ return adm_cmd->status;
+}
+
+static int sssraid_get_ctrl_info(struct sssraid_dev *hdev, struct sssraid_ctrl_info *ctrl_info)
+{
+ struct sssraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int retval;
+
+ data_ptr = dma_alloc_coherent(&hdev->pdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SSSRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SSSRAID_GET_INFO_CTRL;
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ retval = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!retval)
+ memcpy(ctrl_info, data_ptr, sizeof(struct sssraid_ctrl_info));
+
+ dma_free_coherent(&hdev->pdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return retval;
+}
+
+int sssraid_init_ctrl_info(struct sssraid_dev *hdev)
+{
+ int retval;
+
+ hdev->ctrl_info->nd = cpu_to_le32(240);
+ hdev->ctrl_info->mdts = 8;
+ hdev->ctrl_info->max_cmds = cpu_to_le16(4096);
+ hdev->ctrl_info->max_num_sge = cpu_to_le16(128);
+ hdev->ctrl_info->max_channel = cpu_to_le16(4);
+ hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239);
+ hdev->ctrl_info->max_lun = cpu_to_le16(2);
+
+ retval = sssraid_get_ctrl_info(hdev, hdev->ctrl_info);
+ if (retval)
+ ioc_err(hdev, "init controller info failed: %d\n", retval);
+
+ ioc_info(hdev, "nd = %d\n", le32_to_cpu(hdev->ctrl_info->nd));
+ ioc_info(hdev, "max_cmd = %d\n", le16_to_cpu(hdev->ctrl_info->max_cmds));
+ ioc_info(hdev, "max_channel = %d\n", le16_to_cpu(hdev->ctrl_info->max_channel));
+ ioc_info(hdev, "max_tgt_id = %d\n", le32_to_cpu(hdev->ctrl_info->max_tgt_id));
+ ioc_info(hdev, "max_lun = %d\n", le16_to_cpu(hdev->ctrl_info->max_lun));
+ ioc_info(hdev, "max_num_sge = %d\n", le16_to_cpu(hdev->ctrl_info->max_num_sge));
+ ioc_info(hdev, "lun_num_boot = %d\n", le16_to_cpu(hdev->ctrl_info->lun_num_in_boot));
+ ioc_info(hdev, "mdts = %d\n", hdev->ctrl_info->mdts);
+ ioc_info(hdev, "acl = %d\n", hdev->ctrl_info->acl);
+	ioc_info(hdev, "aerl = %d\n", hdev->ctrl_info->aerl);
+ ioc_info(hdev, "card_type = %d\n", hdev->ctrl_info->card_type);
+ ioc_info(hdev, "rtd3e = %d\n", le32_to_cpu(hdev->ctrl_info->rtd3e));
+ ioc_info(hdev, "sn = %s\n", hdev->ctrl_info->sn);
+ ioc_info(hdev, "fr = %s\n", hdev->ctrl_info->fr);
+
+ if (!hdev->ctrl_info->aerl)
+ hdev->ctrl_info->aerl = 1;
+ if (hdev->ctrl_info->aerl > SSSRAID_NR_AEN_COMMANDS)
+ hdev->ctrl_info->aerl = SSSRAID_NR_AEN_COMMANDS;
+
+ return 0;
+}
+
+static int sssraid_set_features(struct sssraid_dev *hdev, u32 fid, u32 dword11, void *buffer,
+ size_t buflen, u32 *result)
+{
+ struct sssraid_admin_command admin_cmd;
+ int ret;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+
+ if (buffer && buflen) {
+ data_ptr = dma_alloc_coherent(&hdev->pdev->dev, buflen, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memcpy(data_ptr, buffer, buflen);
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.features.opcode = SSSRAID_ADMIN_SET_FEATURES;
+ admin_cmd.features.fid = cpu_to_le32(fid);
+ admin_cmd.features.dword11 = cpu_to_le32(dword11);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, result, NULL, 0);
+
+ if (data_ptr)
+ dma_free_coherent(&hdev->pdev->dev, buflen, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int sssraid_set_queue_cnt(struct sssraid_dev *hdev, u32 *cnt)
+{
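+	/*
+	 * Queue counts here are zero-based: the low 16 bits carry the
+	 * submission queue count, the high 16 bits the completion queue
+	 * count (apparently mirroring the NVMe Number of Queues layout)
+	 */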
+ u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16);
+ u32 nr_ioqs, result;
+ int status;
+
+ status = sssraid_set_features(hdev, SSSRAID_FEAT_NUM_QUEUES, q_cnt, NULL, 0, &result);
+ if (status) {
+ ioc_err(hdev, "Set queue count failed, status: %d\n",
+ status);
+ return -EIO;
+ }
+
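+	/* the controller may grant fewer queues; take the smaller SQ/CQ grant */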
+ nr_ioqs = min(result & 0xffff, result >> 16) + 1;
+ *cnt = min(*cnt, nr_ioqs);
+ if (*cnt == 0) {
+ ioc_err(hdev, "Illegal queue count: zero\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int sssraid_create_cq(struct sssraid_dev *hdev, u16 qidx)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+ struct sssraid_admin_command admin_cmd;
+ int flags = SSSRAID_QUEUE_PHYS_CONTIG | SSSRAID_CQ_IRQ_ENABLED;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_cq.opcode = SSSRAID_ADMIN_CREATE_CQ;
+ admin_cmd.create_cq.prp1 = cpu_to_le64(cqinfo->cq_dma_addr);
+ admin_cmd.create_cq.cqid = cpu_to_le16(qidx);
+ admin_cmd.create_cq.qsize = cpu_to_le16(sqinfo->q_depth - 1);
+ admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
+ admin_cmd.create_cq.irq_vector = cpu_to_le16(qidx);
+
+ return sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static int sssraid_create_io_cq(struct sssraid_dev *hdev, u16 qidx)
+{
+ int retval;
+ struct sssraid_cqueue *cqinfo = hdev->cqinfo + qidx;
+ u16 midx = qidx;
+
+ retval = sssraid_create_cq(hdev, qidx);
+ if (retval)
+ return retval;
+
+	/* cqinfo is initialized in sssraid_init_queue() */
+ hdev->intr_info[midx].cqinfo = cqinfo;
+
+ return retval;
+}
+
+static int sssraid_create_sq(struct sssraid_dev *hdev, u16 qidx)
+{
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+ struct sssraid_admin_command admin_cmd;
+ int flags = SSSRAID_QUEUE_PHYS_CONTIG;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_sq.opcode = SSSRAID_ADMIN_CREATE_SQ;
+ admin_cmd.create_sq.prp1 = cpu_to_le64(sqinfo->sq_dma_addr);
+ admin_cmd.create_sq.sqid = cpu_to_le16(qidx);
+ admin_cmd.create_sq.qsize = cpu_to_le16(sqinfo->q_depth - 1);
+ admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
+ admin_cmd.create_sq.cqid = cpu_to_le16(qidx);
+
+ return sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static int sssraid_create_io_sq(struct sssraid_dev *hdev, u16 qidx)
+{
+ return sssraid_create_sq(hdev, qidx);
+}
+
+int sssraid_get_dev_list(struct sssraid_dev *hdev, struct sssraid_dev_info *devices)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ struct sssraid_admin_command admin_cmd;
+ struct sssraid_dev_list *list_buf;
+ dma_addr_t data_dma = 0;
+ u32 i, idx, hdid, ndev;
+ int ret = 0;
+
+ list_buf = dma_alloc_coherent(&hdev->pdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!list_buf)
+ return -ENOMEM;
+
+ for (idx = 0; idx < nd;) {
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SSSRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SSSRAID_GET_INFO_DEV_LIST;
+ admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (ret) {
+ ioc_err(hdev, "Get device list failed, nd: %u, idx: %u, ret: %d\n",
+ nd, idx, ret);
+ goto out;
+ }
+ ndev = le32_to_cpu(list_buf->dev_num);
+
+ ioc_info(hdev, "ndev numbers: %u\n", ndev);
+
+ for (i = 0; i < ndev; i++) {
+ hdid = le32_to_cpu(list_buf->devices[i].hdid);
+ ioc_info(hdev, "list_buf->devices[%d], hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ i, hdid, le16_to_cpu(list_buf->devices[i].target),
+ list_buf->devices[i].channel,
+ list_buf->devices[i].lun,
+ list_buf->devices[i].attr);
+ if (hdid > nd || hdid == 0) {
+ ioc_err(hdev, "err, hdid[%d] invalid\n", hdid);
+ continue;
+ }
+ memcpy(&devices[hdid - 1], &list_buf->devices[i],
+ sizeof(struct sssraid_dev_info));
+ }
+ idx += ndev;
+
+ if (ndev < MAX_DEV_ENTRY_PER_PAGE_4K)
+ break;
+ }
+
+out:
+ dma_free_coherent(&hdev->pdev->dev, PAGE_SIZE, list_buf, data_dma);
+ return ret;
+}
+
+/* for now, abort commands are sent via the admin queue */
+int sssraid_send_abort_cmd(struct sssraid_dev *hdev, u32 hdid, u16 qidx, u16 cid)
+{
+ struct sssraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.abort.opcode = SSSRAID_ADMIN_ABORT_CMD;
+ admin_cmd.abort.hdid = cpu_to_le32(hdid);
+ admin_cmd.abort.sqid = cpu_to_le16(qidx);
+ admin_cmd.abort.cid = cpu_to_le16(cid);
+
+ return sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/* for now, reset commands are sent via the admin queue */
+int sssraid_send_reset_cmd(struct sssraid_dev *hdev, u8 type, u32 hdid)
+{
+ struct sssraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.reset.opcode = SSSRAID_ADMIN_RESET;
+ admin_cmd.reset.hdid = cpu_to_le32(hdid);
+ admin_cmd.reset.type = type;
+
+ return sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static int sssraid_delete_queue(struct sssraid_dev *hdev, u8 op, u16 qidx)
+{
+ struct sssraid_admin_command admin_cmd;
+ int retval;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.delete_queue.opcode = op;
+ admin_cmd.delete_queue.qid = cpu_to_le16(qidx);
+
+ retval = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (retval)
+ ioc_err(hdev, "Delete %s:[%d] failed\n",
+ (op == SSSRAID_ADMIN_DELETE_CQ) ? "cq" : "sq", qidx);
+
+ return retval;
+}
+
+static int sssraid_delete_cq(struct sssraid_dev *hdev, u16 qidx)
+{
+ return sssraid_delete_queue(hdev, SSSRAID_ADMIN_DELETE_CQ, qidx);
+}
+
+void sssraid_adm_timeout(struct sssraid_dev *hdev, struct sssraid_cmd *cmd)
+{
+	/* the command may already have been completed by a controller reset */
+ if (READ_ONCE(cmd->state) == SSSRAID_CMD_COMPLETE)
+ return;
+
+ if (!sssraid_change_host_state(hdev, SSSRAID_RESETTING)) {
+ ioc_info(hdev, "can't change to reset state\n");
+ return;
+ }
+ sssraid_soft_reset_handler(hdev);
+}
+
+static int sssraid_create_io_qpair(struct sssraid_dev *hdev, u16 qidx)
+{
+ int retval;
+
+ retval = sssraid_create_io_cq(hdev, qidx);
+ if (retval)
+ return retval;
+
+ retval = sssraid_create_io_sq(hdev, qidx);
+ if (retval)
+ goto delete_cq;
+
+	/* intr_info.msix_index takes the place of cq_vector */
+
+	/*
+	 * IO interrupt registration does not happen here;
+	 * it was done earlier, in sssraid_setup_isr()
+	 */
+
+ sssraid_init_queue(hdev, qidx);
+
+ return 0;
+
+delete_cq:
+ sssraid_delete_cq(hdev, qidx);
+
+ return retval;
+}
+
+static int sssraid_setup_io_qpair(struct sssraid_dev *hdev)
+{
+ u32 i, num_queues;
+ int retval = 0;
+
+ num_queues = min(hdev->intr_info_count, hdev->init_done_queue_cnt - 1);
+ for (i = 1; i <= num_queues; i++) {
+ retval = sssraid_create_io_qpair(hdev, i);
+ if (retval) {
+ ioc_err(hdev, "Create queue[%d] failed\n", i);
+ break;
+ }
+ }
+
+	ioc_info(hdev, "init_done_queue_cnt[%d], intr_info_count[%d] num_queues[%d]\n",
+		 hdev->init_done_queue_cnt, hdev->intr_info_count, num_queues);
+
+ return retval >= 0 ? 0 : retval;
+}
+
+static int sssraid_alloc_ioq_ptcmds(struct sssraid_dev *hdev)
+{
+ int i;
+ int ptnum = SSSRAID_NR_IOQ_PTCMDS;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+ spin_lock_init(&hdev->ioq_pt_lock);
+
+ hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct sssraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->ioq_ptcmds) {
+ ioc_err(hdev, "Alloc ioq_ptcmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ptnum; i++) {
+ hdev->ioq_ptcmds[i].qid = i / SSSRAID_PTCMDS_PERQ + 1;
+ hdev->ioq_ptcmds[i].cid = i % SSSRAID_PTCMDS_PERQ + SSSRAID_IO_BLK_MQ_DEPTH;
+ list_add_tail(&(hdev->ioq_ptcmds[i].list), &hdev->ioq_pt_list);
+ }
+
+ ioc_info(hdev, "Alloc ioq_ptcmds success, ptnum[%d]\n", ptnum);
+
+ return 0;
+}
+
+int sssraid_send_event_ack(struct sssraid_dev *hdev, u8 event,
+ u32 event_ctx, u16 cid)
+{
+	/* event and event_ctx are unused at this time */
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[0];
+ struct sssraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = SSSRAID_ADMIN_ASYNC_EVENT;
+ admin_cmd.common.command_id = cpu_to_le16(cid);
+
+ sssraid_submit_cmd(sqinfo, &admin_cmd);
+ ioc_info(hdev, "send aen, cid[%d]\n", cid);
+
+ return 0;
+}
+
+static void sssraid_handle_aen_notice(struct sssraid_dev *hdev, u32 result)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SSSRAID_AEN_DEV_CHANGED:
+ sssraid_scan_disk(hdev);
+ break;
+ case SSSRAID_AEN_FW_ACT_START:
+ ioc_info(hdev, "fw activation starting\n");
+ break;
+ case SSSRAID_AEN_HOST_PROBING:
+ break;
+ default:
+ ioc_warn(hdev, "async event result %08x\n", result);
+ }
+}
+
+static void sssraid_handle_aen_vs(struct sssraid_dev *hdev, u32 result, u32 result1)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SSSRAID_AEN_TIMESYN:
+ sssraid_configure_timestamp(hdev);
+ break;
+ case SSSRAID_AEN_FW_ACT_FINISH:
+ ioc_info(hdev, "fw activation finish\n");
+ if (sssraid_init_ctrl_info(hdev))
+ ioc_err(hdev, "get ctrl info failed after fw act\n");
+ break;
+ case SSSRAID_AEN_EVENT_MIN ... SSSRAID_AEN_EVENT_MAX:
+ ioc_info(hdev, "rcv card event[%d], param1[0x%x] param2[0x%x]\n",
+ (result & 0xff00) >> 8, result, result1);
+ break;
+ default:
+ ioc_warn(hdev, "async event result: 0x%x\n", result);
+ }
+}
+
+static inline void sssraid_send_all_aen(struct sssraid_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->ctrl_info->aerl; i++)
+ sssraid_send_event_ack(hdev, 0, 0, i + SSSRAID_AQ_BLK_MQ_DEPTH);
+}
+
+static int sssraid_dev_list_init(struct sssraid_dev *hdev)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+
+ hdev->devices = kzalloc_node(nd * sizeof(struct sssraid_dev_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->devices)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int sssraid_configure_timestamp(struct sssraid_dev *hdev)
+{
+ __le64 ts;
+ int retval;
+
+ ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
+ retval = sssraid_set_features(hdev, SSSRAID_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), NULL);
+
+ if (retval)
+ ioc_err(hdev, "set timestamp failed: %d\n", retval);
+ return retval;
+}
+
+int sssraid_init_ioc(struct sssraid_dev *hdev, u8 re_init)
+{
+ int retval = 0;
+ int i;
+ u32 nr_ioqs, bar_size;
+
+ if (!re_init) {
+ hdev->cpu_count = num_online_cpus();
+
+ retval = sssraid_alloc_resources(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to alloc resources:error %d\n",
+ retval);
+ goto out_nocleanup;
+ }
+ }
+
+ /* reset need re-setup */
+ retval = sssraid_setup_resources(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to setup resources:error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (!re_init) {
+ retval = sssraid_alloc_admin_cmds(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to alloc admin cmds:error %d\n",
+ retval);
+ goto out_failed;
+ }
+		/* the admin queue itself is allocated here */
+ retval = sssraid_alloc_qpair(hdev, 0, SSSRAID_AQ_DEPTH);
+ if (retval) {
+ ioc_err(hdev, "Failed to alloc admin queue:error %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ retval = sssraid_setup_admin_qpair(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to setup admin queue:error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+	/*
+	 * 1. unregister all interrupts
+	 * 2. register the admin interrupt
+	 */
+ retval = sssraid_setup_isr(hdev, 1);
+ if (retval) {
+ ioc_err(hdev, "Failed to setup ISR error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ retval = sssraid_init_ctrl_info(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to get ctrl info error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ nr_ioqs = hdev->cpu_count;
+ retval = sssraid_set_queue_cnt(hdev, &nr_ioqs);
+ if (retval) {
+ ioc_err(hdev, "Failed to set queue cnt error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ hdev->before_affinity_msix_cnt = nr_ioqs + 1;
+
+	/*
+	 * 1. unregister all interrupts
+	 * 2. re-register the admin interrupt
+	 * 3. register the io interrupts
+	 */
+ retval = sssraid_setup_isr(hdev, 0);
+ if (retval) {
+ ioc_err(hdev, "Failed to re-setup ISR, error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ /* remap */
+ bar_size = SSSRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride);
+ retval = sssraid_remap_bar(hdev, bar_size);
+ if (retval) {
+ ioc_err(hdev, "Failed to re-map bar, error %d\n",
+ retval);
+ goto out_failed;
+ }
+ hdev->sqinfo[0].q_db = hdev->dbs;
+
+	/* num_vecs is no longer meaningful and has been dropped */
+
+ if (!re_init) {
+ for (i = hdev->init_done_queue_cnt; i <= hdev->intr_info_count; i++) {
+ retval = sssraid_alloc_qpair(hdev, i, hdev->ioq_depth);
+ if (retval) {
+ ioc_err(hdev, "Failed to alloc io queue:error %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+ ioc_info(hdev, "intr_info_count: %d, init_done_queue_cnt: %d, ioq_depth: %d\n",
+ hdev->intr_info_count, hdev->init_done_queue_cnt, hdev->ioq_depth);
+ }
+
+ retval = sssraid_setup_io_qpair(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to setup io qpair, error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (!re_init) {
+ retval = sssraid_alloc_ioq_ptcmds(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to alloc ioq ptcmds, error %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ sssraid_send_all_aen(hdev);
+
+ if (!re_init) {
+ retval = sssraid_dev_list_init(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to init device list, error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ retval = sssraid_configure_timestamp(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to configure timestamp, error %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ return retval;
+
+out_failed:
+ sssraid_cleanup_ioc(hdev, re_init);
+out_nocleanup:
+ return retval;
+}
+
+void sssraid_cleanup_resources(struct sssraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ sssraid_cleanup_isr(hdev);
+
+ if (hdev->bar) {
+ iounmap(hdev->bar);
+ hdev->bar = NULL;
+ }
+
+ if (pci_is_enabled(pdev)) {
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+ }
+}
+
+static void sssraid_free_ioq_ptcmds(struct sssraid_dev *hdev)
+{
+ kfree(hdev->ioq_ptcmds);
+ hdev->ioq_ptcmds = NULL;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+}
+
+static void sssraid_delete_io_queues(struct sssraid_dev *hdev)
+{
+ u16 queues = hdev->init_done_queue_cnt - 1;
+ u8 opcode = SSSRAID_ADMIN_DELETE_SQ;
+ u16 i, pass;
+
+ if (!pci_device_is_present(hdev->pdev)) {
+		ioc_err(hdev, "pci device is not present, skip disabling io queues\n");
+ return;
+ }
+
+ if (hdev->init_done_queue_cnt < 2) {
+		ioc_err(hdev, "err, io queues have already been deleted\n");
+ return;
+ }
+
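+	/* two passes: delete all IO SQs first, then the matching CQs */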
+ for (pass = 0; pass < 2; pass++) {
+ for (i = queues; i > 0; i--)
+ if (sssraid_delete_queue(hdev, opcode, i))
+ break;
+
+ opcode = SSSRAID_ADMIN_DELETE_CQ;
+ }
+}
+
+void sssraid_complete_aen(struct sssraid_dev *hdev, struct sssraid_completion *cqe)
+{
+ u32 result = le32_to_cpu(cqe->result);
+
+ ioc_info(hdev, "rcv aen, cid[%d], status[0x%x], result[0x%x]\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->status) >> 1, result);
+
+	/*
+	 * The event acknowledgement was moved out of this function;
+	 * sssraid_send_aen was renamed sssraid_send_event_ack
+	 */
+
+ if ((le16_to_cpu(cqe->status) >> 1) != SSSRAID_SC_SUCCESS)
+ return;
+ switch (result & 0x7) {
+ case SSSRAID_AEN_NOTICE:
+ sssraid_handle_aen_notice(hdev, result);
+ break;
+ case SSSRAID_AEN_VS:
+ sssraid_handle_aen_vs(hdev, result, le32_to_cpu(cqe->result1));
+ break;
+ default:
+ ioc_warn(hdev, "Unsupported async event type: %u\n",
+ result & 0x7);
+ break;
+ }
+}
+
+void sssraid_free_iod_res(struct sssraid_dev *hdev, struct sssraid_iod *iod)
+{
+ const int last_prp = hdev->page_size / sizeof(__le64) - 1;
+ dma_addr_t dma_addr, next_dma_addr;
+ struct sssraid_sgl_desc *sg_list;
+ __le64 *prp_list;
+ void *addr;
+ int i;
+
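+	/*
+	 * Walk the chained PRP/SGL pages: the last entry of each page
+	 * holds the DMA address of the next page in the chain
+	 */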
+ dma_addr = iod->first_dma;
+ if (iod->npages == 0)
+ dma_pool_free(iod->sqinfo->prp_small_pool, sssraid_iod_list(iod)[0], dma_addr);
+
+ for (i = 0; i < iod->npages; i++) {
+ addr = sssraid_iod_list(iod)[i];
+
+ if (iod->use_sgl) {
+ sg_list = addr;
+ next_dma_addr =
+ le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+ } else {
+ prp_list = addr;
+ next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+ }
+
+ dma_pool_free(hdev->prp_page_pool, addr, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+
+ iod->sense = NULL;
+ iod->npages = -1;
+}
+
+static void sssraid_complete_ioq_sync_cmnd(struct sssraid_dev *hdev, u16 qidx,
+ struct sssraid_completion *cqe)
+{
+ struct sssraid_cmd *ptcmd;
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+
+ ptcmd = hdev->ioq_ptcmds + (sqinfo->qidx - 1) * SSSRAID_PTCMDS_PERQ +
+ le16_to_cpu(cqe->cmd_id) - SSSRAID_IO_BLK_MQ_DEPTH;
+
+ ptcmd->status = le16_to_cpu(cqe->status) >> 1;
+ ptcmd->result0 = le32_to_cpu(cqe->result);
+ ptcmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&ptcmd->cmd_done);
+}
+
+static void sssraid_complete_ioq_cmnd(struct sssraid_dev *hdev, u16 qidx,
+ struct sssraid_completion *cqe)
+{
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct sssraid_iod *iod;
+ struct request *req;
+ unsigned long elapsed;
+
+ tags = hdev->shost->tag_set.tags[sqinfo->qidx - 1];
+
+ req = blk_mq_tag_to_rq(tags, le16_to_cpu(cqe->cmd_id));
+ if (unlikely(!req || !blk_mq_request_started(req))) {
+ ioc_warn(hdev, "Invalid id %d completed on queue %d\n",
+ le16_to_cpu(cqe->cmd_id), sqinfo->qidx);
+ return;
+ }
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dbgprint(hdev, "cid[%d] qidx[%d] finish IO cost %3ld.%3ld seconds\n",
+ le16_to_cpu(cqe->cmd_id), sqinfo->qidx, elapsed / HZ, elapsed % HZ);
+
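+	/*
+	 * Atomically claim completion; if the command already left
+	 * IN_FLIGHT (e.g. the timeout path claimed it first), only
+	 * release its resources and skip scsi_done
+	 */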
+ if (cmpxchg(&iod->state, SSSRAID_CMD_IN_FLIGHT, SSSRAID_CMD_COMPLETE) !=
+ SSSRAID_CMD_IN_FLIGHT) {
+ ioc_warn(hdev, "cid[%d] qidx[%d] enters abnormal handler, cost %3ld.%3ld seconds\n",
+ le16_to_cpu(cqe->cmd_id), sqinfo->qidx, elapsed / HZ, elapsed % HZ);
+ WRITE_ONCE(iod->state, SSSRAID_CMD_TMO_COMPLETE);
+
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ sssraid_free_iod_res(hdev, iod);
+
+ return;
+ }
+
+ sssraid_map_status(iod, scmd, cqe);
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ sssraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+}
+
+static void sssraid_process_admin_cq(struct sssraid_dev *hdev,
+ struct sssraid_squeue *sqinfo,
+ struct sssraid_completion *cqe)
+{
+ struct sssraid_fwevt *fwevt = NULL;
+ u16 cid = le16_to_cpu(cqe->cmd_id), sz;
+
+ if (likely(cid < SSSRAID_AQ_BLK_MQ_DEPTH))
+ sssraid_complete_adminq_cmnd(hdev, sqinfo->qidx, cqe);
+ else {
+ sz = sizeof(*cqe);
+ fwevt = sssraid_alloc_fwevt(sz);
+ if (!fwevt) {
+			ioc_info(hdev, "failure at %s:%d/%s()!\n",
+				 __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ memcpy(fwevt->event_data, cqe, sz);
+ fwevt->hdev = hdev;
+ fwevt->event_id = 0; /* evt_type:0 */
+ fwevt->send_ack = 1; /* ack_req:1 */
+ fwevt->process_evt = 1; /* process_evt_bh:1 */
+ fwevt->evt_ctx = 0; /* 0 */
+ sssraid_fwevt_add_to_list(hdev, fwevt);
+ }
+}
+
+static void sssraid_process_io_cq(struct sssraid_dev *hdev,
+ struct sssraid_squeue *sqinfo,
+ struct sssraid_completion *cqe)
+{
+ u16 cid = le16_to_cpu(cqe->cmd_id);
+
+ if (likely(cid < SSSRAID_IO_BLK_MQ_DEPTH))
+ sssraid_complete_ioq_cmnd(hdev, sqinfo->qidx, cqe);
+ else /* io sync handle */
+ sssraid_complete_ioq_sync_cmnd(hdev, sqinfo->qidx, cqe);
+}
+
+static inline void sssraid_handle_cqe(struct sssraid_dev *hdev, u16 mdix, u16 didx)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[mdix];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[mdix];
+
+ struct sssraid_completion *cqe = &cqinfo->cqes[didx];
+ u16 cid = le16_to_cpu(cqe->cmd_id);
+
+ if (unlikely(cid >= sqinfo->q_depth)) {
+ ioc_err(hdev, "Invalid command id[%d] completed on queue %d\n",
+			cid, le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ dbgprint(hdev, "cid[%d] mdix[%d], result[0x%x], sq_id[%d], status[0x%x]\n",
+ cid, sqinfo->qidx, le32_to_cpu(cqe->result),
+ le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
+
+ if (!mdix) /* admin */
+ sssraid_process_admin_cq(hdev, sqinfo, cqe);
+ else /* io */
+ sssraid_process_io_cq(hdev, sqinfo, cqe);
+}
+
+void sssraid_complete_cqes(struct sssraid_dev *hdev, u16 midx, u16 start, u16 end)
+{
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[midx];
+
+ while (start != end) {
+ sssraid_handle_cqe(hdev, midx, start);
+ if (++start == sqinfo->q_depth)
+ start = 0;
+ }
+}
+
+static void sssraid_disable_admin_queue(struct sssraid_dev *hdev, bool shutdown)
+{
+ struct sssraid_cqueue *adm_cqinfo = &hdev->cqinfo[0];
+ u16 start, end;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ sssraid_shutdown_ctrl(hdev);
+ else
+ sssraid_disable_ctrl(hdev);
+ }
+
+ if (hdev->init_done_queue_cnt == 0) {
+		ioc_err(hdev, "err, admin queue has already been deleted\n");
+ return;
+ }
+
+ spin_lock_irq(&adm_cqinfo->cq_lock);
+ sssraid_process_cq(hdev, 0, &start, &end, -1);
+ spin_unlock_irq(&adm_cqinfo->cq_lock);
+ sssraid_complete_cqes(hdev, 0, start, end);
+}
+
+static void sssraid_free_all_queues(struct sssraid_dev *hdev)
+{
+ int i;
+ struct sssraid_cqueue *cqinfo;
+ struct sssraid_squeue *sqinfo;
+
+ for (i = 0; i < hdev->init_done_queue_cnt; i++) {
+ cqinfo = &hdev->cqinfo[i];
+ sqinfo = &hdev->sqinfo[i];
+ dma_free_coherent(&hdev->pdev->dev, CQ_SIZE(sqinfo->q_depth),
+ (void *)cqinfo->cqes, cqinfo->cq_dma_addr);
+ dma_free_coherent(&hdev->pdev->dev, SQ_SIZE(sqinfo->qidx, sqinfo->q_depth),
+ sqinfo->sq_cmds, sqinfo->sq_dma_addr);
+ dma_free_coherent(&hdev->pdev->dev, SENSE_SIZE(sqinfo->q_depth),
+ sqinfo->sense, sqinfo->sense_dma_addr);
+ }
+
+ hdev->init_done_queue_cnt = 0;
+}
+
+static void sssraid_free_admin_cmds(struct sssraid_dev *hdev)
+{
+ kfree(hdev->adm_cmds);
+ hdev->adm_cmds = NULL;
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
+static void sssraid_free_resources(struct sssraid_dev *hdev)
+{
+ sssraid_free_admin_cmds(hdev);
+ kfree(hdev->sqinfo);
+ kfree(hdev->cqinfo);
+ sssraid_destroy_dma_pools(hdev);
+ kfree(hdev->ctrl_info);
+}
+
+void sssraid_cleanup_ioc(struct sssraid_dev *hdev, u8 re_init)
+{
+ if (!re_init)
+ sssraid_free_ioq_ptcmds(hdev);
+
+ sssraid_delete_io_queues(hdev);
+ sssraid_disable_admin_queue(hdev, !re_init);
+
+ if (!re_init)
+ sssraid_free_all_queues(hdev);
+
+ sssraid_ioc_disable_intr(hdev);
+ sssraid_cleanup_resources(hdev);
+
+ if (!re_init)
+ sssraid_free_resources(hdev);
+}
+
+int sssraid_soft_reset_handler(struct sssraid_dev *hdev)
+{
+ int retval = 0;
+
+ if (hdev->state != SSSRAID_RESETTING) {
+		ioc_err(hdev, "err, host is not in reset state\n");
+ return retval;
+ }
+
+ ioc_info(hdev, "enter host reset\n");
+
+ sssraid_ioc_disable_intr(hdev);
+ sssraid_cleanup_fwevt_list(hdev);
+
+	/*
+	 * the sssraid_dev_disable -> sssraid_back_all_io path
+	 * is realized here
+	 */
+ sssraid_back_all_io(hdev);
+	/*
+	 * realize sssraid_dev_disable,
+	 * i.e. sssraid_cleanup_ioc(hdev, 1)
+	 */
+ if (hdev->ctrl_config & SSSRAID_CC_ENABLE) {
+ ioc_info(hdev, "start dev_disable\n");
+ sssraid_cleanup_ioc(hdev, 1);
+ }
+
+ retval = sssraid_init_ioc(hdev, 1);
+ if (retval) {
+		ioc_err(hdev, "init ioc failed\n");
+ return retval;
+ }
+
+ sssraid_change_host_state(hdev, SSSRAID_LIVE);
+
+ return retval;
+}
diff --git a/drivers/scsi/sssraid/sssraid_os.c b/drivers/scsi/sssraid/sssraid_os.c
new file mode 100644
index 000000000000..16d2b94d78f3
--- /dev/null
+++ b/drivers/scsi/sssraid/sssraid_os.c
@@ -0,0 +1,2337 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 3SNIC Information Technology, Ltd */
+
+/* 3SNIC RAID SSSXXX Series Linux Driver */
+
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <linux/sort.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/once.h>
+#include <linux/sched/signal.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "sssraid.h"
+#include "sssraid_debug.h"
+
+u32 admin_tmout = 60;
+module_param(admin_tmout, uint, 0644);
+MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
+
+static u32 scmd_tmout_rawdisk = 180;
+module_param(scmd_tmout_rawdisk, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_rawdisk, "scsi command timeout for rawdisk (seconds)");
+
+static u32 scmd_tmout_vd = 180;
+module_param(scmd_tmout_vd, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_vd, "scsi command timeout for vd (seconds)");
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops ioq_depth_ops = {
+ .set = ioq_depth_set,
+ .get = param_get_uint,
+};
+
+u32 io_queue_depth = 1024;
+module_param_cb(io_queue_depth, &ioq_depth_ops, &io_queue_depth, 0644);
+MODULE_PARM_DESC(io_queue_depth, "io queue depth, must be >= 2");
+
+static int logging_level_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+
+ return param_set_byte(val, kp);
+}
+
+static const struct kernel_param_ops logging_level_ops = {
+ .set = logging_level_set,
+ .get = param_get_byte,
+};
+
+static unsigned char logging_level;
+module_param_cb(logging_level, &logging_level_ops, &logging_level, 0644);
+MODULE_PARM_DESC(logging_level, "set log level; default 0 disables logging");
+
+static int small_pool_num_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+ if (n > MAX_SMALL_POOL_NUM)
+ n = MAX_SMALL_POOL_NUM;
+ if (n < 1)
+ n = 1;
+ *((u8 *)kp->arg) = n;
+
+ return 0;
+}
+
+static const struct kernel_param_ops small_pool_num_ops = {
+ .set = small_pool_num_set,
+ .get = param_get_byte,
+};
+
+/*
+ * Small pools are used to hold PRP lists for small IOs. The
+ * spinlock of a single pool was found to be heavily contended
+ * across multiple CPUs, so several pools are used to reduce
+ * the contention.
+ */
+unsigned char small_pool_num = 4;
+module_param_cb(small_pool_num, &small_pool_num_ops, &small_pool_num, 0644);
+MODULE_PARM_DESC(small_pool_num, "set prp small pool num, default 4, MAX 16");
+
+
+enum FW_STAT_CODE {
+ FW_STAT_OK = 0,
+ FW_STAT_NEED_CHECK,
+ FW_STAT_ERROR,
+ FW_STAT_EP_PCIE_ERROR,
+ FW_STAT_NAC_DMA_ERROR,
+ FW_STAT_ABORTED,
+ FW_STAT_NEED_RETRY
+};
+
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60", "NA"};
+
+static const char * const raid_states[] = {
+ "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED", "FORMATTING", "SANITIZING",
+ "INITIALIZING", "INITIALIZE_FAIL", "DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0;
+ int ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+/*
+ * common
+ */
+static struct class *sssraid_class;
+
+struct sssraid_fwevt *sssraid_alloc_fwevt(int len)
+{
+ struct sssraid_fwevt *fwevt;
+
+ fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
+ if (!fwevt)
+ return NULL;
+
+ kref_init(&fwevt->ref_count);
+ return fwevt;
+}
+
+static void sssraid_fwevt_free(struct kref *r)
+{
+ kfree(container_of(r, struct sssraid_fwevt, ref_count));
+}
+
+static void sssraid_fwevt_get(struct sssraid_fwevt *fwevt)
+{
+ kref_get(&fwevt->ref_count);
+}
+
+static void sssraid_fwevt_put(struct sssraid_fwevt *fwevt)
+{
+ kref_put(&fwevt->ref_count, sssraid_fwevt_free);
+}
+
+static void sssraid_fwevt_del_from_list(struct sssraid_dev *hdev,
+ struct sssraid_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdev->fwevt_lock, flags);
+ if (!list_empty(&fwevt->list)) {
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ sssraid_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&hdev->fwevt_lock, flags);
+}
+
+static void sssraid_fwevt_bh(struct sssraid_dev *hdev,
+ struct sssraid_fwevt *fwevt)
+{
+ struct sssraid_completion *cqe;
+
+ hdev->current_event = fwevt;
+ sssraid_fwevt_del_from_list(hdev, fwevt);
+
+ cqe = (struct sssraid_completion *)fwevt->event_data;
+
+ if (!fwevt->process_evt)
+ goto evt_ack;
+
+ sssraid_complete_aen(hdev, cqe);
+
+evt_ack:
+ /* event response put here: event has been handled. */
+ sssraid_send_event_ack(hdev, fwevt->event_id,
+ fwevt->evt_ctx, le16_to_cpu(cqe->cmd_id));
+ sssraid_fwevt_put(fwevt);
+ hdev->current_event = NULL;
+}
+
+static void sssraid_fwevt_worker(struct work_struct *work)
+{
+ struct sssraid_fwevt *fwevt = container_of(work, struct sssraid_fwevt,
+ work);
+ sssraid_fwevt_bh(fwevt->hdev, fwevt);
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ sssraid_fwevt_put(fwevt);
+}
+
+void sssraid_fwevt_add_to_list(struct sssraid_dev *hdev,
+ struct sssraid_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ if (!hdev->fwevt_worker_thread)
+ return;
+
+ spin_lock_irqsave(&hdev->fwevt_lock, flags);
+ /* get fwevt reference count while adding it to fwevt_list */
+ sssraid_fwevt_get(fwevt);
+ INIT_LIST_HEAD(&fwevt->list);
+ list_add_tail(&fwevt->list, &hdev->fwevt_list);
+ INIT_WORK(&fwevt->work, sssraid_fwevt_worker);
+ /* get fwevt reference count while enqueueing it to worker queue */
+ sssraid_fwevt_get(fwevt);
+ queue_work(hdev->fwevt_worker_thread, &fwevt->work);
+ spin_unlock_irqrestore(&hdev->fwevt_lock, flags);
+}
+
+static struct sssraid_fwevt *sssraid_dequeue_fwevt(
+ struct sssraid_dev *hdev)
+{
+ unsigned long flags;
+ struct sssraid_fwevt *fwevt = NULL;
+
+ spin_lock_irqsave(&hdev->fwevt_lock, flags);
+ if (!list_empty(&hdev->fwevt_list)) {
+ fwevt = list_first_entry(&hdev->fwevt_list,
+ struct sssraid_fwevt, list);
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ sssraid_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&hdev->fwevt_lock, flags);
+
+ return fwevt;
+}
+
+void sssraid_cleanup_fwevt_list(struct sssraid_dev *hdev)
+{
+ struct sssraid_fwevt *fwevt = NULL;
+
+ if ((list_empty(&hdev->fwevt_list) && !hdev->current_event) ||
+ !hdev->fwevt_worker_thread)
+ return;
+
+ while ((fwevt = sssraid_dequeue_fwevt(hdev)) ||
+ (fwevt = hdev->current_event)) {
+ /*
+ * Wait on the fwevt to complete. If this returns 1, then
+ * the event was never executed, and we need a put for the
+ * reference the work had on the fwevt.
+ *
+ * If it did execute, we wait for it to finish, and the put will
+ * happen from sssraid_process_fwevt()
+ */
+ if (cancel_work_sync(&fwevt->work)) {
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ sssraid_fwevt_put(fwevt);
+ /*
+ * Put fwevt reference count to neutralize
+ * kref_init increment
+ */
+ sssraid_fwevt_put(fwevt);
+ }
+ }
+}
+
+/*
+ * common 1
+ */
+static int sssraid_npages_prp(struct sssraid_dev *hdev)
+{
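+	/*
+	 * Worst-case PRP pages for a max-size (mdts) transfer: one entry
+	 * per data page plus one for the initial offset, with the last
+	 * slot of every PRP page reserved for the chain link
+	 */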
+ u32 size = 1U << ((hdev->ctrl_info->mdts) * 1U) << 12;
+ u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
+
+ return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, hdev->page_size - PRP_ENTRY_SIZE);
+}
+
+static int sssraid_npages_sgl(struct sssraid_dev *hdev)
+{
+ u32 nsge = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+
+ return DIV_ROUND_UP(nsge * sizeof(struct sssraid_sgl_desc), hdev->page_size);
+}
+
+static u32 sssraid_cmd_size(struct sssraid_dev *hdev)
+{
+ u32 alloc_size = sizeof(__le64 *) * max(sssraid_npages_prp(hdev), sssraid_npages_sgl(hdev));
+
+ ioc_info(hdev, "iod size: %lu, alloc_size: %u\n",
+ sizeof(struct sssraid_iod), alloc_size);
+
+ return sizeof(struct sssraid_iod) + alloc_size;
+}
+
+static int sssraid_setup_prps(struct sssraid_dev *hdev, struct sssraid_iod *iod)
+{
+ struct scatterlist *sg = iod->sg;
+ u64 dma_addr = sg_dma_address(sg);
+ int dma_len = sg_dma_len(sg);
+ __le64 *prp_list, *old_prp_list;
+ int page_size = hdev->page_size;
+ int offset = dma_addr & (page_size - 1);
+ void **list = sssraid_iod_list(iod);
+ int length = iod->length;
+ struct dma_pool *pool;
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ length -= (page_size - offset);
+ if (length <= 0) {
+ iod->first_dma = 0;
+ return 0;
+ }
+
+ dma_len -= (page_size - offset);
+ if (dma_len) {
+ dma_addr += (page_size - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= page_size) {
+ iod->first_dma = dma_addr;
+ return 0;
+ }
+
+ nprps = DIV_ROUND_UP(length, page_size);
+ if (nprps <= (SMALL_POOL_SIZE / PRP_ENTRY_SIZE)) {
+ pool = iod->sqinfo->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(&hdev->pdev->dev, "Allocate first prp_list memory failed\n");
+ iod->first_dma = dma_addr;
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ i = 0;
+ for (;;) {
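+		/*
+		 * Page full: allocate a new PRP page, move the displaced
+		 * entry into it, and turn the old page's last slot into a
+		 * link to the new page
+		 */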
+ if (i == page_size / PRP_ENTRY_SIZE) {
+ old_prp_list = prp_list;
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(&hdev->pdev->dev, "Allocate %dth prp_list memory failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= page_size;
+ dma_addr += page_size;
+ length -= page_size;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ return 0;
+
+bad_sgl:
+ ioc_err(hdev, "Setup prps, invalid SGL for payload: %d nents: %d\n",
+ iod->length, iod->nsge);
+ return -EIO;
+}
+
+static inline bool sssraid_is_rw_scmd(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool sssraid_is_prp(struct sssraid_dev *hdev, struct scsi_cmnd *scmd, u32 nsge)
+{
+ struct scatterlist *sg = scsi_sglist(scmd);
+ u32 page_mask = hdev->page_size - 1;
+ bool is_prp = true;
+ int i = 0;
+
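+	/*
+	 * PRP is only usable when the list is page-contiguous: middle
+	 * elements must be page-aligned with page-multiple lengths, the
+	 * first element must end on a page boundary and the last must
+	 * start on one
+	 */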
+ scsi_for_each_sg(scmd, sg, nsge, i) {
+ if (i != 0 && i != nsge - 1) {
+ if ((sg_dma_len(sg) & page_mask) ||
+ (sg_dma_address(sg) & page_mask)) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == 0) {
+ if ((sg_dma_address(sg) + sg_dma_len(sg)) & page_mask) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == (nsge - 1)) {
+ if (sg_dma_address(sg) & page_mask) {
+ is_prp = false;
+ break;
+ }
+ }
+ }
+
+ return is_prp;
+}
+
+static void sssraid_sgl_set_data(struct sssraid_sgl_desc *sge, struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = SSSRAID_SGL_FMT_DATA_DESC << 4;
+}
+
+static void sssraid_sgl_set_seg(struct sssraid_sgl_desc *sge, dma_addr_t dma_addr, int entries)
+{
+ sge->addr = cpu_to_le64(dma_addr);
+ if (entries <= SGES_PER_PAGE) {
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = SSSRAID_SGL_FMT_LAST_SEG_DESC << 4;
+ } else {
+ sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->type = SSSRAID_SGL_FMT_SEG_DESC << 4;
+ }
+}
+
+static int sssraid_setup_ioq_cmd_sgl(struct sssraid_dev *hdev,
+ struct scsi_cmnd *scmd, struct sssraid_ioq_command *ioq_cmd,
+ struct sssraid_iod *iod)
+{
+ struct sssraid_sgl_desc *sg_list, *link, *old_sg_list;
+ struct scatterlist *sg = scsi_sglist(scmd);
+ void **list = sssraid_iod_list(iod);
+ struct dma_pool *pool;
+ int nsge = iod->nsge;
+ dma_addr_t sgl_dma;
+ int i = 0;
+
+ ioq_cmd->common.flags |= SSSRAID_CMD_FLAG_SGL_METABUF;
+
+ if (nsge == 1) {
+ sssraid_sgl_set_data(&ioq_cmd->common.dptr.sgl, sg);
+ return 0;
+ }
+
+ if (nsge <= (SMALL_POOL_SIZE / sizeof(struct sssraid_sgl_desc))) {
+ pool = iod->sqinfo->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(&hdev->pdev->dev, "Allocate first sgl_list failed\n");
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+
+ list[0] = sg_list;
+ iod->first_dma = sgl_dma;
+ sssraid_sgl_set_seg(&ioq_cmd->common.dptr.sgl, sgl_dma, nsge);
+ do {
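+		/*
+		 * Segment full: allocate a new SGL segment and convert the
+		 * old segment's last descriptor into a link to it
+		 */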
+ if (i == SGES_PER_PAGE) {
+ old_sg_list = sg_list;
+ link = &old_sg_list[SGES_PER_PAGE - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(&hdev->pdev->dev, "Allocate %dth sgl_list failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = sg_list;
+
+ i = 0;
+ memcpy(&sg_list[i++], link, sizeof(*link));
+ sssraid_sgl_set_seg(link, sgl_dma, nsge);
+ }
+
+ sssraid_sgl_set_data(&sg_list[i++], sg);
+ sg = sg_next(sg);
+ } while (--nsge > 0);
+
+ return 0;
+}
+
+static void sssraid_shost_init(struct sssraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u8 domain, bus;
+ u32 dev_func;
+
+ domain = pci_domain_nr(pdev->bus);
+ bus = pdev->bus->number;
+ dev_func = pdev->devfn;
+
+ hdev->shost->nr_hw_queues = hdev->init_done_queue_cnt - 1;
+ hdev->shost->can_queue = (hdev->ioq_depth - SSSRAID_PTCMDS_PERQ);
+
+ hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ /* 512B per sector */
+ hdev->shost->max_sectors = (1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512;
+ hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV;
+ hdev->shost->max_channel = le16_to_cpu(hdev->ctrl_info->max_channel) - 1;
+ hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id);
+ hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun);
+
+ hdev->shost->this_id = -1;
+ hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func;
+ hdev->shost->max_cmd_len = MAX_CDB_LEN;
+ hdev->shost->hostt->cmd_size = sssraid_cmd_size(hdev);
+}
+
+static inline void sssraid_get_tag_from_scmd(struct scsi_cmnd *scmd, u16 *qidx, u16 *cid)
+{
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
+ *qidx = blk_mq_unique_tag_to_hwq(tag) + 1;
+ *cid = blk_mq_unique_tag_to_tag(tag);
+}
+
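+/*
+ * Note: this reads the byte preceding p and masks it off, so p must not
+ * point at the start of a buffer; it is only called on CDB bytes at
+ * offset >= 1, where this is safe
+ */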
+static inline uint32_t get_unaligned_be24(const uint8_t *const p)
+{
+ return get_unaligned_be32(p - 1) & 0xffffffU;
+}
+
+static int sssraid_setup_rw_cmd(struct sssraid_dev *hdev,
+ struct sssraid_rw_command *rw,
+ struct scsi_cmnd *scmd)
+{
+ u32 start_lba_lo, start_lba_hi;
+ u32 datalength = 0;
+ u16 control = 0;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ rw->opcode = SSSRAID_CMD_WRITE;
+ } else if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
+ rw->opcode = SSSRAID_CMD_READ;
+ } else {
+ ioc_err(hdev, "Invalid RW_IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* 6-byte READ(0x08) or WRITE(0x0A) cdb */
+ if (scmd->cmd_len == 6) {
+ datalength = (u32)(scmd->cmnd[4] == 0 ?
+ IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
+ start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
+
+ start_lba_lo &= 0x1FFFFF;
+ }
+
+ /* 10-byte READ(0x28) or WRITE(0x2A) cdb */
+ else if (scmd->cmd_len == 10) {
+ datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SSSRAID_RW_FUA;
+ }
+
+ /* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
+ else if (scmd->cmd_len == 12) {
+ datalength = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SSSRAID_RW_FUA;
+ }
+ /* 16-byte READ(0x88) or WRITE(0x8A) cdb */
+ else if (scmd->cmd_len == 16) {
+ datalength = get_unaligned_be32(&scmd->cmnd[10]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SSSRAID_RW_FUA;
+ }
+
+ if (unlikely(datalength > U16_MAX || datalength == 0)) {
+ ioc_err(hdev, "Invalid IO for illegal transfer data length: %u\n", datalength);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rw->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo);
+ /* 0base for nlb */
+ rw->nlb = cpu_to_le16((u16)(datalength - 1));
+ rw->control = cpu_to_le16(control);
+
+ return 0;
+}
+
+static int sssraid_setup_nonio_cmd(struct sssraid_dev *hdev,
+ struct sssraid_scsi_nonio *scsi_nonio, struct scsi_cmnd *scmd)
+{
+ scsi_nonio->buffer_len = cpu_to_le32(scsi_bufflen(scmd));
+
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ scsi_nonio->opcode = SSSRAID_CMD_NONIO_NONE;
+ break;
+ case DMA_TO_DEVICE:
+ scsi_nonio->opcode = SSSRAID_CMD_NONIO_TODEV;
+ break;
+ case DMA_FROM_DEVICE:
+ scsi_nonio->opcode = SSSRAID_CMD_NONIO_FROMDEV;
+ break;
+ default:
+ ioc_err(hdev, "Invalid NON_IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sssraid_setup_ioq_cmd(struct sssraid_dev *hdev,
+ struct sssraid_ioq_command *ioq_cmd, struct scsi_cmnd *scmd)
+{
+ memcpy(ioq_cmd->common.cdb, scmd->cmnd, scmd->cmd_len);
+ ioq_cmd->common.cdb_len = scmd->cmd_len;
+
+ if (sssraid_is_rw_scmd(scmd))
+ return sssraid_setup_rw_cmd(hdev, &ioq_cmd->rw, scmd);
+ else
+ return sssraid_setup_nonio_cmd(hdev, &ioq_cmd->scsi_nonio, scmd);
+}
+
+static inline void sssraid_init_iod(struct sssraid_iod *iod)
+{
+ iod->nsge = 0;
+ iod->npages = -1;
+ iod->use_sgl = false;
+ WRITE_ONCE(iod->state, SSSRAID_CMD_IDLE);
+}
+
+int sssraid_io_map_data(struct sssraid_dev *hdev, struct sssraid_iod *iod,
+ struct scsi_cmnd *scmd, struct sssraid_ioq_command *ioq_cmd)
+{
+ int retval;
+
+ retval = scsi_dma_map(scmd);
+ if (unlikely(retval < 0))
+ return retval;
+ iod->nsge = retval;
+	/* No data to DMA; this may be a non-RW SCSI command */
+ if (unlikely(iod->nsge == 0))
+ return 0;
+
+ iod->length = scsi_bufflen(scmd);
+ iod->sg = scsi_sglist(scmd);
+ iod->use_sgl = !sssraid_is_prp(hdev, scmd, iod->nsge);
+
+ if (iod->use_sgl) {
+ retval = sssraid_setup_ioq_cmd_sgl(hdev, scmd, ioq_cmd, iod);
+ } else {
+ retval = sssraid_setup_prps(hdev, iod);
+ ioq_cmd->common.dptr.prp1 =
+ cpu_to_le64(sg_dma_address(iod->sg));
+ ioq_cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+ }
+
+ if (retval)
+ scsi_dma_unmap(scmd);
+
+ return retval;
+}
+
+void sssraid_map_status(struct sssraid_iod *iod, struct scsi_cmnd *scmd,
+ struct sssraid_completion *cqe)
+{
+ struct sssraid_dev *hdev = iod->sqinfo->hdev;
+
+ scsi_set_resid(scmd, 0);
+
+ switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) {
+ case FW_STAT_OK:
+ set_host_byte(scmd, DID_OK);
+ break;
+ case FW_STAT_NEED_CHECK:
+ set_host_byte(scmd, DID_OK);
+ scmd->result |= le16_to_cpu(cqe->status) >> 8;
+ if (scmd->result & SAM_STAT_CHECK_CONDITION) {
+ memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(scmd->sense_buffer, iod->sense, SCSI_SENSE_BUFFERSIZE);
+ scmd->result = (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
+ }
+ break;
+ case FW_STAT_ABORTED:
+ set_host_byte(scmd, DID_ABORT);
+ break;
+ case FW_STAT_NEED_RETRY:
+ set_host_byte(scmd, DID_REQUEUE);
+ break;
+ default:
+ set_host_byte(scmd, DID_BAD_TARGET);
+ ioc_warn(hdev, "cid[%d] qid[%d] bad status[0x%x]\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id),
+ le16_to_cpu(cqe->status));
+ break;
+ }
+}
+
+struct sssraid_cmd *sssraid_get_cmd(struct sssraid_dev *hdev, enum sssraid_cmd_type type)
+{
+ struct sssraid_cmd *cmd = NULL;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SSSRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(slock, flags);
+ ioc_err(hdev, "err, cmd[%d] list empty\n", type);
+ return NULL;
+ }
+ cmd = list_entry(head->next, struct sssraid_cmd, list);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(slock, flags);
+
+ WRITE_ONCE(cmd->state, SSSRAID_CMD_IN_FLIGHT);
+
+ return cmd;
+}
+
+static int sssraid_add_device(struct sssraid_dev *hdev, struct sssraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ ioc_info(hdev, "add device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel, le16_to_cpu(device->target), 0);
+ if (sdev) {
+		ioc_warn(hdev, "Device already exists, channel: %d, target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ scsi_device_put(sdev);
+ return -EEXIST;
+ }
+ scsi_add_device(shost, device->channel, le16_to_cpu(device->target), 0);
+ return 0;
+}
+
+static int sssraid_rescan_device(struct sssraid_dev *hdev, struct sssraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ ioc_info(hdev, "rescan device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel, le16_to_cpu(device->target), 0);
+ if (!sdev) {
+		ioc_warn(hdev, "device does not exist, cannot rescan it, channel: %d, target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int sssraid_remove_device(struct sssraid_dev *hdev, struct sssraid_dev_info *org_device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ ioc_info(hdev, "remove device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(org_device->hdid), le16_to_cpu(org_device->target),
+ org_device->channel, org_device->lun, org_device->attr);
+
+ sdev = scsi_device_lookup(shost, org_device->channel, le16_to_cpu(org_device->target), 0);
+ if (!sdev) {
+		ioc_warn(hdev, "device does not exist, cannot remove it, channel: %d, target_id: %d, lun: %d\n",
+ org_device->channel, le16_to_cpu(org_device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int luntarget_cmp_func(const void *l, const void *r)
+{
+ const struct sssraid_dev_info *ln = l;
+ const struct sssraid_dev_info *rn = r;
+ int l_attr = SSSRAID_DEV_INFO_ATTR_BOOT(ln->attr);
+ int r_attr = SSSRAID_DEV_INFO_ATTR_BOOT(rn->attr);
+
+ /* boot first */
+ if (l_attr != r_attr)
+ return (r_attr - l_attr);
+
+ if (ln->channel == rn->channel)
+ return le16_to_cpu(ln->target) - le16_to_cpu(rn->target);
+
+ return ln->channel - rn->channel;
+}
+
+void sssraid_scan_disk(struct sssraid_dev *hdev)
+{
+ struct sssraid_dev_info *devices, *org_devices;
+ struct sssraid_dev_info *sortdevice;
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ u8 flag, org_flag;
+ int i, ret;
+ int count = 0;
+
+ devices = kcalloc(nd, sizeof(struct sssraid_dev_info), GFP_KERNEL);
+ if (!devices)
+ return;
+
+ sortdevice = kcalloc(nd, sizeof(struct sssraid_dev_info), GFP_KERNEL);
+ if (!sortdevice)
+ goto free_list;
+
+ ret = sssraid_get_dev_list(hdev, devices);
+ if (ret)
+ goto free_all;
+ org_devices = hdev->devices;
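+	/*
+	 * Diff the fresh firmware list against the cached one: add
+	 * newly-valid devices, rescan changed ones and remove those no
+	 * longer valid
+	 */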
+ for (i = 0; i < nd; i++) {
+ org_flag = org_devices[i].flag;
+ flag = devices[i].flag;
+
+ dbgprint(hdev, "i: %d, org_flag: 0x%x, flag: 0x%x\n", i, org_flag, flag);
+
+ if (SSSRAID_DEV_INFO_FLAG_VALID(flag)) {
+ if (!SSSRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ memcpy(&org_devices[i], &devices[i],
+ sizeof(struct sssraid_dev_info));
+ memcpy(&sortdevice[count++], &devices[i],
+ sizeof(struct sssraid_dev_info));
+ up_write(&hdev->devices_rwsem);
+ } else if (SSSRAID_DEV_INFO_FLAG_CHANGE(flag)) {
+ sssraid_rescan_device(hdev, &devices[i]);
+ }
+ } else {
+ if (SSSRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ org_devices[i].flag &= 0xfe;
+ up_write(&hdev->devices_rwsem);
+ sssraid_remove_device(hdev, &org_devices[i]);
+ }
+ }
+ }
+
+	ioc_info(hdev, "scan work: %d devices to add\n", count);
+
+ sort(sortdevice, count, sizeof(sortdevice[0]), luntarget_cmp_func, NULL);
+
+ for (i = 0; i < count; i++)
+ sssraid_add_device(hdev, &sortdevice[i]);
+
+free_all:
+ kfree(sortdevice);
+free_list:
+ kfree(devices);
+}
+
+static int sssraid_wait_abnl_cmd_done(struct sssraid_iod *iod)
+{
+ u16 times = 0;
+
+ do {
+ if (READ_ONCE(iod->state) == SSSRAID_CMD_TMO_COMPLETE)
+ break;
+ msleep(500);
+ times++;
+ } while (times <= SSSRAID_WAIT_ABNL_CMD_TIMEOUT);
+
+	/* the command still has not completed after a successful abort/reset */
+ if (times >= SSSRAID_WAIT_ABNL_CMD_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static bool sssraid_check_scmd_completed(struct scsi_cmnd *scmd)
+{
+ struct sssraid_dev *hdev = shost_priv(scmd->device->host);
+ struct sssraid_iod *iod = scsi_cmd_priv(scmd);
+ struct sssraid_squeue *sqinfo;
+ u16 hwq, cid;
+
+ sssraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ sqinfo = &hdev->sqinfo[hwq];
+ if (READ_ONCE(iod->state) == SSSRAID_CMD_COMPLETE || sssraid_poll_cq(hdev, hwq, cid)) {
+ ioc_warn(hdev, "cid[%d] qidx[%d] has been completed\n",
+ cid, sqinfo->qidx);
+ return true;
+ }
+ return false;
+}
+
+static int sssraid_scsi_reset(struct scsi_cmnd *scmd, enum sssraid_scsi_rst_type rst)
+{
+ struct sssraid_dev *hdev = shost_priv(scmd->device->host);
+ struct sssraid_iod *iod = scsi_cmd_priv(scmd);
+ struct sssraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SSSRAID_LIVE || !sssraid_wait_abnl_cmd_done(iod) ||
+ sssraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ sssraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ ioc_warn(hdev, "cid[%d] qidx[%d] timeout, %s reset\n", cid, hwq,
+ rst ? "bus" : "target");
+ ret = sssraid_send_reset_cmd(hdev, rst, hostdata->hdid);
+ if (ret == 0) {
+ ret = sssraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ ioc_warn(hdev, "cid[%d] qidx[%d] %s reset failed, not found\n",
+ cid, hwq, rst ? "bus" : "target");
+ return FAILED;
+ }
+
+ ioc_warn(hdev, "cid[%d] qidx[%d] %s reset success\n", cid, hwq,
+ rst ? "bus" : "target");
+ return SUCCESS;
+ }
+
+ ioc_warn(hdev, "cid[%d] qidx[%d] ret[%d] %s reset failed\n", cid, hwq, ret,
+ rst ? "bus" : "target");
+ return FAILED;
+}
+
+bool sssraid_change_host_state(struct sssraid_dev *hdev, enum sssraid_state newstate)
+{
+ unsigned long flags;
+ enum sssraid_state oldstate;
+ bool change = false;
+
+ spin_lock_irqsave(&hdev->state_lock, flags);
+
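+	/* only the transitions enumerated below may flip 'change' */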
+ oldstate = hdev->state;
+ switch (newstate) {
+ case SSSRAID_LIVE:
+ switch (oldstate) {
+ case SSSRAID_NEW:
+ case SSSRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SSSRAID_RESETTING:
+ switch (oldstate) {
+ case SSSRAID_LIVE:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SSSRAID_DELETING:
+ if (oldstate != SSSRAID_DELETING)
+ change = true;
+ break;
+ case SSSRAID_DEAD:
+ switch (oldstate) {
+ case SSSRAID_NEW:
+ case SSSRAID_LIVE:
+ case SSSRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (change)
+ hdev->state = newstate;
+ spin_unlock_irqrestore(&hdev->state_lock, flags);
+
+ ioc_info(hdev, "[%d]->[%d], change[%d]\n", oldstate, newstate, change);
+
+ return change;
+}
+
+static int sssraid_get_qd_by_disk(u8 attr)
+{
+ switch (SSSRAID_DEV_DISK_TYPE(attr)) {
+ case SSSRAID_SAS_HDD_VD:
+ case SSSRAID_SATA_HDD_VD:
+ return SSSRAID_HDD_VD_QD;
+ case SSSRAID_SAS_SSD_VD:
+ case SSSRAID_SATA_SSD_VD:
+ case SSSRAID_NVME_SSD_VD:
+ return SSSRAID_SSD_VD_QD;
+ case SSSRAID_SAS_HDD_PD:
+ case SSSRAID_SATA_HDD_PD:
+ return SSSRAID_HDD_PD_QD;
+ case SSSRAID_SAS_SSD_PD:
+ case SSSRAID_SATA_SSD_PD:
+ case SSSRAID_NVME_SSD_PD:
+ return SSSRAID_SSD_PD_QD;
+ default:
+ return MAX_CMD_PER_DEV;
+ }
+}
+
+static int sssraid_match_dev(struct sssraid_dev *hdev, u16 idx, struct scsi_device *sdev)
+{
+ if (SSSRAID_DEV_INFO_FLAG_VALID(hdev->devices[idx].flag)) {
+ if (sdev->channel == hdev->devices[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+ sdev->lun < hdev->devices[idx].lun) {
+ ioc_info(hdev, "Match device success, channel:target:lun[%d:%d:%d]\n",
+ hdev->devices[idx].channel,
+ hdev->devices[idx].target,
+ hdev->devices[idx].lun);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int sssraid_bsg_map_data(struct sssraid_dev *hdev, struct bsg_job *job,
+ struct sssraid_admin_command *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct sssraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ iod->sg = job->request_payload.sg_list;
+ iod->nsge = job->request_payload.sg_cnt;
+ iod->length = job->request_payload.payload_len;
+ iod->use_sgl = false;
+ iod->npages = -1;
+
+ if (!iod->nsge)
+ goto out;
+
+ ret = dma_map_sg_attrs(&hdev->pdev->dev, iod->sg, iod->nsge, dma_dir, DMA_ATTR_NO_WARN);
+ if (!ret)
+ goto out;
+
+ ret = sssraid_setup_prps(hdev, iod);
+ if (ret)
+ goto unmap;
+
+ cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+
+ return 0;
+
+unmap:
+ dma_unmap_sg(&hdev->pdev->dev, iod->sg, iod->nsge, dma_dir);
+out:
+ return ret;
+}
+
+static void sssraid_bsg_unmap_data(struct sssraid_dev *hdev, struct bsg_job *job)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct sssraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ if (iod->nsge)
+ dma_unmap_sg(&hdev->pdev->dev, iod->sg, iod->nsge, dma_dir);
+
+ sssraid_free_iod_res(hdev, iod);
+}
+
+void sssraid_put_cmd(struct sssraid_dev *hdev, struct sssraid_cmd *cmd,
+ enum sssraid_cmd_type type)
+{
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SSSRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ WRITE_ONCE(cmd->state, SSSRAID_CMD_IDLE);
+ list_add_tail(&cmd->list, head);
+ spin_unlock_irqrestore(slock, flags);
+}
+
+static int sssraid_user_admin_cmd(struct sssraid_dev *hdev, struct bsg_job *job)
+{
+ struct sssraid_bsg_request *bsg_req = job->request;
+ struct sssraid_passthru_common_cmd *cmd = &(bsg_req->admcmd);
+ struct sssraid_admin_command admin_cmd;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+ u32 result[2] = {0};
+ int status;
+
+ if (hdev->state >= SSSRAID_RESETTING) {
+ ioc_err(hdev, "err, host state:[%d] is not right\n",
+ hdev->state);
+ return -EBUSY;
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = cmd->opcode;
+ admin_cmd.common.flags = cmd->flags;
+ admin_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ admin_cmd.common.cdw2[0] = cpu_to_le32(cmd->cdw2);
+ admin_cmd.common.cdw2[1] = cpu_to_le32(cmd->cdw3);
+ admin_cmd.common.cdw10 = cpu_to_le32(cmd->cdw10);
+ admin_cmd.common.cdw11 = cpu_to_le32(cmd->cdw11);
+ admin_cmd.common.cdw12 = cpu_to_le32(cmd->cdw12);
+ admin_cmd.common.cdw13 = cpu_to_le32(cmd->cdw13);
+ admin_cmd.common.cdw14 = cpu_to_le32(cmd->cdw14);
+ admin_cmd.common.cdw15 = cpu_to_le32(cmd->cdw15);
+
+ status = sssraid_bsg_map_data(hdev, job, &admin_cmd);
+ if (status) {
+ ioc_err(hdev, "err, map data failed\n");
+ return status;
+ }
+
+ status = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, &result[0], &result[1], timeout);
+ if (status >= 0) {
+ job->reply_len = sizeof(result);
+ memcpy(job->reply, result, sizeof(result));
+ }
+
+ if (status)
+ ioc_info(hdev, "opcode[0x%x] subopcode[0x%x], status[0x%x] result0[0x%x] result1[0x%x]\n",
+ cmd->opcode, cmd->info_0.subopcode, status, result[0], result[1]);
+
+ sssraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
+static int sssraid_submit_ioq_sync_cmd(struct sssraid_dev *hdev, struct sssraid_ioq_command *cmd,
+ u32 *result, u32 *reslen, u32 timeout)
+{
+ int ret;
+ dma_addr_t sense_dma;
+ struct sssraid_squeue *sqinfo;
+ void *sense_addr = NULL;
+ struct sssraid_cmd *pt_cmd = sssraid_get_cmd(hdev, SSSRAID_CMD_IOPT);
+
+ if (!pt_cmd) {
+ ioc_err(hdev, "err, get sqinfo cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&pt_cmd->cmd_done);
+
+ sqinfo = &hdev->sqinfo[pt_cmd->qid];
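+ /* each cid owns a fixed SCSI_SENSE_BUFFERSIZE slot in the queue's sense pool */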
+ ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
+ sense_addr = sqinfo->sense + ret;
+ sense_dma = sqinfo->sense_dma_addr + ret;
+
+ cmd->common.sense_addr = cpu_to_le64(sense_dma);
+ cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+ cmd->common.command_id = cpu_to_le16(pt_cmd->cid);
+
+ sssraid_submit_cmd(sqinfo, cmd);
+
+ if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
+ ioc_err(hdev, "cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ pt_cmd->cid, pt_cmd->qid, cmd->common.opcode,
+ (le32_to_cpu(cmd->common.cdw3[0]) & 0xffff));
+
+ /* reset controller if admin timeout */
+ sssraid_adm_timeout(hdev, pt_cmd);
+
+ sssraid_put_cmd(hdev, pt_cmd, SSSRAID_CMD_IOPT);
+ return -ETIME;
+ }
+
+ if (result && reslen) {
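+ /* the completion carried sense data; hand it back as the reply payload */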
+ if ((pt_cmd->status & 0x17f) == 0x101) {
+ memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+ *reslen = SCSI_SENSE_BUFFERSIZE;
+ }
+ }
+
+ sssraid_put_cmd(hdev, pt_cmd, SSSRAID_CMD_IOPT);
+
+ return pt_cmd->status;
+}
+
+static int sssraid_user_ioq_cmd(struct sssraid_dev *hdev, struct bsg_job *job)
+{
+ struct sssraid_bsg_request *bsg_req = (struct sssraid_bsg_request *)(job->request);
+ struct sssraid_ioq_passthru_cmd *cmd = &(bsg_req->ioqcmd);
+ struct sssraid_ioq_command ioq_cmd;
+ int status = 0;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+
+ if (cmd->data_len > IOQ_PT_DATA_LEN) {
+ ioc_err(hdev, "data len bigger than 4k\n");
+ return -EFAULT;
+ }
+
+ if (hdev->state != SSSRAID_LIVE) {
+ ioc_err(hdev, "err, host state:[%d] is not live\n",
+ hdev->state);
+ return -EBUSY;
+ }
+
+ ioc_info(hdev, "opcode[0x%x] subopcode[0x%x] init, datalen[%d]\n",
+ cmd->opcode, cmd->info_1.subopcode, cmd->data_len);
+
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.common.opcode = cmd->opcode;
+ ioq_cmd.common.flags = cmd->flags;
+ ioq_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ ioq_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+ ioq_cmd.common.cdb_len = cmd->info_0.cdb_len;
+ ioq_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+ ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+ ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+ ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+ ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+ ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+ ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+ ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+ ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+ ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+ memcpy(ioq_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+ ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+ ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+ ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+ ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+ status = sssraid_bsg_map_data(hdev, job, (struct sssraid_admin_command *)&ioq_cmd);
+ if (status) {
+ ioc_err(hdev, "err, map data failed\n");
+ return status;
+ }
+
+ status = sssraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, job->reply, &job->reply_len, timeout);
+ if (status)
+ ioc_info(hdev, "opcode[0x%x] subopcode[0x%x], status[0x%x], reply_len[%d]\n",
+ cmd->opcode, cmd->info_1.subopcode, status, job->reply_len);
+
+ sssraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
+
+/* bsg dispatch user command */
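+/*
+ * Entry point of the bsg request queue registered in sssraid_probe():
+ * userspace opens /dev/bsg/<name> and submits an SG_IO sg_io_v4 request
+ * whose request buffer holds a struct sssraid_bsg_request.
+ */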
+static int sssraid_bsg_host_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = dev_to_shost(job->dev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct sssraid_bsg_request *bsg_req = job->request;
+ int ret = -ENOMSG;
+
+ job->reply_len = 0;
+
+ if (bsg_req == NULL || job->request_len != sizeof(struct sssraid_bsg_request)) {
+ bsg_job_done(job, ret, 0);
+ return 0;
+ }
+ dbgprint(hdev, "bsg msgcode[%d] msglen[%d] timeout[%d];"
+ "reqnsge[%d], reqlen[%d]\n",
+ bsg_req->msgcode, job->request_len, rq->timeout,
+ job->request_payload.sg_cnt, job->request_payload.payload_len);
+
+ switch (bsg_req->msgcode) {
+ case SSSRAID_BSG_ADM:
+ ret = sssraid_user_admin_cmd(hdev, job);
+ break;
+ case SSSRAID_BSG_IOQ:
+ ret = sssraid_user_ioq_cmd(hdev, job);
+ break;
+ default:
+ ioc_info(hdev, "unsupport msgcode[%d]\n", bsg_req->msgcode);
+ break;
+ }
+
+ if (ret > 0)
+ ret = ret | (ret << 8);
+
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+static inline void sssraid_remove_bsg(struct sssraid_dev *hdev)
+{
+ if (hdev->bsg_queue) {
+ bsg_unregister_queue(hdev->bsg_queue);
+ blk_cleanup_queue(hdev->bsg_queue);
+ }
+}
+
+static void sssraid_back_fault_cqe(struct sssraid_squeue *sqinfo, struct sssraid_completion *cqe)
+{
+ struct sssraid_dev *hdev = sqinfo->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct sssraid_iod *iod;
+ struct request *req;
+
+ tags = hdev->shost->tag_set.tags[sqinfo->qidx - 1];
+ req = blk_mq_tag_to_rq(tags, le16_to_cpu(cqe->cmd_id));
+ if (unlikely(!req || !blk_mq_request_started(req)))
+ return;
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ if (READ_ONCE(iod->state) != SSSRAID_CMD_IN_FLIGHT &&
+ READ_ONCE(iod->state) != SSSRAID_CMD_TIMEOUT)
+ return;
+
+ WRITE_ONCE(iod->state, SSSRAID_CMD_TMO_COMPLETE);
+ set_host_byte(scmd, DID_NO_CONNECT);
+ if (iod->nsge)
+ scsi_dma_unmap(scmd);
+ sssraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+ ioc_warn(hdev, "Back fault CQE, cid[%d] qidx[%d]\n",
+ le16_to_cpu(cqe->cmd_id), sqinfo->qidx);
+}
+
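+/*
+ * Fail every started scsi command with DID_NO_CONNECT and complete all
+ * in-flight synchronous admin/ioq passthrough commands; called when the
+ * controller is no longer reachable.
+ */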
+void sssraid_back_all_io(struct sssraid_dev *hdev)
+{
+ int i, j;
+ struct sssraid_squeue *sqinfo;
+ struct sssraid_completion cqe = { 0 };
+
+ scsi_block_requests(hdev->shost);
+
+ for (i = 1; i <= hdev->shost->nr_hw_queues; i++) {
+ sqinfo = &hdev->sqinfo[i];
+ for (j = 0; j < hdev->scsi_qd; j++) {
+ cqe.cmd_id = cpu_to_le16(j);
+ sssraid_back_fault_cqe(sqinfo, &cqe);
+ }
+ }
+
+ scsi_unblock_requests(hdev->shost);
+
+ j = SSSRAID_AQ_BLK_MQ_DEPTH;
+ for (i = 0; i < j; i++) {
+ if (READ_ONCE(hdev->adm_cmds[i].state) == SSSRAID_CMD_IN_FLIGHT) {
+ ioc_info(hdev, "Back adm, cid[%d]\n", i);
+ hdev->adm_cmds[i].status = 0xFFFF;
+ WRITE_ONCE(hdev->adm_cmds[i].state, SSSRAID_CMD_COMPLETE);
+ complete(&(hdev->adm_cmds[i].cmd_done));
+ }
+ }
+
+ j = SSSRAID_NR_IOQ_PTCMDS;
+ for (i = 0; i < j; i++) {
+ if (READ_ONCE(hdev->ioq_ptcmds[i].state) == SSSRAID_CMD_IN_FLIGHT) {
+ hdev->ioq_ptcmds[i].status = 0xFFFF;
+ WRITE_ONCE(hdev->ioq_ptcmds[i].state, SSSRAID_CMD_COMPLETE);
+ complete(&(hdev->ioq_ptcmds[i].cmd_done));
+ }
+ }
+}
+
+/* callbacks for scsi_host_template sssraid_driver_template */
+static int sssraid_scan_finished(struct Scsi_Host *shost,
+ unsigned long time)
+{
+ struct sssraid_dev *hdev = shost_priv(shost);
+
+ sssraid_scan_disk(hdev);
+
+ return 1;
+}
+
+/* eh_target_reset_handler call back */
+static int sssraid_eh_target_reset(struct scsi_cmnd *scmd)
+{
+ return sssraid_scsi_reset(scmd, SSSRAID_RESET_TARGET);
+}
+
+/* eh_bus_reset_handler call back */
+static int sssraid_bus_reset_handler(struct scsi_cmnd *scmd)
+{
+ return sssraid_scsi_reset(scmd, SSSRAID_RESET_BUS);
+}
+
+/* eh_host_reset_handler call back */
+static int sssraid_eh_host_reset(struct scsi_cmnd *scmd)
+{
+ u16 hwq, cid;
+ struct sssraid_dev *hdev = shost_priv(scmd->device->host);
+
+ scsi_print_command(scmd);
+ if (hdev->state != SSSRAID_LIVE || sssraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ sssraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ ioc_warn(hdev, "cid[%d] qidx[%d] host reset\n", cid, hwq);
+
+ /*
+ * The old sssraid_reset_work_sync path (queue_work(reset_work)
+ * followed by flush_work to synchronize) is unnecessary here;
+ * the reset is handled synchronously below.
+ */
+ if (!sssraid_change_host_state(hdev, SSSRAID_RESETTING)) {
+ ioc_info(hdev, "can't change to reset state\n");
+ return FAILED;
+ }
+ if (sssraid_soft_reset_handler(hdev)) {
+ ioc_warn(hdev, "cid[%d] qidx[%d] host reset failed\n", cid, hwq);
+ return FAILED;
+ }
+
+ ioc_warn(hdev, "cid[%d] qidx[%d] host reset success\n", cid, hwq);
+
+ return SUCCESS;
+}
+
+/* host_reset call back */
+static int sssraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ int ret;
+ struct sssraid_dev *hdev = shost_priv(shost);
+
+ ioc_info(hdev, "start sysfs host reset cmd\n");
+ if (!sssraid_change_host_state(hdev, SSSRAID_RESETTING)) {
+ ioc_info(hdev, "can't change to reset state\n");
+ return -EBUSY;
+ }
+ ret = sssraid_soft_reset_handler(hdev);
+ ioc_info(hdev, "stop sysfs host reset cmd[%d]\n", ret);
+
+ return ret;
+}
+
+/* queuecommand call back */
+static int sssraid_qcmd(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct sssraid_iod *iod = scsi_cmd_priv(scmd);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ struct scsi_device *sdev = scmd->device;
+ struct sssraid_sdev_hostdata *hostdata = sdev->hostdata;
+ u16 hwq, cid;
+ struct sssraid_squeue *sq;
+ struct sssraid_ioq_command ioq_cmd;
+ int retval;
+
+ if (unlikely(hdev->state == SSSRAID_RESETTING))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (unlikely(hdev->state != SSSRAID_LIVE)) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (unlikely(hdev->logging_level & SSSRAID_DEBUG))
+ scsi_print_command(scmd);
+
+ sssraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ hostdata = sdev->hostdata;
+ sq = &hdev->sqinfo[hwq];
+
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.rw.hdid = cpu_to_le32(hostdata->hdid);
+ ioq_cmd.rw.command_id = cpu_to_le16(cid);
+
+ retval = sssraid_setup_ioq_cmd(hdev, &ioq_cmd, scmd);
+ if (unlikely(retval)) {
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ iod->sense = sq->sense + retval;
+ iod->sense_dma = sq->sense_dma_addr + retval;
+ ioq_cmd.common.sense_addr = cpu_to_le64(iod->sense_dma);
+ ioq_cmd.common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+
+ sssraid_init_iod(iod);
+
+ iod->sqinfo = sq;
+ retval = sssraid_io_map_data(hdev, iod, scmd, &ioq_cmd);
+ if (unlikely(retval)) {
+ ioc_err(hdev, "sssraid_io_map_data Err.\n");
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ retval = 0;
+ goto deinit_iod;
+ }
+
+ WRITE_ONCE(iod->state, SSSRAID_CMD_IN_FLIGHT);
+ sssraid_submit_cmd(sq, &ioq_cmd);
+
+ return 0;
+
+deinit_iod:
+ sssraid_free_iod_res(hdev, iod);
+ return retval;
+}
+
+/* change_queue_depth call back:
+ * keep as old
+ */
+
+/* slave_configure call back */
+static int sssraid_slave_configure(struct scsi_device *sdev)
+{
+ int qd = MAX_CMD_PER_DEV;
+ unsigned int timeout = scmd_tmout_rawdisk * HZ;
+ struct sssraid_dev *hdev = shost_priv(sdev->host);
+ struct sssraid_sdev_hostdata *hostdata = sdev->hostdata;
+ u32 max_sec = sdev->host->max_sectors;
+
+ if (hostdata) {
+ if (SSSRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ timeout = scmd_tmout_vd * HZ;
+ else if (SSSRAID_DEV_INFO_ATTR_RAWDISK(hostdata->attr))
+ timeout = scmd_tmout_rawdisk * HZ;
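+ /* max_io_kb is in KiB; the shift converts it to 512-byte sectors */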
+ max_sec = hostdata->max_io_kb << 1;
+ qd = sssraid_get_qd_by_disk(hostdata->attr);
+ } else {
+ ioc_err(hdev, "err, sdev->hostdata is null\n");
+ }
+
+ blk_queue_rq_timeout(sdev->request_queue, timeout);
+ sdev->eh_timeout = timeout;
+ scsi_change_queue_depth(sdev, qd);
+
+ if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
+ max_sec = sdev->host->max_sectors;
+
+ blk_queue_max_hw_sectors(sdev->request_queue, max_sec);
+
+ ioc_info(hdev, "sdev->channel:id:lun[%d:%d:%lld], scmd_timeout[%d]s, maxsec[%d]\n",
+ sdev->channel, sdev->id, sdev->lun, timeout / HZ, max_sec);
+
+ return 0;
+}
+
+/* slave_alloc call back */
+static int sssraid_slave_alloc(struct scsi_device *sdev)
+{
+ struct sssraid_sdev_hostdata *hostdata;
+ struct sssraid_dev *hdev;
+ u16 idx;
+
+ hdev = shost_priv(sdev->host);
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+ if (!hostdata) {
+ ioc_err(hdev, "Alloc scsi host data memory failed\n");
+ return -ENOMEM;
+ }
+
+ down_read(&hdev->devices_rwsem);
+ for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) {
+ if (sssraid_match_dev(hdev, idx, sdev))
+ goto scan_host;
+ }
+ up_read(&hdev->devices_rwsem);
+
+ kfree(hostdata);
+ return -ENXIO;
+
+scan_host:
+ hostdata->hdid = le32_to_cpu(hdev->devices[idx].hdid);
+ hostdata->max_io_kb = le16_to_cpu(hdev->devices[idx].max_io_kb);
+ hostdata->attr = hdev->devices[idx].attr;
+ hostdata->flag = hdev->devices[idx].flag;
+ hostdata->rg_id = 0xff;
+ sdev->hostdata = hostdata;
+ up_read(&hdev->devices_rwsem);
+ return 0;
+}
+
+/* slave_destroy call back */
+static void sssraid_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+/* eh_timed_out call back */
+static enum blk_eh_timer_return sssraid_scmd_timeout(struct scsi_cmnd *scmd)
+{
+ struct sssraid_iod *iod = scsi_cmd_priv(scmd);
+ unsigned int timeout = scmd->device->request_queue->rq_timeout;
+
+ if (sssraid_check_scmd_completed(scmd))
+ goto out;
+
+ if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) {
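+ /*
+ * Claim the command for error handling; if the normal completion
+ * path wins the cmpxchg race, simply rearm the timer.
+ */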
+ if (cmpxchg(&iod->state, SSSRAID_CMD_IN_FLIGHT, SSSRAID_CMD_TIMEOUT) ==
+ SSSRAID_CMD_IN_FLIGHT) {
+ return BLK_EH_DONE;
+ }
+ }
+out:
+ return BLK_EH_RESET_TIMER;
+}
+
+/* eh_abort_handler call back */
+static int sssraid_abort_handler(struct scsi_cmnd *scmd)
+{
+ struct sssraid_dev *hdev = shost_priv(scmd->device->host);
+ struct sssraid_iod *iod = scsi_cmd_priv(scmd);
+ struct sssraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SSSRAID_LIVE || !sssraid_wait_abnl_cmd_done(iod) ||
+ sssraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ sssraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ ioc_warn(hdev, "cid[%d] qidx[%d] timeout, aborting\n", cid, hwq);
+ ret = sssraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
+ if (ret != -ETIME) {
+ ret = sssraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ ioc_warn(hdev, "cid[%d] qidx[%d] abort failed, not found\n", cid, hwq);
+ return FAILED;
+ }
+ ioc_warn(hdev, "cid[%d] qidx[%d] abort succ\n", cid, hwq);
+ return SUCCESS;
+ }
+ ioc_warn(hdev, "cid[%d] qidx[%d] abort failed, timeout\n", cid, hwq);
+ return FAILED;
+}
+
+static ssize_t csts_pp_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_PP_MASK);
+ ret >>= SSSRAID_CSTS_PP_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_shst_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_SHST_MASK);
+ ret >>= SSSRAID_CSTS_SHST_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_cfs_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_CFS_MASK);
+ ret >>= SSSRAID_CSTS_CFS_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_rdy_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev))
+ ret = (readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_RDY);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t fw_version_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fr);
+}
+
+static DEVICE_ATTR_RO(csts_pp);
+static DEVICE_ATTR_RO(csts_shst);
+static DEVICE_ATTR_RO(csts_cfs);
+static DEVICE_ATTR_RO(csts_rdy);
+static DEVICE_ATTR_RO(fw_version);
+
+static struct device_attribute *sssraid_host_attrs[] = {
+ &dev_attr_csts_pp,
+ &dev_attr_csts_shst,
+ &dev_attr_csts_cfs,
+ &dev_attr_csts_rdy,
+ &dev_attr_fw_version,
+ NULL,
+};
+
+static int sssraid_get_vd_info(struct sssraid_dev *hdev, struct sssraid_vd_info *vd_info, u16 vid)
+{
+ struct sssraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ if (hdev->state >= SSSRAID_RESETTING) {
+ ioc_err(hdev, "err, host state[%d] is not right\n", hdev->state);
+ return -EBUSY;
+ }
+
+ data_ptr = dma_alloc_coherent(&hdev->pdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+ admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(vd_info, data_ptr, sizeof(struct sssraid_vd_info));
+
+ dma_free_coherent(&hdev->pdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int sssraid_get_bgtask(struct sssraid_dev *hdev, struct sssraid_bgtask *bgtask)
+{
+ struct sssraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ if (hdev->state >= SSSRAID_RESETTING) {
+ ioc_err(hdev, "err, host state[%d] is not right\n", hdev->state);
+ return -EBUSY;
+ }
+
+ data_ptr = dma_alloc_coherent(&hdev->pdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(bgtask, data_ptr, sizeof(struct sssraid_bgtask));
+
+ dma_free_coherent(&hdev->pdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct sssraid_dev *hdev;
+ struct sssraid_vd_info *vd_info;
+ struct sssraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SSSRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+ }
+
+ ret = sssraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+ ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+ vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct sssraid_dev *hdev;
+ struct sssraid_vd_info *vd_info;
+ struct sssraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SSSRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+ }
+
+ ret = sssraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret) {
+ vd_info->vd_status = 0;
+ vd_info->rg_id = 0xff;
+ }
+
+ ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ? vd_info->vd_status : 0;
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct sssraid_dev *hdev;
+ struct sssraid_vd_info *vd_info;
+ struct sssraid_bgtask *bgtask;
+ struct sssraid_sdev_hostdata *hostdata;
+ u8 rg_id, i, progress = 0;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SSSRAID_DEV_INFO_ATTR_VD(hostdata->attr)) {
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+ }
+
+ ret = sssraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ goto out;
+
+ rg_id = vd_info->rg_id;
+
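+ /* reuse the 4 KiB vd_info buffer; struct sssraid_bgtask fits inside it */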
+ bgtask = (struct sssraid_bgtask *)vd_info;
+ ret = sssraid_get_bgtask(hdev, bgtask);
+ if (ret)
+ goto out;
+ for (i = 0; i < bgtask->task_num; i++) {
+ if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+ (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+ progress = bgtask->bgtask[i].progress;
+ }
+
+out:
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+
+static struct device_attribute *sssraid_dev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_raid_state,
+ &dev_attr_raid_resync,
+ NULL,
+};
+
+static struct scsi_host_template sssraid_driver_template = {
+ .module = THIS_MODULE,
+ .name = "3SNIC Logic sssraid driver",
+ .proc_name = "sssraid",
+ .queuecommand = sssraid_qcmd,
+ .slave_alloc = sssraid_slave_alloc,
+ .slave_destroy = sssraid_slave_destroy,
+ .slave_configure = sssraid_slave_configure,
+ .scan_finished = sssraid_scan_finished,
+ .eh_timed_out = sssraid_scmd_timeout,
+ .eh_abort_handler = sssraid_abort_handler,
+ .eh_target_reset_handler = sssraid_eh_target_reset,
+ .eh_bus_reset_handler = sssraid_bus_reset_handler,
+ .eh_host_reset_handler = sssraid_eh_host_reset,
+ .change_queue_depth = scsi_change_queue_depth,
+ .host_tagset = 0,
+ .this_id = -1,
+ .unchecked_isa_dma = 0,
+ .shost_attrs = sssraid_host_attrs,
+ .sdev_attrs = sssraid_dev_attrs,
+ .host_reset = sssraid_sysfs_host_reset,
+};
+
+/**
+ * sssraid_probe - PCI probe callback
+ * @pdev: PCI device instance
+ * @id: PCI device ID details
+ *
+ * Controller initialization routine: allocate the per-adapter
+ * instance through shost_priv(), initialize controller-specific
+ * data structures, initialize the controller hardware, and add
+ * the shost to the SCSI subsystem.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int
+sssraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct sssraid_dev *hdev;
+ struct Scsi_Host *shost;
+ int node;
+ char bsg_name[15];
+ int retval = 0;
+
+ node = dev_to_node(&pdev->dev);
+ if (node == NUMA_NO_NODE) {
+ node = first_memory_node;
+ set_dev_node(&pdev->dev, node);
+ }
+
+ shost = scsi_host_alloc(&sssraid_driver_template, sizeof(*hdev));
+ if (!shost) {
+ retval = -ENODEV;
+ dev_err(&pdev->dev, "Failed to allocate scsi host\n");
+ goto shost_failed;
+ }
+
+ hdev = shost_priv(shost);
+ hdev->numa_node = node;
+ hdev->instance = shost->host_no; /* for device instance */
+ sprintf(hdev->name, "%s%d", SSSRAID_DRIVER_NAME, hdev->instance);
+
+ init_rwsem(&hdev->devices_rwsem);
+ spin_lock_init(&hdev->state_lock);
+
+ spin_lock_init(&hdev->fwevt_lock);
+ spin_lock_init(&hdev->watchdog_lock);
+
+ INIT_LIST_HEAD(&hdev->fwevt_list);
+
+ hdev->logging_level = logging_level; /* according to log_debug_switch */
+
+ snprintf(hdev->fwevt_worker_name, sizeof(hdev->fwevt_worker_name),
+ "%s%d_fwevt_wrkr", SSSRAID_DRIVER_NAME, hdev->instance);
+ hdev->fwevt_worker_thread = alloc_ordered_workqueue(
+ hdev->fwevt_worker_name, WQ_MEM_RECLAIM);
+ if (!hdev->fwevt_worker_thread) {
+ ioc_err(hdev, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ retval = -ENODEV;
+ goto out_fwevtthread_failed;
+ }
+
+ hdev->shost = shost;
+ hdev->pdev = pdev;
+
+ if (sssraid_init_ioc(hdev, 0)) {
+ ioc_err(hdev, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ retval = -ENODEV;
+ goto out_iocinit_failed;
+ }
+
+ sssraid_shost_init(hdev);
+
+ retval = scsi_add_host(shost, &pdev->dev);
+ if (retval) {
+ ioc_err(hdev, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto addhost_failed;
+ }
+
+ snprintf(bsg_name, sizeof(bsg_name), "%s%d", SSSRAID_DRIVER_NAME, shost->host_no);
+ hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name,
+ sssraid_bsg_host_dispatch, NULL, sssraid_cmd_size(hdev));
+ if (IS_ERR(hdev->bsg_queue)) {
+ ioc_err(hdev, "err, setup bsg failed\n");
+ retval = PTR_ERR(hdev->bsg_queue);
+ hdev->bsg_queue = NULL;
+ goto bsg_setup_failed;
+ }
+
+ sssraid_change_host_state(hdev, SSSRAID_LIVE);
+
+ scsi_scan_host(shost);
+ return retval;
+
+bsg_setup_failed:
+ scsi_remove_host(shost);
+addhost_failed:
+ sssraid_cleanup_ioc(hdev, 0);
+out_iocinit_failed:
+ destroy_workqueue(hdev->fwevt_worker_thread);
+out_fwevtthread_failed:
+ scsi_host_put(shost);
+shost_failed:
+ return retval;
+}
+
+static void sssraid_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+
+ if (!shost)
+ return;
+
+ ioc_info(hdev, "enter sssraid remove\n");
+
+ hdev = shost_priv(shost);
+
+ sssraid_change_host_state(hdev, SSSRAID_DELETING);
+
+ if (!pci_device_is_present(pdev))
+ sssraid_back_all_io(hdev);
+
+ sssraid_cleanup_fwevt_list(hdev);
+ destroy_workqueue(hdev->fwevt_worker_thread);
+
+ sssraid_remove_bsg(hdev);
+ scsi_remove_host(shost);
+ sssraid_cleanup_ioc(hdev, 0);
+
+ scsi_host_put(shost);
+}
+
+static void sssraid_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+
+ if (!shost)
+ return;
+
+ hdev = shost_priv(shost);
+
+ sssraid_cleanup_fwevt_list(hdev);
+ destroy_workqueue(hdev->fwevt_worker_thread);
+ sssraid_cleanup_ioc(hdev, 0);
+}
+
+#ifdef CONFIG_PM
+static int sssraid_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+ pci_power_t device_state;
+
+ if (!shost)
+ return 0;
+
+ hdev = shost_priv(shost);
+
+ while (hdev->state == SSSRAID_RESETTING)
+ ssleep(1);
+ sssraid_cleanup_fwevt_list(hdev);
+ scsi_block_requests(shost);
+ sssraid_cleanup_ioc(hdev, 1);
+
+ device_state = pci_choose_state(pdev, state);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, device_state);
+
+ return 0;
+}
+
+static int sssraid_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+ pci_power_t device_state = pdev->current_state;
+
+ if (!shost)
+ return 0;
+
+ hdev = shost_priv(shost);
+
+ ioc_info(hdev, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ hdev->pdev = pdev;
+ hdev->cpu_count = num_online_cpus();
+
+ /* sssraid_setup_resources in sssraid_init_ioc */
+ sssraid_init_ioc(hdev, 1);
+ scsi_unblock_requests(shost);
+
+ return 0;
+}
+#endif
+
+static pci_ers_result_t sssraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+
+ if (!shost)
+ return PCI_ERS_RESULT_NONE;
+
+ hdev = shost_priv(shost);
+
+ ioc_info(hdev, "enter pci error detect, state:%d\n", state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ ioc_warn(hdev, "channel is normal, do nothing\n");
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ ioc_warn(hdev, "channel io frozen, need reset controller\n");
+
+ scsi_block_requests(hdev->shost);
+
+ sssraid_change_host_state(hdev, SSSRAID_RESETTING);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ ioc_warn(hdev, "channel io failure, request disconnect\n");
+
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t sssraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+
+ if (!shost)
+ return PCI_ERS_RESULT_NONE;
+
+ hdev = shost_priv(shost);
+
+ ioc_info(hdev, "restart after slot reset\n");
+
+ pci_restore_state(pdev);
+
+ sssraid_soft_reset_handler(hdev);
+
+ scsi_unblock_requests(hdev->shost);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void sssraid_reset_done(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+
+ if (!shost)
+ return;
+
+ hdev = shost_priv(shost);
+
+ ioc_info(hdev, "enter sssraid reset done\n");
+}
+
+static struct pci_error_handlers sssraid_err_handler = {
+ .error_detected = sssraid_pci_error_detected,
+ .slot_reset = sssraid_pci_slot_reset,
+ .reset_done = sssraid_reset_done,
+};
+
+static const struct pci_device_id sssraid_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_3SNIC_LOGIC, SSSRAID_SERVER_DEVICE_HBA_DID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_3SNIC_LOGIC, SSSRAID_SERVER_DEVICE_RAID_DID) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sssraid_pci_id_table);
+
+static struct pci_driver sssraid_pci_driver = {
+ .name = SSSRAID_DRIVER_NAME,
+ .id_table = sssraid_pci_id_table,
+ .probe = sssraid_probe,
+ .remove = sssraid_remove,
+ .shutdown = sssraid_shutdown,
+#ifdef CONFIG_PM
+ .suspend = sssraid_suspend,
+ .resume = sssraid_resume,
+#endif
+ .err_handler = &sssraid_err_handler,
+};
+
+static int __init sssraid_init(void)
+{
+ int ret_val;
+
+ pr_info("Loading %s version %s\n", SSSRAID_DRIVER_NAME,
+ SSSRAID_DRIVER_VERSION);
+
+ sssraid_class = class_create(THIS_MODULE, "sssraid");
+ if (IS_ERR(sssraid_class)) {
+ ret_val = PTR_ERR(sssraid_class);
+ return ret_val;
+ }
+
+ ret_val = pci_register_driver(&sssraid_pci_driver);
+ if (ret_val)
+ class_destroy(sssraid_class);
+
+ return ret_val;
+}
+
+static void __exit sssraid_exit(void)
+{
+ pr_info("Unloading %s version %s\n", SSSRAID_DRIVER_NAME,
+ SSSRAID_DRIVER_VERSION);
+
+ pci_unregister_driver(&sssraid_pci_driver);
+ class_destroy(sssraid_class);
+}
+
+MODULE_AUTHOR("liangry1(a)3snic.com");
+MODULE_DESCRIPTION("3SNIC Information Technology SSSRAID Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SSSRAID_DRIVER_VERSION);
+module_init(sssraid_init);
+module_exit(sssraid_exit);
diff --git a/openEuler/MAINTAINERS b/openEuler/MAINTAINERS
index 35b5a8f81fed..fc8b6e06fc04 100644
--- a/openEuler/MAINTAINERS
+++ b/openEuler/MAINTAINERS
@@ -76,6 +76,12 @@ F: include/linux/wait.h
F: include/uapi/linux/sched.h
F: kernel/sched/
+SSSRAID SCSI/Raid DRIVERS
+M: Steven Song <steven.song(a)3snic.com>
+S: Maintained
+F: Documentation/scsi/sssraid.rst
+F: drivers/scsi/sssraid/
+
THE REST
M: xiexiuqi(a)huawei.com
M: zhengzengkai(a)huawei.com
--
2.25.1
3SNIC inclusion
category: features
bugzilla: https://gitee.com/openeuler/kernel/issues/I64M90
CVE: NA
This commit is to support 3SNIC 3S5XX RAID/HBA controllers.
RAID controllers support RAID 0/1/5/6/10/50/60 modes;
HBA controllers support RAID 0/1/10 modes.
RAID/HBA support SAS/SATA HDD/SSD.
Signed-off-by: liangry <liangry1(a)3snic.com>
Reviewed-by: Jiang Yu <yujiang(a)3snic.com>
Reviewed-by: Steven Song <steven.song(a)3snic.com>
---
Documentation/scsi/sssraid.rst | 83 +
MAINTAINERS | 7 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/x86/configs/openeuler_defconfig | 1 +
drivers/scsi/Kconfig | 1 +
drivers/scsi/Makefile | 1 +
drivers/scsi/sssraid/Kconfig | 14 +
drivers/scsi/sssraid/Makefile | 7 +
drivers/scsi/sssraid/sssraid.h | 971 ++++++++++
drivers/scsi/sssraid/sssraid_debug.h | 34 +
drivers/scsi/sssraid/sssraid_fw.c | 1724 +++++++++++++++++
drivers/scsi/sssraid/sssraid_os.c | 2337 ++++++++++++++++++++++++
12 files changed, 5181 insertions(+)
create mode 100644 Documentation/scsi/sssraid.rst
create mode 100644 drivers/scsi/sssraid/Kconfig
create mode 100644 drivers/scsi/sssraid/Makefile
create mode 100644 drivers/scsi/sssraid/sssraid.h
create mode 100644 drivers/scsi/sssraid/sssraid_debug.h
create mode 100644 drivers/scsi/sssraid/sssraid_fw.c
create mode 100644 drivers/scsi/sssraid/sssraid_os.c
diff --git a/Documentation/scsi/sssraid.rst b/Documentation/scsi/sssraid.rst
new file mode 100644
index 000000000000..d56e5f7c478c
--- /dev/null
+++ b/Documentation/scsi/sssraid.rst
@@ -0,0 +1,83 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================================
+SSSRAID - 3SNIC SCSI RAID Controller driver
+==============================================
+
+This file describes the SSSRAID SCSI driver for 3SNIC
+(http://www.3snic.com) RAID controllers. The SSSRAID
+driver is the first generation RAID driver for 3SNIC Corp.
+
+For 3SNIC SSSRAID controller support, enable the SSSRAID driver
+when configuring the kernel.
+
+SSSRAID specific entries in /sys
+=================================
+
+SSSRAID host attributes
+------------------------
+ - /sys/class/scsi_host/host*/csts_pp
+ - /sys/class/scsi_host/host*/csts_shst
+ - /sys/class/scsi_host/host*/csts_cfs
+ - /sys/class/scsi_host/host*/csts_rdy
+ - /sys/class/scsi_host/host*/fw_version
+
+ The host csts_pp attribute is a read only attribute. This attribute
+ indicates whether the controller is processing commands. If this attribute
+ is set to ‘1’, then the controller is processing commands normally. If
+ this attribute is cleared to ‘0’, then the controller has temporarily stopped
+ processing commands in order to handle an event (e.g., firmware activation).
+
+ The host csts_shst attribute is a read only attribute. This attribute
+ indicates the status of shutdown processing. The shutdown status values are defined
+ as:
+ ====== ==============================
+ Value Definition
+ ====== ==============================
+ 00b Normal operation
+ 01b Shutdown processing occurring
+ 10b Shutdown processing complete
+ 11b Reserved
+ ====== ==============================
+ The host csts_cfs attribute is a read only attribute. This attribute is set to
+ ’1’ when a fatal controller error occurred that could not be communicated in the
+ appropriate Completion Queue. This bit is cleared to ‘0’ when a fatal controller
+ error has not occurred.
+
+ The host csts_rdy attribute is a read only attribute. This attribute is set to
+ ‘1’ when the controller is ready to process submission queue entries.
+
+ The fw_version attribute is read-only and will return the driver version and the
+ controller firmware version.
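+
+ A minimal userspace C sketch that polls the ready bit (the host number
+ host0 below is an assumption; substitute the host instance created by
+ this driver)::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        /* hypothetical sysfs path; pick the scsi_host registered by sssraid */
+        FILE *f = fopen("/sys/class/scsi_host/host0/csts_rdy", "r");
+        int rdy = -1;
+
+        if (f) {
+            if (fscanf(f, "%d", &rdy) != 1)
+                rdy = -1;
+            fclose(f);
+        }
+        printf("controller ready: %d\n", rdy);
+        return 0;
+    }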
+
+SSSRAID scsi device attributes
+------------------------------
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_level
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_state
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_resync
+
+ The device raid_level attribute is a read only attribute. This attribute indicates
+ the RAID level of the scsi device (will display "NA" if the scsi device is not of
+ virtual disk type).
+
+ The device raid_state attribute is read-only and indicates the RAID status of the
+ scsi device (will display "NA" if the scsi device is not of virtual disk type).
+
+ The device raid_resync attribute is read-only and indicates the RAID rebuild
+ progress of the scsi device (will display "NA" if the scsi device is not of
+ virtual disk type).
+
+Supported devices
+=================
+
+ =================== ======= =======================================
+ PCI ID (pci.ids) OEM Product
+ =================== ======= =======================================
+ 1F3F:2100 3SNIC 3S510(HBA:8Ports,1G DDR)
+ 1F3F:2100 3SNIC 3S520(HBA:16Ports,1G DDR)
+ 1F3F:2100 3SNIC 3S530(HBA:32Ports,1G DDR)
+ 1F3F:2100 3SNIC 3S540(HBA:40Ports,1G DDR)
+ 1F3F:2200 3SNIC 3S580(RAID:16Ports,2G cache)
+ 1F3F:2200 3SNIC 3S585(RAID:16Ports,4G cache)
+ 1F3F:2200 3SNIC 3S590(RAID:32Ports,4G cache)
+ 1F3F:2200 3SNIC 3S5A0(RAID:40Ports,2G cache)
+ 1F3F:2200 3SNIC 3S5A5(RAID:40Ports,4G cache)
+ =================== ======= =======================================
diff --git a/MAINTAINERS b/MAINTAINERS
index 9ae209633628..e6470640f764 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16669,6 +16669,13 @@ M: Jan-Benedict Glaw <jbglaw(a)lug-owl.de>
S: Maintained
F: arch/alpha/kernel/srm_env.c
+SSSRAID SCSI/Raid DRIVERS
+M: Steven Song <steven.song(a)3snic.com>
+L: linux-scsi(a)vger.kernel.org
+S: Maintained
+F: Documentation/scsi/sssraid.rst
+F: drivers/scsi/sssraid/
+
ST LSM6DSx IMU IIO DRIVER
M: Lorenzo Bianconi <lorenzo.bianconi83(a)gmail.com>
L: linux-iio(a)vger.kernel.org
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 30384eacad4f..dc4ba0d2b694 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2394,6 +2394,7 @@ CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE=y
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_3SNIC_SSSRAID=m
CONFIG_SCSI_MPT3SAS=m
CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 5ada612f1d75..df16f123515b 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2355,6 +2355,7 @@ CONFIG_SCSI_AACRAID=m
# CONFIG_MEGARAID_LEGACY is not set
CONFIG_MEGARAID_SAS=m
CONFIG_SCSI_MPT3SAS=m
+CONFIG_SCSI_3SNIC_SSSRAID=m
CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 0fbe4edeccd0..7339d6179e5a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -481,6 +481,7 @@ config SCSI_ARCMSR
source "drivers/scsi/esas2r/Kconfig"
source "drivers/scsi/megaraid/Kconfig.megaraid"
+source "drivers/scsi/sssraid/Kconfig"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/ufs/Kconfig"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 78a3c832394c..01bb908fb832 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -92,6 +92,7 @@ obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
obj-$(CONFIG_SCSI_HPSA) += hpsa.o
obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi/
+obj-$(CONFIG_SCSI_3SNIC_SSSRAID) += sssraid/
obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
diff --git a/drivers/scsi/sssraid/Kconfig b/drivers/scsi/sssraid/Kconfig
new file mode 100644
index 000000000000..769f68732bf3
--- /dev/null
+++ b/drivers/scsi/sssraid/Kconfig
@@ -0,0 +1,14 @@
+#
+# Kernel configuration file for the 3SNIC
+#
+
+config SCSI_3SNIC_SSSRAID
+ tristate "3SNIC sssraid Adapter"
+ depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
+ depends on ARM64 || X86_64
+ help
+ This driver supports the 3SNIC 3S5xx series of RAID controllers, which
+ have a PCI Express Gen4 host interface and support SAS/SATA HDD/SSD.
+ To compile this driver as a module, choose M here: the module will
+ be called sssraid.
diff --git a/drivers/scsi/sssraid/Makefile b/drivers/scsi/sssraid/Makefile
new file mode 100644
index 000000000000..c80605866b42
--- /dev/null
+++ b/drivers/scsi/sssraid/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the 3SNIC sssraid drivers.
+#
+
+obj-$(CONFIG_SCSI_3SNIC_SSSRAID) += sssraid.o
+
+sssraid-objs := sssraid_os.o sssraid_fw.o
diff --git a/drivers/scsi/sssraid/sssraid.h b/drivers/scsi/sssraid/sssraid.h
new file mode 100644
index 000000000000..bae668ef9c98
--- /dev/null
+++ b/drivers/scsi/sssraid/sssraid.h
@@ -0,0 +1,971 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 3SNIC Information Technology, Ltd */
+
+/* 3SNIC RAID SSSXXX Series Linux Driver */
+
+#ifndef __SSSRAID_H_
+#define __SSSRAID_H_
+
+#define SSSRAID_DRIVER_VERSION "1.0.0.0"
+#define SSSRAID_DRIVER_RELDATE "12-December-2022"
+
+#define SSSRAID_DRIVER_NAME "sssraid"
+
+#define SSSRAID_NAME_LENGTH 32
+
+/*
+ * SSSRAID Vendor ID and Device IDs
+ */
+#define PCI_VENDOR_ID_3SNIC_LOGIC 0x1F3F
+
+#define SSSRAID_SERVER_DEVICE_HBA_DID 0x2100
+#define SSSRAID_SERVER_DEVICE_RAID_DID 0x2200
+
+#define SSSRAID_CAP_MQES(cap) ((cap) & 0xffff)
+#define SSSRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define SSSRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define SSSRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define SSSRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define SSSRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff)
+
+#define SSSRAID_DEFAULT_MAX_CHANNEL 4
+#define SSSRAID_DEFAULT_MAX_ID 240
+#define SSSRAID_DEFAULT_MAX_LUN_PER_HOST 8
+#define MAX_SECTORS 2048
+
+/*
+ * Time define
+ */
+#define SSSRAID_WATCHDOG_INTERVAL 1000 /* in milliseconds */
+#define SSSRAID_PORTENABLE_TIMEOUT 300
+
+/*
+ * SSSRAID queue and entry size for Admin and I/O type
+ */
+#define IO_SQE_SIZE sizeof(struct sssraid_ioq_command)
+#define ADMIN_SQE_SIZE sizeof(struct sssraid_admin_command)
+#define SQE_SIZE(qid) (((qid) > 0) ? IO_SQE_SIZE : ADMIN_SQE_SIZE)
+#define CQ_SIZE(depth) ((depth) * sizeof(struct sssraid_completion))
+#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid))
+
+#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE)
+
+#define SSSRAID_AQ_DEPTH 128
+#define SSSRAID_NR_AEN_COMMANDS 16
+#define SSSRAID_AQ_BLK_MQ_DEPTH (SSSRAID_AQ_DEPTH - SSSRAID_NR_AEN_COMMANDS)
+#define SSSRAID_AQ_MQ_TAG_DEPTH (SSSRAID_AQ_BLK_MQ_DEPTH - 1)
+
+#define SSSRAID_ADMIN_QUEUE_NUM 1
+#define SSSRAID_PTCMDS_PERQ 1
+#define SSSRAID_IO_BLK_MQ_DEPTH (hdev->scsi_qd)
+#define SSSRAID_NR_IOQ_PTCMDS (SSSRAID_PTCMDS_PERQ * hdev->shost->nr_hw_queues)
+
+#define FUA_MASK 0x08
+#define SSSRAID_MINORS BIT(MINORBITS)
+#define SSSRAID_RW_FUA BIT(14)
+
+#define COMMAND_IS_WRITE(cmd) ((cmd)->common.opcode & 1)
+
+#define SSSRAID_IO_IOSQES 7
+#define SSSRAID_IO_IOCQES 4
+#define PRP_ENTRY_SIZE 8
+
+#define SMALL_POOL_SIZE 256
+#define MAX_SMALL_POOL_NUM 16
+#define MAX_CMD_PER_DEV 64
+#define MAX_CDB_LEN 16
+
+#define SSSRAID_UP_TO_MULTY4(x) (((x) + 4) & (~0x03))
+
+#define CQE_STATUS_SUCCESS (0x0)
+
+#define IO_6_DEFAULT_TX_LEN 256
+
+#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct sssraid_sgl_desc))
+
+#define SSSRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2)
+
+extern u32 admin_tmout;
+#define ADMIN_TIMEOUT (admin_tmout * HZ)
+
+#define SSSRAID_WAIT_ABNL_CMD_TIMEOUT 6
+
+#define SSSRAID_DMA_MSK_BIT_MAX 64
+
+enum {
+ SSSRAID_SGL_FMT_DATA_DESC = 0x00,
+ SSSRAID_SGL_FMT_SEG_DESC = 0x02,
+ SSSRAID_SGL_FMT_LAST_SEG_DESC = 0x03,
+ SSSRAID_KEY_SGL_FMT_DATA_DESC = 0x04,
+ SSSRAID_TRANSPORT_SGL_DATA_DESC = 0x05
+};
+
+
+enum {
+ SSSRAID_REQ_CANCELLED = (1 << 0),
+ SSSRAID_REQ_USERCMD = (1 << 1),
+};
+
+enum {
+ SSSRAID_SC_SUCCESS = 0x0,
+ SSSRAID_SC_INVALID_OPCODE = 0x1,
+ SSSRAID_SC_INVALID_FIELD = 0x2,
+
+ SSSRAID_SC_ABORT_LIMIT = 0x103,
+ SSSRAID_SC_ABORT_MISSING = 0x104,
+ SSSRAID_SC_ASYNC_LIMIT = 0x105,
+
+ SSSRAID_SC_DNR = 0x4000,
+};
+
+enum {
+ SSSRAID_REG_CAP = 0x0000,
+ SSSRAID_REG_CC = 0x0014,
+ SSSRAID_REG_CSTS = 0x001c,
+ SSSRAID_REG_AQA = 0x0024,
+ SSSRAID_REG_ASQ = 0x0028,
+ SSSRAID_REG_ACQ = 0x0030,
+ SSSRAID_REG_DBS = 0x1000,
+};
+
+enum {
+ SSSRAID_CC_ENABLE = 1 << 0,
+ SSSRAID_CC_CSS_NVM = 0 << 4,
+ SSSRAID_CC_MPS_SHIFT = 7,
+ SSSRAID_CC_AMS_SHIFT = 11,
+ SSSRAID_CC_SHN_SHIFT = 14,
+ SSSRAID_CC_IOSQES_SHIFT = 16,
+ SSSRAID_CC_IOCQES_SHIFT = 20,
+ SSSRAID_CC_AMS_RR = 0 << SSSRAID_CC_AMS_SHIFT,
+ SSSRAID_CC_SHN_NONE = 0 << SSSRAID_CC_SHN_SHIFT,
+ SSSRAID_CC_IOSQES = SSSRAID_IO_IOSQES << SSSRAID_CC_IOSQES_SHIFT,
+ SSSRAID_CC_IOCQES = SSSRAID_IO_IOCQES << SSSRAID_CC_IOCQES_SHIFT,
+ SSSRAID_CC_SHN_NORMAL = 1 << SSSRAID_CC_SHN_SHIFT,
+ SSSRAID_CC_SHN_MASK = 3 << SSSRAID_CC_SHN_SHIFT,
+ SSSRAID_CSTS_CFS_SHIFT = 1,
+ SSSRAID_CSTS_SHST_SHIFT = 2,
+ SSSRAID_CSTS_PP_SHIFT = 5,
+ SSSRAID_CSTS_RDY = 1 << 0,
+ SSSRAID_CSTS_SHST_CMPLT = 2 << 2,
+ SSSRAID_CSTS_SHST_MASK = 3 << 2,
+ SSSRAID_CSTS_CFS_MASK = 1 << SSSRAID_CSTS_CFS_SHIFT,
+ SSSRAID_CSTS_PP_MASK = 1 << SSSRAID_CSTS_PP_SHIFT,
+};
+
+enum {
+ SSSRAID_ADMIN_DELETE_SQ = 0x00,
+ SSSRAID_ADMIN_CREATE_SQ = 0x01,
+ SSSRAID_ADMIN_DELETE_CQ = 0x04,
+ SSSRAID_ADMIN_CREATE_CQ = 0x05,
+ SSSRAID_ADMIN_ABORT_CMD = 0x08,
+ SSSRAID_ADMIN_SET_FEATURES = 0x09,
+ SSSRAID_ADMIN_ASYNC_EVENT = 0x0c,
+ SSSRAID_ADMIN_GET_INFO = 0xc6,
+ SSSRAID_ADMIN_RESET = 0xc8,
+};
+
+enum {
+ SSSRAID_GET_INFO_CTRL = 0,
+ SSSRAID_GET_INFO_DEV_LIST = 1,
+};
+
+enum sssraid_scsi_rst_type {
+ SSSRAID_RESET_TARGET = 0,
+ SSSRAID_RESET_BUS = 1,
+};
+
+enum {
+ SSSRAID_AEN_ERROR = 0,
+ SSSRAID_AEN_NOTICE = 2,
+ SSSRAID_AEN_VS = 7,
+};
+
+enum {
+ SSSRAID_AEN_DEV_CHANGED = 0x00,
+ SSSRAID_AEN_FW_ACT_START = 0x01,
+ SSSRAID_AEN_HOST_PROBING = 0x10,
+};
+
+enum {
+ SSSRAID_AEN_TIMESYN = 0x00,
+ SSSRAID_AEN_FW_ACT_FINISH = 0x02,
+ SSSRAID_AEN_EVENT_MIN = 0x80,
+ SSSRAID_AEN_EVENT_MAX = 0xff,
+};
+
+enum {
+ SSSRAID_CMD_WRITE = 0x01,
+ SSSRAID_CMD_READ = 0x02,
+
+ SSSRAID_CMD_NONIO_NONE = 0x80,
+ SSSRAID_CMD_NONIO_TODEV = 0x81,
+ SSSRAID_CMD_NONIO_FROMDEV = 0x82,
+};
+
+enum {
+ SSSRAID_QUEUE_PHYS_CONTIG = (1 << 0),
+ SSSRAID_CQ_IRQ_ENABLED = (1 << 1),
+
+ SSSRAID_FEAT_NUM_QUEUES = 0x07,
+ SSSRAID_FEAT_ASYNC_EVENT = 0x0b,
+ SSSRAID_FEAT_TIMESTAMP = 0x0e,
+};
+
+enum sssraid_state {
+ SSSRAID_NEW,
+ SSSRAID_LIVE,
+ SSSRAID_RESETTING,
+ SSSRAID_DELETING,
+ SSSRAID_DEAD,
+};
+
+enum {
+ SSSRAID_CARD_HBA,
+ SSSRAID_CARD_RAID,
+};
+
+enum sssraid_cmd_type {
+ SSSRAID_CMD_ADM,
+ SSSRAID_CMD_IOPT,
+};
+
+/*
+ * SSSRAID completion queue entry struct
+ */
+struct sssraid_completion {
+ __le32 result;
+ union {
+ struct {
+ __u8 sense_len;
+ __u8 resv[3];
+ };
+ __le32 result1;
+ };
+ __le16 sq_head;
+ __le16 sq_id;
+ __le16 cmd_id;
+ __le16 status;
+};
+
+/*
+ * SSSRAID firmware controller properties
+ */
+struct sssraid_ctrl_info {
+ __le32 nd;
+ __le16 max_cmds;
+ __le16 max_channel;
+ __le32 max_tgt_id;
+ __le16 max_lun;
+ __le16 max_num_sge;
+ __le16 lun_num_in_boot;
+ __u8 mdts;
+ __u8 acl;
+ __u8 aerl;
+ __u8 card_type;
+ __u16 rsvd;
+ __le32 rtd3e;
+ __u8 sn[32];
+ __u8 fr[16];
+ __u8 rsvd1[4020];
+};
+
+struct sssraid_intr_info {
+ struct sssraid_dev *hdev;
+ u16 msix_index;
+ struct sssraid_cqueue *cqinfo;
+ char name[SSSRAID_NAME_LENGTH];
+};
+
+struct sssraid_fwevt {
+ struct list_head list;
+ struct work_struct work;
+ struct sssraid_dev *hdev;
+ u16 event_id;
+ bool send_ack;
+ bool process_evt;
+ u32 evt_ctx;
+ struct kref ref_count;
+ char event_data[0] __aligned(4);
+};
+
+/*
+ * SSSRAID private device struct definition
+ */
+struct sssraid_dev {
+ struct pci_dev *pdev;
+ struct Scsi_Host *shost;
+ struct sssraid_squeue *sqinfo;
+ struct sssraid_cqueue *cqinfo;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool[MAX_SMALL_POOL_NUM];
+ void __iomem *bar;
+
+ u32 init_done_queue_cnt;
+ u32 ioq_depth;
+ u32 db_stride;
+ u32 __iomem *dbs;
+ struct rw_semaphore devices_rwsem;
+ int numa_node;
+ u32 page_size;
+ u32 ctrl_config;
+ u64 cap;
+ u32 instance;
+ u32 scsi_qd;
+ struct sssraid_ctrl_info *ctrl_info;
+ struct sssraid_dev_info *devices;
+
+ int logging_level;
+
+ char name[SSSRAID_NAME_LENGTH];
+ int cpu_count;
+ /*
+ * before_affinity_msix_cnt is
+ * min("FW support IO Queue count", num_online_cpus)+1
+ */
+ u16 before_affinity_msix_cnt;
+
+ struct sssraid_cmd *adm_cmds;
+ struct list_head adm_cmd_list;
+ spinlock_t adm_cmd_lock;
+
+ struct sssraid_cmd *ioq_ptcmds;
+ struct list_head ioq_pt_list;
+ spinlock_t ioq_pt_lock;
+
+ int reset_flag;
+
+ enum sssraid_state state;
+ spinlock_t state_lock;
+
+ struct request_queue *bsg_queue;
+
+ u8 intr_enabled;
+
+ struct sssraid_intr_info *intr_info;
+ u32 intr_info_count;
+
+ char watchdog_work_q_name[20];
+ struct workqueue_struct *watchdog_work_q;
+ struct delayed_work watchdog_work;
+ spinlock_t watchdog_lock;
+
+ char fwevt_worker_name[SSSRAID_NAME_LENGTH];
+ struct workqueue_struct *fwevt_worker_thread;
+ spinlock_t fwevt_lock;
+ struct list_head fwevt_list;
+
+ struct sssraid_fwevt *current_event;
+};
+
+/*
+ * SSSRAID scatter list descriptor
+ */
+struct sssraid_sgl_desc {
+ __le64 addr;
+ __le32 length;
+ __u8 rsvd[3];
+ __u8 type;
+};
+
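+/* data pointer in a command: either a PRP pair or one SGL descriptor */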
+union sssraid_data_ptr {
+ struct {
+ __le64 prp1;
+ __le64 prp2;
+ };
+ struct sssraid_sgl_desc sgl;
+};
+
+/*
+ * SSSRAID general admin class command format struct
+ */
+struct sssraid_admin_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __le32 cdw2[4];
+ union sssraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+struct sssraid_features {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[2];
+ union sssraid_data_ptr dptr;
+ __le32 fid;
+ __le32 dword11;
+ __le32 dword12;
+ __le32 dword13;
+ __le32 dword14;
+ __le32 dword15;
+};
+
+/*
+ * SSSRAID create completion queue command struct
+ */
+struct sssraid_create_cq {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 cqid;
+ __le16 qsize;
+ __le16 cq_flags;
+ __le16 irq_vector;
+ __u32 rsvd12[4];
+};
+
+/*
+ * SSSRAID create submission queue command struct
+ */
+struct sssraid_create_sq {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 sqid;
+ __le16 qsize;
+ __le16 sq_flags;
+ __le16 cqid;
+ __u32 rsvd12[4];
+};
+
+/*
+ * SSSRAID delete submission queue command struct
+ */
+struct sssraid_delete_queue {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __u32 rsvd1[9];
+ __le16 qid;
+ __u16 rsvd10;
+ __u32 rsvd11[5];
+};
+
+/*
+ * SSSRAID access to information command struct
+ */
+struct sssraid_get_info {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __u32 rsvd2[4];
+ union sssraid_data_ptr dptr;
+ __u8 type;
+ __u8 rsvd10[3];
+ __le32 cdw11;
+ __u32 rsvd12[4];
+};
+
+/*
+ * User command struct
+ */
+struct sssraid_usr_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ union {
+ struct {
+ __le16 subopcode;
+ __le16 rsvd1;
+ } info_0;
+ __le32 cdw2;
+ };
+ union {
+ struct {
+ __le16 data_len;
+ __le16 param_len;
+ } info_1;
+ __le32 cdw3;
+ };
+ __u64 metadata;
+ union sssraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+enum {
+ SSSRAID_CMD_FLAG_SGL_METABUF = (1 << 6),
+ SSSRAID_CMD_FLAG_SGL_METASEG = (1 << 7),
+ SSSRAID_CMD_FLAG_SGL_ALL = SSSRAID_CMD_FLAG_SGL_METABUF | SSSRAID_CMD_FLAG_SGL_METASEG,
+};
+
+enum sssraid_cmd_state {
+ SSSRAID_CMD_IDLE = 0,
+ SSSRAID_CMD_IN_FLIGHT = 1,
+ SSSRAID_CMD_COMPLETE = 2,
+ SSSRAID_CMD_TIMEOUT = 3,
+ SSSRAID_CMD_TMO_COMPLETE = 4,
+};
+
+/*
+ * SSSRAID abort command struct
+ */
+struct sssraid_abort_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __le16 sqid;
+ __le16 cid;
+ __u32 rsvd11[5];
+};
+
+/*
+ * SSSRAID reset command struct
+ */
+struct sssraid_reset_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __u8 type;
+ __u8 rsvd10[3];
+ __u32 rsvd11[5];
+};
+
+/*
+ * SSSRAID admin class command set struct
+ */
+struct sssraid_admin_command {
+ union {
+ struct sssraid_admin_common_command common;
+ struct sssraid_features features;
+ struct sssraid_create_cq create_cq;
+ struct sssraid_create_sq create_sq;
+ struct sssraid_delete_queue delete_queue;
+ struct sssraid_get_info get_info;
+ struct sssraid_abort_cmd abort;
+ struct sssraid_reset_cmd reset;
+ struct sssraid_usr_cmd usr_cmd;
+ };
+};
+
+/*
+ * SSSRAID general IO class command format struct
+ */
+struct sssraid_ioq_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __le32 cdw3[3];
+ union sssraid_data_ptr dptr;
+ __le32 cdw10[6];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __le32 cdw26[6];
+};
+
+/*
+ * SSSRAID read or write command struct
+ */
+struct sssraid_rw_command {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union sssraid_data_ptr dptr;
+ __le64 slba;
+ __le16 nlb;
+ __le16 control;
+ __u32 rsvd13[3];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct sssraid_scsi_nonio {
+ __u8 opcode;
+ __u8 flags;
+ __le16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_length;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union sssraid_data_ptr dptr;
+ __u32 rsvd10[5];
+ __le32 buffer_len;
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+/*
+ * SSSRAID IO class command struct
+ */
+struct sssraid_ioq_command {
+ union {
+ struct sssraid_ioq_common_command common;
+ struct sssraid_rw_command rw;
+ struct sssraid_scsi_nonio scsi_nonio;
+ };
+};
+
+/*
+ * SSSRAID passthru command struct
+ */
+struct sssraid_passthru_common_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 data_len;
+ __u16 param_len;
+ } info_1;
+ __u32 cdw3;
+ };
+ __u64 metadata;
+
+ __u64 addr;
+ __u64 prp2;
+
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct sssraid_ioq_passthru_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 res_sense_len;
+ __u8 cdb_len;
+ __u8 rsvd0;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_1;
+ __u32 cdw3;
+ };
+ union {
+ struct {
+ __u16 rsvd;
+ __u16 param_len;
+ } info_2;
+ __u32 cdw4;
+ };
+ __u32 cdw5;
+ __u64 addr;
+ __u64 prp2;
+ union {
+ struct {
+ __u16 eid;
+ __u16 sid;
+ } info_3;
+ __u32 cdw10;
+ };
+ union {
+ struct {
+ __u16 did;
+ __u8 did_flag;
+ __u8 rsvd2;
+ } info_4;
+ __u32 cdw11;
+ };
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 data_len;
+ __u32 cdw16;
+ __u32 cdw17;
+ __u32 cdw18;
+ __u32 cdw19;
+ __u32 cdw20;
+ __u32 cdw21;
+ __u32 cdw22;
+ __u32 cdw23;
+ __u64 sense_addr;
+ __u32 cdw26[4];
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct sssraid_bsg_request {
+ u32 msgcode;
+ u32 control;
+ union {
+ struct sssraid_passthru_common_cmd admcmd;
+ struct sssraid_ioq_passthru_cmd ioqcmd;
+ };
+};
+
+enum {
+ SSSRAID_BSG_ADM,
+ SSSRAID_BSG_IOQ,
+};
+
+/*
+ * define the transfer command struct
+ */
+struct sssraid_cmd {
+ u16 qid;
+ u16 cid;
+ u32 result0;
+ u32 result1;
+ u16 status;
+ void *priv;
+ enum sssraid_cmd_state state;
+ struct completion cmd_done;
+ struct list_head list;
+};
+
+/*
+ * define the SSSRAID physical queue struct
+ */
+struct sssraid_squeue {
+ struct sssraid_dev *hdev;
+ spinlock_t sq_lock; /* spinlock for lock handling */
+
+ void *sq_cmds;
+
+ dma_addr_t sq_dma_addr;
+ u32 __iomem *q_db;
+ u8 cq_phase;
+ u8 sqes;
+ u16 qidx;
+ u16 sq_tail;
+ u16 last_cq_head;
+ u16 q_depth;
+ void *sense;
+ dma_addr_t sense_dma_addr;
+ struct dma_pool *prp_small_pool;
+};
+
+struct sssraid_cqueue {
+ struct sssraid_dev *hdev;
+
+ spinlock_t cq_lock ____cacheline_aligned_in_smp; /* spinlock for lock handling */
+
+ struct sssraid_completion *cqes;
+
+ dma_addr_t cq_dma_addr;
+ u8 cq_phase;
+ u16 cq_head;
+ u16 last_cq_head;
+};
+
+/*
+ * define the SSSRAID IO queue descriptor struct
+ */
+struct sssraid_iod {
+ struct sssraid_squeue *sqinfo;
+ enum sssraid_cmd_state state;
+ int npages;
+ u32 nsge;
+ u32 length;
+ bool use_sgl;
+ dma_addr_t first_dma;
+ void *sense;
+ dma_addr_t sense_dma;
+ struct scatterlist *sg;
+ void *list[0];
+};
+
+/*
+ * define the SSSRAID scsi device attribution and information
+ */
+#define SSSRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
+#define SSSRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
+#define SSSRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
+#define SSSRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
+#define SSSRAID_DEV_DISK_TYPE(attr) ((attr) & 0x1e)
+
+#define SSSRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
+#define SSSRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+
+/*
+ * define the SSSRAID scsi device identifier
+ */
+enum {
+ SSSRAID_SAS_HDD_VD = 0x04,
+ SSSRAID_SATA_HDD_VD = 0x08,
+ SSSRAID_SAS_SSD_VD = 0x0c,
+ SSSRAID_SATA_SSD_VD = 0x10,
+ SSSRAID_NVME_SSD_VD = 0x14,
+ SSSRAID_SAS_HDD_PD = 0x06,
+ SSSRAID_SATA_HDD_PD = 0x0a,
+ SSSRAID_SAS_SSD_PD = 0x0e,
+ SSSRAID_SATA_SSD_PD = 0x12,
+ SSSRAID_NVME_SSD_PD = 0x16,
+};
+
+/*
+ * define the SSSRAID scsi device queue depth
+ */
+#define SSSRAID_HDD_PD_QD 64
+#define SSSRAID_HDD_VD_QD 256
+#define SSSRAID_SSD_PD_QD 64
+#define SSSRAID_SSD_VD_QD 256
+
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+/*
+ * SSSRAID virtual device information struct
+ */
+struct sssraid_vd_info {
+ __u8 name[32];
+ __le16 id;
+ __u8 rg_id;
+ __u8 rg_level;
+ __u8 sg_num;
+ __u8 sg_disk_num;
+ __u8 vd_status;
+ __u8 vd_type;
+ __u8 rsvd1[4056];
+};
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+struct bgtask_info {
+ __u8 type;
+ __u8 progress;
+ __u8 rate;
+ __u8 rsvd0;
+ __le16 vd_id;
+ __le16 time_left;
+ __u8 rsvd1[4];
+};
+
+struct sssraid_bgtask {
+ __u8 sw;
+ __u8 task_num;
+ __u8 rsvd[6];
+ struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
+/*
+ * SSSRAID scsi device information struct
+ */
+struct sssraid_dev_info {
+ __le32 hdid;
+ __le16 target;
+ __u8 channel;
+ __u8 lun;
+ __u8 attr;
+ __u8 flag;
+ __le16 max_io_kb;
+};
+
+#define IOQ_PT_DATA_LEN 4096
+#define MAX_DEV_ENTRY_PER_PAGE_4K 340
+struct sssraid_dev_list {
+ __le32 dev_num;
+ __u32 rsvd0[3];
+ struct sssraid_dev_info devices[MAX_DEV_ENTRY_PER_PAGE_4K];
+};
+
+/*
+ * SSSRAID scsi device host data struct
+ */
+struct sssraid_sdev_hostdata {
+ u32 hdid;
+ u16 max_io_kb;
+ u8 attr;
+ u8 flag;
+ u8 rg_id;
+ u8 rsvd[3];
+};
+
+extern unsigned char small_pool_num;
+extern u32 io_queue_depth;
+irqreturn_t sssraid_isr_poll(int irq, void *privdata);
+bool sssraid_poll_cq(struct sssraid_dev *hdev, u16 qidx, int cid);
+void sssraid_submit_cmd(struct sssraid_squeue *sqinfo, const void *cmd);
+int sssraid_get_dev_list(struct sssraid_dev *hdev, struct sssraid_dev_info *devices);
+int sssraid_submit_admin_sync_cmd(struct sssraid_dev *hdev, struct sssraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout);
+int sssraid_send_abort_cmd(struct sssraid_dev *hdev, u32 hdid, u16 qidx, u16 cid);
+int sssraid_send_reset_cmd(struct sssraid_dev *hdev, u8 type, u32 hdid);
+void sssraid_adm_timeout(struct sssraid_dev *hdev, struct sssraid_cmd *cmd);
+int sssraid_init_ioc(struct sssraid_dev *hdev, u8 re_init);
+void sssraid_cleanup_ioc(struct sssraid_dev *hdev, u8 re_init);
+int sssraid_soft_reset_handler(struct sssraid_dev *hdev);
+void sssraid_free_iod_res(struct sssraid_dev *hdev, struct sssraid_iod *iod);
+bool sssraid_change_host_state(struct sssraid_dev *hdev, enum sssraid_state newstate);
+int sssraid_configure_timestamp(struct sssraid_dev *hdev);
+int sssraid_init_ctrl_info(struct sssraid_dev *hdev);
+struct sssraid_cmd *sssraid_get_cmd(struct sssraid_dev *hdev, enum sssraid_cmd_type type);
+void sssraid_put_cmd(struct sssraid_dev *hdev, struct sssraid_cmd *cmd,
+ enum sssraid_cmd_type type);
+int sssraid_send_event_ack(struct sssraid_dev *hdev, u8 event,
+ u32 event_ctx, u16 cid);
+struct sssraid_fwevt *sssraid_alloc_fwevt(int len);
+void sssraid_fwevt_add_to_list(struct sssraid_dev *hdev,
+ struct sssraid_fwevt *fwevt);
+void sssraid_cleanup_fwevt_list(struct sssraid_dev *hdev);
+void sssraid_ioc_enable_intr(struct sssraid_dev *hdev);
+void sssraid_ioc_disable_intr(struct sssraid_dev *hdev);
+void sssraid_cleanup_resources(struct sssraid_dev *hdev);
+void sssraid_complete_cqes(struct sssraid_dev *hdev, u16 qidx, u16 start, u16 end);
+int sssraid_io_map_data(struct sssraid_dev *hdev, struct sssraid_iod *iod,
+ struct scsi_cmnd *scmd, struct sssraid_ioq_command *ioq_cmd);
+void sssraid_map_status(struct sssraid_iod *iod, struct scsi_cmnd *scmd,
+ struct sssraid_completion *cqe);
+void sssraid_scan_disk(struct sssraid_dev *hdev);
+void sssraid_complete_aen(struct sssraid_dev *hdev, struct sssraid_completion *cqe);
+void sssraid_back_all_io(struct sssraid_dev *hdev);
+
+static inline void **sssraid_iod_list(struct sssraid_iod *iod)
+{
+ return iod->list;
+}
+
+#endif
diff --git a/drivers/scsi/sssraid/sssraid_debug.h b/drivers/scsi/sssraid/sssraid_debug.h
new file mode 100644
index 000000000000..d48c57e275f7
--- /dev/null
+++ b/drivers/scsi/sssraid/sssraid_debug.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 3SNIC Information Technology, Ltd */
+
+/* 3SNIC RAID SSSXXX Series Linux Driver */
+
+#ifndef SSSRAID_DEBUG_H_INCLUDED
+#define SSSRAID_DEBUG_H_INCLUDED
+
+/*
+ * debug levels
+ */
+#define SSSRAID_DEBUG 0x00000001
+
+/*
+ * debug macros
+ */
+
+#define ioc_err(ioc, fmt, ...) \
+ pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...) \
+ pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...) \
+ pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+
+
+#define dbgprint(IOC, FMT, ...) \
+ do { \
+ if (unlikely((IOC)->logging_level & SSSRAID_DEBUG)) \
+ pr_info("%s: " FMT, (IOC)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#endif /* SSSRAID_DEBUG_H_INCLUDED */
diff --git a/drivers/scsi/sssraid/sssraid_fw.c b/drivers/scsi/sssraid/sssraid_fw.c
new file mode 100644
index 000000000000..54287cf70558
--- /dev/null
+++ b/drivers/scsi/sssraid/sssraid_fw.c
@@ -0,0 +1,1724 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 3SNIC Information Technology, Ltd */
+
+/* 3SNIC RAID SSSXXX Series Linux Driver */
+
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <linux/sort.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/once.h>
+#include <linux/sched/signal.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "sssraid.h"
+#include "sssraid_debug.h"
+
+static int sssraid_wait_ready(struct sssraid_dev *hdev, u64 cap, bool enabled)
+{
+ unsigned long timeout =
+ ((SSSRAID_CAP_TIMEOUT(cap) + 1) * SSSRAID_CAP_TIMEOUT_UNIT_MS) + jiffies;
+ u32 bit = enabled ? SSSRAID_CSTS_RDY : 0;
+
+ while ((readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_RDY) != bit) {
+ usleep_range(1000, 2000);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ if (time_after(jiffies, timeout)) {
+ ioc_err(hdev, "Device not ready; aborting %s\n",
+ enabled ? "initialisation" : "reset");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int sssraid_enable_ctrl(struct sssraid_dev *hdev)
+{
+ u64 cap = hdev->cap;
+ u32 dev_page_min = SSSRAID_CAP_MPSMIN(cap) + 12;
+ u32 page_shift = PAGE_SHIFT;
+
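+ /* CAP advertises the controller's supported page-size range as
+ * MPSMIN/MPSMAX (log2 of the size minus 12); the host page size
+ * must fall inside that range
+ */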
+ if (page_shift < dev_page_min) {
+ ioc_err(hdev, "Minimum device page size[%u], too large for host[%u]\n",
+ 1U << dev_page_min, 1U << page_shift);
+ return -ENODEV;
+ }
+
+ page_shift = min_t(unsigned int, SSSRAID_CAP_MPSMAX(cap) + 12, PAGE_SHIFT);
+ hdev->page_size = 1U << page_shift;
+
+ hdev->ctrl_config = SSSRAID_CC_CSS_NVM;
+ hdev->ctrl_config |= (page_shift - 12) << SSSRAID_CC_MPS_SHIFT;
+ hdev->ctrl_config |= SSSRAID_CC_AMS_RR | SSSRAID_CC_SHN_NONE;
+ hdev->ctrl_config |= SSSRAID_CC_IOSQES | SSSRAID_CC_IOCQES;
+ hdev->ctrl_config |= SSSRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SSSRAID_REG_CC);
+
+ return sssraid_wait_ready(hdev, cap, true);
+}
+
+static int sssraid_disable_ctrl(struct sssraid_dev *hdev)
+{
+ hdev->ctrl_config &= ~SSSRAID_CC_SHN_MASK;
+ hdev->ctrl_config &= ~SSSRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SSSRAID_REG_CC);
+
+ return sssraid_wait_ready(hdev, hdev->cap, false);
+}
+
+static int sssraid_shutdown_ctrl(struct sssraid_dev *hdev)
+{
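+ /* rtd3e is the worst-case shutdown latency in microseconds reported
+ * by the controller; convert it to a jiffies deadline
+ */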
+ unsigned long timeout = le32_to_cpu(hdev->ctrl_info->rtd3e) / 1000000 * HZ + jiffies;
+
+ hdev->ctrl_config &= ~SSSRAID_CC_SHN_MASK;
+ hdev->ctrl_config |= SSSRAID_CC_SHN_NORMAL;
+ writel(hdev->ctrl_config, hdev->bar + SSSRAID_REG_CC);
+
+ while ((readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_SHST_MASK) !=
+ SSSRAID_CSTS_SHST_CMPLT) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ ioc_err(hdev, "Device shutdown incomplete; abort shutdown\n");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int sssraid_remap_bar(struct sssraid_dev *hdev, u32 size)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (size > pci_resource_len(pdev, 0)) {
+ ioc_err(hdev, "Input size[%u] exceed bar0 length[%llu]\n",
+ size, pci_resource_len(pdev, 0));
+ return -ENODEV;
+ }
+
+ if (hdev->bar)
+ iounmap(hdev->bar);
+
+ hdev->bar = ioremap(pci_resource_start(pdev, 0), size);
+ if (!hdev->bar) {
+ ioc_err(hdev, "ioremap for bar0 failed\n");
+ return -ENODEV;
+ }
+ hdev->dbs = hdev->bar + SSSRAID_REG_DBS;
+
+ return 0;
+}
+
+static int sssraid_create_dma_pools(struct sssraid_dev *hdev)
+{
+ int i;
+ char poolname[20] = { 0 };
+
+ hdev->prp_page_pool = dma_pool_create("prp list page", &hdev->pdev->dev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+
+ if (!hdev->prp_page_pool) {
+ ioc_err(hdev, "create prp_page_pool failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < small_pool_num; i++) {
+ sprintf(poolname, "prp_list_256_%d", i);
+ hdev->prp_small_pool[i] = dma_pool_create(poolname, &hdev->pdev->dev,
+ SMALL_POOL_SIZE,
+ SMALL_POOL_SIZE, 0);
+
+ if (!hdev->prp_small_pool[i]) {
+ ioc_err(hdev, "create prp_small_pool %d failed\n", i);
+ goto destroy_prp_small_pool;
+ }
+ }
+
+ return 0;
+
+destroy_prp_small_pool:
+ while (i > 0)
+ dma_pool_destroy(hdev->prp_small_pool[--i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+
+ return -ENOMEM;
+}
+
+static void sssraid_destroy_dma_pools(struct sssraid_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < small_pool_num; i++)
+ dma_pool_destroy(hdev->prp_small_pool[i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+}
+
+static int sssraid_alloc_resources(struct sssraid_dev *hdev)
+{
+ int retval, nqueue;
+
+ hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info), GFP_KERNEL, hdev->numa_node);
+ if (!hdev->ctrl_info)
+ return -ENOMEM;
+
+ retval = sssraid_create_dma_pools(hdev);
+ if (retval)
+ goto free_ctrl_info;
+ /* use num_possible_cpus(), not num_online_cpus(), so queues cover
+ * CPUs that may come online later
+ */
+ nqueue = num_possible_cpus() + 1;
+ hdev->cqinfo = kcalloc_node(nqueue, sizeof(struct sssraid_cqueue),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->cqinfo) {
+ retval = -ENOMEM;
+ goto destroy_dma_pools;
+ }
+
+ hdev->sqinfo = kcalloc_node(nqueue, sizeof(struct sssraid_squeue),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->sqinfo) {
+ retval = -ENOMEM;
+ goto free_cqueues;
+ }
+
+ /* sssraid_alloc_admin_cmds moved to sssraid_init_ioc */
+
+ ioc_info(hdev, "queues num: %d\n", nqueue);
+
+ return 0;
+
+free_cqueues:
+ kfree(hdev->cqinfo);
+destroy_dma_pools:
+ sssraid_destroy_dma_pools(hdev);
+free_ctrl_info:
+ kfree(hdev->ctrl_info);
+
+ return retval;
+}
+
+void sssraid_ioc_enable_intr(struct sssraid_dev *hdev)
+{
+ hdev->intr_enabled = 1;
+}
+
+void sssraid_ioc_disable_intr(struct sssraid_dev *hdev)
+{
+ u16 i, max_vectors;
+
+ hdev->intr_enabled = 0;
+ max_vectors = hdev->intr_info_count;
+
+ for (i = 0; i < max_vectors; i++)
+ synchronize_irq(pci_irq_vector(hdev->pdev, i));
+}
+
+static int sssraid_setup_resources(struct sssraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int retval = 0;
+ u64 maskbit = SSSRAID_DMA_MSK_BIT_MAX;
+
+ if (pci_enable_device_mem(pdev)) {
+ ioc_err(hdev, "pci_enable_device_mem: failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ retval = pci_request_mem_regions(pdev, SSSRAID_DRIVER_NAME);
+ if (retval) {
+ ioc_err(hdev, "fail to request memory regions\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ /* Map enough of BAR0 (SSSRAID_REG_DBS + 4096) to read the CAP
+ * register first; the ioremap itself stays inside sssraid_remap_bar,
+ * which is called again later with the final doorbell region size.
+ */
+ retval = sssraid_remap_bar(hdev, SSSRAID_REG_DBS + 4096);
+ if (retval) {
+ retval = -ENODEV;
+ goto out_failed;
+ }
+
+ pci_set_master(pdev);
+
+ if (readl(hdev->bar + SSSRAID_REG_CSTS) == U32_MAX) {
+ retval = -ENODEV;
+ ioc_err(hdev, "Read csts register failed\n");
+ goto out_failed;
+ }
+
+ hdev->cap = lo_hi_readq(hdev->bar + SSSRAID_REG_CAP);
+ hdev->ioq_depth = min_t(u32, SSSRAID_CAP_MQES(hdev->cap) + 1, io_queue_depth);
+ hdev->scsi_qd = hdev->ioq_depth - SSSRAID_PTCMDS_PERQ;
+ hdev->db_stride = 1 << SSSRAID_CAP_STRIDE(hdev->cap);
+
+ maskbit = SSSRAID_CAP_DMAMASK(hdev->cap);
+ if (maskbit < 32 || maskbit > SSSRAID_DMA_MSK_BIT_MAX) {
+ ioc_err(hdev, "err, dma mask invalid[%llu], set to default\n", maskbit);
+ maskbit = SSSRAID_DMA_MSK_BIT_MAX;
+ }
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ ioc_err(hdev, "set dma mask[32] and coherent failed\n");
+ retval = -ENODEV;
+ goto out_failed;
+ }
+ ioc_info(hdev, "set dma mask[32] success\n");
+ } else {
+ ioc_info(hdev, "set dma mask[%llu] success\n", maskbit);
+ }
+
+ /* pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES) moved to setup_isr */
+
+ pci_set_drvdata(pdev, hdev->shost);
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_save_state(pdev);
+
+ sssraid_ioc_disable_intr(hdev);
+
+ return retval;
+
+out_failed:
+ sssraid_cleanup_resources(hdev);
+ return retval;
+}
+
+static int sssraid_alloc_admin_cmds(struct sssraid_dev *hdev)
+{
+ u16 i;
+
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+ spin_lock_init(&hdev->adm_cmd_lock);
+
+ hdev->adm_cmds = kcalloc_node(SSSRAID_AQ_BLK_MQ_DEPTH, sizeof(struct sssraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->adm_cmds) {
+ ioc_err(hdev, "Alloc admin cmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SSSRAID_AQ_BLK_MQ_DEPTH; i++) {
+ hdev->adm_cmds[i].qid = 0;
+ hdev->adm_cmds[i].cid = i;
+ list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
+ }
+
+ ioc_info(hdev, "Alloc admin cmds success, num[%d]\n", SSSRAID_AQ_BLK_MQ_DEPTH);
+
+ return 0;
+}
+
+static int sssraid_alloc_qpair(struct sssraid_dev *hdev, u16 qidx, u16 depth)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+ int retval = 0;
+
+ if (hdev->init_done_queue_cnt > qidx) {
+ ioc_info(hdev, "warn: queue[%d] is exist\n", qidx);
+ return 0;
+ }
+
+ cqinfo->cqes = dma_alloc_coherent(&hdev->pdev->dev, CQ_SIZE(depth),
+ &cqinfo->cq_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!cqinfo->cqes)
+ return -ENOMEM;
+
+ sqinfo->sq_cmds = dma_alloc_coherent(&hdev->pdev->dev, SQ_SIZE(qidx, depth),
+ &sqinfo->sq_dma_addr, GFP_KERNEL);
+ if (!sqinfo->sq_cmds) {
+ retval = -ENOMEM;
+ goto free_cqes;
+ }
+
+ /* alloc sense buffer */
+ sqinfo->sense = dma_alloc_coherent(&hdev->pdev->dev, SENSE_SIZE(depth),
+ &sqinfo->sense_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ if (!sqinfo->sense) {
+ retval = -ENOMEM;
+ goto free_sq_cmds;
+ }
+
+ spin_lock_init(&sqinfo->sq_lock);
+ spin_lock_init(&cqinfo->cq_lock);
+ cqinfo->hdev = hdev;
+ sqinfo->hdev = hdev;
+ sqinfo->q_depth = depth;
+ sqinfo->qidx = qidx;
+ /* cq_vector replaced by msix_index */
+
+ /*
+ * The old counters online_queues (fully initialized queues, bumped in
+ * sssraid_init_queue) and queue_count (allocated but not yet fully
+ * initialized queues, bumped in sssraid_alloc_queue) are both
+ * replaced by init_done_queue_cnt.
+ */
+ hdev->init_done_queue_cnt++;
+
+ return 0;
+
+free_sq_cmds:
+ dma_free_coherent(&hdev->pdev->dev, SQ_SIZE(qidx, depth), (void *)sqinfo->sq_cmds,
+ sqinfo->sq_dma_addr);
+free_cqes:
+ dma_free_coherent(&hdev->pdev->dev, CQ_SIZE(depth), (void *)cqinfo->cqes,
+ cqinfo->cq_dma_addr);
+ return retval;
+}
+
+static void sssraid_init_queue(struct sssraid_dev *hdev, u16 qidx)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+
+ memset((void *)cqinfo->cqes, 0, CQ_SIZE(sqinfo->q_depth));
+
+ sqinfo->sq_tail = 0;
+ cqinfo->cq_head = 0;
+ cqinfo->cq_phase = 1;
+ sqinfo->q_db = &hdev->dbs[qidx * 2 * hdev->db_stride];
+ sqinfo->prp_small_pool = hdev->prp_small_pool[qidx % small_pool_num];
+}
+
+static int sssraid_setup_admin_qpair(struct sssraid_dev *hdev)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[0];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[0];
+ u32 aqa;
+ int retval;
+
+ ioc_info(hdev, "start disable ctrl\n");
+
+ retval = sssraid_disable_ctrl(hdev);
+ if (retval)
+ return retval;
+
+ /* this function does not allocate the admin queue; that is done
+ * in sssraid_init_ioc
+ */
+
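+ /* AQA holds the zero-based admin SQ and CQ sizes in its low and high
+ * halves; ASQ/ACQ take the queue base DMA addresses
+ */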
+ aqa = sqinfo->q_depth - 1;
+ aqa |= aqa << 16;
+ writel(aqa, hdev->bar + SSSRAID_REG_AQA);
+ lo_hi_writeq(sqinfo->sq_dma_addr, hdev->bar + SSSRAID_REG_ASQ);
+ lo_hi_writeq(cqinfo->cq_dma_addr, hdev->bar + SSSRAID_REG_ACQ);
+
+ ioc_info(hdev, "start enable ctrl\n");
+
+ retval = sssraid_enable_ctrl(hdev);
+ if (retval) {
+ retval = -ENODEV;
+ return retval;
+ }
+
+ /* interrupt registration happens later, in sssraid_setup_isr */
+ /* cq_vector replaced by msix_index */
+
+ sssraid_init_queue(hdev, 0);
+
+ ioc_info(hdev, "success, queuecount:[%d], pagesize[%d]\n",
+ hdev->init_done_queue_cnt, hdev->page_size);
+
+ return 0;
+}
+
+static void sssraid_cleanup_isr(struct sssraid_dev *hdev)
+{
+ u16 i;
+
+ sssraid_ioc_disable_intr(hdev);
+
+ if (!hdev->intr_info)
+ return;
+
+ for (i = 0; i < hdev->intr_info_count; i++)
+ free_irq(pci_irq_vector(hdev->pdev, i),
+ (hdev->intr_info + i));
+
+ kfree(hdev->intr_info);
+ hdev->intr_info = NULL;
+ hdev->intr_info_count = 0;
+ pci_free_irq_vectors(hdev->pdev);
+}
+
+static void sssraid_complete_adminq_cmnd(struct sssraid_dev *hdev, u16 qidx,
+ struct sssraid_completion *cqe)
+{
+ struct sssraid_cmd *adm_cmd;
+
+ adm_cmd = hdev->adm_cmds + le16_to_cpu(cqe->cmd_id);
+ if (unlikely(adm_cmd->state == SSSRAID_CMD_IDLE)) {
+ ioc_warn(hdev, "Invalid id %d completed on queue %d\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+ adm_cmd->result0 = le32_to_cpu(cqe->result);
+ adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&adm_cmd->cmd_done);
+}
+
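+/*
+ * A CQ entry is new when its phase bit matches the queue's current
+ * phase: the controller flips the bit on each ring wraparound, so
+ * entries left over from the previous pass are ignored.
+ */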
+static inline bool sssraid_cqe_pending(struct sssraid_cqueue *cqinfo)
+{
+ return (le16_to_cpu(cqinfo->cqes[cqinfo->cq_head].status) & 1) ==
+ cqinfo->cq_phase;
+}
+
+static inline void sssraid_update_cq_head(struct sssraid_dev *hdev, u16 qidx)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+
+ if (++cqinfo->cq_head == sqinfo->q_depth) {
+ cqinfo->cq_head = 0;
+ cqinfo->cq_phase = !cqinfo->cq_phase;
+ }
+}
+
+static inline bool sssraid_process_cq(struct sssraid_dev *hdev, u16 qidx, u16 *start,
+ u16 *end, int tag)
+{
+ bool found = false;
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+
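+ /* consume entries while the phase bit is current, stopping early if
+ * the requested tag is found, then ring the CQ head doorbell once
+ */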
+ *start = cqinfo->cq_head;
+ while (!found && sssraid_cqe_pending(cqinfo)) {
+ if (le16_to_cpu(cqinfo->cqes[cqinfo->cq_head].cmd_id) == tag)
+ found = true;
+ sssraid_update_cq_head(hdev, qidx);
+ }
+ *end = cqinfo->cq_head;
+
+ if (*start != *end)
+ writel(cqinfo->cq_head, sqinfo->q_db + sqinfo->hdev->db_stride);
+
+ return found;
+}
+
+static irqreturn_t sssraid_isr(int irq, void *privdata)
+{
+ struct sssraid_intr_info *intr_info = privdata;
+ struct sssraid_dev *hdev;
+ irqreturn_t ret = IRQ_NONE;
+ struct sssraid_cqueue *cqinfo;
+ u16 midx, start, end;
+
+ /* validate privdata before dereferencing it */
+ if (!intr_info)
+ return IRQ_NONE;
+
+ hdev = intr_info->hdev;
+ if (!hdev->intr_enabled)
+ return IRQ_NONE;
+
+ midx = intr_info->msix_index;
+ cqinfo = &hdev->cqinfo[midx];
+
+ spin_lock(&cqinfo->cq_lock);
+ if (cqinfo->cq_head != cqinfo->last_cq_head)
+ ret = IRQ_HANDLED;
+
+ sssraid_process_cq(hdev, midx, &start, &end, -1);
+ cqinfo->last_cq_head = cqinfo->cq_head;
+ spin_unlock(&cqinfo->cq_lock);
+
+ if (start != end) {
+ sssraid_complete_cqes(hdev, midx, start, end);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+irqreturn_t sssraid_isr_poll(int irq, void *privdata)
+{
+ return IRQ_NONE;
+}
+
+bool sssraid_poll_cq(struct sssraid_dev *hdev, u16 qidx, int cid)
+{
+ u16 start, end;
+ bool found;
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+
+ if (!sssraid_cqe_pending(cqinfo))
+ return 0;
+
+ spin_lock_irq(&cqinfo->cq_lock);
+ found = sssraid_process_cq(hdev, qidx, &start, &end, cid);
+ spin_unlock_irq(&cqinfo->cq_lock);
+
+ sssraid_complete_cqes(hdev, qidx, start, end);
+ return found;
+}
+
+static inline int sssraid_request_irq(struct sssraid_dev *hdev, u16 index)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct sssraid_intr_info *intr_info = hdev->intr_info + index;
+ int retval = 0;
+
+ intr_info->hdev = hdev;
+ intr_info->msix_index = index;
+ intr_info->cqinfo = NULL;
+
+ snprintf(intr_info->name, SSSRAID_NAME_LENGTH, "%s%d-msix%d",
+ SSSRAID_DRIVER_NAME, hdev->instance, index);
+
+ retval = request_threaded_irq(pci_irq_vector(pdev, index), sssraid_isr,
+ sssraid_isr_poll, IRQF_SHARED, intr_info->name, intr_info);
+
+ if (retval) {
+ ioc_err(hdev, "%s: Unable to allocate interrupt %d!\n",
+ intr_info->name, pci_irq_vector(pdev, index));
+ return retval;
+ }
+
+ return retval;
+}
+
+static int sssraid_setup_isr(struct sssraid_dev *hdev, u8 setup_one)
+{
+ unsigned int irq_flags = PCI_IRQ_MSIX;
+ u16 max_vectors = 0, i;
+ int retval = 0, nvec;
+
+ struct irq_affinity desc = { .pre_vectors = 1};
+
+ sssraid_cleanup_isr(hdev);
+
+ if (setup_one)
+ max_vectors = 1;
+ else {
+ max_vectors = hdev->before_affinity_msix_cnt;
+
+ ioc_info(hdev, "MSI-x vectors requested: %d\n", max_vectors);
+ }
+
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
+
+ /* keep the result in a signed int: pci_alloc_irq_vectors_affinity()
+ * returns a negative errno on failure, which a u16 would truncate
+ */
+ nvec = pci_alloc_irq_vectors_affinity(hdev->pdev,
+ 1, max_vectors, irq_flags, &desc);
+
+ if (nvec <= 0) {
+ ioc_err(hdev, "Cannot alloc irq vectors\n");
+ goto out_failed;
+ }
+ if (nvec != max_vectors) {
+ ioc_info(hdev,
+ "allocated vectors (%d) are fewer than configured (%d)\n",
+ nvec, max_vectors);
+
+ max_vectors = nvec;
+ }
+
+ hdev->intr_info = kcalloc(max_vectors, sizeof(struct sssraid_intr_info),
+ GFP_KERNEL);
+ if (!hdev->intr_info) {
+ retval = -ENOMEM;
+ pci_free_irq_vectors(hdev->pdev);
+ goto out_failed;
+ }
+
+ for (i = 0; i < max_vectors; i++) {
+ retval = sssraid_request_irq(hdev, i);
+ if (retval) {
+ hdev->intr_info_count = i; /* record IRQs registered so far so cleanup can free them */
+ goto out_failed;
+ }
+ }
+
+ /* intr_info_count replace max_qid */
+ hdev->intr_info_count = max_vectors;
+ sssraid_ioc_enable_intr(hdev);
+ return retval;
+out_failed:
+ sssraid_cleanup_isr(hdev);
+
+ return retval;
+}
+
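+/*
+ * Plain queue-management and set-features commands can simply be failed
+ * on timeout; any other admin command may have touched device state, so
+ * its timeout escalates to a controller reset.
+ */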
+static bool sssraid_adm_need_reset(struct sssraid_admin_command *cmd)
+{
+ switch (cmd->common.opcode) {
+ case SSSRAID_ADMIN_DELETE_SQ:
+ case SSSRAID_ADMIN_CREATE_SQ:
+ case SSSRAID_ADMIN_DELETE_CQ:
+ case SSSRAID_ADMIN_CREATE_CQ:
+ case SSSRAID_ADMIN_SET_FEATURES:
+ return false;
+ default:
+ return true;
+ }
+}
+
+void sssraid_submit_cmd(struct sssraid_squeue *sqinfo, const void *cmd)
+{
+ u32 sqes = SQE_SIZE(sqinfo->qidx);
+ unsigned long flags;
+ struct sssraid_admin_common_command *acd = (struct sssraid_admin_common_command *)cmd;
+
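+ /* copy the SQE into the tail slot, advance the tail with wraparound,
+ * then ring the SQ doorbell so the controller fetches the command
+ */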
+ spin_lock_irqsave(&sqinfo->sq_lock, flags);
+ memcpy((sqinfo->sq_cmds + sqes * sqinfo->sq_tail), cmd, sqes);
+ if (++sqinfo->sq_tail == sqinfo->q_depth)
+ sqinfo->sq_tail = 0;
+
+ writel(sqinfo->sq_tail, sqinfo->q_db);
+ spin_unlock_irqrestore(&sqinfo->sq_lock, flags);
+
+ dbgprint(sqinfo->hdev, "cid[%d] qidx[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n",
+ le16_to_cpu(acd->command_id), sqinfo->qidx, acd->opcode, acd->flags,
+ le32_to_cpu(acd->hdid));
+}
+
+int sssraid_submit_admin_sync_cmd(struct sssraid_dev *hdev, struct sssraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout)
+{
+ struct sssraid_cmd *adm_cmd = sssraid_get_cmd(hdev, SSSRAID_CMD_ADM);
+
+ if (!adm_cmd) {
+ ioc_err(hdev, "err, get admin cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ /*
+ * Synchronous commands wait on init_completion/complete; a polling
+ * watchdog would be less efficient here.
+ */
+ init_completion(&adm_cmd->cmd_done);
+
+ cmd->common.command_id = cpu_to_le16(adm_cmd->cid);
+ sssraid_submit_cmd(&hdev->sqinfo[0], cmd);
+
+ if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+ ioc_err(hdev, "cid[%d] qidx[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ adm_cmd->cid, adm_cmd->qid, cmd->usr_cmd.opcode,
+ cmd->usr_cmd.info_0.subopcode);
+
+ /* reset controller if admin timeout */
+ if (sssraid_adm_need_reset(cmd))
+ sssraid_adm_timeout(hdev, adm_cmd);
+
+ sssraid_put_cmd(hdev, adm_cmd, SSSRAID_CMD_ADM);
+ return -ETIME;
+ }
+
+ if (result0)
+ *result0 = adm_cmd->result0;
+ if (result1)
+ *result1 = adm_cmd->result1;
+
+ sssraid_put_cmd(hdev, adm_cmd, SSSRAID_CMD_ADM);
+
+ return adm_cmd->status;
+}
+
+static int sssraid_get_ctrl_info(struct sssraid_dev *hdev, struct sssraid_ctrl_info *ctrl_info)
+{
+ struct sssraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int retval;
+
+ data_ptr = dma_alloc_coherent(&hdev->pdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SSSRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SSSRAID_GET_INFO_CTRL;
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ retval = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!retval)
+ memcpy(ctrl_info, data_ptr, sizeof(struct sssraid_ctrl_info));
+
+ dma_free_coherent(&hdev->pdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return retval;
+}
+
+int sssraid_init_ctrl_info(struct sssraid_dev *hdev)
+{
+ int retval;
+
+ hdev->ctrl_info->nd = cpu_to_le32(240);
+ hdev->ctrl_info->mdts = 8;
+ hdev->ctrl_info->max_cmds = cpu_to_le16(4096);
+ hdev->ctrl_info->max_num_sge = cpu_to_le16(128);
+ hdev->ctrl_info->max_channel = cpu_to_le16(4);
+ hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239);
+ hdev->ctrl_info->max_lun = cpu_to_le16(2);
+
+ retval = sssraid_get_ctrl_info(hdev, hdev->ctrl_info);
+ if (retval)
+ ioc_err(hdev, "init controller info failed: %d\n", retval);
+
+ ioc_info(hdev, "nd = %d\n", le32_to_cpu(hdev->ctrl_info->nd));
+ ioc_info(hdev, "max_cmd = %d\n", le16_to_cpu(hdev->ctrl_info->max_cmds));
+ ioc_info(hdev, "max_channel = %d\n", le16_to_cpu(hdev->ctrl_info->max_channel));
+ ioc_info(hdev, "max_tgt_id = %d\n", le32_to_cpu(hdev->ctrl_info->max_tgt_id));
+ ioc_info(hdev, "max_lun = %d\n", le16_to_cpu(hdev->ctrl_info->max_lun));
+ ioc_info(hdev, "max_num_sge = %d\n", le16_to_cpu(hdev->ctrl_info->max_num_sge));
+ ioc_info(hdev, "lun_num_boot = %d\n", le16_to_cpu(hdev->ctrl_info->lun_num_in_boot));
+ ioc_info(hdev, "mdts = %d\n", hdev->ctrl_info->mdts);
+ ioc_info(hdev, "acl = %d\n", hdev->ctrl_info->acl);
+ ioc_info(hdev, "aer1 = %d\n", hdev->ctrl_info->aerl);
+ ioc_info(hdev, "card_type = %d\n", hdev->ctrl_info->card_type);
+ ioc_info(hdev, "rtd3e = %d\n", le32_to_cpu(hdev->ctrl_info->rtd3e));
+ ioc_info(hdev, "sn = %s\n", hdev->ctrl_info->sn);
+ ioc_info(hdev, "fr = %s\n", hdev->ctrl_info->fr);
+
+ if (!hdev->ctrl_info->aerl)
+ hdev->ctrl_info->aerl = 1;
+ if (hdev->ctrl_info->aerl > SSSRAID_NR_AEN_COMMANDS)
+ hdev->ctrl_info->aerl = SSSRAID_NR_AEN_COMMANDS;
+
+ return 0;
+}
+
+static int sssraid_set_features(struct sssraid_dev *hdev, u32 fid, u32 dword11, void *buffer,
+ size_t buflen, u32 *result)
+{
+ struct sssraid_admin_command admin_cmd;
+ int ret;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+
+ if (buffer && buflen) {
+ data_ptr = dma_alloc_coherent(&hdev->pdev->dev, buflen, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memcpy(data_ptr, buffer, buflen);
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.features.opcode = SSSRAID_ADMIN_SET_FEATURES;
+ admin_cmd.features.fid = cpu_to_le32(fid);
+ admin_cmd.features.dword11 = cpu_to_le32(dword11);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, result, NULL, 0);
+
+ if (data_ptr)
+ dma_free_coherent(&hdev->pdev->dev, buflen, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int sssraid_set_queue_cnt(struct sssraid_dev *hdev, u32 *cnt)
+{
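+ /*
+ * Number-of-queues feature: the requested SQ count goes in the low 16
+ * bits and the CQ count in the high 16 bits, both zero-based; the
+ * result reports the granted counts in the same layout.
+ */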
+ u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16);
+ u32 nr_ioqs, result;
+ int status;
+
+ status = sssraid_set_features(hdev, SSSRAID_FEAT_NUM_QUEUES, q_cnt, NULL, 0, &result);
+ if (status) {
+ ioc_err(hdev, "Set queue count failed, status: %d\n",
+ status);
+ return -EIO;
+ }
+
+ nr_ioqs = min(result & 0xffff, result >> 16) + 1;
+ *cnt = min(*cnt, nr_ioqs);
+ if (*cnt == 0) {
+ ioc_err(hdev, "Illegal queue count: zero\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int sssraid_create_cq(struct sssraid_dev *hdev, u16 qidx)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[qidx];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+ struct sssraid_admin_command admin_cmd;
+ int flags = SSSRAID_QUEUE_PHYS_CONTIG | SSSRAID_CQ_IRQ_ENABLED;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_cq.opcode = SSSRAID_ADMIN_CREATE_CQ;
+ admin_cmd.create_cq.prp1 = cpu_to_le64(cqinfo->cq_dma_addr);
+ admin_cmd.create_cq.cqid = cpu_to_le16(qidx);
+ admin_cmd.create_cq.qsize = cpu_to_le16(sqinfo->q_depth - 1);
+ admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
+ admin_cmd.create_cq.irq_vector = cpu_to_le16(qidx);
+
+ return sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static int sssraid_create_io_cq(struct sssraid_dev *hdev, u16 qidx)
+{
+ int retval;
+ struct sssraid_cqueue *cqinfo = hdev->cqinfo + qidx;
+ u16 midx = qidx;
+
+ retval = sssraid_create_cq(hdev, qidx);
+ if (retval)
+ return retval;
+
+ /*
+ * cqinfo initialization at sssraid_init_queue
+ */
+ hdev->intr_info[midx].cqinfo = cqinfo;
+
+ return retval;
+}
+
+static int sssraid_create_sq(struct sssraid_dev *hdev, u16 qidx)
+{
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+ struct sssraid_admin_command admin_cmd;
+ int flags = SSSRAID_QUEUE_PHYS_CONTIG;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_sq.opcode = SSSRAID_ADMIN_CREATE_SQ;
+ admin_cmd.create_sq.prp1 = cpu_to_le64(sqinfo->sq_dma_addr);
+ admin_cmd.create_sq.sqid = cpu_to_le16(qidx);
+ admin_cmd.create_sq.qsize = cpu_to_le16(sqinfo->q_depth - 1);
+ admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
+ admin_cmd.create_sq.cqid = cpu_to_le16(qidx);
+
+ return sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static int sssraid_create_io_sq(struct sssraid_dev *hdev, u16 qidx)
+{
+ return sssraid_create_sq(hdev, qidx);
+}
+
+int sssraid_get_dev_list(struct sssraid_dev *hdev, struct sssraid_dev_info *devices)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ struct sssraid_admin_command admin_cmd;
+ struct sssraid_dev_list *list_buf;
+ dma_addr_t data_dma = 0;
+ u32 i, idx, hdid, ndev;
+ int ret = 0;
+
+ list_buf = dma_alloc_coherent(&hdev->pdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!list_buf)
+ return -ENOMEM;
+
+ for (idx = 0; idx < nd;) {
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SSSRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SSSRAID_GET_INFO_DEV_LIST;
+ admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (ret) {
+ ioc_err(hdev, "Get device list failed, nd: %u, idx: %u, ret: %d\n",
+ nd, idx, ret);
+ goto out;
+ }
+ ndev = le32_to_cpu(list_buf->dev_num);
+
+ ioc_info(hdev, "ndev numbers: %u\n", ndev);
+
+ for (i = 0; i < ndev; i++) {
+ hdid = le32_to_cpu(list_buf->devices[i].hdid);
+ ioc_info(hdev, "list_buf->devices[%d], hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ i, hdid, le16_to_cpu(list_buf->devices[i].target),
+ list_buf->devices[i].channel,
+ list_buf->devices[i].lun,
+ list_buf->devices[i].attr);
+ if (hdid > nd || hdid == 0) {
+ ioc_err(hdev, "err, hdid[%d] invalid\n", hdid);
+ continue;
+ }
+ memcpy(&devices[hdid - 1], &list_buf->devices[i],
+ sizeof(struct sssraid_dev_info));
+ }
+ idx += ndev;
+
+ if (ndev < MAX_DEV_ENTRY_PER_PAGE_4K)
+ break;
+ }
+
+out:
+ dma_free_coherent(&hdev->pdev->dev, PAGE_SIZE, list_buf, data_dma);
+ return ret;
+}
+
+/* send abort command via the admin queue for now */
+int sssraid_send_abort_cmd(struct sssraid_dev *hdev, u32 hdid, u16 qidx, u16 cid)
+{
+ struct sssraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.abort.opcode = SSSRAID_ADMIN_ABORT_CMD;
+ admin_cmd.abort.hdid = cpu_to_le32(hdid);
+ admin_cmd.abort.sqid = cpu_to_le16(qidx);
+ admin_cmd.abort.cid = cpu_to_le16(cid);
+
+ return sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/* send reset command via the admin queue for now */
+int sssraid_send_reset_cmd(struct sssraid_dev *hdev, u8 type, u32 hdid)
+{
+ struct sssraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.reset.opcode = SSSRAID_ADMIN_RESET;
+ admin_cmd.reset.hdid = cpu_to_le32(hdid);
+ admin_cmd.reset.type = type;
+
+ return sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static int sssraid_delete_queue(struct sssraid_dev *hdev, u8 op, u16 qidx)
+{
+ struct sssraid_admin_command admin_cmd;
+ int retval;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.delete_queue.opcode = op;
+ admin_cmd.delete_queue.qid = cpu_to_le16(qidx);
+
+ retval = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (retval)
+ ioc_err(hdev, "Delete %s:[%d] failed\n",
+ (op == SSSRAID_ADMIN_DELETE_CQ) ? "cq" : "sq", qidx);
+
+ return retval;
+}
+
+static int sssraid_delete_cq(struct sssraid_dev *hdev, u16 qidx)
+{
+ return sssraid_delete_queue(hdev, SSSRAID_ADMIN_DELETE_CQ, qidx);
+}
+
+void sssraid_adm_timeout(struct sssraid_dev *hdev, struct sssraid_cmd *cmd)
+{
+ /* the command may already have completed due to a controller reset */
+ if (READ_ONCE(cmd->state) == SSSRAID_CMD_COMPLETE)
+ return;
+
+ if (!sssraid_change_host_state(hdev, SSSRAID_RESETTING)) {
+ ioc_info(hdev, "can't change to reset state\n");
+ return;
+ }
+ sssraid_soft_reset_handler(hdev);
+}
+
+static int sssraid_create_io_qpair(struct sssraid_dev *hdev, u16 qidx)
+{
+ int retval;
+
+ retval = sssraid_create_io_cq(hdev, qidx);
+ if (retval)
+ return retval;
+
+ retval = sssraid_create_io_sq(hdev, qidx);
+ if (retval)
+ goto delete_cq;
+
+ /* intr_info.msix_index substitutes for cq_vector */
+
+ /* io interrupt registration is done earlier in sssraid_setup_isr,
+ * not here
+ */
+
+ sssraid_init_queue(hdev, qidx);
+
+ return 0;
+
+delete_cq:
+ sssraid_delete_cq(hdev, qidx);
+
+ return retval;
+}
+
+static int sssraid_setup_io_qpair(struct sssraid_dev *hdev)
+{
+ u32 i, num_queues;
+ int retval = 0;
+
+ num_queues = min(hdev->intr_info_count, hdev->init_done_queue_cnt - 1);
+ for (i = 1; i <= num_queues; i++) {
+ retval = sssraid_create_io_qpair(hdev, i);
+ if (retval) {
+ ioc_err(hdev, "Create queue[%d] failed\n", i);
+ break;
+ }
+ }
+
+ ioc_info(hdev, "init_done_queue_cnt[%d], intr_info_count[%d] num_queues[%d]",
+ hdev->init_done_queue_cnt, /*hdev->online_queues,*/
+ hdev->intr_info_count, num_queues);
+
+ return retval >= 0 ? 0 : retval;
+}
+
+static int sssraid_alloc_ioq_ptcmds(struct sssraid_dev *hdev)
+{
+ int i;
+ int ptnum = SSSRAID_NR_IOQ_PTCMDS;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+ spin_lock_init(&hdev->ioq_pt_lock);
+
+ hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct sssraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->ioq_ptcmds) {
+ ioc_err(hdev, "Alloc ioq_ptcmds failed\n");
+ return -ENOMEM;
+ }
+
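+ /*
+ * Reserve SSSRAID_PTCMDS_PERQ passthrough slots per io queue; their
+ * command ids start at SSSRAID_IO_BLK_MQ_DEPTH so they never collide
+ * with tags handed out by the block layer.
+ */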
+ for (i = 0; i < ptnum; i++) {
+ hdev->ioq_ptcmds[i].qid = i / SSSRAID_PTCMDS_PERQ + 1;
+ hdev->ioq_ptcmds[i].cid = i % SSSRAID_PTCMDS_PERQ + SSSRAID_IO_BLK_MQ_DEPTH;
+ list_add_tail(&(hdev->ioq_ptcmds[i].list), &hdev->ioq_pt_list);
+ }
+
+ ioc_info(hdev, "Alloc ioq_ptcmds success, ptnum[%d]\n", ptnum);
+
+ return 0;
+}
+
+int sssraid_send_event_ack(struct sssraid_dev *hdev, u8 event,
+ u32 event_ctx, u16 cid)
+{
+ /* event and event_ctx are unused for now */
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[0];
+ struct sssraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = SSSRAID_ADMIN_ASYNC_EVENT;
+ admin_cmd.common.command_id = cpu_to_le16(cid);
+
+ sssraid_submit_cmd(sqinfo, &admin_cmd);
+ ioc_info(hdev, "send aen, cid[%d]\n", cid);
+
+ return 0;
+}
+
+static void sssraid_handle_aen_notice(struct sssraid_dev *hdev, u32 result)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SSSRAID_AEN_DEV_CHANGED:
+ sssraid_scan_disk(hdev);
+ break;
+ case SSSRAID_AEN_FW_ACT_START:
+ ioc_info(hdev, "fw activation starting\n");
+ break;
+ case SSSRAID_AEN_HOST_PROBING:
+ break;
+ default:
+ ioc_warn(hdev, "async event result %08x\n", result);
+ }
+}
+
+static void sssraid_handle_aen_vs(struct sssraid_dev *hdev, u32 result, u32 result1)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SSSRAID_AEN_TIMESYN:
+ sssraid_configure_timestamp(hdev);
+ break;
+ case SSSRAID_AEN_FW_ACT_FINISH:
+ ioc_info(hdev, "fw activation finish\n");
+ if (sssraid_init_ctrl_info(hdev))
+ ioc_err(hdev, "get ctrl info failed after fw act\n");
+ break;
+ case SSSRAID_AEN_EVENT_MIN ... SSSRAID_AEN_EVENT_MAX:
+ ioc_info(hdev, "rcv card event[%d], param1[0x%x] param2[0x%x]\n",
+ (result & 0xff00) >> 8, result, result1);
+ break;
+ default:
+ ioc_warn(hdev, "async event result: 0x%x\n", result);
+ }
+}
+
+static inline void sssraid_send_all_aen(struct sssraid_dev *hdev)
+{
+ u16 i;
+
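+ /* AEN commands use ids above the admin queue depth so that their
+ * completions can be told apart from ordinary admin commands
+ */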
+ for (i = 0; i < hdev->ctrl_info->aerl; i++)
+ sssraid_send_event_ack(hdev, 0, 0, i + SSSRAID_AQ_BLK_MQ_DEPTH);
+}
+
+static int sssraid_dev_list_init(struct sssraid_dev *hdev)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+
+ hdev->devices = kzalloc_node(nd * sizeof(struct sssraid_dev_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->devices)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int sssraid_configure_timestamp(struct sssraid_dev *hdev)
+{
+ __le64 ts;
+ int retval;
+
+ ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
+ retval = sssraid_set_features(hdev, SSSRAID_FEAT_TIMESTAMP, 0, &ts, sizeof(ts), NULL);
+
+ if (retval)
+ ioc_err(hdev, "set timestamp failed: %d\n", retval);
+ return retval;
+}
+
+int sssraid_init_ioc(struct sssraid_dev *hdev, u8 re_init)
+{
+ int retval = 0;
+ int i;
+ u32 nr_ioqs, bar_size;
+
+ if (!re_init) {
+ hdev->cpu_count = num_online_cpus();
+
+ retval = sssraid_alloc_resources(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to alloc resources:error %d\n",
+ retval);
+ goto out_nocleanup;
+ }
+ }
+
+ /* reset need re-setup */
+ retval = sssraid_setup_resources(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to setup resources:error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (!re_init) {
+ retval = sssraid_alloc_admin_cmds(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to alloc admin cmds:error %d\n",
+ retval);
+ goto out_failed;
+ }
+ /* put here:
+ * alloc admin queue
+ */
+ retval = sssraid_alloc_qpair(hdev, 0, SSSRAID_AQ_DEPTH);
+ if (retval) {
+ ioc_err(hdev, "Failed to alloc admin queue:error %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ retval = sssraid_setup_admin_qpair(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to setup admin queue:error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ /* 1. unregister all interrupts
+ * 2. register the admin interrupt
+ */
+ retval = sssraid_setup_isr(hdev, 1);
+ if (retval) {
+ ioc_err(hdev, "Failed to setup ISR error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ retval = sssraid_init_ctrl_info(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to get ctrl info error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ nr_ioqs = hdev->cpu_count;
+ retval = sssraid_set_queue_cnt(hdev, &nr_ioqs);
+ if (retval) {
+ ioc_err(hdev, "Failed to set queue cnt error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ hdev->before_affinity_msix_cnt = nr_ioqs + 1;
+
+ /* 1. unregister all interrupts
+ * 2. re-register the admin interrupt
+ * 3. register the io interrupts
+ */
+ retval = sssraid_setup_isr(hdev, 0);
+ if (retval) {
+ ioc_err(hdev, "Failed to re-setup ISR, error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ /* remap BAR0 to cover one SQ-tail/CQ-head doorbell pair (8 bytes
+ * times the doorbell stride) for the admin queue and each io queue
+ */
+ bar_size = SSSRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride);
+ retval = sssraid_remap_bar(hdev, bar_size);
+ if (retval) {
+ ioc_err(hdev, "Failed to re-map bar, error %d\n",
+ retval);
+ goto out_failed;
+ }
+ hdev->sqinfo[0].q_db = hdev->dbs;
+
+ /* num_vecs is no longer meaningful and has been dropped */
+
+ if (!re_init) {
+ for (i = hdev->init_done_queue_cnt; i <= hdev->intr_info_count; i++) {
+ retval = sssraid_alloc_qpair(hdev, i, hdev->ioq_depth);
+ if (retval) {
+ ioc_err(hdev, "Failed to alloc io queue:error %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+ ioc_info(hdev, "intr_info_count: %d, init_done_queue_cnt: %d, ioq_depth: %d\n",
+ hdev->intr_info_count, hdev->init_done_queue_cnt, hdev->ioq_depth);
+ }
+
+ retval = sssraid_setup_io_qpair(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to setup io qpair, error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ if (!re_init) {
+ retval = sssraid_alloc_ioq_ptcmds(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to alloc ioq ptcmds, error %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ sssraid_send_all_aen(hdev);
+
+ if (!re_init) {
+ retval = sssraid_dev_list_init(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to init device list, error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ retval = sssraid_configure_timestamp(hdev);
+ if (retval) {
+ ioc_err(hdev, "Failed to configure timestamp, error %d\n",
+ retval);
+ goto out_failed;
+ }
+ }
+
+ return retval;
+
+out_failed:
+ sssraid_cleanup_ioc(hdev, re_init);
+out_nocleanup:
+ return retval;
+}
+
+void sssraid_cleanup_resources(struct sssraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ sssraid_cleanup_isr(hdev);
+
+ if (hdev->bar) {
+ iounmap(hdev->bar);
+ hdev->bar = NULL;
+ }
+
+ if (pci_is_enabled(pdev)) {
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+ }
+}
+
+static void sssraid_free_ioq_ptcmds(struct sssraid_dev *hdev)
+{
+ kfree(hdev->ioq_ptcmds);
+ hdev->ioq_ptcmds = NULL;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+}
+
+static void sssraid_delete_io_queues(struct sssraid_dev *hdev)
+{
+ u16 queues = hdev->init_done_queue_cnt - 1;
+ u8 opcode = SSSRAID_ADMIN_DELETE_SQ;
+ u16 i, pass;
+
+ if (!pci_device_is_present(hdev->pdev)) {
+ ioc_err(hdev, "pci_device is not present, skip disable io queues\n");
+ return;
+ }
+
+ if (hdev->init_done_queue_cnt < 2) {
+ ioc_err(hdev, "err, io queue has been delete\n");
+ return;
+ }
+
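+ /* two passes: delete all SQs first, then all CQs, since a CQ cannot
+ * go away while an SQ still posts completions to it
+ */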
+ for (pass = 0; pass < 2; pass++) {
+ for (i = queues; i > 0; i--)
+ if (sssraid_delete_queue(hdev, opcode, i))
+ break;
+
+ opcode = SSSRAID_ADMIN_DELETE_CQ;
+ }
+}
+
+void sssraid_complete_aen(struct sssraid_dev *hdev, struct sssraid_completion *cqe)
+{
+ u32 result = le32_to_cpu(cqe->result);
+
+ ioc_info(hdev, "rcv aen, cid[%d], status[0x%x], result[0x%x]\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->status) >> 1, result);
+
+ /*
+ * The response to event moved from this func.
+ * sssraid_send_aen changed to name sssraid_send_event_ack
+ */
+
+ if ((le16_to_cpu(cqe->status) >> 1) != SSSRAID_SC_SUCCESS)
+ return;
+ switch (result & 0x7) {
+ case SSSRAID_AEN_NOTICE:
+ sssraid_handle_aen_notice(hdev, result);
+ break;
+ case SSSRAID_AEN_VS:
+ sssraid_handle_aen_vs(hdev, result, le32_to_cpu(cqe->result1));
+ break;
+ default:
+ ioc_warn(hdev, "Unsupported async event type: %u\n",
+ result & 0x7);
+ break;
+ }
+}
+
+void sssraid_free_iod_res(struct sssraid_dev *hdev, struct sssraid_iod *iod)
+{
+ const int last_prp = hdev->page_size / sizeof(__le64) - 1;
+ dma_addr_t dma_addr, next_dma_addr;
+ struct sssraid_sgl_desc *sg_list;
+ __le64 *prp_list;
+ void *addr;
+ int i;
+
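+ /*
+ * PRP/SGL list pages are chained: the last entry of each page holds
+ * the DMA address of the next one, so walk and free them in order.
+ */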
+ dma_addr = iod->first_dma;
+ if (iod->npages == 0)
+ dma_pool_free(iod->sqinfo->prp_small_pool, sssraid_iod_list(iod)[0], dma_addr);
+
+ for (i = 0; i < iod->npages; i++) {
+ addr = sssraid_iod_list(iod)[i];
+
+ if (iod->use_sgl) {
+ sg_list = addr;
+ next_dma_addr =
+ le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+ } else {
+ prp_list = addr;
+ next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+ }
+
+ dma_pool_free(hdev->prp_page_pool, addr, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+
+ iod->sense = NULL;
+ iod->npages = -1;
+}
+
+static void sssraid_complete_ioq_sync_cmnd(struct sssraid_dev *hdev, u16 qidx,
+ struct sssraid_completion *cqe)
+{
+ struct sssraid_cmd *ptcmd;
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+
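+ /* recover the passthrough slot from (qidx, cmd_id) the same way
+ * sssraid_alloc_ioq_ptcmds laid it out
+ */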
+ ptcmd = hdev->ioq_ptcmds + (sqinfo->qidx - 1) * SSSRAID_PTCMDS_PERQ +
+ le16_to_cpu(cqe->cmd_id) - SSSRAID_IO_BLK_MQ_DEPTH;
+
+ ptcmd->status = le16_to_cpu(cqe->status) >> 1;
+ ptcmd->result0 = le32_to_cpu(cqe->result);
+ ptcmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&ptcmd->cmd_done);
+}
+
+static void sssraid_complete_ioq_cmnd(struct sssraid_dev *hdev, u16 qidx,
+ struct sssraid_completion *cqe)
+{
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[qidx];
+
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct sssraid_iod *iod;
+ struct request *req;
+ unsigned long elapsed;
+
+ tags = hdev->shost->tag_set.tags[sqinfo->qidx - 1];
+
+ req = blk_mq_tag_to_rq(tags, le16_to_cpu(cqe->cmd_id));
+ if (unlikely(!req || !blk_mq_request_started(req))) {
+ ioc_warn(hdev, "Invalid id %d completed on queue %d\n",
+ le16_to_cpu(cqe->cmd_id), sqinfo->qidx);
+ return;
+ }
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dbgprint(hdev, "cid[%d] qidx[%d] finish IO cost %3ld.%3ld seconds\n",
+ le16_to_cpu(cqe->cmd_id), sqinfo->qidx, elapsed / HZ, elapsed % HZ);
+
+ if (cmpxchg(&iod->state, SSSRAID_CMD_IN_FLIGHT, SSSRAID_CMD_COMPLETE) !=
+ SSSRAID_CMD_IN_FLIGHT) {
+ ioc_warn(hdev, "cid[%d] qidx[%d] enters abnormal handler, cost %3ld.%3ld seconds\n",
+ le16_to_cpu(cqe->cmd_id), sqinfo->qidx, elapsed / HZ, elapsed % HZ);
+ WRITE_ONCE(iod->state, SSSRAID_CMD_TMO_COMPLETE);
+
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ sssraid_free_iod_res(hdev, iod);
+
+ return;
+ }
+
+ sssraid_map_status(iod, scmd, cqe);
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ sssraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+}
+
+static void sssraid_process_admin_cq(struct sssraid_dev *hdev,
+ struct sssraid_squeue *sqinfo,
+ struct sssraid_completion *cqe)
+{
+ struct sssraid_fwevt *fwevt = NULL;
+ u16 cid = le16_to_cpu(cqe->cmd_id), sz;
+
+ if (likely(cid < SSSRAID_AQ_BLK_MQ_DEPTH))
+ sssraid_complete_adminq_cmnd(hdev, sqinfo->qidx, cqe);
+ else {
+ sz = sizeof(*cqe);
+ fwevt = sssraid_alloc_fwevt(sz);
+ if (!fwevt) {
+ ioc_info(hdev, "%s :failure at %s:%d/%s()!\n",
+ __func__, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ memcpy(fwevt->event_data, cqe, sz);
+ fwevt->hdev = hdev;
+ fwevt->event_id = 0; /* evt_type:0 */
+ fwevt->send_ack = 1; /* ack_req:1 */
+ fwevt->process_evt = 1; /* process_evt_bh:1 */
+ fwevt->evt_ctx = 0; /* 0 */
+ sssraid_fwevt_add_to_list(hdev, fwevt);
+ }
+}
+
+static void sssraid_process_io_cq(struct sssraid_dev *hdev,
+ struct sssraid_squeue *sqinfo,
+ struct sssraid_completion *cqe)
+{
+ u16 cid = le16_to_cpu(cqe->cmd_id);
+
+ if (likely(cid < SSSRAID_IO_BLK_MQ_DEPTH))
+ sssraid_complete_ioq_cmnd(hdev, sqinfo->qidx, cqe);
+ else /* io sync handle */
+ sssraid_complete_ioq_sync_cmnd(hdev, sqinfo->qidx, cqe);
+}
+
+static inline void sssraid_handle_cqe(struct sssraid_dev *hdev, u16 mdix, u16 didx)
+{
+ struct sssraid_cqueue *cqinfo = &hdev->cqinfo[mdix];
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[mdix];
+
+ struct sssraid_completion *cqe = &cqinfo->cqes[didx];
+ u16 cid = le16_to_cpu(cqe->cmd_id);
+
+ if (unlikely(cid >= sqinfo->q_depth)) {
+ ioc_err(hdev, "Invalid command id[%d] completed on queue %d\n",
+ cid, cqe->sq_id);
+ return;
+ }
+
+ dbgprint(hdev, "cid[%d] mdix[%d], result[0x%x], sq_id[%d], status[0x%x]\n",
+ cid, sqinfo->qidx, le32_to_cpu(cqe->result),
+ le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
+
+ if (!mdix) /* admin */
+ sssraid_process_admin_cq(hdev, sqinfo, cqe);
+ else /* io */
+ sssraid_process_io_cq(hdev, sqinfo, cqe);
+}
+
+void sssraid_complete_cqes(struct sssraid_dev *hdev, u16 midx, u16 start, u16 end)
+{
+ struct sssraid_squeue *sqinfo = &hdev->sqinfo[midx];
+
+ while (start != end) {
+ sssraid_handle_cqe(hdev, midx, start);
+ if (++start == sqinfo->q_depth)
+ start = 0;
+ }
+}
+
+static void sssraid_disable_admin_queue(struct sssraid_dev *hdev, bool shutdown)
+{
+ struct sssraid_cqueue *adm_cqinfo = &hdev->cqinfo[0];
+ u16 start, end;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ sssraid_shutdown_ctrl(hdev);
+ else
+ sssraid_disable_ctrl(hdev);
+ }
+
+ if (hdev->init_done_queue_cnt == 0) {
+ ioc_err(hdev, "err, admin queue has been delete\n");
+ return;
+ }
+
+ spin_lock_irq(&adm_cqinfo->cq_lock);
+ sssraid_process_cq(hdev, 0, &start, &end, -1);
+ spin_unlock_irq(&adm_cqinfo->cq_lock);
+ sssraid_complete_cqes(hdev, 0, start, end);
+}
+
+static void sssraid_free_all_queues(struct sssraid_dev *hdev)
+{
+ int i;
+ struct sssraid_cqueue *cqinfo;
+ struct sssraid_squeue *sqinfo;
+
+ for (i = 0; i < hdev->init_done_queue_cnt; i++) {
+ cqinfo = &hdev->cqinfo[i];
+ sqinfo = &hdev->sqinfo[i];
+ dma_free_coherent(&hdev->pdev->dev, CQ_SIZE(sqinfo->q_depth),
+ (void *)cqinfo->cqes, cqinfo->cq_dma_addr);
+ dma_free_coherent(&hdev->pdev->dev, SQ_SIZE(sqinfo->qidx, sqinfo->q_depth),
+ sqinfo->sq_cmds, sqinfo->sq_dma_addr);
+ dma_free_coherent(&hdev->pdev->dev, SENSE_SIZE(sqinfo->q_depth),
+ sqinfo->sense, sqinfo->sense_dma_addr);
+ }
+
+ hdev->init_done_queue_cnt = 0;
+}
+
+static void sssraid_free_admin_cmds(struct sssraid_dev *hdev)
+{
+ kfree(hdev->adm_cmds);
+ hdev->adm_cmds = NULL;
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
+static void sssraid_free_resources(struct sssraid_dev *hdev)
+{
+ sssraid_free_admin_cmds(hdev);
+ kfree(hdev->sqinfo);
+ kfree(hdev->cqinfo);
+ sssraid_destroy_dma_pools(hdev);
+ kfree(hdev->ctrl_info);
+}
+
+void sssraid_cleanup_ioc(struct sssraid_dev *hdev, u8 re_init)
+{
+ if (!re_init)
+ sssraid_free_ioq_ptcmds(hdev);
+
+ sssraid_delete_io_queues(hdev);
+ sssraid_disable_admin_queue(hdev, !re_init);
+
+ if (!re_init)
+ sssraid_free_all_queues(hdev);
+
+ sssraid_ioc_disable_intr(hdev);
+ sssraid_cleanup_resources(hdev);
+
+ if (!re_init)
+ sssraid_free_resources(hdev);
+}
+
+int sssraid_soft_reset_handler(struct sssraid_dev *hdev)
+{
+ int retval = 0;
+
+ if (hdev->state != SSSRAID_RESETTING) {
+ ioc_err(hdev, "err, host is not reset state\n");
+ return retval;
+ }
+
+ ioc_info(hdev, "enter host reset\n");
+
+ sssraid_ioc_disable_intr(hdev);
+ sssraid_cleanup_fwevt_list(hdev);
+
+ /* the old sssraid_dev_disable -> sssraid_back_all_io path is
+ * realized here
+ */
+ sssraid_back_all_io(hdev);
+ /*
+ * the rest of sssraid_dev_disable is realized by
+ * sssraid_cleanup_ioc(hdev, 1)
+ */
+ if (hdev->ctrl_config & SSSRAID_CC_ENABLE) {
+ ioc_info(hdev, "start dev_disable\n");
+ sssraid_cleanup_ioc(hdev, 1);
+ }
+
+ retval = sssraid_init_ioc(hdev, 1);
+ if (retval) {
+ ioc_err(hdev, "init ioc fail.\n");
+ return retval;
+ }
+
+ sssraid_change_host_state(hdev, SSSRAID_LIVE);
+
+ return retval;
+}
diff --git a/drivers/scsi/sssraid/sssraid_os.c b/drivers/scsi/sssraid/sssraid_os.c
new file mode 100644
index 000000000000..16d2b94d78f3
--- /dev/null
+++ b/drivers/scsi/sssraid/sssraid_os.c
@@ -0,0 +1,2337 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 3SNIC Information Technology, Ltd */
+
+/* 3SNIC RAID SSSXXX Series Linux Driver */
+
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <linux/sort.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/once.h>
+#include <linux/sched/signal.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include "sssraid.h"
+#include "sssraid_debug.h"
+
+u32 admin_tmout = 60;
+module_param(admin_tmout, uint, 0644);
+MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
+
+static u32 scmd_tmout_rawdisk = 180;
+module_param(scmd_tmout_rawdisk, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_rawdisk, "scsi commands timeout for rawdisk(seconds)");
+
+static u32 scmd_tmout_vd = 180;
+module_param(scmd_tmout_vd, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_vd, "scsi commands timeout for vd(seconds)");
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops ioq_depth_ops = {
+ .set = ioq_depth_set,
+ .get = param_get_uint,
+};
+
+u32 io_queue_depth = 1024;
+module_param_cb(io_queue_depth, &ioq_depth_ops, &io_queue_depth, 0644);
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");
+
+static int logging_level_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+
+ return param_set_byte(val, kp);
+}
+
+static const struct kernel_param_ops logging_level_ops = {
+ .set = logging_level_set,
+ .get = param_get_byte,
+};
+
+static unsigned char logging_level;
+module_param_cb(logging_level, &logging_level_ops, &logging_level, 0644);
+MODULE_PARM_DESC(logging_level, "set log level, default zero for switch off");
+
+static int small_pool_num_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+ if (n > MAX_SMALL_POOL_NUM)
+ n = MAX_SMALL_POOL_NUM;
+ if (n < 1)
+ n = 1;
+ *((u8 *)kp->arg) = n;
+
+ return 0;
+}
+
+static const struct kernel_param_ops small_pool_num_ops = {
+ .set = small_pool_num_set,
+ .get = param_get_byte,
+};
+
+/* Small pools are used to store PRP lists for small IOs. The spinlock
+ * of a single pool was found to be heavily contended across multiple
+ * CPUs, so multiple pools are introduced to reduce that contention.
+ */
+unsigned char small_pool_num = 4;
+module_param_cb(small_pool_num, &small_pool_num_ops, &small_pool_num, 0644);
+MODULE_PARM_DESC(small_pool_num, "set prp small pool num, default 4, MAX 16");
+
+
+enum FW_STAT_CODE {
+ FW_STAT_OK = 0,
+ FW_STAT_NEED_CHECK,
+ FW_STAT_ERROR,
+ FW_STAT_EP_PCIE_ERROR,
+ FW_STAT_NAC_DMA_ERROR,
+ FW_STAT_ABORTED,
+ FW_STAT_NEED_RETRY
+};
+
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60", "NA"};
+
+static const char * const raid_states[] = {
+ "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED", "FORMATTING", "SANITIZING",
+ "INITIALIZING", "INITIALIZE_FAIL", "DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0;
+ int ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+/*
+ * common
+ */
+static struct class *sssraid_class;
+
+struct sssraid_fwevt *sssraid_alloc_fwevt(int len)
+{
+ struct sssraid_fwevt *fwevt;
+
+ fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
+ if (!fwevt)
+ return NULL;
+
+ kref_init(&fwevt->ref_count);
+ return fwevt;
+}
+
+static void sssraid_fwevt_free(struct kref *r)
+{
+ kfree(container_of(r, struct sssraid_fwevt, ref_count));
+}
+
+static void sssraid_fwevt_get(struct sssraid_fwevt *fwevt)
+{
+ kref_get(&fwevt->ref_count);
+}
+
+static void sssraid_fwevt_put(struct sssraid_fwevt *fwevt)
+{
+ kref_put(&fwevt->ref_count, sssraid_fwevt_free);
+}
+
+static void sssraid_fwevt_del_from_list(struct sssraid_dev *hdev,
+ struct sssraid_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdev->fwevt_lock, flags);
+ if (!list_empty(&fwevt->list)) {
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ sssraid_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&hdev->fwevt_lock, flags);
+}
+
+static void sssraid_fwevt_bh(struct sssraid_dev *hdev,
+ struct sssraid_fwevt *fwevt)
+{
+ struct sssraid_completion *cqe;
+
+ hdev->current_event = fwevt;
+ sssraid_fwevt_del_from_list(hdev, fwevt);
+
+ cqe = (struct sssraid_completion *)fwevt->event_data;
+
+ if (!fwevt->process_evt)
+ goto evt_ack;
+
+ sssraid_complete_aen(hdev, cqe);
+
+evt_ack:
+ /* event response put here: event has been handled. */
+ sssraid_send_event_ack(hdev, fwevt->event_id,
+ fwevt->evt_ctx, le16_to_cpu(cqe->cmd_id));
+ sssraid_fwevt_put(fwevt);
+ hdev->current_event = NULL;
+}
+
+static void sssraid_fwevt_worker(struct work_struct *work)
+{
+ struct sssraid_fwevt *fwevt = container_of(work, struct sssraid_fwevt,
+ work);
+ sssraid_fwevt_bh(fwevt->hdev, fwevt);
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ sssraid_fwevt_put(fwevt);
+}
+
+void sssraid_fwevt_add_to_list(struct sssraid_dev *hdev,
+ struct sssraid_fwevt *fwevt)
+{
+ unsigned long flags;
+
+ if (!hdev->fwevt_worker_thread)
+ return;
+
+ spin_lock_irqsave(&hdev->fwevt_lock, flags);
+ /* get fwevt reference count while adding it to fwevt_list */
+ sssraid_fwevt_get(fwevt);
+ INIT_LIST_HEAD(&fwevt->list);
+ list_add_tail(&fwevt->list, &hdev->fwevt_list);
+ INIT_WORK(&fwevt->work, sssraid_fwevt_worker);
+ /* get fwevt reference count while enqueueing it to worker queue */
+ sssraid_fwevt_get(fwevt);
+ queue_work(hdev->fwevt_worker_thread, &fwevt->work);
+ spin_unlock_irqrestore(&hdev->fwevt_lock, flags);
+}
+
+static struct sssraid_fwevt *sssraid_dequeue_fwevt(
+ struct sssraid_dev *hdev)
+{
+ unsigned long flags;
+ struct sssraid_fwevt *fwevt = NULL;
+
+ spin_lock_irqsave(&hdev->fwevt_lock, flags);
+ if (!list_empty(&hdev->fwevt_list)) {
+ fwevt = list_first_entry(&hdev->fwevt_list,
+ struct sssraid_fwevt, list);
+ list_del_init(&fwevt->list);
+ /*
+ * Put fwevt reference count after
+ * removing it from fwevt_list
+ */
+ sssraid_fwevt_put(fwevt);
+ }
+ spin_unlock_irqrestore(&hdev->fwevt_lock, flags);
+
+ return fwevt;
+}
+
+void sssraid_cleanup_fwevt_list(struct sssraid_dev *hdev)
+{
+ struct sssraid_fwevt *fwevt = NULL;
+
+ if ((list_empty(&hdev->fwevt_list) && !hdev->current_event) ||
+ !hdev->fwevt_worker_thread)
+ return;
+
+ while ((fwevt = sssraid_dequeue_fwevt(hdev)) ||
+ (fwevt = hdev->current_event)) {
+ /*
+ * Wait for the fwevt to complete. If cancel_work_sync() returns
+ * true, the work was never executed, so we must drop the
+ * reference the worker queue held on the fwevt.
+ *
+ * If it did execute, the worker's reference has already been
+ * dropped in sssraid_fwevt_worker().
+ */
+ if (cancel_work_sync(&fwevt->work)) {
+ /*
+ * Put fwevt reference count after
+ * dequeuing it from worker queue
+ */
+ sssraid_fwevt_put(fwevt);
+ /*
+ * Put fwevt reference count to neutralize
+ * kref_init increment
+ */
+ sssraid_fwevt_put(fwevt);
+ }
+ }
+}
+
+/*
+ * common 1
+ */
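+/*
+ * Worst-case number of PRP-list pages per command: the max transfer
+ * size is (1 << mdts) 4KB units with one PRP entry per device page,
+ * and the last entry of each list page is a link to the next page
+ * (hence the PRP_ENTRY_SIZE subtracted from the divisor below).
+ */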
+static int sssraid_npages_prp(struct sssraid_dev *hdev)
+{
+ u32 size = 1U << ((hdev->ctrl_info->mdts) * 1U) << 12;
+ u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
+
+ return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, hdev->page_size - PRP_ENTRY_SIZE);
+}
+
+static int sssraid_npages_sgl(struct sssraid_dev *hdev)
+{
+ u32 nsge = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+
+ return DIV_ROUND_UP(nsge * sizeof(struct sssraid_sgl_desc), hdev->page_size);
+}
+
+static u32 sssraid_cmd_size(struct sssraid_dev *hdev)
+{
+ u32 alloc_size = sizeof(__le64 *) * max(sssraid_npages_prp(hdev), sssraid_npages_sgl(hdev));
+
+ ioc_info(hdev, "iod size: %lu, alloc_size: %u\n",
+ sizeof(struct sssraid_iod), alloc_size);
+
+ return sizeof(struct sssraid_iod) + alloc_size;
+}
+
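+/*
+ * Build NVMe-style PRP lists for the mapped scatterlist: the first
+ * (possibly unaligned) page is addressed by prp1, which the caller
+ * sets; the rest of the payload is described here through chained
+ * PRP list pages taken from the small or full-page DMA pool.
+ */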
+static int sssraid_setup_prps(struct sssraid_dev *hdev, struct sssraid_iod *iod)
+{
+ struct scatterlist *sg = iod->sg;
+ u64 dma_addr = sg_dma_address(sg);
+ int dma_len = sg_dma_len(sg);
+ __le64 *prp_list, *old_prp_list;
+ int page_size = hdev->page_size;
+ int offset = dma_addr & (page_size - 1);
+ void **list = sssraid_iod_list(iod);
+ int length = iod->length;
+ struct dma_pool *pool;
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ length -= (page_size - offset);
+ if (length <= 0) {
+ iod->first_dma = 0;
+ return 0;
+ }
+
+ dma_len -= (page_size - offset);
+ if (dma_len) {
+ dma_addr += (page_size - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= page_size) {
+ iod->first_dma = dma_addr;
+ return 0;
+ }
+
+ nprps = DIV_ROUND_UP(length, page_size);
+ if (nprps <= (SMALL_POOL_SIZE / PRP_ENTRY_SIZE)) {
+ pool = iod->sqinfo->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(&hdev->pdev->dev, "Allocate first prp_list memory failed\n");
+ iod->first_dma = dma_addr;
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ i = 0;
+ for (;;) {
+ if (i == page_size / PRP_ENTRY_SIZE) {
+ old_prp_list = prp_list;
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(&hdev->pdev->dev, "Allocate %dth prp_list memory failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= page_size;
+ dma_addr += page_size;
+ length -= page_size;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ return 0;
+
+bad_sgl:
+ ioc_err(hdev, "Setup prps, invalid SGL for payload: %d nents: %d\n",
+ iod->length, iod->nsge);
+ return -EIO;
+}
+
+static inline bool sssraid_is_rw_scmd(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ return true;
+ default:
+ return false;
+ }
+}
+
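+/*
+ * Decide whether the scatterlist can be described with PRPs: every
+ * middle element must be page aligned and a page-size multiple, the
+ * first element must end on a page boundary and the last must start
+ * on one; otherwise the command falls back to SGLs.
+ */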
+static bool sssraid_is_prp(struct sssraid_dev *hdev, struct scsi_cmnd *scmd, u32 nsge)
+{
+ struct scatterlist *sg = scsi_sglist(scmd);
+ u32 page_mask = hdev->page_size - 1;
+ bool is_prp = true;
+ int i = 0;
+
+ scsi_for_each_sg(scmd, sg, nsge, i) {
+ if (i != 0 && i != nsge - 1) {
+ if ((sg_dma_len(sg) & page_mask) ||
+ (sg_dma_address(sg) & page_mask)) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == 0) {
+ if ((sg_dma_address(sg) + sg_dma_len(sg)) & page_mask) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == (nsge - 1)) {
+ if (sg_dma_address(sg) & page_mask) {
+ is_prp = false;
+ break;
+ }
+ }
+ }
+
+ return is_prp;
+}
+
+static void sssraid_sgl_set_data(struct sssraid_sgl_desc *sge, struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = SSSRAID_SGL_FMT_DATA_DESC << 4;
+}
+
+static void sssraid_sgl_set_seg(struct sssraid_sgl_desc *sge, dma_addr_t dma_addr, int entries)
+{
+ sge->addr = cpu_to_le64(dma_addr);
+ if (entries <= SGES_PER_PAGE) {
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = SSSRAID_SGL_FMT_LAST_SEG_DESC << 4;
+ } else {
+ sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->type = SSSRAID_SGL_FMT_SEG_DESC << 4;
+ }
+}
+
+
+static int sssraid_setup_ioq_cmd_sgl(struct sssraid_dev *hdev,
+ struct scsi_cmnd *scmd, struct sssraid_ioq_command *ioq_cmd,
+ struct sssraid_iod *iod)
+{
+ struct sssraid_sgl_desc *sg_list, *link, *old_sg_list;
+ struct scatterlist *sg = scsi_sglist(scmd);
+ void **list = sssraid_iod_list(iod);
+ struct dma_pool *pool;
+ int nsge = iod->nsge;
+ dma_addr_t sgl_dma;
+ int i = 0;
+
+ ioq_cmd->common.flags |= SSSRAID_CMD_FLAG_SGL_METABUF;
+
+ if (nsge == 1) {
+ sssraid_sgl_set_data(&ioq_cmd->common.dptr.sgl, sg);
+ return 0;
+ }
+
+ if (nsge <= (SMALL_POOL_SIZE / sizeof(struct sssraid_sgl_desc))) {
+ pool = iod->sqinfo->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(&hdev->pdev->dev, "Allocate first sgl_list failed\n");
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+
+ list[0] = sg_list;
+ iod->first_dma = sgl_dma;
+ sssraid_sgl_set_seg(&ioq_cmd->common.dptr.sgl, sgl_dma, nsge);
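+ /*
+ * Fill SGL data descriptors; when a segment page fills up, its last
+ * slot is recycled as a link descriptor pointing to the next freshly
+ * allocated segment page (standard chained-SGL layout).
+ */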
+ do {
+ if (i == SGES_PER_PAGE) {
+ old_sg_list = sg_list;
+ link = &old_sg_list[SGES_PER_PAGE - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(&hdev->pdev->dev, "Allocate %dth sgl_list failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = sg_list;
+
+ i = 0;
+ memcpy(&sg_list[i++], link, sizeof(*link));
+ sssraid_sgl_set_seg(link, sgl_dma, nsge);
+ }
+
+ sssraid_sgl_set_data(&sg_list[i++], sg);
+ sg = sg_next(sg);
+ } while (--nsge > 0);
+
+ return 0;
+}
+
+static void sssraid_shost_init(struct sssraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u8 domain, bus;
+ u32 dev_func;
+
+ domain = pci_domain_nr(pdev->bus);
+ bus = pdev->bus->number;
+ dev_func = pdev->devfn;
+
+ hdev->shost->nr_hw_queues = hdev->init_done_queue_cnt - 1;
+ hdev->shost->can_queue = (hdev->ioq_depth - SSSRAID_PTCMDS_PERQ);
+
+ hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ /* 512B per sector */
+ hdev->shost->max_sectors = (1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512;
+ hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV;
+ hdev->shost->max_channel = le16_to_cpu(hdev->ctrl_info->max_channel) - 1;
+ hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id);
+ hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun);
+
+ hdev->shost->this_id = -1;
+ hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func;
+ hdev->shost->max_cmd_len = MAX_CDB_LEN;
+ hdev->shost->hostt->cmd_size = sssraid_cmd_size(hdev);
+}
+
+static inline void sssraid_get_tag_from_scmd(struct scsi_cmnd *scmd, u16 *qidx, u16 *cid)
+{
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
+ *qidx = blk_mq_unique_tag_to_hwq(tag) + 1;
+ *cid = blk_mq_unique_tag_to_tag(tag);
+}
+
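+/*
+ * Local be24 helper: loads 32 bits starting one byte *before* p and
+ * masks down to 24, so callers must guarantee p - 1 is readable
+ * (true here, since it is only applied to &cmnd[1]).
+ */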
+static inline uint32_t get_unaligned_be24(const uint8_t *const p)
+{
+ return get_unaligned_be32(p - 1) & 0xffffffU;
+}
+
+static int sssraid_setup_rw_cmd(struct sssraid_dev *hdev,
+ struct sssraid_rw_command *rw,
+ struct scsi_cmnd *scmd)
+{
+ u32 start_lba_lo, start_lba_hi;
+ u32 datalength = 0;
+ u16 control = 0;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ rw->opcode = SSSRAID_CMD_WRITE;
+ } else if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
+ rw->opcode = SSSRAID_CMD_READ;
+ } else {
+ ioc_err(hdev, "Invalid RW_IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* 6-byte READ(0x08) or WRITE(0x0A) cdb */
+ if (scmd->cmd_len == 6) {
+ datalength = (u32)(scmd->cmnd[4] == 0 ?
+ IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
+ start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
+
+ start_lba_lo &= 0x1FFFFF;
+ }
+
+ /* 10-byte READ(0x28) or WRITE(0x2A) cdb */
+ else if (scmd->cmd_len == 10) {
+ datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SSSRAID_RW_FUA;
+ }
+
+ /* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
+ else if (scmd->cmd_len == 12) {
+ datalength = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SSSRAID_RW_FUA;
+ }
+ /* 16-byte READ(0x88) or WRITE(0x8A) cdb */
+ else if (scmd->cmd_len == 16) {
+ datalength = get_unaligned_be32(&scmd->cmnd[10]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SSSRAID_RW_FUA;
+ }
+
+ if (unlikely(datalength > U16_MAX || datalength == 0)) {
+ ioc_err(hdev, "Invalid IO for illegal transfer data length: %u\n", datalength);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rw->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo);
+ /* 0base for nlb */
+ rw->nlb = cpu_to_le16((u16)(datalength - 1));
+ rw->control = cpu_to_le16(control);
+
+ return 0;
+}
+
+static int sssraid_setup_nonio_cmd(struct sssraid_dev *hdev,
+ struct sssraid_scsi_nonio *scsi_nonio, struct scsi_cmnd *scmd)
+{
+ scsi_nonio->buffer_len = cpu_to_le32(scsi_bufflen(scmd));
+
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ scsi_nonio->opcode = SSSRAID_CMD_NONIO_NONE;
+ break;
+ case DMA_TO_DEVICE:
+ scsi_nonio->opcode = SSSRAID_CMD_NONIO_TODEV;
+ break;
+ case DMA_FROM_DEVICE:
+ scsi_nonio->opcode = SSSRAID_CMD_NONIO_FROMDEV;
+ break;
+ default:
+ ioc_err(hdev, "Invalid NON_IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sssraid_setup_ioq_cmd(struct sssraid_dev *hdev,
+ struct sssraid_ioq_command *ioq_cmd, struct scsi_cmnd *scmd)
+{
+ memcpy(ioq_cmd->common.cdb, scmd->cmnd, scmd->cmd_len);
+ ioq_cmd->common.cdb_len = scmd->cmd_len;
+
+ if (sssraid_is_rw_scmd(scmd))
+ return sssraid_setup_rw_cmd(hdev, &ioq_cmd->rw, scmd);
+ else
+ return sssraid_setup_nonio_cmd(hdev, &ioq_cmd->scsi_nonio, scmd);
+}
+
+static inline void sssraid_init_iod(struct sssraid_iod *iod)
+{
+ iod->nsge = 0;
+ iod->npages = -1;
+ iod->use_sgl = false;
+ WRITE_ONCE(iod->state, SSSRAID_CMD_IDLE);
+}
+
+int sssraid_io_map_data(struct sssraid_dev *hdev, struct sssraid_iod *iod,
+ struct scsi_cmnd *scmd, struct sssraid_ioq_command *ioq_cmd)
+{
+ int retval;
+
+ retval = scsi_dma_map(scmd);
+ if (unlikely(retval < 0))
+ return retval;
+ iod->nsge = retval;
+ /* No data to DMA, it may be scsi no-rw command */
+ if (unlikely(iod->nsge == 0))
+ return 0;
+
+ iod->length = scsi_bufflen(scmd);
+ iod->sg = scsi_sglist(scmd);
+ iod->use_sgl = !sssraid_is_prp(hdev, scmd, iod->nsge);
+
+ if (iod->use_sgl) {
+ retval = sssraid_setup_ioq_cmd_sgl(hdev, scmd, ioq_cmd, iod);
+ } else {
+ retval = sssraid_setup_prps(hdev, iod);
+ ioq_cmd->common.dptr.prp1 =
+ cpu_to_le64(sg_dma_address(iod->sg));
+ ioq_cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+ }
+
+ if (retval)
+ scsi_dma_unmap(scmd);
+
+ return retval;
+}
+
+void sssraid_map_status(struct sssraid_iod *iod, struct scsi_cmnd *scmd,
+ struct sssraid_completion *cqe)
+{
+ struct sssraid_dev *hdev = iod->sqinfo->hdev;
+
+ scsi_set_resid(scmd, 0);
+
+ switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) {
+ case FW_STAT_OK:
+ set_host_byte(scmd, DID_OK);
+ break;
+ case FW_STAT_NEED_CHECK:
+ set_host_byte(scmd, DID_OK);
+ scmd->result |= le16_to_cpu(cqe->status) >> 8;
+ if (scmd->result & SAM_STAT_CHECK_CONDITION) {
+ memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(scmd->sense_buffer, iod->sense, SCSI_SENSE_BUFFERSIZE);
+ scmd->result = (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
+ }
+ break;
+ case FW_STAT_ABORTED:
+ set_host_byte(scmd, DID_ABORT);
+ break;
+ case FW_STAT_NEED_RETRY:
+ set_host_byte(scmd, DID_REQUEUE);
+ break;
+ default:
+ set_host_byte(scmd, DID_BAD_TARGET);
+ ioc_warn(hdev, "cid[%d] qid[%d] bad status[0x%x]\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id),
+ le16_to_cpu(cqe->status));
+ break;
+ }
+}
+
+
+struct sssraid_cmd *sssraid_get_cmd(struct sssraid_dev *hdev, enum sssraid_cmd_type type)
+{
+ struct sssraid_cmd *cmd = NULL;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SSSRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(slock, flags);
+ ioc_err(hdev, "err, cmd[%d] list empty\n", type);
+ return NULL;
+ }
+ cmd = list_entry(head->next, struct sssraid_cmd, list);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(slock, flags);
+
+ WRITE_ONCE(cmd->state, SSSRAID_CMD_IN_FLIGHT);
+
+ return cmd;
+}
+
+static int sssraid_add_device(struct sssraid_dev *hdev, struct sssraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ ioc_info(hdev, "add device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel, le16_to_cpu(device->target), 0);
+ if (sdev) {
+ ioc_warn(hdev, "Device is already exist, channel: %d, target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ scsi_device_put(sdev);
+ return -EEXIST;
+ }
+ scsi_add_device(shost, device->channel, le16_to_cpu(device->target), 0);
+ return 0;
+}
+
+static int sssraid_rescan_device(struct sssraid_dev *hdev, struct sssraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ ioc_info(hdev, "rescan device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel, le16_to_cpu(device->target), 0);
+ if (!sdev) {
+ ioc_warn(hdev, "device is not exit rescan it, channel: %d, target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int sssraid_remove_device(struct sssraid_dev *hdev, struct sssraid_dev_info *org_device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ ioc_info(hdev, "remove device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(org_device->hdid), le16_to_cpu(org_device->target),
+ org_device->channel, org_device->lun, org_device->attr);
+
+ sdev = scsi_device_lookup(shost, org_device->channel, le16_to_cpu(org_device->target), 0);
+ if (!sdev) {
+ ioc_warn(hdev, "device is not exit remove it, channel: %d, target_id: %d, lun: %d\n",
+ org_device->channel, le16_to_cpu(org_device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int luntarget_cmp_func(const void *l, const void *r)
+{
+ const struct sssraid_dev_info *ln = l;
+ const struct sssraid_dev_info *rn = r;
+ int l_attr = SSSRAID_DEV_INFO_ATTR_BOOT(ln->attr);
+ int r_attr = SSSRAID_DEV_INFO_ATTR_BOOT(rn->attr);
+
+ /* boot first */
+ if (l_attr != r_attr)
+ return (r_attr - l_attr);
+
+ if (ln->channel == rn->channel)
+ return le16_to_cpu(ln->target) - le16_to_cpu(rn->target);
+
+ return ln->channel - rn->channel;
+}
+
+void sssraid_scan_disk(struct sssraid_dev *hdev)
+{
+ struct sssraid_dev_info *devices, *org_devices;
+ struct sssraid_dev_info *sortdevice;
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ u8 flag, org_flag;
+ int i, ret;
+ int count = 0;
+
+ devices = kcalloc(nd, sizeof(struct sssraid_dev_info), GFP_KERNEL);
+ if (!devices)
+ return;
+
+ sortdevice = kcalloc(nd, sizeof(struct sssraid_dev_info), GFP_KERNEL);
+ if (!sortdevice)
+ goto free_list;
+
+ ret = sssraid_get_dev_list(hdev, devices);
+ if (ret)
+ goto free_all;
+ org_devices = hdev->devices;
+ for (i = 0; i < nd; i++) {
+ org_flag = org_devices[i].flag;
+ flag = devices[i].flag;
+
+ dbgprint(hdev, "i: %d, org_flag: 0x%x, flag: 0x%x\n", i, org_flag, flag);
+
+ if (SSSRAID_DEV_INFO_FLAG_VALID(flag)) {
+ if (!SSSRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ memcpy(&org_devices[i], &devices[i],
+ sizeof(struct sssraid_dev_info));
+ memcpy(&sortdevice[count++], &devices[i],
+ sizeof(struct sssraid_dev_info));
+ up_write(&hdev->devices_rwsem);
+ } else if (SSSRAID_DEV_INFO_FLAG_CHANGE(flag)) {
+ sssraid_rescan_device(hdev, &devices[i]);
+ }
+ } else {
+ if (SSSRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ org_devices[i].flag &= 0xfe;
+ up_write(&hdev->devices_rwsem);
+ sssraid_remove_device(hdev, &org_devices[i]);
+ }
+ }
+ }
+
+ ioc_info(hdev, "scan work add device count = %d\n", count);
+
+ sort(sortdevice, count, sizeof(sortdevice[0]), luntarget_cmp_func, NULL);
+
+ for (i = 0; i < count; i++)
+ sssraid_add_device(hdev, &sortdevice[i]);
+
+free_all:
+ kfree(sortdevice);
+free_list:
+ kfree(devices);
+}
+
+static int sssraid_wait_abnl_cmd_done(struct sssraid_iod *iod)
+{
+ u16 times = 0;
+
+ do {
+ if (READ_ONCE(iod->state) == SSSRAID_CMD_TMO_COMPLETE)
+ break;
+ msleep(500);
+ times++;
+ } while (times <= SSSRAID_WAIT_ABNL_CMD_TIMEOUT);
+
+ /* decide timeout from the command state itself to avoid an off-by-one on 'times' */
+ if (READ_ONCE(iod->state) != SSSRAID_CMD_TMO_COMPLETE)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static bool sssraid_check_scmd_completed(struct scsi_cmnd *scmd)
+{
+ struct sssraid_dev *hdev = shost_priv(scmd->device->host);
+ struct sssraid_iod *iod = scsi_cmd_priv(scmd);
+ struct sssraid_squeue *sqinfo;
+ u16 hwq, cid;
+
+ sssraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ sqinfo = &hdev->sqinfo[hwq];
+ if (READ_ONCE(iod->state) == SSSRAID_CMD_COMPLETE || sssraid_poll_cq(hdev, hwq, cid)) {
+ ioc_warn(hdev, "cid[%d] qidx[%d] has been completed\n",
+ cid, sqinfo->qidx);
+ return true;
+ }
+ return false;
+}
+
+static int sssraid_scsi_reset(struct scsi_cmnd *scmd, enum sssraid_scsi_rst_type rst)
+{
+ struct sssraid_dev *hdev = shost_priv(scmd->device->host);
+ struct sssraid_iod *iod = scsi_cmd_priv(scmd);
+ struct sssraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SSSRAID_LIVE || !sssraid_wait_abnl_cmd_done(iod) ||
+ sssraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ sssraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ ioc_warn(hdev, "cid[%d] qidx[%d] timeout, %s reset\n", cid, hwq,
+ rst ? "bus" : "target");
+ ret = sssraid_send_reset_cmd(hdev, rst, hostdata->hdid);
+ if (ret == 0) {
+ ret = sssraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ ioc_warn(hdev, "cid[%d] qidx[%d] %s reset failed, not found\n",
+ cid, hwq, rst ? "bus" : "target");
+ return FAILED;
+ }
+
+ ioc_warn(hdev, "cid[%d] qidx[%d] %s reset success\n", cid, hwq,
+ rst ? "bus" : "target");
+ return SUCCESS;
+ }
+
+ ioc_warn(hdev, "cid[%d] qidx[%d] ret[%d] %s reset failed\n", cid, hwq, ret,
+ rst ? "bus" : "target");
+ return FAILED;
+}
+
+bool sssraid_change_host_state(struct sssraid_dev *hdev, enum sssraid_state newstate)
+{
+ unsigned long flags;
+ enum sssraid_state oldstate;
+ bool change = false;
+
+ spin_lock_irqsave(&hdev->state_lock, flags);
+
+ oldstate = hdev->state;
+ switch (newstate) {
+ case SSSRAID_LIVE:
+ switch (oldstate) {
+ case SSSRAID_NEW:
+ case SSSRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SSSRAID_RESETTING:
+ switch (oldstate) {
+ case SSSRAID_LIVE:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SSSRAID_DELETING:
+ if (oldstate != SSSRAID_DELETING)
+ change = true;
+ break;
+ case SSSRAID_DEAD:
+ switch (oldstate) {
+ case SSSRAID_NEW:
+ case SSSRAID_LIVE:
+ case SSSRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (change)
+ hdev->state = newstate;
+ spin_unlock_irqrestore(&hdev->state_lock, flags);
+
+ ioc_info(hdev, "[%d]->[%d], change[%d]\n", oldstate, newstate, change);
+
+ return change;
+}
+
+static int sssraid_get_qd_by_disk(u8 attr)
+{
+ switch (SSSRAID_DEV_DISK_TYPE(attr)) {
+ case SSSRAID_SAS_HDD_VD:
+ case SSSRAID_SATA_HDD_VD:
+ return SSSRAID_HDD_VD_QD;
+ case SSSRAID_SAS_SSD_VD:
+ case SSSRAID_SATA_SSD_VD:
+ case SSSRAID_NVME_SSD_VD:
+ return SSSRAID_SSD_VD_QD;
+ case SSSRAID_SAS_HDD_PD:
+ case SSSRAID_SATA_HDD_PD:
+ return SSSRAID_HDD_PD_QD;
+ case SSSRAID_SAS_SSD_PD:
+ case SSSRAID_SATA_SSD_PD:
+ case SSSRAID_NVME_SSD_PD:
+ return SSSRAID_SSD_PD_QD;
+ default:
+ return MAX_CMD_PER_DEV;
+ }
+}
+
+static int sssraid_match_dev(struct sssraid_dev *hdev, u16 idx, struct scsi_device *sdev)
+{
+ if (SSSRAID_DEV_INFO_FLAG_VALID(hdev->devices[idx].flag)) {
+ if (sdev->channel == hdev->devices[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+ sdev->lun < hdev->devices[idx].lun) {
+ ioc_info(hdev, "Match device success, channel:target:lun[%d:%d:%d]\n",
+ hdev->devices[idx].channel,
+ le16_to_cpu(hdev->devices[idx].target),
+ hdev->devices[idx].lun);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int sssraid_bsg_map_data(struct sssraid_dev *hdev, struct bsg_job *job,
+ struct sssraid_admin_command *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct sssraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ iod->sg = job->request_payload.sg_list;
+ iod->nsge = job->request_payload.sg_cnt;
+ iod->length = job->request_payload.payload_len;
+ iod->use_sgl = false;
+ iod->npages = -1;
+
+ if (!iod->nsge)
+ goto out;
+
+ ret = dma_map_sg_attrs(&hdev->pdev->dev, iod->sg, iod->nsge, dma_dir, DMA_ATTR_NO_WARN);
+ if (!ret)
+ goto out;
+
+ ret = sssraid_setup_prps(hdev, iod);
+ if (ret)
+ goto unmap;
+
+ cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+
+ return 0;
+
+unmap:
+ dma_unmap_sg(&hdev->pdev->dev, iod->sg, iod->nsge, dma_dir);
+out:
+ return ret;
+}
+
+static void sssraid_bsg_unmap_data(struct sssraid_dev *hdev, struct bsg_job *job)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct sssraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ if (iod->nsge)
+ dma_unmap_sg(&hdev->pdev->dev, iod->sg, iod->nsge, dma_dir);
+
+ sssraid_free_iod_res(hdev, iod);
+}
+
+void sssraid_put_cmd(struct sssraid_dev *hdev, struct sssraid_cmd *cmd,
+ enum sssraid_cmd_type type)
+{
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SSSRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ WRITE_ONCE(cmd->state, SSSRAID_CMD_IDLE);
+ list_add_tail(&cmd->list, head);
+ spin_unlock_irqrestore(slock, flags);
+}
+
+static int sssraid_user_admin_cmd(struct sssraid_dev *hdev, struct bsg_job *job)
+{
+ struct sssraid_bsg_request *bsg_req = job->request;
+ struct sssraid_passthru_common_cmd *cmd = &(bsg_req->admcmd);
+ struct sssraid_admin_command admin_cmd;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+ u32 result[2] = {0};
+ int status;
+
+ if (hdev->state >= SSSRAID_RESETTING) {
+ ioc_err(hdev, "err, host state:[%d] is not right\n",
+ hdev->state);
+ return -EBUSY;
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = cmd->opcode;
+ admin_cmd.common.flags = cmd->flags;
+ admin_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ admin_cmd.common.cdw2[0] = cpu_to_le32(cmd->cdw2);
+ admin_cmd.common.cdw2[1] = cpu_to_le32(cmd->cdw3);
+ admin_cmd.common.cdw10 = cpu_to_le32(cmd->cdw10);
+ admin_cmd.common.cdw11 = cpu_to_le32(cmd->cdw11);
+ admin_cmd.common.cdw12 = cpu_to_le32(cmd->cdw12);
+ admin_cmd.common.cdw13 = cpu_to_le32(cmd->cdw13);
+ admin_cmd.common.cdw14 = cpu_to_le32(cmd->cdw14);
+ admin_cmd.common.cdw15 = cpu_to_le32(cmd->cdw15);
+
+ status = sssraid_bsg_map_data(hdev, job, &admin_cmd);
+ if (status) {
+ ioc_err(hdev, "err, map data failed\n");
+ return status;
+ }
+
+ status = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, &result[0], &result[1], timeout);
+ if (status >= 0) {
+ job->reply_len = sizeof(result);
+ memcpy(job->reply, result, sizeof(result));
+ }
+
+ if (status)
+ ioc_info(hdev, "opcode[0x%x] subopcode[0x%x], status[0x%x] result0[0x%x] result1[0x%x]\n",
+ cmd->opcode, cmd->info_0.subopcode, status, result[0], result[1]);
+
+ sssraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
+static int sssraid_submit_ioq_sync_cmd(struct sssraid_dev *hdev, struct sssraid_ioq_command *cmd,
+ u32 *result, u32 *reslen, u32 timeout)
+{
+ int ret;
+ dma_addr_t sense_dma;
+ struct sssraid_squeue *sqinfo;
+ void *sense_addr = NULL;
+ struct sssraid_cmd *pt_cmd = sssraid_get_cmd(hdev, SSSRAID_CMD_IOPT);
+
+ if (!pt_cmd) {
+ ioc_err(hdev, "err, get sqinfo cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&pt_cmd->cmd_done);
+
+ sqinfo = &hdev->sqinfo[pt_cmd->qid];
+ ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
+ sense_addr = sqinfo->sense + ret;
+ sense_dma = sqinfo->sense_dma_addr + ret;
+
+ cmd->common.sense_addr = cpu_to_le64(sense_dma);
+ cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+ cmd->common.command_id = cpu_to_le16(pt_cmd->cid);
+
+ sssraid_submit_cmd(sqinfo, cmd);
+
+ if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
+ ioc_err(hdev, "cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ pt_cmd->cid, pt_cmd->qid, cmd->common.opcode,
+ (le32_to_cpu(cmd->common.cdw3[0]) & 0xffff));
+
+ /* reset controller if admin timeout */
+ sssraid_adm_timeout(hdev, pt_cmd);
+
+ sssraid_put_cmd(hdev, pt_cmd, SSSRAID_CMD_IOPT);
+ return -ETIME;
+ }
+
+ if (result && reslen) {
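+ /* vendor status encoding: copy the sense buffer back when the reply carries valid sense data */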
+ if ((pt_cmd->status & 0x17f) == 0x101) {
+ memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+ *reslen = SCSI_SENSE_BUFFERSIZE;
+ }
+ }
+
+ sssraid_put_cmd(hdev, pt_cmd, SSSRAID_CMD_IOPT);
+
+ return pt_cmd->status;
+}
+
+static int sssraid_user_ioq_cmd(struct sssraid_dev *hdev, struct bsg_job *job)
+{
+ struct sssraid_bsg_request *bsg_req = (struct sssraid_bsg_request *)(job->request);
+ struct sssraid_ioq_passthru_cmd *cmd = &(bsg_req->ioqcmd);
+ struct sssraid_ioq_command ioq_cmd;
+ int status = 0;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+
+ if (cmd->data_len > IOQ_PT_DATA_LEN) {
+ ioc_err(hdev, "data len bigger than 4k\n");
+ return -EFAULT;
+ }
+
+ if (hdev->state != SSSRAID_LIVE) {
+ ioc_err(hdev, "err, host state:[%d] is not live\n",
+ hdev->state);
+ return -EBUSY;
+ }
+
+ ioc_info(hdev, "opcode[0x%x] subopcode[0x%x] init, datalen[%d]\n",
+ cmd->opcode, cmd->info_1.subopcode, cmd->data_len);
+
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.common.opcode = cmd->opcode;
+ ioq_cmd.common.flags = cmd->flags;
+ ioq_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ ioq_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+ ioq_cmd.common.cdb_len = cmd->info_0.cdb_len;
+ ioq_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+ ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+ ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+ ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+ ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+ ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+ ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+ ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+ ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+ ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+ memcpy(ioq_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+ ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+ ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+ ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+ ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+ status = sssraid_bsg_map_data(hdev, job, (struct sssraid_admin_command *)&ioq_cmd);
+ if (status) {
+ ioc_err(hdev, "err, map data failed\n");
+ return status;
+ }
+
+ status = sssraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, job->reply, &job->reply_len, timeout);
+ if (status)
+ ioc_info(hdev, "opcode[0x%x] subopcode[0x%x], status[0x%x], reply_len[%d]\n",
+ cmd->opcode, cmd->info_1.subopcode, status, job->reply_len);
+
+ sssraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
+
+/* bsg dispatch user command */
+static int sssraid_bsg_host_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = dev_to_shost(job->dev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct sssraid_bsg_request *bsg_req = job->request;
+ int ret = -ENOMSG;
+
+ job->reply_len = 0;
+
+ if (bsg_req == NULL || job->request_len != sizeof(struct sssraid_bsg_request)) {
+ bsg_job_done(job, ret, 0);
+ return 0;
+ }
+ dbgprint(hdev, "bsg msgcode[%d] msglen[%d] timeout[%d];"
+ "reqnsge[%d], reqlen[%d]\n",
+ bsg_req->msgcode, job->request_len, rq->timeout,
+ job->request_payload.sg_cnt, job->request_payload.payload_len);
+
+ switch (bsg_req->msgcode) {
+ case SSSRAID_BSG_ADM:
+ ret = sssraid_user_admin_cmd(hdev, job);
+ break;
+ case SSSRAID_BSG_IOQ:
+ ret = sssraid_user_ioq_cmd(hdev, job);
+ break;
+ default:
+ ioc_info(hdev, "unsupport msgcode[%d]\n", bsg_req->msgcode);
+ break;
+ }
+
+ if (ret > 0)
+ ret = ret | (ret << 8);
+
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+static inline void sssraid_remove_bsg(struct sssraid_dev *hdev)
+{
+ if (hdev->bsg_queue) {
+ bsg_unregister_queue(hdev->bsg_queue);
+ blk_cleanup_queue(hdev->bsg_queue);
+ }
+}
+
+static void sssraid_back_fault_cqe(struct sssraid_squeue *sqinfo, struct sssraid_completion *cqe)
+{
+ struct sssraid_dev *hdev = sqinfo->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct sssraid_iod *iod;
+ struct request *req;
+
+ tags = hdev->shost->tag_set.tags[sqinfo->qidx - 1];
+ req = blk_mq_tag_to_rq(tags, le16_to_cpu(cqe->cmd_id));
+ if (unlikely(!req || !blk_mq_request_started(req)))
+ return;
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ if (READ_ONCE(iod->state) != SSSRAID_CMD_IN_FLIGHT &&
+ READ_ONCE(iod->state) != SSSRAID_CMD_TIMEOUT)
+ return;
+
+ WRITE_ONCE(iod->state, SSSRAID_CMD_TMO_COMPLETE);
+ set_host_byte(scmd, DID_NO_CONNECT);
+ if (iod->nsge)
+ scsi_dma_unmap(scmd);
+ sssraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+ ioc_warn(hdev, "Back fault CQE, cid[%d] qidx[%d]\n",
+ le16_to_cpu(cqe->cmd_id), sqinfo->qidx);
+}
+
+void sssraid_back_all_io(struct sssraid_dev *hdev)
+{
+ int i, j;
+ struct sssraid_squeue *sqinfo;
+ struct sssraid_completion cqe = { 0 };
+
+ scsi_block_requests(hdev->shost);
+
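+ /* complete every command the hardware may still own with a faked CQE */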
+ for (i = 1; i <= hdev->shost->nr_hw_queues; i++) {
+ sqinfo = &hdev->sqinfo[i];
+ for (j = 0; j < hdev->scsi_qd; j++) {
+ cqe.cmd_id = cpu_to_le16(j);
+ sssraid_back_fault_cqe(sqinfo, &cqe);
+ }
+ }
+
+ scsi_unblock_requests(hdev->shost);
+
+ j = SSSRAID_AQ_BLK_MQ_DEPTH;
+ for (i = 0; i < j; i++) {
+ if (READ_ONCE(hdev->adm_cmds[i].state) == SSSRAID_CMD_IN_FLIGHT) {
+ ioc_info(hdev, "Back adm, cid[%d]\n", i);
+ hdev->adm_cmds[i].status = 0xFFFF;
+ WRITE_ONCE(hdev->adm_cmds[i].state, SSSRAID_CMD_COMPLETE);
+ complete(&(hdev->adm_cmds[i].cmd_done));
+ }
+ }
+
+ j = SSSRAID_NR_IOQ_PTCMDS;
+ for (i = 0; i < j; i++) {
+ if (READ_ONCE(hdev->ioq_ptcmds[i].state) == SSSRAID_CMD_IN_FLIGHT) {
+ hdev->ioq_ptcmds[i].status = 0xFFFF;
+ WRITE_ONCE(hdev->ioq_ptcmds[i].state, SSSRAID_CMD_COMPLETE);
+ complete(&(hdev->ioq_ptcmds[i].cmd_done));
+ }
+ }
+}
+
+/*
+ * scsi_host_template callbacks for sssraid_driver_template
+ */
+static int sssraid_scan_finished(struct Scsi_Host *shost,
+ unsigned long time)
+{
+ struct sssraid_dev *hdev = shost_priv(shost);
+
+ sssraid_scan_disk(hdev);
+
+ return 1;
+}
+
+/* eh_target_reset_handler call back */
+static int sssraid_eh_target_reset(struct scsi_cmnd *scmd)
+{
+ return sssraid_scsi_reset(scmd, SSSRAID_RESET_TARGET);
+}
+
+/* eh_bus_reset_handler call back */
+static int sssraid_bus_reset_handler(struct scsi_cmnd *scmd)
+{
+ return sssraid_scsi_reset(scmd, SSSRAID_RESET_BUS);
+}
+
+/* eh_host_reset_handler call back */
+static int sssraid_eh_host_reset(struct scsi_cmnd *scmd)
+{
+ u16 hwq, cid;
+ struct sssraid_dev *hdev = shost_priv(scmd->device->host);
+
+ scsi_print_command(scmd);
+ if (hdev->state != SSSRAID_LIVE || sssraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ sssraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ ioc_warn(hdev, "cid[%d] qidx[%d] host reset\n", cid, hwq);
+
+ /*
+ * The old code (sssraid_reset_work_sync) queued reset_work first
+ * and then used flush_work() to synchronize; that indirection is
+ * unnecessary, so the reset handler is called directly here.
+ */
+ if (!sssraid_change_host_state(hdev, SSSRAID_RESETTING)) {
+ ioc_info(hdev, "can't change to reset state\n");
+ return FAILED;
+ }
+ if (sssraid_soft_reset_handler(hdev)) {
+ ioc_warn(hdev, "cid[%d] qidx[%d] host reset failed\n", cid, hwq);
+ return FAILED;
+ }
+
+ ioc_warn(hdev, "cid[%d] qidx[%d] host reset success\n", cid, hwq);
+
+ return SUCCESS;
+}
+
+/* host_reset call back */
+static int sssraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ int ret;
+ struct sssraid_dev *hdev = shost_priv(shost);
+
+ ioc_info(hdev, "start sysfs host reset cmd\n");
+ if (!sssraid_change_host_state(hdev, SSSRAID_RESETTING)) {
+ ioc_info(hdev, "can't change to reset state\n");
+ return -EBUSY;
+ }
+ ret = sssraid_soft_reset_handler(hdev);
+ ioc_info(hdev, "stop sysfs host reset cmd[%d]\n", ret);
+
+ return ret;
+}
+
+/* queuecommand call back */
+static int sssraid_qcmd(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct sssraid_iod *iod = scsi_cmd_priv(scmd);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ struct scsi_device *sdev = scmd->device;
+ struct sssraid_sdev_hostdata *hostdata = sdev->hostdata;
+ u16 hwq, cid;
+ struct sssraid_squeue *sq;
+ struct sssraid_ioq_command ioq_cmd;
+ int retval;
+
+ if (unlikely(hdev->state == SSSRAID_RESETTING))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (unlikely(hdev->state != SSSRAID_LIVE)) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (unlikely(hdev->logging_level & SSSRAID_DEBUG))
+ scsi_print_command(scmd);
+
+ sssraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ hostdata = sdev->hostdata;
+ sq = &hdev->sqinfo[hwq];
+
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.rw.hdid = cpu_to_le32(hostdata->hdid);
+ ioq_cmd.rw.command_id = cpu_to_le16(cid);
+
+ retval = sssraid_setup_ioq_cmd(hdev, &ioq_cmd, scmd);
+ if (unlikely(retval)) {
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ /* per-command sense slot, same layout as in sssraid_submit_ioq_sync_cmd() */
+ retval = cid * SCSI_SENSE_BUFFERSIZE;
+ iod->sense = sq->sense + retval;
+ iod->sense_dma = sq->sense_dma_addr + retval;
+ ioq_cmd.common.sense_addr = cpu_to_le64(iod->sense_dma);
+ ioq_cmd.common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+
+ sssraid_init_iod(iod);
+
+ iod->sqinfo = sq;
+ retval = sssraid_io_map_data(hdev, iod, scmd, &ioq_cmd);
+ if (unlikely(retval)) {
+ ioc_err(hdev, "sssraid_io_map_data failed\n");
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ retval = 0;
+ goto deinit_iod;
+ }
+
+ WRITE_ONCE(iod->state, SSSRAID_CMD_IN_FLIGHT);
+ sssraid_submit_cmd(sq, &ioq_cmd);
+
+ return 0;
+
+deinit_iod:
+ sssraid_free_iod_res(hdev, iod);
+ return retval;
+}
+
+/* change_queue_depth call back:
+ * keep as old
+ */
+
+/* slave_configure call back */
+static int sssraid_slave_configure(struct scsi_device *sdev)
+{
+ int qd = MAX_CMD_PER_DEV;
+ unsigned int timeout = scmd_tmout_rawdisk * HZ;
+ struct sssraid_dev *hdev = shost_priv(sdev->host);
+ struct sssraid_sdev_hostdata *hostdata = sdev->hostdata;
+ u32 max_sec = sdev->host->max_sectors;
+
+ if (hostdata) {
+ if (SSSRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ timeout = scmd_tmout_vd * HZ;
+ else if (SSSRAID_DEV_INFO_ATTR_RAWDISK(hostdata->attr))
+ timeout = scmd_tmout_rawdisk * HZ;
+ max_sec = hostdata->max_io_kb << 1;
+ qd = sssraid_get_qd_by_disk(hostdata->attr);
+ } else {
+ ioc_err(hdev, "err, sdev->hostdata is null\n");
+ }
+
+ blk_queue_rq_timeout(sdev->request_queue, timeout);
+ sdev->eh_timeout = timeout;
+ scsi_change_queue_depth(sdev, qd);
+
+ if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
+ max_sec = sdev->host->max_sectors;
+
+ blk_queue_max_hw_sectors(sdev->request_queue, max_sec);
+
+ ioc_info(hdev, "sdev->channel:id:lun[%d:%d:%lld], scmd_timeout[%d]s, maxsec[%d]\n",
+ sdev->channel, sdev->id, sdev->lun, timeout / HZ, max_sec);
+
+ return 0;
+}
+
+/* slave_alloc call back */
+static int sssraid_slave_alloc(struct scsi_device *sdev)
+{
+ struct sssraid_sdev_hostdata *hostdata;
+ struct sssraid_dev *hdev;
+ u16 idx;
+
+ hdev = shost_priv(sdev->host);
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+ if (!hostdata) {
+ ioc_err(hdev, "Alloc scsi host data memory failed\n");
+ return -ENOMEM;
+ }
+
+ down_read(&hdev->devices_rwsem);
+ for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) {
+ if (sssraid_match_dev(hdev, idx, sdev))
+ goto scan_host;
+ }
+ up_read(&hdev->devices_rwsem);
+
+ kfree(hostdata);
+ return -ENXIO;
+
+scan_host:
+ hostdata->hdid = le32_to_cpu(hdev->devices[idx].hdid);
+ hostdata->max_io_kb = le16_to_cpu(hdev->devices[idx].max_io_kb);
+ hostdata->attr = hdev->devices[idx].attr;
+ hostdata->flag = hdev->devices[idx].flag;
+ hostdata->rg_id = 0xff;
+ sdev->hostdata = hostdata;
+ up_read(&hdev->devices_rwsem);
+ return 0;
+}
+
+/* slave_destroy call back */
+static void sssraid_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+/* eh_timed_out call back */
+static enum blk_eh_timer_return sssraid_scmd_timeout(struct scsi_cmnd *scmd)
+{
+ struct sssraid_iod *iod = scsi_cmd_priv(scmd);
+ unsigned int timeout = scmd->device->request_queue->rq_timeout;
+
+ if (sssraid_check_scmd_completed(scmd))
+ goto out;
+
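+ /*
+ * Only the thread that wins the IN_FLIGHT -> TIMEOUT cmpxchg hands
+ * the command over to the SCSI error handler; all other cases just
+ * rearm the timer.
+ */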
+ if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) {
+ if (cmpxchg(&iod->state, SSSRAID_CMD_IN_FLIGHT, SSSRAID_CMD_TIMEOUT) ==
+ SSSRAID_CMD_IN_FLIGHT) {
+ return BLK_EH_DONE;
+ }
+ }
+out:
+ return BLK_EH_RESET_TIMER;
+}
+
+/* eh_abort_handler call back */
+static int sssraid_abort_handler(struct scsi_cmnd *scmd)
+{
+ struct sssraid_dev *hdev = shost_priv(scmd->device->host);
+ struct sssraid_iod *iod = scsi_cmd_priv(scmd);
+ struct sssraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SSSRAID_LIVE || !sssraid_wait_abnl_cmd_done(iod) ||
+ sssraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ sssraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ ioc_warn(hdev, "cid[%d] qidx[%d] timeout, aborting\n", cid, hwq);
+ ret = sssraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
+ if (ret != -ETIME) {
+ ret = sssraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ ioc_warn(hdev, "cid[%d] qidx[%d] abort failed, not found\n", cid, hwq);
+ return FAILED;
+ }
+ ioc_warn(hdev, "cid[%d] qidx[%d] abort succ\n", cid, hwq);
+ return SUCCESS;
+ }
+ ioc_warn(hdev, "cid[%d] qidx[%d] abort failed, timeout\n", cid, hwq);
+ return FAILED;
+}
+
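+/*
+ * sysfs host attributes: expose fields of the controller status (CSTS)
+ * register; -1 is reported when the PCI device is not reachable.
+ */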
+static ssize_t csts_pp_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_PP_MASK);
+ ret >>= SSSRAID_CSTS_PP_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_shst_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_SHST_MASK);
+ ret >>= SSSRAID_CSTS_SHST_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_cfs_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_CFS_MASK);
+ ret >>= SSSRAID_CSTS_CFS_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_rdy_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev))
+ ret = (readl(hdev->bar + SSSRAID_REG_CSTS) & SSSRAID_CSTS_RDY);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t fw_version_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct sssraid_dev *hdev = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fr);
+}
+
+static DEVICE_ATTR_RO(csts_pp);
+static DEVICE_ATTR_RO(csts_shst);
+static DEVICE_ATTR_RO(csts_cfs);
+static DEVICE_ATTR_RO(csts_rdy);
+static DEVICE_ATTR_RO(fw_version);
+
+static struct device_attribute *sssraid_host_attrs[] = {
+ &dev_attr_csts_pp,
+ &dev_attr_csts_shst,
+ &dev_attr_csts_cfs,
+ &dev_attr_csts_rdy,
+ &dev_attr_fw_version,
+ NULL,
+};
+
+static int sssraid_get_vd_info(struct sssraid_dev *hdev, struct sssraid_vd_info *vd_info, u16 vid)
+{
+ struct sssraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ if (hdev->state >= SSSRAID_RESETTING) {
+ ioc_err(hdev, "err, host state[%d] is not right\n", hdev->state);
+ return -EBUSY;
+ }
+
+ data_ptr = dma_alloc_coherent(&hdev->pdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+ admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(vd_info, data_ptr, sizeof(struct sssraid_vd_info));
+
+ dma_free_coherent(&hdev->pdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int sssraid_get_bgtask(struct sssraid_dev *hdev, struct sssraid_bgtask *bgtask)
+{
+ struct sssraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ if (hdev->state >= SSSRAID_RESETTING) {
+ ioc_err(hdev, "err, host state[%d] is not right\n", hdev->state);
+ return -EBUSY;
+ }
+
+ data_ptr = dma_alloc_coherent(&hdev->pdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = sssraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(bgtask, data_ptr, sizeof(struct sssraid_bgtask));
+
+ dma_free_coherent(&hdev->pdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct sssraid_dev *hdev;
+ struct sssraid_vd_info *vd_info;
+ struct sssraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ if (!SSSRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info)
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = sssraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+ ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+ vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct sssraid_dev *hdev;
+ struct sssraid_vd_info *vd_info;
+ struct sssraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ if (!SSSRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info)
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = sssraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret) {
+ vd_info->vd_status = 0;
+ vd_info->rg_id = 0xff;
+ }
+
+ ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ? vd_info->vd_status : 0;
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct sssraid_dev *hdev;
+ struct sssraid_vd_info *vd_info;
+ struct sssraid_bgtask *bgtask;
+ struct sssraid_sdev_hostdata *hostdata;
+ u8 rg_id, i, progress = 0;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ if (!SSSRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info)
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = sssraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ goto out;
+
+ rg_id = vd_info->rg_id;
+
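+ /* rg_id was saved above: the vd_info buffer is now reused to hold the bgtask reply */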
+ bgtask = (struct sssraid_bgtask *)vd_info;
+ ret = sssraid_get_bgtask(hdev, bgtask);
+ if (ret)
+ goto out;
+ for (i = 0; i < bgtask->task_num; i++) {
+ if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+ (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+ progress = bgtask->bgtask[i].progress;
+ }
+
+out:
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+
+static struct device_attribute *sssraid_dev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_raid_state,
+ &dev_attr_raid_resync,
+ NULL,
+};
+
+static struct scsi_host_template sssraid_driver_template = {
+ .module = THIS_MODULE,
+ .name = "3SNIC Logic sssraid driver",
+ .proc_name = "sssraid",
+ .queuecommand = sssraid_qcmd,
+ .slave_alloc = sssraid_slave_alloc,
+ .slave_destroy = sssraid_slave_destroy,
+ .slave_configure = sssraid_slave_configure,
+ .scan_finished = sssraid_scan_finished,
+ .eh_timed_out = sssraid_scmd_timeout,
+ .eh_abort_handler = sssraid_abort_handler,
+ .eh_target_reset_handler = sssraid_eh_target_reset,
+ .eh_bus_reset_handler = sssraid_bus_reset_handler,
+ .eh_host_reset_handler = sssraid_eh_host_reset,
+ .change_queue_depth = scsi_change_queue_depth,
+ .host_tagset = 0,
+ .this_id = -1,
+ .unchecked_isa_dma = 0,
+ .shost_attrs = sssraid_host_attrs,
+ .sdev_attrs = sssraid_dev_attrs,
+ .host_reset = sssraid_sysfs_host_reset,
+};
+
+/**
+ * sssraid_probe - PCI probe callback
+ * @pdev: PCI device instance
+ * @id: PCI device ID details
+ *
+ * Controller initialization routine:
+ * allocate the per-adapter instance through shost_priv(),
+ * initialize controller-specific data structures, initialize
+ * the controller hardware, and add the shost to the SCSI subsystem.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+static int
+sssraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct sssraid_dev *hdev;
+ struct Scsi_Host *shost;
+ int node;
+ char bsg_name[15];
+ int retval = 0;
+
+ node = dev_to_node(&pdev->dev);
+ if (node == NUMA_NO_NODE) {
+ node = first_memory_node;
+ set_dev_node(&pdev->dev, node);
+ }
+
+ shost = scsi_host_alloc(&sssraid_driver_template, sizeof(*hdev));
+ if (!shost) {
+ retval = -ENODEV;
+ ioc_err(hdev, "Failed to allocate scsi host\n");
+ goto shost_failed;
+ }
+
+ hdev = shost_priv(shost);
+ hdev->numa_node = node;
+ hdev->instance = shost->host_no; /* for device instance */
+ sprintf(hdev->name, "%s%d", SSSRAID_DRIVER_NAME, hdev->instance);
+
+ init_rwsem(&hdev->devices_rwsem);
+ spin_lock_init(&hdev->state_lock);
+
+ spin_lock_init(&hdev->fwevt_lock);
+ spin_lock_init(&hdev->watchdog_lock);
+
+ INIT_LIST_HEAD(&hdev->fwevt_list);
+
+ hdev->logging_level = logging_level; /* according to log_debug_switch*/
+
+ snprintf(hdev->fwevt_worker_name, sizeof(hdev->fwevt_worker_name),
+ "%s%d_fwevt_wrkr", SSSRAID_DRIVER_NAME, hdev->instance);
+ hdev->fwevt_worker_thread = alloc_ordered_workqueue(
+ hdev->fwevt_worker_name, WQ_MEM_RECLAIM);
+ if (!hdev->fwevt_worker_thread) {
+ ioc_err(hdev, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ retval = -ENODEV;
+ goto out_fwevtthread_failed;
+ }
+
+ hdev->shost = shost;
+ hdev->pdev = pdev;
+
+ if (sssraid_init_ioc(hdev, 0)) {
+ ioc_err(hdev, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ retval = -ENODEV;
+ goto out_iocinit_failed;
+ }
+
+ sssraid_shost_init(hdev);
+
+ retval = scsi_add_host(shost, &pdev->dev);
+ if (retval) {
+ ioc_err(hdev, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ goto addhost_failed;
+ }
+
+ snprintf(bsg_name, sizeof(bsg_name), "%s%d", SSSRAID_DRIVER_NAME, shost->host_no);
+ hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name,
+ sssraid_bsg_host_dispatch, NULL, sssraid_cmd_size(hdev));
+ if (IS_ERR(hdev->bsg_queue)) {
+ ioc_err(hdev, "err, setup bsg failed\n");
+ hdev->bsg_queue = NULL;
+ goto bsg_setup_failed;
+ }
+
+ sssraid_change_host_state(hdev, SSSRAID_LIVE);
+
+ scsi_scan_host(shost);
+ return retval;
+
+bsg_setup_failed:
+ scsi_remove_host(shost);
+addhost_failed:
+ sssraid_cleanup_ioc(hdev, 0);
+out_iocinit_failed:
+ destroy_workqueue(hdev->fwevt_worker_thread);
+out_fwevtthread_failed:
+ scsi_host_put(shost);
+shost_failed:
+ return retval;
+}
+
+static void sssraid_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+
+ if (!shost)
+ return;
+
+ ioc_info(hdev, "enter sssraid remove\n");
+
+ hdev = shost_priv(shost);
+
+ sssraid_change_host_state(hdev, SSSRAID_DELETING);
+
+ if (!pci_device_is_present(pdev))
+ sssraid_back_all_io(hdev);
+
+ sssraid_cleanup_fwevt_list(hdev);
+ destroy_workqueue(hdev->fwevt_worker_thread);
+
+ sssraid_remove_bsg(hdev);
+ scsi_remove_host(shost);
+ sssraid_cleanup_ioc(hdev, 0);
+
+ scsi_host_put(shost);
+}
+
+static void sssraid_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+
+ if (!shost)
+ return;
+
+ hdev = shost_priv(shost);
+
+ sssraid_cleanup_fwevt_list(hdev);
+ destroy_workqueue(hdev->fwevt_worker_thread);
+ sssraid_cleanup_ioc(hdev, 0);
+}
+
+#ifdef CONFIG_PM
+static int sssraid_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+ pci_power_t device_state;
+
+ if (!shost)
+ return 0;
+
+ hdev = shost_priv(shost);
+
+ while (hdev->state == SSSRAID_RESETTING)
+ ssleep(1);
+ sssraid_cleanup_fwevt_list(hdev);
+ scsi_block_requests(shost);
+ sssraid_cleanup_ioc(hdev, 1);
+
+ device_state = pci_choose_state(pdev, state);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, device_state);
+
+ return 0;
+}
+
+static int sssraid_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+ pci_power_t device_state = pdev->current_state;
+
+ if (!shost)
+ return 0;
+
+ hdev = shost_priv(shost);
+
+ ioc_info(hdev, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ hdev->pdev = pdev;
+ hdev->cpu_count = num_online_cpus();
+
+ /* sssraid_setup_resources in sssraid_init_ioc */
+ sssraid_init_ioc(hdev, 1);
+ scsi_unblock_requests(shost);
+
+ return 0;
+}
+#endif
+
+static pci_ers_result_t sssraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+
+ if (!shost)
+ return PCI_ERS_RESULT_NONE;
+
+ hdev = shost_priv(shost);
+
+ ioc_info(hdev, "enter pci error detect, state:%d\n", state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ ioc_warn(hdev, "channel is normal, do nothing\n");
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ ioc_warn(hdev, "channel io frozen, need reset controller\n");
+
+ scsi_block_requests(hdev->shost);
+
+ sssraid_change_host_state(hdev, SSSRAID_RESETTING);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ ioc_warn(hdev, "channel io failure, request disconnect\n");
+
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t sssraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+
+ if (!shost)
+ return PCI_ERS_RESULT_NONE;
+
+ hdev = shost_priv(shost);
+
+ ioc_info(hdev, "restart after slot reset\n");
+
+ pci_restore_state(pdev);
+
+ sssraid_soft_reset_handler(hdev);
+
+ scsi_unblock_requests(hdev->shost);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void sssraid_reset_done(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct sssraid_dev *hdev;
+
+ if (!shost)
+ return;
+
+ hdev = shost_priv(shost);
+
+ ioc_info(hdev, "enter sssraid reset done\n");
+}
+
+static struct pci_error_handlers sssraid_err_handler = {
+ .error_detected = sssraid_pci_error_detected,
+ .slot_reset = sssraid_pci_slot_reset,
+ .reset_done = sssraid_reset_done,
+};
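+
+/*
+ * Recovery flow (Documentation/PCI/pci-error-recovery.rst): the PCI core
+ * calls .error_detected first; returning PCI_ERS_RESULT_NEED_RESET makes
+ * the core reset the slot and then call .slot_reset before I/O resumes.
+ * .reset_done pairs with .reset_prepare around PCI function resets.
+ */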
+
+static const struct pci_device_id sssraid_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_3SNIC_LOGIC, SSSRAID_SERVER_DEVICE_HBA_DID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_3SNIC_LOGIC, SSSRAID_SERVER_DEVICE_RAID_DID) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, sssraid_pci_id_table);
+
+static struct pci_driver sssraid_pci_driver = {
+ .name = SSSRAID_DRIVER_NAME,
+ .id_table = sssraid_pci_id_table,
+ .probe = sssraid_probe,
+ .remove = sssraid_remove,
+ .shutdown = sssraid_shutdown,
+#ifdef CONFIG_PM
+ .suspend = sssraid_suspend,
+ .resume = sssraid_resume,
+#endif
+ .err_handler = &sssraid_err_handler,
+};
+
+static int __init sssraid_init(void)
+{
+ int ret_val;
+
+ pr_info("Loading %s version %s\n", SSSRAID_DRIVER_NAME,
+ SSSRAID_DRIVER_VERSION);
+
+ sssraid_class = class_create(THIS_MODULE, "sssraid");
+ if (IS_ERR(sssraid_class)) {
+ ret_val = PTR_ERR(sssraid_class);
+ return ret_val;
+ }
+
+ ret_val = pci_register_driver(&sssraid_pci_driver);
+ if (ret_val)
+ class_destroy(sssraid_class);
+
+ return ret_val;
+}
+
+static void __exit sssraid_exit(void)
+{
+ pr_info("Unloading %s version %s\n", SSSRAID_DRIVER_NAME,
+ SSSRAID_DRIVER_VERSION);
+
+ pci_unregister_driver(&sssraid_pci_driver);
+ class_destroy(sssraid_class);
+}
+
+MODULE_AUTHOR("liangry1(a)3snic.com");
+MODULE_DESCRIPTION("3SNIC Information Technology SSSRAID Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SSSRAID_DRIVER_VERSION);
+module_init(sssraid_init);
+module_exit(sssraid_exit);
--
2.25.1

[PATCH openEuler-5.10-LTS] wifi: wilc1000: validate length of IEEE80211_P2P_ATTR_CHANNEL_LIST attribute
by Zheng Zengkai 28 Dec '22
From: Phil Turnbull <philipturnbull(a)github.com>
stable inclusion
from stable-v5.10.157
commit 5a068535c0073c8402aa0755e8ef259fb98a33c5
category: bugfix
bugzilla: 188173 https://gitee.com/src-openeuler/kernel/issues/I66M3J
CVE: CVE-2022-47521
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
commit f9b62f9843c7b0afdaecabbcebf1dbba18599408 upstream.
Validate that the IEEE80211_P2P_ATTR_CHANNEL_LIST attribute contains
enough space for a 'struct wilc_attr_ch_list'. If the attribute is too
small then it can trigger an out-of-bounds write later in the function.
'struct wilc_attr_ch_list' is variable sized, so also check that
'attr_len' does not extend beyond the end of 'buf'.
Signed-off-by: Phil Turnbull <philipturnbull(a)github.com>
Tested-by: Ajay Kathat <ajay.kathat(a)microchip.com>
Acked-by: Ajay Kathat <ajay.kathat(a)microchip.com>
Signed-off-by: Kalle Valo <kvalo(a)kernel.org>
Link: https://lore.kernel.org/r/20221123153543.8568-4-philipturnbull@github.com
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Liu Jian <liujian56(a)huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai(a)huawei.com>
Reviewed-by: Yue Haibing <yuehaibing(a)huawei.com>
---
drivers/net/wireless/microchip/wilc1000/cfg80211.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 64e01b84d98d..dd26f2086180 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -947,7 +947,8 @@ static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u32 len, u8 sta_ch)
if (index + sizeof(*e) + attr_size > len)
return;
- if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST)
+ if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST &&
+ attr_size >= (sizeof(struct wilc_attr_ch_list) - sizeof(*e)))
ch_list_idx = index;
else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL &&
attr_size == (sizeof(struct wilc_attr_oper_ch) - sizeof(*e)))
--
2.20.1
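The check added here is an instance of the general TLV parsing rule:
verify the fixed header fits in the buffer, then verify the declared
body length both meets the minimum for the structure about to be read
and stays inside the buffer. A minimal sketch of the pattern, with
hypothetical names (not the wilc1000 code):

	struct tlv_hdr {
		u8 type;
		__le16 len;	/* length of body[], taken from the wire */
		u8 body[];
	} __packed;

	/* Return the attribute at 'index' only if it lies fully inside buf. */
	static const struct tlv_hdr *tlv_get(const u8 *buf, u32 buf_len,
					     u32 index, u32 min_body)
	{
		const struct tlv_hdr *e;
		u32 body_len;

		if (index + sizeof(*e) > buf_len)	/* header in bounds? */
			return NULL;
		e = (const struct tlv_hdr *)&buf[index];
		body_len = le16_to_cpu(e->len);
		if (body_len < min_body)		/* big enough to parse? */
			return NULL;
		if (index + sizeof(*e) + body_len > buf_len) /* body in bounds? */
			return NULL;
		return e;
	}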

[PATCH openEuler-5.10-LTS 1/6] drm/amdkfd: Check for null pointer after calling kmemdup
by Zheng Zengkai 28 Dec '22
From: Jiasheng Jiang <jiasheng(a)iscas.ac.cn>
stable inclusion
from stable-v6.0.14
commit abfaf0eee97925905e742aa3b0b72e04a918fa9e
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I6694U
CVE: CVE-2022-3108
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id…
--------------------------------
Since the allocation can fail, kmemdup() may return a NULL pointer.
Check 'props2' before use to prevent a NULL pointer dereference.
Fixes: 3a87177eb141 ("drm/amdkfd: Add topology support for dGPUs")
Signed-off-by: Jiasheng Jiang <jiasheng(a)iscas.ac.cn>
Reviewed-by: Felix Kuehling <Felix.Kuehling(a)amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling(a)amd.com>
Signed-off-by: Alex Deucher <alexander.deucher(a)amd.com>
Signed-off-by: Ren Zhijie <renzhijie2(a)huawei.com>
Reviewed-by: songping yu <yusongping(a)huawei.com>
Reviewed-by: Chen Hui <judy.chenhui(a)huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai(a)huawei.com>
---
drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 86b4dadf772e..02e3c650ed1c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -408,6 +408,9 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
return -ENODEV;
/* same everything but the other direction */
props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
+ if (!props2)
+ return -ENOMEM;
+
props2->node_from = id_to;
props2->node_to = id_from;
props2->kobj = NULL;
--
2.20.1
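The shape of the fix is the standard allocation check: kmemdup()
allocates with kmalloc() and therefore returns NULL on failure, so the
result must be tested before the copy is dereferenced. Condensed, with
the names from the hunk above:

	props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
	if (!props2)
		return -ENOMEM;	/* propagate failure, never dereference NULL */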

[PATCH openEuler-1.0-LTS] dm thin: Use last transaction's pmd->root when commit failed
by Yongqiang Liu 27 Dec '22
From: Zhihao Cheng <chengzhihao1(a)huawei.com>
mainline inclusion
from mainline-v6.2-rc1
commit 7991dbff6849f67e823b7cc0c15e5a90b0549b9f
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I65M32
CVE: NA
--------------------------------
Recently we found a softlockup problem in the dm thin pool btree lookup
code, caused by corrupted metadata:
Kernel panic - not syncing: softlockup: hung tasks
CPU: 7 PID: 2669225 Comm: kworker/u16:3
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
Workqueue: dm-thin do_worker [dm_thin_pool]
Call Trace:
<IRQ>
dump_stack+0x9c/0xd3
panic+0x35d/0x6b9
watchdog_timer_fn.cold+0x16/0x25
__run_hrtimer+0xa2/0x2d0
</IRQ>
RIP: 0010:__relink_lru+0x102/0x220 [dm_bufio]
__bufio_new+0x11f/0x4f0 [dm_bufio]
new_read+0xa3/0x1e0 [dm_bufio]
dm_bm_read_lock+0x33/0xd0 [dm_persistent_data]
ro_step+0x63/0x100 [dm_persistent_data]
btree_lookup_raw.constprop.0+0x44/0x220 [dm_persistent_data]
dm_btree_lookup+0x16f/0x210 [dm_persistent_data]
dm_thin_find_block+0x12c/0x210 [dm_thin_pool]
__process_bio_read_only+0xc5/0x400 [dm_thin_pool]
process_thin_deferred_bios+0x1a4/0x4a0 [dm_thin_pool]
process_one_work+0x3c5/0x730
The following process may generate a broken btree mixed with fresh and
stale btree nodes, which can trap dm thin in an infinite loop while
looking up a data block:
 Transaction 1: pmd->root = A, A->B->C  // One path in btree
                pmd->root = X, X->Y->Z  // Copy-up
 Transaction 2: X and Z are updated on disk, but the write of Y fails.
                // Commit failed, dm thin becomes read-only.
 process_bio_read_only
   dm_thin_find_block
     __find_block
       dm_btree_lookup(pmd->root)
Now pmd->root points to a broken btree: Y may contain a stale node
pointing to any block, for example X, which traps dm thin in a dead
loop while looking up Z.
Fix this by setting pmd->root in __open_metadata(), so that dm thin
will use the last transaction's pmd->root if commit failed.
Fetch a reproducer in [Link].
Link: https://bugzilla.kernel.org/show_bug.cgi?id=216790
Cc: stable(a)vger.kernel.org
Fixes: 991d9fa02da0 ("dm: add thin provisioning target")
Signed-off-by: Zhihao Cheng <chengzhihao1(a)huawei.com>
Acked-by: Joe Thornber <ejt(a)redhat.com>
Signed-off-by: Mike Snitzer <snitzer(a)kernel.org>
Signed-off-by: Zhihao Cheng <chengzhihao1(a)huawei.com>
Reviewed-by: Zhang Yi <yi.zhang(a)huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13(a)huawei.com>
---
drivers/md/dm-thin-metadata.c | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 80c4f27e99f9..26f04b84e8a2 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -660,6 +660,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
goto bad_cleanup_data_sm;
}
+ /*
+ * For pool metadata opening process, root setting is redundant
+ * because it will be set again in __begin_transaction(). But dm
+ * pool aborting process really needs to get last transaction's
+ * root to avoid accessing broken btree.
+ */
+ pmd->root = le64_to_cpu(disk_super->data_mapping_root);
+ pmd->details_root = le64_to_cpu(disk_super->device_details_root);
+
__setup_btree_details(pmd);
dm_bm_unlock(sblock);
--
2.25.1
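The fix works because the on-disk superblock is rewritten only by a
successful commit, so the roots read back in __open_metadata() are
always the last committed ones. A condensed view of the hunk above:

	/* Adopt the last committed roots so a later failed commit cannot
	 * leave pmd->root pointing at partially written btree nodes. */
	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
	pmd->details_root = le64_to_cpu(disk_super->device_details_root);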

[PATCH openEuler-5.10] timekeeping: Adding a padding before timekeeper in tk_core
by Zheng Zengkai 26 Dec '22
From: Jialin Zhang <zhangjialin11(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I47W8L
CVE: NA
---------------------------
Adding padding before the timekeeper in tk_core further reduces the
false sharing problem discovered by commit 605276666775
("timekeeping: Avoiding false sharing in field access of tk_core").
The context1 test of unixbench was run on Huawei Kunpeng920.
before this patch
0 CPUs in system; running 96 parallel copies of tests
Pipe-based Context Switching 17213571.7 lps (10.0 s, 1 samples)
System Benchmarks Partial Index BASELINE RESULT INDEX
Pipe-based Context Switching 4000.0 17213571.7 43033.9
========
System Benchmarks Index Score (Partial Only) 43033.9
after this patch
0 CPUs in system; running 96 parallel copies of tests
Pipe-based Context Switching 20321268.3 lps (10.0 s, 1 samples)
System Benchmarks Partial Index BASELINE RESULT INDEX
Pipe-based Context Switching 4000.0 20321268.3 50803.2
========
System Benchmarks Index Score (Partial Only) 50803.2
Signed-off-by: Jialin Zhang <zhangjialin11(a)huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai(a)huawei.com>
---
kernel/time/timekeeping.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e3142bf7648e..c8d9b89a7571 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -52,6 +52,9 @@ static struct {
u64 padding[8];
#endif
seqcount_raw_spinlock_t seq;
+#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE
+ u64 padding2[2];
+#endif
struct timekeeper timekeeper;
#ifdef CONFIG_ARCH_LLC_128_LINE_SIZE
} tk_core ____cacheline_aligned_128 = {
--
2.20.1
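The underlying technique is cache-line padding against false sharing:
the seqcount is written on every timekeeping update while the
timekeeper fields are mostly read, so keeping them on separate LLC
lines stops the writer from invalidating readers' cached lines. A
self-contained userspace analogue of the idea (illustrative only;
assumes 128-byte lines as on Kunpeng920, not the kernel code):

	/* A writer thread bumps 'seq' continuously; reader threads only
	 * read 'data'. Without 'pad', both fields share one cache line
	 * and every write stalls the readers. */
	struct padded {
		_Atomic unsigned int seq;
		char pad[128 - sizeof(_Atomic unsigned int)];
		long data[4];
	};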

[PATCH openEuler-1.0-LTS 1/2] power: supply: wm8350-power: Add missing free in free_charger_irq
by Yongqiang Liu 24 Dec '22
From: Jiasheng Jiang <jiasheng(a)iscas.ac.cn>
stable inclusion
from stable-v4.19.269
commit 60dd1082322966f192f42fe2a6605dfa08eef41f
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I6694F
CVE: CVE-2022-3111
--------------------------------
[ Upstream commit 6dee930f6f6776d1e5a7edf542c6863b47d9f078 ]
In free_charger_irq(), there is no free for 'WM8350_IRQ_CHG_FAST_RDY'.
Add the missing wm8350_free_irq() call to avoid leaking the IRQ.
Fixes: 14431aa0c5a4 ("power_supply: Add support for WM8350 PMU")
Signed-off-by: Jiasheng Jiang <jiasheng(a)iscas.ac.cn>
Acked-by: Charles Keepax <ckeepax(a)opensource.cirrus.com>
Signed-off-by: Sebastian Reichel <sebastian.reichel(a)collabora.com>
Signed-off-by: Sasha Levin <sashal(a)kernel.org>
Signed-off-by: Chen jun <chenjun102(a)huawei.com>
Reviewed-by: chenweilong <chenweilong(a)huawei.com>
Reviewed-by: Wang Weiyang <wangweiyang2(a)huawei.com>
Signed-off-by: Yongqiang Liu <liuyongqiang13(a)huawei.com>
---
drivers/power/supply/wm8350_power.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index 15c0ca15e2aa..034ddfe3b62c 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -459,6 +459,7 @@ static void free_charger_irq(struct wm8350 *wm8350)
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_FAST_RDY, wm8350);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350);
wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350);
--
2.25.1
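The invariant being restored: every charger IRQ requested during setup
must have a matching wm8350_free_irq() in free_charger_irq(). One way
to make such a mismatch structurally impossible is to drive both the
request and free paths from a single table; a hedged sketch of that
design (a hypothetical refactor, not part of the patch):

	static const int charger_irqs[] = {
		WM8350_IRQ_CHG_TO, WM8350_IRQ_CHG_END, WM8350_IRQ_CHG_START,
		WM8350_IRQ_CHG_FAST_RDY, WM8350_IRQ_CHG_VBATT_LT_3P9,
		/* ... remaining charger IRQs ... */
	};

	/* One loop for free (and a mirror loop for request) keeps the
	 * two paths in sync by construction. */
	for (i = 0; i < ARRAY_SIZE(charger_irqs); i++)
		wm8350_free_irq(wm8350, charger_irqs[i], wm8350);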