December 2021
- 27 participants
- 170 discussions

[PATCH openEuler-1.0-LTS] USB: gadget: bRequestType is a bitfield, not a enum
by Yang Yingliang 22 Dec '21
From: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
mainline inclusion
from mainline-v5.16-rc6
commit f08adf5add9a071160c68bb2a61d697f39ab0758
category: bugfix
bugzilla: NA
CVE: CVE-2021-39685
--------------------------------
Szymon rightly pointed out that the previous check for the endpoint
direction in bRequestType was not looking at only the bit involved, but
rather the whole value. Normally this is ok, but for some request
types, bits other than bit 8 could be set and the check for the endpoint
length could not stall correctly.
Fix that up by only checking the single bit.
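A minimal sketch of the distinction, assuming the usual <linux/usb/ch9.h> definitions (the helper name below is illustrative, not part of the patch):

    #include <linux/usb/ch9.h>

    /*
     * bRequestType packs direction (bit 7), type (bits 6:5) and recipient
     * (bits 4:0), so comparing the whole byte against USB_DIR_OUT (0x00)
     * misses OUT requests that also carry non-zero type/recipient bits.
     */
    static bool ep0_request_is_in(const struct usb_ctrlrequest *ctrl)
    {
    	/* Old, wrong: return ctrl->bRequestType == USB_DIR_OUT; */
    	return ctrl->bRequestType & USB_DIR_IN;	/* test only the direction bit */
    }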
Fixes: 153a2d7e3350 ("USB: gadget: detect too-big endpoint 0 requests")
Cc: Felipe Balbi <balbi(a)kernel.org>
Reported-by: Szymon Heidrich <szymon.heidrich(a)gmail.com>
Link: https://lore.kernel.org/r/20211214184621.385828-1-gregkh@linuxfoundation.org
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/usb/gadget/composite.c | 6 +++---
drivers/usb/gadget/legacy/dbgp.c | 6 +++---
drivers/usb/gadget/legacy/inode.c | 6 +++---
3 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 3813249387b3d..d39ebf5bdbdd5 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1570,14 +1570,14 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
u8 endp;
if (w_length > USB_COMP_EP0_BUFSIZ) {
- if (ctrl->bRequestType == USB_DIR_OUT) {
- goto done;
- } else {
+ if (ctrl->bRequestType & USB_DIR_IN) {
/* Cast away the const, we are going to overwrite on purpose. */
__le16 *temp = (__le16 *)&ctrl->wLength;
*temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
w_length = USB_COMP_EP0_BUFSIZ;
+ } else {
+ goto done;
}
}
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index e567afcb2794c..ffe58d44b2cb1 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -346,14 +346,14 @@ static int dbgp_setup(struct usb_gadget *gadget,
u16 len = 0;
if (length > DBGP_REQ_LEN) {
- if (ctrl->bRequestType == USB_DIR_OUT) {
- return err;
- } else {
+ if (ctrl->bRequestType & USB_DIR_IN) {
/* Cast away the const, we are going to overwrite on purpose. */
__le16 *temp = (__le16 *)&ctrl->wLength;
*temp = cpu_to_le16(DBGP_REQ_LEN);
length = DBGP_REQ_LEN;
+ } else {
+ return err;
}
}
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 83fd1b55d497a..f91d403da3141 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1335,14 +1335,14 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
u16 w_length = le16_to_cpu(ctrl->wLength);
if (w_length > RBUF_SIZE) {
- if (ctrl->bRequestType == USB_DIR_OUT) {
- return value;
- } else {
+ if (ctrl->bRequestType & USB_DIR_IN) {
/* Cast away the const, we are going to overwrite on purpose. */
__le16 *temp = (__le16 *)&ctrl->wLength;
*temp = cpu_to_le16(RBUF_SIZE);
w_length = RBUF_SIZE;
+ } else {
+ return value;
}
}
--
2.25.1

[PATCH openEuler-1.0-LTS 1/2] scsi:spraid: support Ramaxel's spraid driver
by Yang Yingliang 22 Dec '21
From: Yanling Song <songyl(a)ramaxel.com>
Ramaxel inclusion
category: features
bugzilla: https://gitee.com/openeuler/kernel/issues/I4NJIB
CVE: NA
Support Ramaxel's SPRxxx Raid controller
Signed-off-by: Yanling Song <songyl(a)ramaxel.com>
Reviewed-by: Jiang Yu <yujiang(a)ramaxel.com>
Acked-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/scsi/Kconfig | 1 +
drivers/scsi/Makefile | 1 +
drivers/scsi/spraid/Kconfig | 13 +
drivers/scsi/spraid/Makefile | 7 +
drivers/scsi/spraid/spraid.h | 746 ++++++
drivers/scsi/spraid/spraid_main.c | 3875 +++++++++++++++++++++++++++++
6 files changed, 4643 insertions(+)
create mode 100644 drivers/scsi/spraid/Kconfig
create mode 100644 drivers/scsi/spraid/Makefile
create mode 100644 drivers/scsi/spraid/spraid.h
create mode 100644 drivers/scsi/spraid/spraid_main.c
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8450484184e36..63d2aaa228349 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -522,6 +522,7 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/ufs/Kconfig"
+source "drivers/scsi/spraid/Kconfig"
config SCSI_HPTIOP
tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2973693f6dcc7..4056cf26e09e6 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
obj-$(CONFIG_CXLFLASH) += cxlflash/
+obj-$(CONFIG_RAMAXEL_SPRAID) += spraid/
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
diff --git a/drivers/scsi/spraid/Kconfig b/drivers/scsi/spraid/Kconfig
new file mode 100644
index 0000000000000..bfbba3db8db03
--- /dev/null
+++ b/drivers/scsi/spraid/Kconfig
@@ -0,0 +1,13 @@
+#
+# Ramaxel driver configuration
+#
+
+config RAMAXEL_SPRAID
+ tristate "Ramaxel spraid Adapter"
+ depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
+ depends on ARM64 || X86_64
+ help
+ This driver supports the Ramaxel SPRxxx series
+ RAID controller, which has a PCIe Gen4 host
+ interface and supports SAS/SATA HDDs/SSDs.
diff --git a/drivers/scsi/spraid/Makefile b/drivers/scsi/spraid/Makefile
new file mode 100644
index 0000000000000..aadc2ffd37ebd
--- /dev/null
+++ b/drivers/scsi/spraid/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Ramaxel device drivers.
+#
+
+obj-$(CONFIG_RAMAXEL_SPRAID) += spraid.o
+
+spraid-objs := spraid_main.o
\ No newline at end of file
diff --git a/drivers/scsi/spraid/spraid.h b/drivers/scsi/spraid/spraid.h
new file mode 100644
index 0000000000000..c1e4980e18e5d
--- /dev/null
+++ b/drivers/scsi/spraid/spraid.h
@@ -0,0 +1,746 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#ifndef __SPRAID_H_
+#define __SPRAID_H_
+
+#define SPRAID_CAP_MQES(cap) ((cap) & 0xffff)
+#define SPRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define SPRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define SPRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define SPRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define SPRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff)
+
+#define SPRAID_DEFAULT_MAX_CHANNEL 4
+#define SPRAID_DEFAULT_MAX_ID 240
+#define SPRAID_DEFAULT_MAX_LUN_PER_HOST 8
+#define MAX_SECTORS 2048
+
+#define IO_SQE_SIZE sizeof(struct spraid_ioq_command)
+#define ADMIN_SQE_SIZE sizeof(struct spraid_admin_command)
+#define SQE_SIZE(qid) (((qid) > 0) ? IO_SQE_SIZE : ADMIN_SQE_SIZE)
+#define CQ_SIZE(depth) ((depth) * sizeof(struct spraid_completion))
+#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid))
+
+#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE)
+
+#define SPRAID_AQ_DEPTH 128
+#define SPRAID_NR_AEN_COMMANDS 16
+#define SPRAID_AQ_BLK_MQ_DEPTH (SPRAID_AQ_DEPTH - SPRAID_NR_AEN_COMMANDS)
+#define SPRAID_AQ_MQ_TAG_DEPTH (SPRAID_AQ_BLK_MQ_DEPTH - 1)
+
+#define SPRAID_ADMIN_QUEUE_NUM 1
+#define SPRAID_PTCMDS_PERQ 1
+#define SPRAID_IO_BLK_MQ_DEPTH (hdev->shost->can_queue)
+#define SPRAID_NR_IOQ_PTCMDS (SPRAID_PTCMDS_PERQ * hdev->shost->nr_hw_queues)
+
+#define FUA_MASK 0x08
+#define SPRAID_MINORS BIT(MINORBITS)
+
+#define COMMAND_IS_WRITE(cmd) ((cmd)->common.opcode & 1)
+
+#define SPRAID_IO_IOSQES 7
+#define SPRAID_IO_IOCQES 4
+#define PRP_ENTRY_SIZE 8
+
+#define SMALL_POOL_SIZE 256
+#define MAX_SMALL_POOL_NUM 16
+#define MAX_CMD_PER_DEV 64
+#define MAX_CDB_LEN 32
+
+#define SPRAID_UP_TO_MULTY4(x) (((x) + 4) & (~0x03))
+
+#define CQE_STATUS_SUCCESS (0x0)
+
+#define PCI_VENDOR_ID_RAMAXEL_LOGIC 0x1E81
+
+#define SPRAID_SERVER_DEVICE_HBA_DID 0x2100
+#define SPRAID_SERVER_DEVICE_RAID_DID 0x2200
+
+#define IO_6_DEFAULT_TX_LEN 256
+
+#define SPRAID_INT_PAGES 2
+#define SPRAID_INT_BYTES(hdev) (SPRAID_INT_PAGES * (hdev)->page_size)
+
+enum {
+ SPRAID_REQ_CANCELLED = (1 << 0),
+ SPRAID_REQ_USERCMD = (1 << 1),
+};
+
+enum {
+ SPRAID_SC_SUCCESS = 0x0,
+ SPRAID_SC_INVALID_OPCODE = 0x1,
+ SPRAID_SC_INVALID_FIELD = 0x2,
+
+ SPRAID_SC_ABORT_LIMIT = 0x103,
+ SPRAID_SC_ABORT_MISSING = 0x104,
+ SPRAID_SC_ASYNC_LIMIT = 0x105,
+
+ SPRAID_SC_DNR = 0x4000,
+};
+
+enum {
+ SPRAID_REG_CAP = 0x0000,
+ SPRAID_REG_CC = 0x0014,
+ SPRAID_REG_CSTS = 0x001c,
+ SPRAID_REG_AQA = 0x0024,
+ SPRAID_REG_ASQ = 0x0028,
+ SPRAID_REG_ACQ = 0x0030,
+ SPRAID_REG_DBS = 0x1000,
+};
+
+enum {
+ SPRAID_CC_ENABLE = 1 << 0,
+ SPRAID_CC_CSS_NVM = 0 << 4,
+ SPRAID_CC_MPS_SHIFT = 7,
+ SPRAID_CC_AMS_SHIFT = 11,
+ SPRAID_CC_SHN_SHIFT = 14,
+ SPRAID_CC_IOSQES_SHIFT = 16,
+ SPRAID_CC_IOCQES_SHIFT = 20,
+ SPRAID_CC_AMS_RR = 0 << SPRAID_CC_AMS_SHIFT,
+ SPRAID_CC_SHN_NONE = 0 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CC_IOSQES = SPRAID_IO_IOSQES << SPRAID_CC_IOSQES_SHIFT,
+ SPRAID_CC_IOCQES = SPRAID_IO_IOCQES << SPRAID_CC_IOCQES_SHIFT,
+ SPRAID_CC_SHN_NORMAL = 1 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CC_SHN_MASK = 3 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CSTS_CFS_SHIFT = 1,
+ SPRAID_CSTS_SHST_SHIFT = 2,
+ SPRAID_CSTS_PP_SHIFT = 5,
+ SPRAID_CSTS_RDY = 1 << 0,
+ SPRAID_CSTS_SHST_CMPLT = 2 << 2,
+ SPRAID_CSTS_SHST_MASK = 3 << 2,
+ SPRAID_CSTS_CFS_MASK = 1 << SPRAID_CSTS_CFS_SHIFT,
+ SPRAID_CSTS_PP_MASK = 1 << SPRAID_CSTS_PP_SHIFT,
+};
+
+enum {
+ SPRAID_ADMIN_DELETE_SQ = 0x00,
+ SPRAID_ADMIN_CREATE_SQ = 0x01,
+ SPRAID_ADMIN_DELETE_CQ = 0x04,
+ SPRAID_ADMIN_CREATE_CQ = 0x05,
+ SPRAID_ADMIN_ABORT_CMD = 0x08,
+ SPRAID_ADMIN_SET_FEATURES = 0x09,
+ SPRAID_ADMIN_ASYNC_EVENT = 0x0c,
+ SPRAID_ADMIN_GET_INFO = 0xc6,
+ SPRAID_ADMIN_RESET = 0xc8,
+};
+
+enum {
+ SPRAID_GET_INFO_CTRL = 0,
+ SPRAID_GET_INFO_DEV_LIST = 1,
+};
+
+enum {
+ SPRAID_RESET_TARGET = 0,
+ SPRAID_RESET_BUS = 1,
+};
+
+enum {
+ SPRAID_AEN_ERROR = 0,
+ SPRAID_AEN_NOTICE = 2,
+ SPRAID_AEN_VS = 7,
+};
+
+enum {
+ SPRAID_AEN_DEV_CHANGED = 0x00,
+ SPRAID_AEN_FW_ACT_START = 0x01,
+ SPRAID_AEN_HOST_PROBING = 0x10,
+};
+
+enum {
+ SPRAID_AEN_TIMESYN = 0x00,
+ SPRAID_AEN_FW_ACT_FINISH = 0x02,
+ SPRAID_AEN_EVENT_MIN = 0x80,
+ SPRAID_AEN_EVENT_MAX = 0xff,
+};
+
+enum {
+ SPRAID_CMD_WRITE = 0x01,
+ SPRAID_CMD_READ = 0x02,
+
+ SPRAID_CMD_NONIO_NONE = 0x80,
+ SPRAID_CMD_NONIO_TODEV = 0x81,
+ SPRAID_CMD_NONIO_FROMDEV = 0x82,
+};
+
+enum {
+ SPRAID_QUEUE_PHYS_CONTIG = (1 << 0),
+ SPRAID_CQ_IRQ_ENABLED = (1 << 1),
+
+ SPRAID_FEAT_NUM_QUEUES = 0x07,
+ SPRAID_FEAT_ASYNC_EVENT = 0x0b,
+ SPRAID_FEAT_TIMESTAMP = 0x0e,
+};
+
+enum spraid_state {
+ SPRAID_NEW,
+ SPRAID_LIVE,
+ SPRAID_RESETTING,
+ SPRAID_DELETING,
+ SPRAID_DEAD,
+};
+
+enum {
+ SPRAID_CARD_HBA,
+ SPRAID_CARD_RAID,
+};
+
+enum spraid_cmd_type {
+ SPRAID_CMD_ADM,
+ SPRAID_CMD_IOPT,
+};
+
+struct spraid_completion {
+ __le32 result;
+ union {
+ struct {
+ __u8 sense_len;
+ __u8 resv[3];
+ };
+ __le32 result1;
+ };
+ __le16 sq_head;
+ __le16 sq_id;
+ __u16 cmd_id;
+ __le16 status;
+};
+
+struct spraid_ctrl_info {
+ __le32 nd;
+ __le16 max_cmds;
+ __le16 max_channel;
+ __le32 max_tgt_id;
+ __le16 max_lun;
+ __le16 max_num_sge;
+ __le16 lun_num_in_boot;
+ __u8 mdts;
+ __u8 acl;
+ __u8 aerl;
+ __u8 card_type;
+ __u16 rsvd;
+ __u32 rtd3e;
+ __u8 sn[32];
+ __u8 fr[16];
+ __u8 rsvd1[4020];
+};
+
+struct spraid_dev {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct Scsi_Host *shost;
+ struct spraid_queue *queues;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool[MAX_SMALL_POOL_NUM];
+ mempool_t *iod_mempool;
+ void __iomem *bar;
+ u32 max_qid;
+ u32 num_vecs;
+ u32 queue_count;
+ u32 ioq_depth;
+ int db_stride;
+ u32 __iomem *dbs;
+ struct rw_semaphore devices_rwsem;
+ int numa_node;
+ u32 page_size;
+ u32 ctrl_config;
+ u32 online_queues;
+ u64 cap;
+ int instance;
+ struct spraid_ctrl_info *ctrl_info;
+ struct spraid_dev_info *devices;
+
+ struct spraid_cmd *adm_cmds;
+ struct list_head adm_cmd_list;
+ spinlock_t adm_cmd_lock;
+
+ struct spraid_cmd *ioq_ptcmds;
+ struct list_head ioq_pt_list;
+ spinlock_t ioq_pt_lock;
+
+ struct work_struct scan_work;
+ struct work_struct timesyn_work;
+ struct work_struct reset_work;
+ struct work_struct fw_act_work;
+
+ enum spraid_state state;
+ spinlock_t state_lock;
+
+ struct request_queue *bsg_queue;
+};
+
+struct spraid_sgl_desc {
+ __le64 addr;
+ __le32 length;
+ __u8 rsvd[3];
+ __u8 type;
+};
+
+union spraid_data_ptr {
+ struct {
+ __le64 prp1;
+ __le64 prp2;
+ };
+ struct spraid_sgl_desc sgl;
+};
+
+struct spraid_admin_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le32 cdw2[4];
+ union spraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+struct spraid_features {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[2];
+ union spraid_data_ptr dptr;
+ __le32 fid;
+ __le32 dword11;
+ __le32 dword12;
+ __le32 dword13;
+ __le32 dword14;
+ __le32 dword15;
+};
+
+struct spraid_create_cq {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 cqid;
+ __le16 qsize;
+ __le16 cq_flags;
+ __le16 irq_vector;
+ __u32 rsvd12[4];
+};
+
+struct spraid_create_sq {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 sqid;
+ __le16 qsize;
+ __le16 sq_flags;
+ __le16 cqid;
+ __u32 rsvd12[4];
+};
+
+struct spraid_delete_queue {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[9];
+ __le16 qid;
+ __u16 rsvd10;
+ __u32 rsvd11[5];
+};
+
+struct spraid_get_info {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u32 rsvd2[4];
+ union spraid_data_ptr dptr;
+ __u8 type;
+ __u8 rsvd10[3];
+ __le32 cdw11;
+ __u32 rsvd12[4];
+};
+
+struct spraid_usr_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ union {
+ struct {
+ __le16 subopcode;
+ __le16 rsvd1;
+ } info_0;
+ __le32 cdw2;
+ };
+ union {
+ struct {
+ __le16 data_len;
+ __le16 param_len;
+ } info_1;
+ __le32 cdw3;
+ };
+ __u64 metadata;
+ union spraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+enum {
+ SPRAID_CMD_FLAG_SGL_METABUF = (1 << 6),
+ SPRAID_CMD_FLAG_SGL_METASEG = (1 << 7),
+ SPRAID_CMD_FLAG_SGL_ALL = SPRAID_CMD_FLAG_SGL_METABUF |
+ SPRAID_CMD_FLAG_SGL_METASEG,
+};
+
+enum spraid_cmd_state {
+ SPRAID_CMD_IDLE = 0,
+ SPRAID_CMD_IN_FLIGHT = 1,
+ SPRAID_CMD_COMPLETE = 2,
+ SPRAID_CMD_TIMEOUT = 3,
+ SPRAID_CMD_TMO_COMPLETE = 4,
+};
+
+struct spraid_abort_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __le16 sqid;
+ __le16 cid;
+ __u32 rsvd11[5];
+};
+
+struct spraid_reset_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __u8 type;
+ __u8 rsvd10[3];
+ __u32 rsvd11[5];
+};
+
+struct spraid_admin_command {
+ union {
+ struct spraid_admin_common_command common;
+ struct spraid_features features;
+ struct spraid_create_cq create_cq;
+ struct spraid_create_sq create_sq;
+ struct spraid_delete_queue delete_queue;
+ struct spraid_get_info get_info;
+ struct spraid_abort_cmd abort;
+ struct spraid_reset_cmd reset;
+ struct spraid_usr_cmd usr_cmd;
+ };
+};
+
+struct spraid_ioq_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __le32 cdw3[3];
+ union spraid_data_ptr dptr;
+ __le32 cdw10[6];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __le32 cdw26[6];
+};
+
+struct spraid_rw_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union spraid_data_ptr dptr;
+ __le64 slba;
+ __le16 nlb;
+ __le16 control;
+ __u32 rsvd13[3];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct spraid_scsi_nonio {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_length;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union spraid_data_ptr dptr;
+ __u32 rsvd10[5];
+ __le32 buffer_len;
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct spraid_ioq_command {
+ union {
+ struct spraid_ioq_common_command common;
+ struct spraid_rw_command rw;
+ struct spraid_scsi_nonio scsi_nonio;
+ };
+};
+
+struct spraid_passthru_common_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 data_len;
+ __u16 param_len;
+ } info_1;
+ __u32 cdw3;
+ };
+ __u64 metadata;
+
+ __u64 addr;
+ __u64 prp2;
+
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct spraid_ioq_passthru_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 res_sense_len;
+ __u8 cdb_len;
+ __u8 rsvd0;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_1;
+ __u32 cdw3;
+ };
+ union {
+ struct {
+ __u16 rsvd;
+ __u16 param_len;
+ } info_2;
+ __u32 cdw4;
+ };
+ __u32 cdw5;
+ __u64 addr;
+ __u64 prp2;
+ union {
+ struct {
+ __u16 eid;
+ __u16 sid;
+ } info_3;
+ __u32 cdw10;
+ };
+ union {
+ struct {
+ __u16 did;
+ __u8 did_flag;
+ __u8 rsvd2;
+ } info_4;
+ __u32 cdw11;
+ };
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 data_len;
+ __u32 cdw16;
+ __u32 cdw17;
+ __u32 cdw18;
+ __u32 cdw19;
+ __u32 cdw20;
+ __u32 cdw21;
+ __u32 cdw22;
+ __u32 cdw23;
+ __u64 sense_addr;
+ __u32 cdw26[4];
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct spraid_bsg_request {
+ u32 msgcode;
+ u32 control;
+ union {
+ struct spraid_passthru_common_cmd admcmd;
+ struct spraid_ioq_passthru_cmd ioqcmd;
+ };
+};
+
+enum {
+ SPRAID_BSG_ADM,
+ SPRAID_BSG_IOQ,
+};
+
+struct spraid_cmd {
+ int qid;
+ int cid;
+ u32 result0;
+ u32 result1;
+ u16 status;
+ void *priv;
+ enum spraid_cmd_state state;
+ struct completion cmd_done;
+ struct list_head list;
+};
+
+struct spraid_queue {
+ struct spraid_dev *hdev;
+ spinlock_t sq_lock;
+
+ spinlock_t cq_lock ____cacheline_aligned_in_smp;
+
+ void *sq_cmds;
+
+ struct spraid_completion *cqes;
+
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ u32 __iomem *q_db;
+ u8 cq_phase;
+ u8 sqes;
+ u16 qid;
+ u16 sq_tail;
+ u16 cq_head;
+ u16 last_cq_head;
+ u16 q_depth;
+ s16 cq_vector;
+ void *sense;
+ dma_addr_t sense_dma_addr;
+ struct dma_pool *prp_small_pool;
+};
+
+struct spraid_iod {
+ struct spraid_queue *spraidq;
+ enum spraid_cmd_state state;
+ int npages;
+ u32 nsge;
+ u32 length;
+ bool use_sgl;
+ bool sg_drv_mgmt;
+ dma_addr_t first_dma;
+ void *sense;
+ dma_addr_t sense_dma;
+ struct scatterlist *sg;
+ struct scatterlist inline_sg[0];
+};
+
+#define SPRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
+#define SPRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
+#define SPRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
+#define SPRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
+
+#define SPRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
+#define SPRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+struct spraid_vd_info {
+ __u8 name[32];
+ __le16 id;
+ __u8 rg_id;
+ __u8 rg_level;
+ __u8 sg_num;
+ __u8 sg_disk_num;
+ __u8 vd_status;
+ __u8 vd_type;
+ __u8 rsvd1[4056];
+};
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+struct bgtask_info {
+ __u8 type;
+ __u8 progress;
+ __u8 rate;
+ __u8 rsvd0;
+ __le16 vd_id;
+ __le16 time_left;
+ __u8 rsvd1[4];
+};
+
+struct spraid_bgtask {
+ __u8 sw;
+ __u8 task_num;
+ __u8 rsvd[6];
+ struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
+struct spraid_dev_info {
+ __le32 hdid;
+ __le16 target;
+ __u8 channel;
+ __u8 lun;
+ __u8 attr;
+ __u8 flag;
+ __le16 max_io_kb;
+};
+
+#define MAX_DEV_ENTRY_PER_PAGE_4K 340
+struct spraid_dev_list {
+ __le32 dev_num;
+ __u32 rsvd0[3];
+ struct spraid_dev_info devices[MAX_DEV_ENTRY_PER_PAGE_4K];
+};
+
+struct spraid_sdev_hostdata {
+ u32 hdid;
+ u16 max_io_kb;
+ u8 attr;
+ u8 flag;
+ u8 rg_id;
+ u8 rsvd[3];
+};
+
+#endif
+
diff --git a/drivers/scsi/spraid/spraid_main.c b/drivers/scsi/spraid/spraid_main.c
new file mode 100644
index 0000000000000..519b39f44e914
--- /dev/null
+++ b/drivers/scsi/spraid/spraid_main.c
@@ -0,0 +1,3875 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+/* Ramaxel Raid SPXXX Series Linux Driver */
+
+#define pr_fmt(fmt) "spraid: " fmt
+
+#include <linux/sched/signal.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/once.h>
+#include <linux/debugfs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <asm/unaligned.h>
+#include <linux/sort.h>
+#include <target/target_core_backend.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+
+
+#include "spraid.h"
+
+static u32 admin_tmout = 60;
+module_param(admin_tmout, uint, 0644);
+MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
+
+static u32 scmd_tmout_nonpt = 180;
+module_param(scmd_tmout_nonpt, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_nonpt,
+ "scsi commands timeout for rawdisk&raid(seconds)");
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops ioq_depth_ops = {
+ .set = ioq_depth_set,
+ .get = param_get_uint,
+};
+
+static u32 io_queue_depth = 1024;
+module_param_cb(io_queue_depth, &ioq_depth_ops, &io_queue_depth, 0644);
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
+
+static int log_debug_switch_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+
+ return param_set_byte(val, kp);
+}
+
+static const struct kernel_param_ops log_debug_switch_ops = {
+ .set = log_debug_switch_set,
+ .get = param_get_byte,
+};
+
+static unsigned char log_debug_switch;
+module_param_cb(log_debug_switch, &log_debug_switch_ops,
+ &log_debug_switch, 0644);
+MODULE_PARM_DESC(log_debug_switch,
+ "set log state, default non-zero for switch on");
+
+static int small_pool_num_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+ if (n > MAX_SMALL_POOL_NUM)
+ n = MAX_SMALL_POOL_NUM;
+ if (n < 1)
+ n = 1;
+ *((u8 *)kp->arg) = n;
+
+ return 0;
+}
+
+static const struct kernel_param_ops small_pool_num_ops = {
+ .set = small_pool_num_set,
+ .get = param_get_byte,
+};
+
+/* It was found that the spinlock of a single pool is heavily
+ * contended when multiple CPUs allocate from it, so multiple
+ * pools are introduced to reduce the contention.
+ */
+static unsigned char small_pool_num = 4;
+module_param_cb(small_pool_num, &small_pool_num_ops, &small_pool_num, 0644);
+MODULE_PARM_DESC(small_pool_num, "set prp small pool num, default 4, MAX 16");
+
+static void spraid_free_queue(struct spraid_queue *spraidq);
+static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result);
+static void spraid_handle_aen_vs(struct spraid_dev *hdev,
+ u32 result, u32 result1);
+
+static DEFINE_IDA(spraid_instance_ida);
+
+static struct class *spraid_class;
+
+#define SPRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2)
+
+static struct workqueue_struct *spraid_wq;
+
+#define dev_log_dbg(dev, fmt, ...) do { \
+ if (unlikely(log_debug_switch)) \
+ dev_info(dev, "[%s] [%d] " fmt, \
+ __func__, __LINE__, ##__VA_ARGS__); \
+} while (0)
+
+#define SPRAID_DRV_VERSION "1.0.0.0"
+
+#define ADMIN_TIMEOUT (admin_tmout * HZ)
+#define ADMIN_ERR_TIMEOUT 32757
+
+#define SPRAID_WAIT_ABNL_CMD_TIMEOUT (3 * 2)
+
+#define SPRAID_DMA_MSK_BIT_MAX 64
+
+enum FW_STAT_CODE {
+ FW_STAT_OK = 0,
+ FW_STAT_NEED_CHECK,
+ FW_STAT_ERROR,
+ FW_STAT_EP_PCIE_ERROR,
+ FW_STAT_NAC_DMA_ERROR,
+ FW_STAT_ABORTED,
+ FW_STAT_NEED_RETRY
+};
+
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60",
+ "NA"};
+
+static const char * const raid_states[] = {
+ "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED",
+ "FORMATTING", "SANITIZING", "INITIALIZING", "INITIALIZE_FAIL",
+ "DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0;
+ int ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static int spraid_remap_bar(struct spraid_dev *hdev, u32 size)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (size > pci_resource_len(pdev, 0)) {
+ dev_err(hdev->dev, "Input size[%u] exceed bar0 length[%llu]\n",
+ size, pci_resource_len(pdev, 0));
+ return -ENOMEM;
+ }
+
+ if (hdev->bar)
+ iounmap(hdev->bar);
+
+ hdev->bar = ioremap(pci_resource_start(pdev, 0), size);
+ if (!hdev->bar) {
+ dev_err(hdev->dev, "ioremap for bar0 failed\n");
+ return -ENOMEM;
+ }
+ hdev->dbs = hdev->bar + SPRAID_REG_DBS;
+
+ return 0;
+}
+
+static int spraid_dev_map(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ ret = pci_request_mem_regions(pdev, "spraid");
+ if (ret) {
+ dev_err(hdev->dev, "fail to request memory regions\n");
+ return ret;
+ }
+
+ ret = spraid_remap_bar(hdev, SPRAID_REG_DBS + 4096);
+ if (ret) {
+ pci_release_mem_regions(pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void spraid_dev_unmap(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (hdev->bar) {
+ iounmap(hdev->bar);
+ hdev->bar = NULL;
+ }
+ pci_release_mem_regions(pdev);
+}
+
+static int spraid_pci_enable(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = -ENOMEM;
+ u64 maskbit = SPRAID_DMA_MSK_BIT_MAX;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(hdev->dev,
+ "Enable pci device memory resources failed\n");
+ return ret;
+ }
+ pci_set_master(pdev);
+
+ if (readl(hdev->bar + SPRAID_REG_CSTS) == U32_MAX) {
+ ret = -ENODEV;
+ dev_err(hdev->dev, "Read csts register failed\n");
+ goto disable;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(hdev->dev,
+ "Allocate one IRQ for setup admin channel failed\n");
+ goto disable;
+ }
+
+ hdev->cap = lo_hi_readq(hdev->bar + SPRAID_REG_CAP);
+ hdev->ioq_depth = min_t(u32, SPRAID_CAP_MQES(hdev->cap) + 1,
+ io_queue_depth);
+ hdev->db_stride = 1 << SPRAID_CAP_STRIDE(hdev->cap);
+
+ maskbit = SPRAID_CAP_DMAMASK(hdev->cap);
+ if (maskbit < 32 || maskbit > SPRAID_DMA_MSK_BIT_MAX) {
+ dev_err(hdev->dev,
+ "err, dma mask invalid[%llu], set to default\n",
+ maskbit);
+ maskbit = SPRAID_DMA_MSK_BIT_MAX;
+ }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) {
+ dev_err(hdev->dev, "set dma mask and coherent failed\n");
+ goto disable;
+ }
+
+ dev_info(hdev->dev, "set dma mask[%llu] success\n", maskbit);
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_save_state(pdev);
+
+ return 0;
+
+disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static int spraid_npages_prp(u32 size, struct spraid_dev *hdev)
+{
+ u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
+
+ return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, PAGE_SIZE - PRP_ENTRY_SIZE);
+}
+
+static int spraid_npages_sgl(u32 nseg)
+{
+ return DIV_ROUND_UP(nseg * sizeof(struct spraid_sgl_desc), PAGE_SIZE);
+}
+
+static void **spraid_iod_list(struct spraid_iod *iod)
+{
+ return (void **)(iod->inline_sg + (iod->sg_drv_mgmt ? iod->nsge : 0));
+}
+
+static u32 spraid_iod_ext_size(struct spraid_dev *hdev, u32 size, u32 nsge,
+ bool sg_drv_mgmt, bool use_sgl)
+{
+ size_t alloc_size, sg_size;
+
+ if (use_sgl)
+ alloc_size = sizeof(__le64 *) * spraid_npages_sgl(nsge);
+ else
+ alloc_size = sizeof(__le64 *) * spraid_npages_prp(size, hdev);
+
+ sg_size = sg_drv_mgmt ? (sizeof(struct scatterlist) * nsge) : 0;
+ return sg_size + alloc_size;
+}
+
+static u32 spraid_cmd_size(struct spraid_dev *hdev,
+ bool sg_drv_mgmt, bool use_sgl)
+{
+ u32 alloc_size = spraid_iod_ext_size(hdev, SPRAID_INT_BYTES(hdev),
+ SPRAID_INT_PAGES, sg_drv_mgmt, use_sgl);
+
+ dev_info(hdev->dev, "sg_drv_mgmt: %s, use_sgl: %s, iod size: %lu;"
+ " alloc_size: %u\n", sg_drv_mgmt ? "true" : "false",
+ use_sgl ? "true" : "false",
+ sizeof(struct spraid_iod), alloc_size);
+
+ return sizeof(struct spraid_iod) + alloc_size;
+}
+
+static int spraid_setup_prps(struct spraid_dev *hdev, struct spraid_iod *iod)
+{
+ struct scatterlist *sg = iod->sg;
+ u64 dma_addr = sg_dma_address(sg);
+ int dma_len = sg_dma_len(sg);
+ __le64 *prp_list, *old_prp_list;
+ u32 page_size = hdev->page_size;
+ int offset = dma_addr & (page_size - 1);
+ void **list = spraid_iod_list(iod);
+ int length = iod->length;
+ struct dma_pool *pool;
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ length -= (page_size - offset);
+ if (length <= 0) {
+ iod->first_dma = 0;
+ return 0;
+ }
+
+ dma_len -= (page_size - offset);
+ if (dma_len) {
+ dma_addr += (page_size - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= page_size) {
+ iod->first_dma = dma_addr;
+ return 0;
+ }
+
+ nprps = DIV_ROUND_UP(length, page_size);
+ if (nprps <= (SMALL_POOL_SIZE / PRP_ENTRY_SIZE)) {
+ pool = iod->spraidq->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate first prp_list memory failed\n");
+ iod->first_dma = dma_addr;
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ i = 0;
+ for (;;) {
+ if (i == page_size / PRP_ENTRY_SIZE) {
+ old_prp_list = prp_list;
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(hdev->dev, "Allocate %dth;"
+ " prp_list memory failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= page_size;
+ dma_addr += page_size;
+ length -= page_size;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ return 0;
+
+bad_sgl:
+ dev_err(hdev->dev,
+ "Setup prps, invalid SGL for payload: %d nents: %d\n",
+ iod->length, iod->nsge);
+ return -EIO;
+}
+
+#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct spraid_sgl_desc))
+
+static void spraid_submit_cmd(struct spraid_queue *spraidq, const void *cmd)
+{
+ u32 sqes = SQE_SIZE(spraidq->qid);
+ unsigned long flags;
+ struct spraid_admin_common_command *acd =
+ (struct spraid_admin_common_command *)cmd;
+
+ spin_lock_irqsave(&spraidq->sq_lock, flags);
+ memcpy((spraidq->sq_cmds + sqes * spraidq->sq_tail), cmd, sqes);
+ if (++spraidq->sq_tail == spraidq->q_depth)
+ spraidq->sq_tail = 0;
+
+ writel(spraidq->sq_tail, spraidq->q_db);
+ spin_unlock_irqrestore(&spraidq->sq_lock, flags);
+
+ dev_log_dbg(spraidq->hdev->dev,
+ "cid[%d] qid[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n",
+ acd->command_id, spraidq->qid, acd->opcode,
+ acd->flags, le32_to_cpu(acd->hdid));
+}
+
+static u32 spraid_mod64(u64 dividend, u32 divisor)
+{
+ u64 d;
+ u32 remainder;
+
+ if (!divisor)
+ pr_err("DIVISOR is zero, in div fn\n");
+
+ d = dividend;
+ remainder = do_div(d, divisor);
+ return remainder;
+}
+
+static inline bool spraid_is_rw_scmd(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case READ_32:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case WRITE_32:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool spraid_is_prp(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd, u32 nsge)
+{
+ struct scatterlist *sg = scsi_sglist(scmd);
+ u32 page_size = hdev->page_size;
+ bool is_prp = true;
+ int i = 0;
+
+ scsi_for_each_sg(scmd, sg, nsge, i) {
+ if (i != 0 && i != nsge - 1) {
+ if (spraid_mod64(sg_dma_len(sg), page_size) ||
+ spraid_mod64(sg_dma_address(sg), page_size)) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == 0) {
+ if ((spraid_mod64((sg_dma_address(sg) + sg_dma_len(sg)),
+ page_size))) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == (nsge - 1)) {
+ if (spraid_mod64(sg_dma_address(sg), page_size)) {
+ is_prp = false;
+ break;
+ }
+ }
+ }
+
+ return is_prp;
+}
+
+enum {
+ SPRAID_SGL_FMT_DATA_DESC = 0x00,
+ SPRAID_SGL_FMT_SEG_DESC = 0x02,
+ SPRAID_SGL_FMT_LAST_SEG_DESC = 0x03,
+ SPRAID_KEY_SGL_FMT_DATA_DESC = 0x04,
+ SPRAID_TRANSPORT_SGL_DATA_DESC = 0x05
+};
+
+static void spraid_sgl_set_data(struct spraid_sgl_desc *sge,
+ struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = SPRAID_SGL_FMT_DATA_DESC << 4;
+}
+
+static void spraid_sgl_set_seg(struct spraid_sgl_desc *sge,
+ dma_addr_t dma_addr, int entries)
+{
+ sge->addr = cpu_to_le64(dma_addr);
+ if (entries <= SGES_PER_PAGE) {
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = SPRAID_SGL_FMT_LAST_SEG_DESC << 4;
+ } else {
+ sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->type = SPRAID_SGL_FMT_SEG_DESC << 4;
+ }
+}
+
+static int spraid_setup_ioq_cmd_sgl(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd,
+ struct spraid_ioq_command *ioq_cmd,
+ struct spraid_iod *iod)
+{
+ struct spraid_sgl_desc *sg_list, *link, *old_sg_list;
+ struct scatterlist *sg = scsi_sglist(scmd);
+ void **list = spraid_iod_list(iod);
+ struct dma_pool *pool;
+ int nsge = iod->nsge;
+ dma_addr_t sgl_dma;
+ int i = 0;
+
+ ioq_cmd->common.flags |= SPRAID_CMD_FLAG_SGL_METABUF;
+
+ if (nsge == 1) {
+ spraid_sgl_set_data(&ioq_cmd->common.dptr.sgl, sg);
+ return 0;
+ }
+
+ if (nsge <= (SMALL_POOL_SIZE / sizeof(struct spraid_sgl_desc))) {
+ pool = iod->spraidq->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate first sgl_list failed\n");
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+
+ list[0] = sg_list;
+ iod->first_dma = sgl_dma;
+ spraid_sgl_set_seg(&ioq_cmd->common.dptr.sgl, sgl_dma, nsge);
+ do {
+ if (i == SGES_PER_PAGE) {
+ old_sg_list = sg_list;
+ link = &old_sg_list[SGES_PER_PAGE - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate %dth sgl_list;"
+ " failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = sg_list;
+
+ i = 0;
+ memcpy(&sg_list[i++], link, sizeof(*link));
+ spraid_sgl_set_seg(link, sgl_dma, nsge);
+ }
+
+ spraid_sgl_set_data(&sg_list[i++], sg);
+ sg = sg_next(sg);
+ } while (--nsge > 0);
+
+ return 0;
+}
+
+#define SPRAID_RW_FUA BIT(14)
+
+static void spraid_setup_rw_cmd(struct spraid_dev *hdev,
+ struct spraid_rw_command *rw,
+ struct scsi_cmnd *scmd)
+{
+ u32 start_lba_lo, start_lba_hi;
+ u32 datalength = 0;
+ u16 control = 0;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ rw->opcode = SPRAID_CMD_WRITE;
+ } else if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
+ rw->opcode = SPRAID_CMD_READ;
+ } else {
+ dev_err(hdev->dev,
+ "Invalid IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ }
+
+ /* 6-byte READ(0x08) or WRITE(0x0A) cdb */
+ if (scmd->cmd_len == 6) {
+ datalength = (u32)(scmd->cmnd[4] == 0 ?
+ IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
+ start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
+
+ start_lba_lo &= 0x1FFFFF;
+ }
+
+ /* 10-byte READ(0x28) or WRITE(0x2A) cdb */
+ else if (scmd->cmd_len == 10) {
+ datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+
+ /* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
+ else if (scmd->cmd_len == 12) {
+ datalength = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+ /* 16-byte READ(0x88) or WRITE(0x8A) cdb */
+ else if (scmd->cmd_len == 16) {
+ datalength = get_unaligned_be32(&scmd->cmnd[10]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+ /* 32-byte READ(0x88) or WRITE(0x8A) cdb */
+ else if (scmd->cmd_len == 32) {
+ datalength = get_unaligned_be32(&scmd->cmnd[28]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[16]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[12]);
+
+ if (scmd->cmnd[10] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+
+ if (unlikely(datalength > U16_MAX || datalength == 0)) {
+ dev_err(hdev->dev,
+ "Invalid IO for illegal transfer data length: %u\n",
+ datalength);
+ WARN_ON(1);
+ }
+
+ rw->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo);
+ /* 0base for nlb */
+ rw->nlb = cpu_to_le16((u16)(datalength - 1));
+ rw->control = cpu_to_le16(control);
+}
+
+static void spraid_setup_nonio_cmd(struct spraid_dev *hdev,
+ struct spraid_scsi_nonio *scsi_nonio,
+ struct scsi_cmnd *scmd)
+{
+ scsi_nonio->buffer_len = cpu_to_le32(scsi_bufflen(scmd));
+
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_NONE;
+ break;
+ case DMA_TO_DEVICE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_TODEV;
+ break;
+ case DMA_FROM_DEVICE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_FROMDEV;
+ break;
+ default:
+ dev_err(hdev->dev,
+ "Invalid IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ }
+}
+
+static void spraid_setup_ioq_cmd(struct spraid_dev *hdev,
+ struct spraid_ioq_command *ioq_cmd,
+ struct scsi_cmnd *scmd)
+{
+ memcpy(ioq_cmd->common.cdb, scmd->cmnd, scmd->cmd_len);
+ ioq_cmd->common.cdb_len = scmd->cmd_len;
+
+ if (spraid_is_rw_scmd(scmd))
+ spraid_setup_rw_cmd(hdev, &ioq_cmd->rw, scmd);
+ else
+ spraid_setup_nonio_cmd(hdev, &ioq_cmd->scsi_nonio, scmd);
+}
+
+static int spraid_init_iod(struct spraid_dev *hdev, struct spraid_iod *iod,
+ struct spraid_ioq_command *ioq_cmd,
+ struct scsi_cmnd *scmd)
+{
+ if (unlikely(!iod->sense)) {
+ dev_err(hdev->dev, "Allocate sense data buffer failed\n");
+ return -ENOMEM;
+ }
+ ioq_cmd->common.sense_addr = cpu_to_le64(iod->sense_dma);
+ ioq_cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+
+ iod->nsge = 0;
+ iod->npages = -1;
+ iod->use_sgl = 0;
+ iod->sg_drv_mgmt = false;
+ WRITE_ONCE(iod->state, SPRAID_CMD_IDLE);
+
+ return 0;
+}
+
+static void spraid_free_iod_res(struct spraid_dev *hdev, struct spraid_iod *iod)
+{
+ const int last_prp = hdev->page_size / sizeof(__le64) - 1;
+ dma_addr_t dma_addr, next_dma_addr;
+ struct spraid_sgl_desc *sg_list;
+ __le64 *prp_list;
+ void *addr;
+ int i;
+
+ dma_addr = iod->first_dma;
+ if (iod->npages == 0)
+ dma_pool_free(iod->spraidq->prp_small_pool,
+ spraid_iod_list(iod)[0], dma_addr);
+
+ for (i = 0; i < iod->npages; i++) {
+ addr = spraid_iod_list(iod)[i];
+
+ if (iod->use_sgl) {
+ sg_list = addr;
+ next_dma_addr =
+ le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+ } else {
+ prp_list = addr;
+ next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+ }
+
+ dma_pool_free(hdev->prp_page_pool, addr, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+
+ if (iod->sg_drv_mgmt && iod->sg != iod->inline_sg) {
+ iod->sg_drv_mgmt = false;
+ mempool_free(iod->sg, hdev->iod_mempool);
+ }
+
+ iod->sense = NULL;
+ iod->npages = -1;
+}
+
+static int spraid_io_map_data(struct spraid_dev *hdev, struct spraid_iod *iod,
+ struct scsi_cmnd *scmd,
+ struct spraid_ioq_command *ioq_cmd)
+{
+ int ret;
+
+ iod->nsge = scsi_dma_map(scmd);
+
+ /* No data to DMA, it may be scsi no-rw command */
+ if (unlikely(iod->nsge == 0))
+ return 0;
+
+ iod->length = scsi_bufflen(scmd);
+ iod->sg = scsi_sglist(scmd);
+ iod->use_sgl = !spraid_is_prp(hdev, scmd, iod->nsge);
+
+ if (iod->use_sgl) {
+ ret = spraid_setup_ioq_cmd_sgl(hdev, scmd, ioq_cmd, iod);
+ } else {
+ ret = spraid_setup_prps(hdev, iod);
+ ioq_cmd->common.dptr.prp1 =
+ cpu_to_le64(sg_dma_address(iod->sg));
+ ioq_cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+ }
+
+ if (ret)
+ scsi_dma_unmap(scmd);
+
+ return ret;
+}
+
+static void spraid_map_status(struct spraid_iod *iod, struct scsi_cmnd *scmd,
+ struct spraid_completion *cqe)
+{
+ scsi_set_resid(scmd, 0);
+
+ switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) {
+ case FW_STAT_OK:
+ set_host_byte(scmd, DID_OK);
+ break;
+ case FW_STAT_NEED_CHECK:
+ set_host_byte(scmd, DID_OK);
+ scmd->result |= le16_to_cpu(cqe->status) >> 8;
+ if (scmd->result & SAM_STAT_CHECK_CONDITION) {
+ memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(scmd->sense_buffer, iod->sense,
+ SCSI_SENSE_BUFFERSIZE);
+ scmd->result =
+ (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
+ }
+ break;
+ case FW_STAT_ABORTED:
+ set_host_byte(scmd, DID_ABORT);
+ break;
+ case FW_STAT_NEED_RETRY:
+ set_host_byte(scmd, DID_REQUEUE);
+ break;
+ default:
+ set_host_byte(scmd, DID_BAD_TARGET);
+ break;
+ }
+}
+
+static inline void spraid_get_tag_from_scmd(struct scsi_cmnd *scmd,
+ u16 *qid, u16 *cid)
+{
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
+ *qid = blk_mq_unique_tag_to_hwq(tag) + 1;
+ *cid = blk_mq_unique_tag_to_tag(tag);
+}
+
+static int spraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_dev *hdev = shost_priv(shost);
+ struct scsi_device *sdev = scmd->device;
+ struct spraid_sdev_hostdata *hostdata;
+ struct spraid_ioq_command ioq_cmd;
+ struct spraid_queue *ioq;
+ unsigned long elapsed;
+ u16 hwq, cid;
+ int ret;
+
+ if (unlikely(!scmd)) {
+ dev_err(hdev->dev, "err, scmd is null\n");
+ return 0;
+ }
+
+ if (unlikely(hdev->state != SPRAID_LIVE)) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (log_debug_switch)
+ scsi_print_command(scmd);
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ hostdata = sdev->hostdata;
+ ioq = &hdev->queues[hwq];
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.rw.hdid = cpu_to_le32(hostdata->hdid);
+ ioq_cmd.rw.command_id = cid;
+
+ spraid_setup_ioq_cmd(hdev, &ioq_cmd, scmd);
+
+ ret = cid * SCSI_SENSE_BUFFERSIZE;
+ iod->sense = ioq->sense + ret;
+ iod->sense_dma = ioq->sense_dma_addr + ret;
+
+ ret = spraid_init_iod(hdev, iod, &ioq_cmd, scmd);
+ if (unlikely(ret))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ iod->spraidq = ioq;
+ ret = spraid_io_map_data(hdev, iod, scmd, &ioq_cmd);
+ if (unlikely(ret)) {
+ dev_err(hdev->dev, "spraid_io_map_data Err.\n");
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ ret = 0;
+ goto deinit_iod;
+ }
+
+ WRITE_ONCE(iod->state, SPRAID_CMD_IN_FLIGHT);
+ spraid_submit_cmd(ioq, &ioq_cmd);
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dev_log_dbg(hdev->dev,
+ "cid[%d] qid[%d] submit IO cost %3ld.%3ld seconds\n",
+ cid, hwq, elapsed / HZ, elapsed % HZ);
+ return 0;
+
+deinit_iod:
+ spraid_free_iod_res(hdev, iod);
+ return ret;
+}
+
+static int spraid_match_dev(struct spraid_dev *hdev, u16 idx,
+ struct scsi_device *sdev)
+{
+ if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[idx].flag)) {
+ if (sdev->channel == hdev->devices[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+ sdev->lun < hdev->devices[idx].lun) {
+ dev_info(hdev->dev,
+ "Match device success, channel;"
+ "target:lun[%d:%d:%d]\n",
+ hdev->devices[idx].channel,
+ hdev->devices[idx].target,
+ hdev->devices[idx].lun);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int spraid_slave_alloc(struct scsi_device *sdev)
+{
+ struct spraid_sdev_hostdata *hostdata;
+ struct spraid_dev *hdev;
+ u16 idx;
+
+ hdev = shost_priv(sdev->host);
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+ if (!hostdata) {
+ dev_err(hdev->dev, "Alloc scsi host data memory failed\n");
+ return -ENOMEM;
+ }
+
+ down_read(&hdev->devices_rwsem);
+ for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) {
+ if (spraid_match_dev(hdev, idx, sdev))
+ goto scan_host;
+ }
+ up_read(&hdev->devices_rwsem);
+
+ kfree(hostdata);
+ return -ENXIO;
+
+scan_host:
+ hostdata->hdid = le32_to_cpu(hdev->devices[idx].hdid);
+ hostdata->max_io_kb = le16_to_cpu(hdev->devices[idx].max_io_kb);
+ hostdata->attr = hdev->devices[idx].attr;
+ hostdata->flag = hdev->devices[idx].flag;
+ hostdata->rg_id = 0xff;
+ sdev->hostdata = hostdata;
+ up_read(&hdev->devices_rwsem);
+ return 0;
+}
+
+static void spraid_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+static int spraid_slave_configure(struct scsi_device *sdev)
+{
+ u16 idx;
+ unsigned int timeout = scmd_tmout_nonpt * HZ;
+ struct spraid_dev *hdev = shost_priv(sdev->host);
+ struct spraid_sdev_hostdata *hostdata = sdev->hostdata;
+ u32 max_sec = sdev->host->max_sectors;
+
+ if (hostdata) {
+ idx = hostdata->hdid - 1;
+ if (sdev->channel == hdev->devices[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+ sdev->lun < hdev->devices[idx].lun) {
+ if (SPRAID_DEV_INFO_ATTR_PT(hdev->devices[idx].attr))
+ timeout = 30 * HZ;
+ else
+ timeout = scmd_tmout_nonpt * HZ;
+ max_sec = le16_to_cpu(hdev->devices[idx].max_io_kb)
+ << 1;
+ } else {
+ dev_err(hdev->dev,
+ "[%s] err, sdev->channel:id:lun[%d:%d:%lld];"
+ "devices[%d], channel:target:lun[%d:%d:%d]\n",
+ __func__, sdev->channel, sdev->id, sdev->lun,
+ idx, hdev->devices[idx].channel,
+ hdev->devices[idx].target,
+ hdev->devices[idx].lun);
+ }
+ } else {
+ dev_err(hdev->dev, "[%s] err, sdev->hostdata is null\n",
+ __func__);
+ }
+
+ blk_queue_rq_timeout(sdev->request_queue, timeout);
+ sdev->eh_timeout = timeout;
+
+ if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
+ max_sec = sdev->host->max_sectors;
+
+ dev_info(hdev->dev,
+ "[%s] sdev->channel:id:lun[%d:%d:%lld];"
+ " scmd_timeout[%d]s, maxsec[%d]\n",
+ __func__, sdev->channel, sdev->id,
+ sdev->lun, timeout / HZ, max_sec);
+
+ return 0;
+}
+
+static void spraid_shost_init(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u8 domain, bus;
+ u32 dev_func;
+
+ domain = pci_domain_nr(pdev->bus);
+ bus = pdev->bus->number;
+ dev_func = pdev->devfn;
+
+ hdev->shost->nr_hw_queues = hdev->online_queues - 1;
+ hdev->shost->can_queue = (hdev->ioq_depth - SPRAID_PTCMDS_PERQ);
+
+ hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ /* 512B per sector */
+ hdev->shost->max_sectors =
+ (1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512;
+ hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV;
+ hdev->shost->max_channel =
+ le16_to_cpu(hdev->ctrl_info->max_channel) - 1;
+ hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id);
+ hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun);
+
+ hdev->shost->this_id = -1;
+ hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func;
+ hdev->shost->max_cmd_len = MAX_CDB_LEN;
+ hdev->shost->hostt->cmd_size = max(spraid_cmd_size(hdev, false, true),
+ spraid_cmd_size(hdev, false, false));
+}
+
+static inline void spraid_host_deinit(struct spraid_dev *hdev)
+{
+ ida_free(&spraid_instance_ida, hdev->instance);
+}
+
+static int spraid_alloc_queue(struct spraid_dev *hdev, u16 qid, u16 depth)
+{
+ struct spraid_queue *spraidq = &hdev->queues[qid];
+ int ret = 0;
+
+ if (hdev->queue_count > qid) {
+ dev_info(hdev->dev, "[%s] warn: queue[%d] is exist\n",
+ __func__, qid);
+ return 0;
+ }
+
+ spraidq->cqes = dma_alloc_coherent(hdev->dev, CQ_SIZE(depth),
+ &spraidq->cq_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!spraidq->cqes)
+ return -ENOMEM;
+
+ spraidq->sq_cmds = dma_alloc_coherent(hdev->dev, SQ_SIZE(qid, depth),
+ &spraidq->sq_dma_addr,
+ GFP_KERNEL);
+ if (!spraidq->sq_cmds) {
+ ret = -ENOMEM;
+ goto free_cqes;
+ }
+
+ spin_lock_init(&spraidq->sq_lock);
+ spin_lock_init(&spraidq->cq_lock);
+ spraidq->hdev = hdev;
+ spraidq->q_depth = depth;
+ spraidq->qid = qid;
+ spraidq->cq_vector = -1;
+ hdev->queue_count++;
+
+ /* alloc sense buffer */
+ spraidq->sense = dma_alloc_coherent(hdev->dev, SENSE_SIZE(depth),
+ &spraidq->sense_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!spraidq->sense) {
+ ret = -ENOMEM;
+ goto free_sq_cmds;
+ }
+
+ return 0;
+
+free_sq_cmds:
+ dma_free_coherent(hdev->dev, SQ_SIZE(qid, depth),
+ (void *)spraidq->sq_cmds, spraidq->sq_dma_addr);
+free_cqes:
+ dma_free_coherent(hdev->dev, CQ_SIZE(depth), (void *)spraidq->cqes,
+ spraidq->cq_dma_addr);
+ return ret;
+}
+
+static int spraid_wait_ready(struct spraid_dev *hdev, u64 cap, bool enabled)
+{
+ unsigned long timeout =
+ ((SPRAID_CAP_TIMEOUT(cap) + 1) * SPRAID_CAP_TIMEOUT_UNIT_MS) + jiffies;
+ u32 bit = enabled ? SPRAID_CSTS_RDY : 0;
+
+ while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY) != bit) {
+ usleep_range(1000, 2000);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev, "Device not ready; aborting %s\n",
+ enabled ? "initialisation" : "reset");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int spraid_shutdown_ctrl(struct spraid_dev *hdev)
+{
+ unsigned long timeout = hdev->ctrl_info->rtd3e + jiffies;
+
+ hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK;
+ hdev->ctrl_config |= SPRAID_CC_SHN_NORMAL;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_SHST_MASK) !=
+ SPRAID_CSTS_SHST_CMPLT) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev,
+ "Device shutdown incomplete; abort shutdown\n");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int spraid_disable_ctrl(struct spraid_dev *hdev)
+{
+ hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK;
+ hdev->ctrl_config &= ~SPRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ return spraid_wait_ready(hdev, hdev->cap, false);
+}
+
+static int spraid_enable_ctrl(struct spraid_dev *hdev)
+{
+ u64 cap = hdev->cap;
+ u32 dev_page_min = SPRAID_CAP_MPSMIN(cap) + 12;
+ u32 page_shift = PAGE_SHIFT;
+
+ if (page_shift < dev_page_min) {
+ dev_err(hdev->dev,
+ "Minimum device page size[%u], too large for host[%u]\n",
+ 1U << dev_page_min, 1U << page_shift);
+ return -ENODEV;
+ }
+
+ page_shift = min_t(unsigned int, SPRAID_CAP_MPSMAX(cap) + 12,
+ PAGE_SHIFT);
+ hdev->page_size = 1U << page_shift;
+
+ hdev->ctrl_config = SPRAID_CC_CSS_NVM;
+ hdev->ctrl_config |= (page_shift - 12) << SPRAID_CC_MPS_SHIFT;
+ hdev->ctrl_config |= SPRAID_CC_AMS_RR | SPRAID_CC_SHN_NONE;
+ hdev->ctrl_config |= SPRAID_CC_IOSQES | SPRAID_CC_IOCQES;
+ hdev->ctrl_config |= SPRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ return spraid_wait_ready(hdev, cap, true);
+}
+
+static void spraid_init_queue(struct spraid_queue *spraidq, u16 qid)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ memset((void *)spraidq->cqes, 0, CQ_SIZE(spraidq->q_depth));
+
+ spraidq->sq_tail = 0;
+ spraidq->cq_head = 0;
+ spraidq->cq_phase = 1;
+ spraidq->q_db = &hdev->dbs[qid * 2 * hdev->db_stride];
+ spraidq->prp_small_pool = hdev->prp_small_pool[qid % small_pool_num];
+ hdev->online_queues++;
+}
+
+static inline bool spraid_cqe_pending(struct spraid_queue *spraidq)
+{
+ return (le16_to_cpu(spraidq->cqes[spraidq->cq_head].status) & 1) ==
+ spraidq->cq_phase;
+}
+
+static void spraid_sata_report_zone_handle(struct scsi_cmnd *scmd,
+ struct spraid_iod *iod)
+{
+ int i = 0;
+ unsigned int bytes = 0;
+ struct scatterlist *sg = scsi_sglist(scmd);
+
+ scsi_for_each_sg(scmd, sg, iod->nsge, i) {
+ unsigned int offset = 0;
+
+ if (bytes == 0) {
+ char *hdr;
+ u32 list_length;
+ u64 max_lba, opt_lba;
+ u16 same;
+
+ hdr = sg_virt(sg);
+
+ list_length = get_unaligned_le32(&hdr[0]);
+ same = get_unaligned_le16(&hdr[4]);
+ max_lba = get_unaligned_le64(&hdr[8]);
+ opt_lba = get_unaligned_le64(&hdr[16]);
+ put_unaligned_be32(list_length, &hdr[0]);
+ hdr[4] = same & 0xf;
+ put_unaligned_be64(max_lba, &hdr[8]);
+ put_unaligned_be64(opt_lba, &hdr[16]);
+ offset += 64;
+ bytes += 64;
+ }
+ while (offset < sg_dma_len(sg)) {
+ char *rec;
+ u8 cond, type, non_seq, reset;
+ u64 size, start, wp;
+
+ rec = sg_virt(sg) + offset;
+ type = rec[0] & 0xf;
+ cond = (rec[1] >> 4) & 0xf;
+ non_seq = (rec[1] & 2);
+ reset = (rec[1] & 1);
+ size = get_unaligned_le64(&rec[8]);
+ start = get_unaligned_le64(&rec[16]);
+ wp = get_unaligned_le64(&rec[24]);
+ rec[0] = type;
+ rec[1] = (cond << 4) | non_seq | reset;
+ put_unaligned_be64(size, &rec[8]);
+ put_unaligned_be64(start, &rec[16]);
+ put_unaligned_be64(wp, &rec[24]);
+ WARN_ON(offset + 64 > sg_dma_len(sg));
+ offset += 64;
+ bytes += 64;
+ }
+ }
+}
+
+static inline void spraid_handle_ata_cmd(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd,
+ struct spraid_iod *iod)
+{
+ if (hdev->ctrl_info->card_type != SPRAID_CARD_HBA)
+ return;
+
+ switch (scmd->cmnd[0]) {
+ case ZBC_IN:
+ dev_info(hdev->dev, "[%s] process report zone\n", __func__);
+ spraid_sata_report_zone_handle(scmd, iod);
+ break;
+ default:
+ break;
+ }
+}
+
+static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct spraid_iod *iod;
+ struct request *req;
+ unsigned long elapsed;
+
+ tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+ req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
+ if (unlikely(!req || !blk_mq_request_started(req))) {
+ dev_warn(hdev->dev, "Invalid id %d completed on queue %d\n",
+ cqe->cmd_id, ioq->qid);
+ return;
+ }
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dev_log_dbg(hdev->dev,
+ "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n",
+ cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
+
+ if (cmpxchg(&iod->state, SPRAID_CMD_IN_FLIGHT, SPRAID_CMD_COMPLETE) !=
+ SPRAID_CMD_IN_FLIGHT) {
+ dev_warn(hdev->dev,
+ "cid[%d] qid[%d] enters abnormal handler;"
+ " cost %3ld.%3ld seconds\n",
+ cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
+ WRITE_ONCE(iod->state, SPRAID_CMD_TMO_COMPLETE);
+
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ spraid_free_iod_res(hdev, iod);
+
+ return;
+ }
+
+ spraid_handle_ata_cmd(hdev, scmd, iod);
+
+ spraid_map_status(iod, scmd, cqe);
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ spraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+}
+
+static void spraid_complete_adminq_cmnd(struct spraid_queue *adminq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = adminq->hdev;
+ struct spraid_cmd *adm_cmd;
+
+ adm_cmd = hdev->adm_cmds + cqe->cmd_id;
+ if (unlikely(adm_cmd->state == SPRAID_CMD_IDLE)) {
+ dev_warn(adminq->hdev->dev,
+ "Invalid id %d completed on queue %d\n",
+ cqe->cmd_id, le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+ adm_cmd->result0 = le32_to_cpu(cqe->result);
+ adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&adm_cmd->cmd_done);
+}
+
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid);
+
+static void spraid_complete_aen(struct spraid_queue *spraidq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+ u32 result = le32_to_cpu(cqe->result);
+
+ dev_info(hdev->dev, "rcv aen, cid[%d], status[0x%x], result[0x%x]\n",
+ cqe->cmd_id, le16_to_cpu(cqe->status) >> 1, result);
+
+ spraid_send_aen(hdev, cqe->cmd_id);
+
+ if ((le16_to_cpu(cqe->status) >> 1) != SPRAID_SC_SUCCESS)
+ return;
+ switch (result & 0x7) {
+ case SPRAID_AEN_NOTICE:
+ spraid_handle_aen_notice(hdev, result);
+ break;
+ case SPRAID_AEN_VS:
+ spraid_handle_aen_vs(hdev, result, le32_to_cpu(cqe->result1));
+ break;
+ default:
+ dev_warn(hdev->dev, "Unsupported async event type: %u\n",
+ result & 0x7);
+ break;
+ }
+}
+
+static void spraid_complete_ioq_sync_cmnd(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct spraid_cmd *ptcmd;
+
+ ptcmd = hdev->ioq_ptcmds + (ioq->qid - 1) * SPRAID_PTCMDS_PERQ +
+ cqe->cmd_id - SPRAID_IO_BLK_MQ_DEPTH;
+
+ ptcmd->status = le16_to_cpu(cqe->status) >> 1;
+ ptcmd->result0 = le32_to_cpu(cqe->result);
+ ptcmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&ptcmd->cmd_done);
+}
+
+static inline void spraid_handle_cqe(struct spraid_queue *spraidq, u16 idx)
+{
+ struct spraid_completion *cqe = &spraidq->cqes[idx];
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ if (unlikely(cqe->cmd_id >= spraidq->q_depth)) {
+ dev_err(hdev->dev,
+ "Invalid command id[%d] completed on queue %d\n",
+ cqe->cmd_id, cqe->sq_id);
+ return;
+ }
+
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d];"
+ " result[0x%x], sq_id[%d], status[0x%x]\n",
+ cqe->cmd_id, spraidq->qid, le32_to_cpu(cqe->result),
+ le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
+
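+ /*
+ * cmd_id encodes the origin of the completion: admin-queue ids at or
+ * above SPRAID_AQ_BLK_MQ_DEPTH are async events, io-queue ids at or
+ * above SPRAID_IO_BLK_MQ_DEPTH are synchronous passthrough commands,
+ * anything else is a regular admin or scsi command.
+ */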
+ if (unlikely(spraidq->qid == 0
+ && cqe->cmd_id >= SPRAID_AQ_BLK_MQ_DEPTH)) {
+ spraid_complete_aen(spraidq, cqe);
+ return;
+ }
+
+ if (unlikely(spraidq->qid && cqe->cmd_id >= SPRAID_IO_BLK_MQ_DEPTH)) {
+ spraid_complete_ioq_sync_cmnd(spraidq, cqe);
+ return;
+ }
+
+ if (spraidq->qid)
+ spraid_complete_ioq_cmnd(spraidq, cqe);
+ else
+ spraid_complete_adminq_cmnd(spraidq, cqe);
+}
+
+static void spraid_complete_cqes(struct spraid_queue *spraidq,
+ u16 start, u16 end)
+{
+ while (start != end) {
+ spraid_handle_cqe(spraidq, start);
+ if (++start == spraidq->q_depth)
+ start = 0;
+ }
+}
+
+static inline void spraid_update_cq_head(struct spraid_queue *spraidq)
+{
+ if (++spraidq->cq_head == spraidq->q_depth) {
+ spraidq->cq_head = 0;
+ spraidq->cq_phase = !spraidq->cq_phase;
+ }
+}
+
+static inline bool spraid_process_cq(struct spraid_queue *spraidq,
+ u16 *start, u16 *end, int tag)
+{
+ bool found = false;
+
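+ /*
+ * Scan CQ entries while the phase bit marks them valid, stopping once
+ * the entry for @tag shows up, then ring the CQ doorbell with the new
+ * head.
+ */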
+ *start = spraidq->cq_head;
+ while (!found && spraid_cqe_pending(spraidq)) {
+ if (spraidq->cqes[spraidq->cq_head].cmd_id == tag)
+ found = true;
+ spraid_update_cq_head(spraidq);
+ }
+ *end = spraidq->cq_head;
+
+ if (*start != *end)
+ writel(spraidq->cq_head,
+ spraidq->q_db + spraidq->hdev->db_stride);
+
+ return found;
+}
+
+static bool spraid_poll_cq(struct spraid_queue *spraidq, int cid)
+{
+ u16 start, end;
+ bool found;
+
+ if (!spraid_cqe_pending(spraidq))
+ return false;
+
+ spin_lock_irq(&spraidq->cq_lock);
+ found = spraid_process_cq(spraidq, &start, &end, cid);
+ spin_unlock_irq(&spraidq->cq_lock);
+
+ spraid_complete_cqes(spraidq, start, end);
+ return found;
+}
+
+static irqreturn_t spraid_irq(int irq, void *data)
+{
+ struct spraid_queue *spraidq = data;
+ irqreturn_t ret = IRQ_NONE;
+ u16 start, end;
+
+ spin_lock(&spraidq->cq_lock);
+ if (spraidq->cq_head != spraidq->last_cq_head)
+ ret = IRQ_HANDLED;
+
+ spraid_process_cq(spraidq, &start, &end, -1);
+ spraidq->last_cq_head = spraidq->cq_head;
+ spin_unlock(&spraidq->cq_lock);
+
+ if (start != end) {
+ spraid_complete_cqes(spraidq, start, end);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+static int spraid_setup_admin_queue(struct spraid_dev *hdev)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u32 aqa;
+ int ret;
+
+ dev_info(hdev->dev, "[%s] start disable ctrl\n", __func__);
+
+ ret = spraid_disable_ctrl(hdev);
+ if (ret)
+ return ret;
+
+ ret = spraid_alloc_queue(hdev, 0, SPRAID_AQ_DEPTH);
+ if (ret)
+ return ret;
+
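+ /* Program the admin queue depth (AQA) and the SQ/CQ base addresses. */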
+ aqa = adminq->q_depth - 1;
+ aqa |= aqa << 16;
+ writel(aqa, hdev->bar + SPRAID_REG_AQA);
+ lo_hi_writeq(adminq->sq_dma_addr, hdev->bar + SPRAID_REG_ASQ);
+ lo_hi_writeq(adminq->cq_dma_addr, hdev->bar + SPRAID_REG_ACQ);
+
+ dev_info(hdev->dev, "[%s] start enable ctrl\n", __func__);
+
+ ret = spraid_enable_ctrl(hdev);
+ if (ret) {
+ ret = -ENODEV;
+ goto free_queue;
+ }
+
+ adminq->cq_vector = 0;
+ spraid_init_queue(adminq, 0);
+ ret = pci_request_irq(hdev->pdev, adminq->cq_vector, spraid_irq, NULL,
+ adminq, "spraid%d_q%d",
+ hdev->instance, adminq->qid);
+
+ if (ret) {
+ adminq->cq_vector = -1;
+ hdev->online_queues--;
+ goto free_queue;
+ }
+
+ dev_info(hdev->dev, "[%s] success, queuecount:[%d], onlinequeue:[%d]\n",
+ __func__, hdev->queue_count, hdev->online_queues);
+
+ return 0;
+
+free_queue:
+ spraid_free_queue(adminq);
+ return ret;
+}
+
+static u32 spraid_bar_size(struct spraid_dev *hdev, u32 nr_ioqs)
+{
+ return (SPRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride));
+}
+
+static int spraid_alloc_admin_cmds(struct spraid_dev *hdev)
+{
+ int i;
+
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+ spin_lock_init(&hdev->adm_cmd_lock);
+
+ hdev->adm_cmds = kcalloc_node(SPRAID_AQ_BLK_MQ_DEPTH,
+ sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->adm_cmds) {
+ dev_err(hdev->dev, "Alloc admin cmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SPRAID_AQ_BLK_MQ_DEPTH; i++) {
+ hdev->adm_cmds[i].qid = 0;
+ hdev->adm_cmds[i].cid = i;
+ list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
+ }
+
+ dev_info(hdev->dev, "Alloc admin cmds success, num[%d]\n",
+ SPRAID_AQ_BLK_MQ_DEPTH);
+
+ return 0;
+}
+
+static void spraid_free_admin_cmds(struct spraid_dev *hdev)
+{
+ kfree(hdev->adm_cmds);
+ hdev->adm_cmds = NULL;
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
+static struct spraid_cmd *spraid_get_cmd(struct spraid_dev *hdev,
+ enum spraid_cmd_type type)
+{
+ struct spraid_cmd *cmd = NULL;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(slock, flags);
+ dev_err(hdev->dev, "err, cmd[%d] list empty\n", type);
+ return NULL;
+ }
+ cmd = list_entry(head->next, struct spraid_cmd, list);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(slock, flags);
+
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IN_FLIGHT);
+
+ return cmd;
+}
+
+static void spraid_put_cmd(struct spraid_dev *hdev, struct spraid_cmd *cmd,
+ enum spraid_cmd_type type)
+{
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IDLE);
+ list_add_tail(&cmd->list, head);
+ spin_unlock_irqrestore(slock, flags);
+}
+
+
+static int spraid_submit_admin_sync_cmd(struct spraid_dev *hdev,
+ struct spraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout)
+{
+ struct spraid_cmd *adm_cmd = spraid_get_cmd(hdev, SPRAID_CMD_ADM);
+
+ if (!adm_cmd) {
+ dev_err(hdev->dev, "err, get admin cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&adm_cmd->cmd_done);
+
+ cmd->common.command_id = adm_cmd->cid;
+ spraid_submit_cmd(&hdev->queues[0], cmd);
+
+ if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout;"
+ " opcode[0x%x] subopcode[0x%x]\n",
+ __func__, adm_cmd->cid, adm_cmd->qid,
+ cmd->usr_cmd.opcode, cmd->usr_cmd.info_0.subopcode);
+ WRITE_ONCE(adm_cmd->state, SPRAID_CMD_TIMEOUT);
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+ return -EINVAL;
+ }
+
+ if (result0)
+ *result0 = adm_cmd->result0;
+ if (result1)
+ *result1 = adm_cmd->result1;
+
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+
+ return adm_cmd->status;
+}
+
+static int spraid_create_cq(struct spraid_dev *hdev, u16 qid,
+ struct spraid_queue *spraidq, u16 cq_vector)
+{
+ struct spraid_admin_command admin_cmd;
+ int flags = SPRAID_QUEUE_PHYS_CONTIG | SPRAID_CQ_IRQ_ENABLED;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_cq.opcode = SPRAID_ADMIN_CREATE_CQ;
+ admin_cmd.create_cq.prp1 = cpu_to_le64(spraidq->cq_dma_addr);
+ admin_cmd.create_cq.cqid = cpu_to_le16(qid);
+ admin_cmd.create_cq.qsize = cpu_to_le16(spraidq->q_depth - 1);
+ admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
+ admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static int spraid_create_sq(struct spraid_dev *hdev, u16 qid,
+ struct spraid_queue *spraidq)
+{
+ struct spraid_admin_command admin_cmd;
+ int flags = SPRAID_QUEUE_PHYS_CONTIG;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_sq.opcode = SPRAID_ADMIN_CREATE_SQ;
+ admin_cmd.create_sq.prp1 = cpu_to_le64(spraidq->sq_dma_addr);
+ admin_cmd.create_sq.sqid = cpu_to_le16(qid);
+ admin_cmd.create_sq.qsize = cpu_to_le16(spraidq->q_depth - 1);
+ admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
+ admin_cmd.create_sq.cqid = cpu_to_le16(qid);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static void spraid_free_queue(struct spraid_queue *spraidq)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ hdev->queue_count--;
+ dma_free_coherent(hdev->dev, CQ_SIZE(spraidq->q_depth),
+ (void *)spraidq->cqes, spraidq->cq_dma_addr);
+ dma_free_coherent(hdev->dev, SQ_SIZE(spraidq->qid, spraidq->q_depth),
+ spraidq->sq_cmds, spraidq->sq_dma_addr);
+ dma_free_coherent(hdev->dev, SENSE_SIZE(spraidq->q_depth),
+ spraidq->sense, spraidq->sense_dma_addr);
+}
+
+static void spraid_free_admin_queue(struct spraid_dev *hdev)
+{
+ spraid_free_queue(&hdev->queues[0]);
+}
+
+static void spraid_free_io_queues(struct spraid_dev *hdev)
+{
+ int i;
+
+ for (i = hdev->queue_count - 1; i >= 1; i--)
+ spraid_free_queue(&hdev->queues[i]);
+}
+
+static int spraid_delete_queue(struct spraid_dev *hdev, u8 op, u16 id)
+{
+ struct spraid_admin_command admin_cmd;
+ int ret;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.delete_queue.opcode = op;
+ admin_cmd.delete_queue.qid = cpu_to_le16(id);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (ret)
+ dev_err(hdev->dev, "Delete %s:[%d] failed\n",
+ (op == SPRAID_ADMIN_DELETE_CQ) ? "cq" : "sq", id);
+
+ return ret;
+}
+
+static int spraid_delete_cq(struct spraid_dev *hdev, u16 cqid)
+{
+ return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_CQ, cqid);
+}
+
+static int spraid_delete_sq(struct spraid_dev *hdev, u16 sqid)
+{
+ return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_SQ, sqid);
+}
+
+static int spraid_create_queue(struct spraid_queue *spraidq, u16 qid)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+ u16 cq_vector;
+ int ret;
+
+ cq_vector = (hdev->num_vecs == 1) ? 0 : qid;
+ ret = spraid_create_cq(hdev, qid, spraidq, cq_vector);
+ if (ret)
+ return ret;
+
+ ret = spraid_create_sq(hdev, qid, spraidq);
+ if (ret)
+ goto delete_cq;
+
+ spraid_init_queue(spraidq, qid);
+ spraidq->cq_vector = cq_vector;
+
+ ret = pci_request_irq(hdev->pdev, cq_vector, spraid_irq, NULL,
+ spraidq, "spraid%d_q%d", hdev->instance, qid);
+
+ if (ret) {
+ dev_err(hdev->dev, "Request queue[%d] irq failed\n", qid);
+ goto delete_sq;
+ }
+
+ return 0;
+
+delete_sq:
+ spraidq->cq_vector = -1;
+ hdev->online_queues--;
+ spraid_delete_sq(hdev, qid);
+delete_cq:
+ spraid_delete_cq(hdev, qid);
+
+ return ret;
+}
+
+static int spraid_create_io_queues(struct spraid_dev *hdev)
+{
+ u32 i, max;
+ int ret = 0;
+
+ max = min(hdev->max_qid, hdev->queue_count - 1);
+ for (i = hdev->online_queues; i <= max; i++) {
+ ret = spraid_create_queue(&hdev->queues[i], i);
+ if (ret) {
+ dev_err(hdev->dev, "Create queue[%d] failed\n", i);
+ break;
+ }
+ }
+
+ dev_info(hdev->dev, "[%s] queue_count[%d], online_queue[%d]",
+ __func__, hdev->queue_count, hdev->online_queues);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+static int spraid_set_features(struct spraid_dev *hdev, u32 fid,
+ u32 dword11, void *buffer,
+ size_t buflen, u32 *result)
+{
+ struct spraid_admin_command admin_cmd;
+ int ret;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+
+ if (buffer && buflen) {
+ data_ptr = dma_alloc_coherent(hdev->dev, buflen,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memcpy(data_ptr, buffer, buflen);
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.features.opcode = SPRAID_ADMIN_SET_FEATURES;
+ admin_cmd.features.fid = cpu_to_le32(fid);
+ admin_cmd.features.dword11 = cpu_to_le32(dword11);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, result, NULL, 0);
+
+ if (data_ptr)
+ dma_free_coherent(hdev->dev, buflen, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_configure_timestamp(struct spraid_dev *hdev)
+{
+ __le64 ts;
+ int ret;
+
+ ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
+ ret = spraid_set_features(hdev, SPRAID_FEAT_TIMESTAMP,
+ 0, &ts, sizeof(ts), NULL);
+
+ if (ret)
+ dev_err(hdev->dev, "set timestamp failed: %d\n", ret);
+ return ret;
+}
+
+static int spraid_set_queue_cnt(struct spraid_dev *hdev, u32 *cnt)
+{
+ u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16);
+ u32 nr_ioqs, result;
+ int status;
+
+ status = spraid_set_features(hdev, SPRAID_FEAT_NUM_QUEUES,
+ q_cnt, NULL, 0, &result);
+ if (status) {
+ dev_err(hdev->dev, "Set queue count failed, status: %d\n",
+ status);
+ return -EIO;
+ }
+
+ nr_ioqs = min(result & 0xffff, result >> 16) + 1;
+ *cnt = min(*cnt, nr_ioqs);
+ if (*cnt == 0) {
+ dev_err(hdev->dev, "Illegal queue count: zero\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int spraid_setup_io_queues(struct spraid_dev *hdev)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ struct pci_dev *pdev = hdev->pdev;
+ u32 nr_ioqs = num_online_cpus();
+ u32 i, size;
+ int ret;
+
+ struct irq_affinity affd = {
+ .pre_vectors = 1
+ };
+
+ ret = spraid_set_queue_cnt(hdev, &nr_ioqs);
+ if (ret < 0)
+ return ret;
+
+ size = spraid_bar_size(hdev, nr_ioqs);
+ ret = spraid_remap_bar(hdev, size);
+ if (ret)
+ return -ENOMEM;
+
+ adminq->q_db = hdev->dbs;
+
+ pci_free_irq(pdev, 0, adminq);
+ pci_free_irq_vectors(pdev);
+
+ ret = pci_alloc_irq_vectors_affinity(pdev, 1, (nr_ioqs + 1),
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ if (ret <= 0)
+ return -EIO;
+
+ hdev->num_vecs = ret;
+
+ hdev->max_qid = max(ret - 1, 1);
+
+ ret = pci_request_irq(pdev, adminq->cq_vector, spraid_irq, NULL,
+ adminq, "spraid%d_q%d",
+ hdev->instance, adminq->qid);
+ if (ret) {
+ dev_err(hdev->dev, "Request admin irq failed\n");
+ adminq->cq_vector = -1;
+ return ret;
+ }
+
+ for (i = hdev->queue_count; i <= hdev->max_qid; i++) {
+ ret = spraid_alloc_queue(hdev, i, hdev->ioq_depth);
+ if (ret)
+ break;
+ }
+ dev_info(hdev->dev, "[%s] max_qid: %d, queue_count: %d;"
+ " online_queue: %d, ioq_depth: %d\n",
+ __func__, hdev->max_qid, hdev->queue_count,
+ hdev->online_queues, hdev->ioq_depth);
+
+ return spraid_create_io_queues(hdev);
+}
+
+static void spraid_delete_io_queues(struct spraid_dev *hdev)
+{
+ u16 queues = hdev->online_queues - 1;
+ u8 opcode = SPRAID_ADMIN_DELETE_SQ;
+ u16 i, pass;
+
+ if (!pci_device_is_present(hdev->pdev)) {
+ dev_err(hdev->dev,
+ "pci_device is not present, skip disable io queues\n");
+ return;
+ }
+
+ if (hdev->online_queues < 2) {
+ dev_err(hdev->dev, "[%s] err, io queue has been delete\n",
+ __func__);
+ return;
+ }
+
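+ /* Pass 0 deletes the IO submission queues, pass 1 the completion queues. */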
+ for (pass = 0; pass < 2; pass++) {
+ for (i = queues; i > 0; i--)
+ if (spraid_delete_queue(hdev, opcode, i))
+ break;
+
+ opcode = SPRAID_ADMIN_DELETE_CQ;
+ }
+}
+
+static void spraid_remove_io_queues(struct spraid_dev *hdev)
+{
+ spraid_delete_io_queues(hdev);
+ spraid_free_io_queues(hdev);
+}
+
+static void spraid_pci_disable(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u32 i;
+
+ for (i = 0; i < hdev->online_queues; i++)
+ pci_free_irq(pdev, hdev->queues[i].cq_vector, &hdev->queues[i]);
+ pci_free_irq_vectors(pdev);
+ if (pci_is_enabled(pdev)) {
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
+ hdev->online_queues = 0;
+}
+
+static void spraid_disable_admin_queue(struct spraid_dev *hdev, bool shutdown)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ spraid_shutdown_ctrl(hdev);
+ else
+ spraid_disable_ctrl(hdev);
+ }
+
+ if (hdev->queue_count == 0) {
+ dev_err(hdev->dev, "[%s] err, admin queue has been delete\n",
+ __func__);
+ return;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ spraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+
+ spraid_complete_cqes(adminq, start, end);
+ spraid_free_admin_queue(hdev);
+}
+
+static int spraid_create_dma_pools(struct spraid_dev *hdev)
+{
+ int i;
+ char poolname[20] = { 0 };
+
+ hdev->prp_page_pool = dma_pool_create("prp list page", hdev->dev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+
+ if (!hdev->prp_page_pool) {
+ dev_err(hdev->dev, "create prp_page_pool failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < small_pool_num; i++) {
+ sprintf(poolname, "prp_list_256_%d", i);
+ hdev->prp_small_pool[i] =
+ dma_pool_create(poolname, hdev->dev, SMALL_POOL_SIZE,
+ SMALL_POOL_SIZE, 0);
+
+ if (!hdev->prp_small_pool[i]) {
+ dev_err(hdev->dev, "create prp_small_pool %d failed\n",
+ i);
+ goto destroy_prp_small_pool;
+ }
+ }
+
+ return 0;
+
+destroy_prp_small_pool:
+ while (i > 0)
+ dma_pool_destroy(hdev->prp_small_pool[--i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+
+ return -ENOMEM;
+}
+
+static void spraid_destroy_dma_pools(struct spraid_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < small_pool_num; i++)
+ dma_pool_destroy(hdev->prp_small_pool[i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+}
+
+static int spraid_get_dev_list(struct spraid_dev *hdev,
+ struct spraid_dev_info *devices)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ struct spraid_admin_command admin_cmd;
+ struct spraid_dev_list *list_buf;
+ dma_addr_t data_dma = 0;
+ u32 i, idx, hdid, ndev;
+ int ret = 0;
+
+ list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!list_buf)
+ return -ENOMEM;
+
+ for (idx = 0; idx < nd;) {
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SPRAID_GET_INFO_DEV_LIST;
+ admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd,
+ NULL, NULL, 0);
+
+ if (ret) {
+ dev_err(hdev->dev, "Get device list failed, nd: %u;"
+ "idx: %u, ret: %d\n",
+ nd, idx, ret);
+ goto out;
+ }
+ ndev = le32_to_cpu(list_buf->dev_num);
+
+ dev_info(hdev->dev, "ndev numbers: %u\n", ndev);
+
+ for (i = 0; i < ndev; i++) {
+ hdid = le32_to_cpu(list_buf->devices[i].hdid);
+ dev_info(hdev->dev, "list_buf->devices[%d], hdid: %u;"
+ "target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ i, hdid,
+ le16_to_cpu(list_buf->devices[i].target),
+ list_buf->devices[i].channel,
+ list_buf->devices[i].lun,
+ list_buf->devices[i].attr);
+ if (hdid > nd || hdid == 0) {
+ dev_err(hdev->dev, "err, hdid[%d] invalid\n",
+ hdid);
+ continue;
+ }
+ memcpy(&devices[hdid - 1], &list_buf->devices[i],
+ sizeof(struct spraid_dev_info));
+ }
+ idx += ndev;
+
+ if (idx < MAX_DEV_ENTRY_PER_PAGE_4K)
+ break;
+ }
+
+out:
+ dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, data_dma);
+ return ret;
+}
+
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = SPRAID_ADMIN_ASYNC_EVENT;
+ admin_cmd.common.command_id = cid;
+
+ spraid_submit_cmd(adminq, &admin_cmd);
+ dev_info(hdev->dev, "send aen, cid[%d]\n", cid);
+}
+
+static inline void spraid_send_all_aen(struct spraid_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->ctrl_info->aerl; i++)
+ spraid_send_aen(hdev, i + SPRAID_AQ_BLK_MQ_DEPTH);
+}
+
+static int spraid_add_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "add device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel,
+ le16_to_cpu(device->target), 0);
+ if (sdev) {
+ dev_warn(hdev->dev, "Device is already exist, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ scsi_device_put(sdev);
+ return -EEXIST;
+ }
+ scsi_add_device(shost, device->channel, le16_to_cpu(device->target), 0);
+ return 0;
+}
+
+static int spraid_rescan_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "rescan device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel,
+ le16_to_cpu(device->target), 0);
+ if (!sdev) {
+ dev_warn(hdev->dev, "device is not exit rescan it, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int spraid_remove_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *org_device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "remove device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(org_device->hdid), le16_to_cpu(org_device->target),
+ org_device->channel, org_device->lun, org_device->attr);
+
+ sdev = scsi_device_lookup(shost, org_device->channel,
+ le16_to_cpu(org_device->target), 0);
+ if (!sdev) {
+ dev_warn(hdev->dev, "device is not exit remove it, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ org_device->channel,
+ le16_to_cpu(org_device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int spraid_dev_list_init(struct spraid_dev *hdev)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ int i, ret;
+
+ hdev->devices = kzalloc_node(nd * sizeof(struct spraid_dev_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->devices)
+ return -ENOMEM;
+
+ ret = spraid_get_dev_list(hdev, hdev->devices);
+ if (ret) {
+ dev_err(hdev->dev,
+ "Ignore failure of getting device list;"
+ " within initialization\n");
+ return 0;
+ }
+
+ for (i = 0; i < nd; i++) {
+ if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[i].flag) &&
+ SPRAID_DEV_INFO_ATTR_BOOT(hdev->devices[i].attr)) {
+ spraid_add_device(hdev, &hdev->devices[i]);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int luntarget_cmp_func(const void *l, const void *r)
+{
+ const struct spraid_dev_info *ln = l;
+ const struct spraid_dev_info *rn = r;
+
+ if (ln->channel == rn->channel)
+ return le16_to_cpu(ln->target) - le16_to_cpu(rn->target);
+
+ return ln->channel - rn->channel;
+}
+
+static void spraid_scan_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, scan_work);
+ struct spraid_dev_info *devices, *org_devices;
+ struct spraid_dev_info *sortdevice;
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ u8 flag, org_flag;
+ int i, ret;
+ int count = 0;
+
+ devices = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+ if (!devices)
+ return;
+
+ sortdevice = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+ if (!sortdevice)
+ goto free_list;
+
+ ret = spraid_get_dev_list(hdev, devices);
+ if (ret)
+ goto free_all;
+ org_devices = hdev->devices;
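+ /*
+ * Diff the freshly fetched list against the cached one: newly valid
+ * devices are added (sorted below), changed ones rescanned and
+ * no-longer-valid ones removed.
+ */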
+ for (i = 0; i < nd; i++) {
+ org_flag = org_devices[i].flag;
+ flag = devices[i].flag;
+
+ dev_log_dbg(hdev->dev, "i: %d, org_flag: 0x%x, flag: 0x%x\n",
+ i, org_flag, flag);
+
+ if (SPRAID_DEV_INFO_FLAG_VALID(flag)) {
+ if (!SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ memcpy(&org_devices[i], &devices[i],
+ sizeof(struct spraid_dev_info));
+ memcpy(&sortdevice[count++], &devices[i],
+ sizeof(struct spraid_dev_info));
+ up_write(&hdev->devices_rwsem);
+ } else if (SPRAID_DEV_INFO_FLAG_CHANGE(flag)) {
+ spraid_rescan_device(hdev, &devices[i]);
+ }
+ } else {
+ if (SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ org_devices[i].flag &= 0xfe;
+ up_write(&hdev->devices_rwsem);
+ spraid_remove_device(hdev, &org_devices[i]);
+ }
+ }
+ }
+
+ dev_info(hdev->dev, "scan work add device count = %d\n", count);
+
+ sort(sortdevice, count, sizeof(sortdevice[0]),
+ luntarget_cmp_func, NULL);
+
+ for (i = 0; i < count; i++)
+ spraid_add_device(hdev, &sortdevice[i]);
+
+free_all:
+ kfree(sortdevice);
+free_list:
+ kfree(devices);
+}
+
+static void spraid_timesyn_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, timesyn_work);
+
+ spraid_configure_timestamp(hdev);
+}
+
+static int spraid_init_ctrl_info(struct spraid_dev *hdev);
+static void spraid_fw_act_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, fw_act_work);
+
+ if (spraid_init_ctrl_info(hdev))
+ dev_err(hdev->dev, "get ctrl info failed after fw act\n");
+}
+
+static void spraid_queue_scan(struct spraid_dev *hdev)
+{
+ queue_work(spraid_wq, &hdev->scan_work);
+}
+
+static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SPRAID_AEN_DEV_CHANGED:
+ spraid_queue_scan(hdev);
+ break;
+ case SPRAID_AEN_FW_ACT_START:
+ dev_info(hdev->dev, "fw activation starting\n");
+ break;
+ case SPRAID_AEN_HOST_PROBING:
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result %08x\n", result);
+ }
+}
+
+static void spraid_handle_aen_vs(struct spraid_dev *hdev,
+ u32 result, u32 result1)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SPRAID_AEN_TIMESYN:
+ queue_work(spraid_wq, &hdev->timesyn_work);
+ break;
+ case SPRAID_AEN_FW_ACT_FINISH:
+ dev_info(hdev->dev, "fw activation finish\n");
+ queue_work(spraid_wq, &hdev->fw_act_work);
+ break;
+ case SPRAID_AEN_EVENT_MIN ... SPRAID_AEN_EVENT_MAX:
+ dev_info(hdev->dev, "rcv card event[%d];"
+ " param1[0x%x] param2[0x%x]\n",
+ (result & 0xff00) >> 8, result, result1);
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result: 0x%x\n", result);
+ }
+}
+
+static int spraid_alloc_resources(struct spraid_dev *hdev)
+{
+ int ret, nqueue;
+
+ ret = ida_alloc(&spraid_instance_ida, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(hdev->dev, "Get instance id failed\n");
+ return ret;
+ }
+ hdev->instance = ret;
+
+ hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->ctrl_info) {
+ ret = -ENOMEM;
+ goto release_instance;
+ }
+
+ ret = spraid_create_dma_pools(hdev);
+ if (ret)
+ goto free_ctrl_info;
+ nqueue = num_possible_cpus() + 1;
+ hdev->queues = kcalloc_node(nqueue, sizeof(struct spraid_queue),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->queues) {
+ ret = -ENOMEM;
+ goto destroy_dma_pools;
+ }
+
+ ret = spraid_alloc_admin_cmds(hdev);
+ if (ret)
+ goto free_queues;
+
+ dev_info(hdev->dev, "[%s] queues num: %d\n", __func__, nqueue);
+
+ return 0;
+
+free_queues:
+ kfree(hdev->queues);
+destroy_dma_pools:
+ spraid_destroy_dma_pools(hdev);
+free_ctrl_info:
+ kfree(hdev->ctrl_info);
+release_instance:
+ ida_free(&spraid_instance_ida, hdev->instance);
+ return ret;
+}
+
+static void spraid_free_resources(struct spraid_dev *hdev)
+{
+ spraid_free_admin_cmds(hdev);
+ kfree(hdev->queues);
+ spraid_destroy_dma_pools(hdev);
+ kfree(hdev->ctrl_info);
+ ida_free(&spraid_instance_ida, hdev->instance);
+}
+
+static void spraid_bsg_unmap_data(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir =
+ rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ if (iod->nsge)
+ dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
+
+ spraid_free_iod_res(hdev, iod);
+}
+
+static int spraid_bsg_map_data(struct spraid_dev *hdev, struct bsg_job *job,
+ struct spraid_admin_command *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir =
+ rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ iod->sg = job->request_payload.sg_list;
+ iod->nsge = job->request_payload.sg_cnt;
+ iod->length = job->request_payload.payload_len;
+ iod->use_sgl = false;
+ iod->npages = -1;
+ iod->sg_drv_mgmt = false;
+
+ if (!iod->nsge)
+ goto out;
+
+ ret = dma_map_sg_attrs(hdev->dev, iod->sg, iod->nsge,
+ dma_dir, DMA_ATTR_NO_WARN);
+ if (!ret)
+ goto out;
+
+ ret = spraid_setup_prps(hdev, iod);
+ if (ret)
+ goto unmap;
+
+ cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+
+ return 0;
+
+unmap:
+ dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
+out:
+ return ret;
+}
+
+static int spraid_get_ctrl_info(struct spraid_dev *hdev,
+ struct spraid_ctrl_info *ctrl_info)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SPRAID_GET_INFO_CTRL;
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(ctrl_info, data_ptr, sizeof(struct spraid_ctrl_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_init_ctrl_info(struct spraid_dev *hdev)
+{
+ int ret;
+
+ hdev->ctrl_info->nd = cpu_to_le32(240);
+ hdev->ctrl_info->mdts = 8;
+ hdev->ctrl_info->max_cmds = cpu_to_le16(4096);
+ hdev->ctrl_info->max_num_sge = cpu_to_le16(128);
+ hdev->ctrl_info->max_channel = cpu_to_le16(4);
+ hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239);
+ hdev->ctrl_info->max_lun = cpu_to_le16(2);
+
+ ret = spraid_get_ctrl_info(hdev, hdev->ctrl_info);
+ if (ret)
+ dev_err(hdev->dev, "get controller info failed: %d\n", ret);
+
+ dev_info(hdev->dev, "[%s]nd = %d\n", __func__, hdev->ctrl_info->nd);
+ dev_info(hdev->dev, "[%s]max_cmd = %d\n",
+ __func__, hdev->ctrl_info->max_cmds);
+ dev_info(hdev->dev, "[%s]max_channel = %d\n",
+ __func__, hdev->ctrl_info->max_channel);
+ dev_info(hdev->dev, "[%s]max_tgt_id = %d\n",
+ __func__, hdev->ctrl_info->max_tgt_id);
+ dev_info(hdev->dev, "[%s]max_lun = %d\n",
+ __func__, hdev->ctrl_info->max_lun);
+ dev_info(hdev->dev, "[%s]max_num_sge = %d\n",
+ __func__, hdev->ctrl_info->max_num_sge);
+ dev_info(hdev->dev, "[%s]lun_num_boot = %d\n",
+ __func__, hdev->ctrl_info->lun_num_in_boot);
+ dev_info(hdev->dev, "[%s]mdts = %d\n", __func__, hdev->ctrl_info->mdts);
+ dev_info(hdev->dev, "[%s]acl = %d\n", __func__, hdev->ctrl_info->acl);
+ dev_info(hdev->dev, "[%s]aer1 = %d\n", __func__, hdev->ctrl_info->aerl);
+ dev_info(hdev->dev, "[%s]card_type = %d\n",
+ __func__, hdev->ctrl_info->card_type);
+ dev_info(hdev->dev, "[%s]rtd3e = %d\n",
+ __func__, hdev->ctrl_info->rtd3e);
+ dev_info(hdev->dev, "[%s]sn = %s\n", __func__, hdev->ctrl_info->sn);
+ dev_info(hdev->dev, "[%s]fr = %s\n", __func__, hdev->ctrl_info->fr);
+
+ if (!hdev->ctrl_info->aerl)
+ hdev->ctrl_info->aerl = 1;
+ if (hdev->ctrl_info->aerl > SPRAID_NR_AEN_COMMANDS)
+ hdev->ctrl_info->aerl = SPRAID_NR_AEN_COMMANDS;
+
+ return 0;
+}
+
+#define SPRAID_MAX_ADMIN_PAYLOAD_SIZE BIT(16)
+static int spraid_alloc_iod_ext_mem_pool(struct spraid_dev *hdev)
+{
+ u16 max_sge = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ size_t alloc_size;
+
+ alloc_size = spraid_iod_ext_size(hdev, SPRAID_MAX_ADMIN_PAYLOAD_SIZE,
+ max_sge, true, false);
+ if (alloc_size > PAGE_SIZE)
+ dev_warn(hdev->dev, "It is unreasonable ;"
+ " sg allocation more than one page\n");
+ hdev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+ mempool_kfree,
+ (void *)alloc_size, GFP_KERNEL,
+ hdev->numa_node);
+ if (!hdev->iod_mempool) {
+ dev_err(hdev->dev, "Create iod extension memory pool failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void spraid_free_iod_ext_mem_pool(struct spraid_dev *hdev)
+{
+ mempool_destroy(hdev->iod_mempool);
+}
+
+static int spraid_user_admin_cmd(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct spraid_bsg_request *bsg_req = job->request;
+ struct spraid_passthru_common_cmd *cmd = &(bsg_req->admcmd);
+ struct spraid_admin_command admin_cmd;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+ u32 result[2] = {0};
+ int status;
+
+ if (hdev->state >= SPRAID_RESETTING) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not right\n",
+ __func__, hdev->state);
+ return -EBUSY;
+ }
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init\n",
+ __func__, cmd->opcode, cmd->info_0.subopcode);
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = cmd->opcode;
+ admin_cmd.common.flags = cmd->flags;
+ admin_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ admin_cmd.common.cdw2[0] = cpu_to_le32(cmd->cdw2);
+ admin_cmd.common.cdw2[1] = cpu_to_le32(cmd->cdw3);
+ admin_cmd.common.cdw10 = cpu_to_le32(cmd->cdw10);
+ admin_cmd.common.cdw11 = cpu_to_le32(cmd->cdw11);
+ admin_cmd.common.cdw12 = cpu_to_le32(cmd->cdw12);
+ admin_cmd.common.cdw13 = cpu_to_le32(cmd->cdw13);
+ admin_cmd.common.cdw14 = cpu_to_le32(cmd->cdw14);
+ admin_cmd.common.cdw15 = cpu_to_le32(cmd->cdw15);
+
+ status = spraid_bsg_map_data(hdev, job, &admin_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
+ }
+
+ status = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, &result[0],
+ &result[1], timeout);
+ if (status >= 0) {
+ job->reply_len = sizeof(result);
+ memcpy(job->reply, result, sizeof(result));
+ }
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x];"
+ " status[0x%x] result0[0x%x] result1[0x%x]\n",
+ __func__, cmd->opcode, cmd->info_0.subopcode,
+ status, result[0], result[1]);
+
+ spraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
+static int spraid_alloc_ioq_ptcmds(struct spraid_dev *hdev)
+{
+ int i;
+ int ptnum = SPRAID_NR_IOQ_PTCMDS;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+ spin_lock_init(&hdev->ioq_pt_lock);
+
+ hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->ioq_ptcmds) {
+ dev_err(hdev->dev, "Alloc ioq_ptcmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ptnum; i++) {
+ hdev->ioq_ptcmds[i].qid = i / SPRAID_PTCMDS_PERQ + 1;
+ hdev->ioq_ptcmds[i].cid = i % SPRAID_PTCMDS_PERQ
+ + SPRAID_IO_BLK_MQ_DEPTH;
+ list_add_tail(&(hdev->ioq_ptcmds[i].list), &hdev->ioq_pt_list);
+ }
+
+ dev_info(hdev->dev, "Alloc ioq_ptcmds success, ptnum[%d]\n", ptnum);
+
+ return 0;
+}
+
+static void spraid_free_ioq_ptcmds(struct spraid_dev *hdev)
+{
+ kfree(hdev->ioq_ptcmds);
+ hdev->ioq_ptcmds = NULL;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+}
+
+static int spraid_submit_ioq_sync_cmd(struct spraid_dev *hdev,
+ struct spraid_ioq_command *cmd,
+ u32 *result, u32 *reslen, u32 timeout)
+{
+ int ret;
+ dma_addr_t sense_dma;
+ struct spraid_queue *ioq;
+ void *sense_addr = NULL;
+ struct spraid_cmd *pt_cmd = spraid_get_cmd(hdev, SPRAID_CMD_IOPT);
+
+ if (!pt_cmd) {
+ dev_err(hdev->dev, "err, get ioq cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&pt_cmd->cmd_done);
+
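+ /* Each passthrough slot owns a fixed slice of the queue's sense buffer. */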
+ ioq = &hdev->queues[pt_cmd->qid];
+ ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
+ sense_addr = ioq->sense + ret;
+ sense_dma = ioq->sense_dma_addr + ret;
+
+ cmd->common.sense_addr = cpu_to_le64(sense_dma);
+ cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+ cmd->common.command_id = pt_cmd->cid;
+
+ spraid_submit_cmd(ioq, cmd);
+
+ if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout;"
+ " opcode[0x%x] subopcode[0x%x]\n",
+ __func__, pt_cmd->cid, pt_cmd->qid, cmd->common.opcode,
+ (le32_to_cpu(cmd->common.cdw3[0]) & 0xffff));
+ WRITE_ONCE(pt_cmd->state, SPRAID_CMD_TIMEOUT);
+ spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
+ return -EINVAL;
+ }
+
+ if (result && reslen) {
+ if ((pt_cmd->status & 0x17f) == 0x101) {
+ memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+ *reslen = SCSI_SENSE_BUFFERSIZE;
+ }
+ }
+
+ spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
+
+ return pt_cmd->status;
+}
+
+static int spraid_user_ioq_cmd(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct spraid_bsg_request *bsg_req =
+ (struct spraid_bsg_request *)(job->request);
+ struct spraid_ioq_passthru_cmd *cmd = &(bsg_req->ioqcmd);
+ struct spraid_ioq_command ioq_cmd;
+ int status = 0;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+
+ if (cmd->data_len > PAGE_SIZE) {
+ dev_err(hdev->dev, "[%s] data len bigger than 4k\n", __func__);
+ return -EFAULT;
+ }
+
+ if (hdev->state != SPRAID_LIVE) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not live\n",
+ __func__, hdev->state);
+ return -EBUSY;
+ }
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init;"
+ " datalen[%d]\n",
+ __func__, cmd->opcode, cmd->info_1.subopcode, cmd->data_len);
+
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.common.opcode = cmd->opcode;
+ ioq_cmd.common.flags = cmd->flags;
+ ioq_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ ioq_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+ ioq_cmd.common.cdb_len = cmd->info_0.cdb_len;
+ ioq_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+ ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+ ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+ ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+ ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+ ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+ ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+ ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+ ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+ ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+ memcpy(ioq_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+ ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+ ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+ ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+ ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+ status = spraid_bsg_map_data(hdev, job,
+ (struct spraid_admin_command *)&ioq_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
+ }
+
+ status = spraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, job->reply,
+ &job->reply_len, timeout);
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x], status[0x%x];"
+ " reply_len[%d]\n",
+ __func__, cmd->opcode, cmd->info_1.subopcode,
+ status, job->reply_len);
+
+ spraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
+static bool spraid_check_scmd_completed(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_queue *spraidq;
+ u16 hwq, cid;
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ spraidq = &hdev->queues[hwq];
+ if (READ_ONCE(iod->state) == SPRAID_CMD_COMPLETE
+ || spraid_poll_cq(spraidq, cid)) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n",
+ cid, spraidq->qid);
+ return true;
+ }
+ return false;
+}
+
+static enum blk_eh_timer_return spraid_scmd_timeout(struct scsi_cmnd *scmd)
+{
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ unsigned int timeout = scmd->device->request_queue->rq_timeout;
+
+ if (spraid_check_scmd_completed(scmd))
+ goto out;
+
+ if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) {
+ if (cmpxchg(&iod->state,
+ SPRAID_CMD_IN_FLIGHT,
+ SPRAID_CMD_TIMEOUT) == SPRAID_CMD_IN_FLIGHT) {
+ return BLK_EH_DONE;
+ }
+ }
+out:
+ return BLK_EH_RESET_TIMER;
+}
+
+/* temporarily send the abort command via the admin queue */
+static int spraid_send_abort_cmd(struct spraid_dev *hdev,
+ u32 hdid, u16 qid, u16 cid)
+{
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.abort.opcode = SPRAID_ADMIN_ABORT_CMD;
+ admin_cmd.abort.hdid = cpu_to_le32(hdid);
+ admin_cmd.abort.sqid = cpu_to_le16(qid);
+ admin_cmd.abort.cid = cpu_to_le16(cid);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/* temporarily send the reset command via the admin queue */
+static int spraid_send_reset_cmd(struct spraid_dev *hdev, int type, u32 hdid)
+{
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.reset.opcode = SPRAID_ADMIN_RESET;
+ admin_cmd.reset.hdid = cpu_to_le32(hdid);
+ admin_cmd.reset.type = type;
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static bool spraid_change_host_state(struct spraid_dev *hdev,
+ enum spraid_state newstate)
+{
+ unsigned long flags;
+ enum spraid_state oldstate;
+ bool change = false;
+
+ spin_lock_irqsave(&hdev->state_lock, flags);
+
+ oldstate = hdev->state;
+ switch (newstate) {
+ case SPRAID_LIVE:
+ switch (oldstate) {
+ case SPRAID_NEW:
+ case SPRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SPRAID_RESETTING:
+ switch (oldstate) {
+ case SPRAID_LIVE:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SPRAID_DELETING:
+ if (oldstate != SPRAID_DELETING)
+ change = true;
+ break;
+ case SPRAID_DEAD:
+ switch (oldstate) {
+ case SPRAID_NEW:
+ case SPRAID_LIVE:
+ case SPRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (change)
+ hdev->state = newstate;
+ spin_unlock_irqrestore(&hdev->state_lock, flags);
+
+ dev_info(hdev->dev, "[%s][%d]->[%d], change[%d]\n",
+ __func__, oldstate, newstate, change);
+
+ return change;
+}
+
+static void spraid_back_fault_cqe(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct spraid_iod *iod;
+ struct request *req;
+
+ tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+ req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
+ if (unlikely(!req || !blk_mq_request_started(req)))
+ return;
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ set_host_byte(scmd, DID_NO_CONNECT);
+ if (iod->nsge)
+ scsi_dma_unmap(scmd);
+ spraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+ dev_warn(hdev->dev, "Back fault CQE, cid[%d] qid[%d]\n",
+ cqe->cmd_id, ioq->qid);
+}
+
+static void spraid_back_all_io(struct spraid_dev *hdev)
+{
+ int i, j;
+ struct spraid_queue *ioq;
+ struct spraid_completion cqe = { 0 };
+
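+ /* Fail back every possibly outstanding request with DID_NO_CONNECT. */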
+ scsi_block_requests(hdev->shost);
+
+ for (i = 1; i <= hdev->shost->nr_hw_queues; i++) {
+ ioq = &hdev->queues[i];
+ for (j = 0; j < hdev->shost->can_queue; j++) {
+ cqe.cmd_id = j;
+ spraid_back_fault_cqe(ioq, &cqe);
+ }
+ }
+
+ scsi_unblock_requests(hdev->shost);
+}
+
+static void spraid_dev_disable(struct spraid_dev *hdev, bool shutdown)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+ unsigned long timeout = jiffies + 600 * HZ;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ spraid_shutdown_ctrl(hdev);
+ else
+ spraid_disable_ctrl(hdev);
+ }
+
+ while (!time_after(jiffies, timeout)) {
+ if (!pci_device_is_present(hdev->pdev)) {
+ dev_info(hdev->dev, "[%s] pci_device not present;"
+ " skip wait\n", __func__);
+ break;
+ }
+ if (!spraid_wait_ready(hdev, hdev->cap, false)) {
+ dev_info(hdev->dev,
+ "[%s] wait ready success after reset\n",
+ __func__);
+ break;
+ }
+ dev_info(hdev->dev, "[%s] waiting csts_rdy ready\n", __func__);
+ }
+
+ if (hdev->queue_count == 0) {
+ dev_err(hdev->dev, "[%s] warn, queue has been delete\n",
+ __func__);
+ return;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ spraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+ spraid_complete_cqes(adminq, start, end);
+
+ spraid_pci_disable(hdev);
+
+ spraid_back_all_io(hdev);
+}
+
+static void spraid_reset_work(struct work_struct *work)
+{
+ int ret;
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, reset_work);
+
+ if (hdev->state != SPRAID_RESETTING) {
+ dev_err(hdev->dev, "[%s] err, host is not reset state\n",
+ __func__);
+ return;
+ }
+
+ dev_info(hdev->dev, "[%s] enter host reset\n", __func__);
+
+ if (hdev->ctrl_config & SPRAID_CC_ENABLE) {
+ dev_info(hdev->dev, "[%s] start dev_disable\n", __func__);
+ spraid_dev_disable(hdev, false);
+ }
+
+ ret = spraid_pci_enable(hdev);
+ if (ret)
+ goto out;
+
+ ret = spraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = spraid_setup_io_queues(hdev);
+ if (ret || hdev->online_queues <= hdev->shost->nr_hw_queues)
+ goto pci_disable;
+
+ spraid_change_host_state(hdev, SPRAID_LIVE);
+
+ spraid_send_all_aen(hdev);
+
+ return;
+
+pci_disable:
+ spraid_pci_disable(hdev);
+out:
+ spraid_change_host_state(hdev, SPRAID_DEAD);
+ dev_err(hdev->dev, "[%s] err, host reset failed\n", __func__);
+}
+
+static int spraid_reset_work_sync(struct spraid_dev *hdev)
+{
+ if (!spraid_change_host_state(hdev, SPRAID_RESETTING)) {
+ dev_info(hdev->dev, "[%s] can't change to reset state\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ if (!queue_work(spraid_wq, &hdev->reset_work)) {
+ dev_err(hdev->dev, "[%s] err, host is already in reset state\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ flush_work(&hdev->reset_work);
+ if (hdev->state != SPRAID_LIVE)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int spraid_wait_abnl_cmd_done(struct spraid_iod *iod)
+{
+ u16 times = 0;
+
+ do {
+ if (READ_ONCE(iod->state) == SPRAID_CMD_TMO_COMPLETE)
+ break;
+ msleep(500);
+ times++;
+ } while (times <= SPRAID_WAIT_ABNL_CMD_TIMEOUT);
+
+ /* command did not complete within the wait window after a successful abort/reset */
+ if (times >= SPRAID_WAIT_ABNL_CMD_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int spraid_abort_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, aborting\n", cid, hwq);
+ ret = spraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
+ if (ret != ADMIN_ERR_TIMEOUT) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed;"
+ " not found\n", cid, hwq);
+ return FAILED;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort succ\n", cid, hwq);
+ return SUCCESS;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, timeout\n",
+ cid, hwq);
+ return FAILED;
+}
+
+static int spraid_tgt_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, target reset\n",
+ cid, hwq);
+ ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_TARGET, hostdata->hdid);
+ if (ret == 0) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ dev_warn(hdev->dev,
+ "cid[%d] qid[%d]target reset failed;"
+ " not found\n", cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] target reset success\n",
+ cid, hwq);
+ return SUCCESS;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] target reset failed\n",
+ cid, hwq, ret);
+ return FAILED;
+}
+
+static int spraid_bus_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, bus reset\n", cid, hwq);
+ ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_BUS, hostdata->hdid);
+ if (ret == 0) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ dev_warn(hdev->dev,
+ "cid[%d] qid[%d] bus reset failed;"
+ " not found\n", cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] bus reset succ\n",
+ cid, hwq);
+ return SUCCESS;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] bus reset failed\n",
+ cid, hwq, ret);
+ return FAILED;
+}
+
+static int spraid_shost_reset_handler(struct scsi_cmnd *scmd)
+{
+ u16 hwq, cid;
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+
+ scsi_print_command(scmd);
+ if (hdev->state != SPRAID_LIVE || spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset\n", cid, hwq);
+
+ if (spraid_reset_work_sync(hdev)) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset failed\n",
+ cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset success\n", cid, hwq);
+
+ return SUCCESS;
+}
+
+static pci_ers_result_t spraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter pci error detect, state:%d\n", state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ dev_warn(hdev->dev, "channel is normal, do nothing\n");
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ dev_warn(hdev->dev,
+ "channel io frozen, need reset controller\n");
+
+ scsi_block_requests(hdev->shost);
+
+ spraid_change_host_state(hdev, SPRAID_RESETTING);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_warn(hdev->dev, "channel io failure, request disconnect\n");
+
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t spraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "restart after slot reset\n");
+
+ pci_restore_state(pdev);
+
+ if (!queue_work(spraid_wq, &hdev->reset_work)) {
+ dev_err(hdev->dev, "[%s] err, the device is resetting state\n",
+ __func__);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ flush_work(&hdev->reset_work);
+
+ scsi_unblock_requests(hdev->shost);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void spraid_reset_done(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter spraid reset done\n");
+}
+
+static ssize_t csts_pp_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_PP_MASK);
+ ret >>= SPRAID_CSTS_PP_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_shst_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_SHST_MASK);
+ ret >>= SPRAID_CSTS_SHST_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_cfs_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_CFS_MASK);
+ ret >>= SPRAID_CSTS_CFS_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_rdy_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev))
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t fw_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fr);
+}
+
+static DEVICE_ATTR_RO(csts_pp);
+static DEVICE_ATTR_RO(csts_shst);
+static DEVICE_ATTR_RO(csts_cfs);
+static DEVICE_ATTR_RO(csts_rdy);
+static DEVICE_ATTR_RO(fw_version);
+
+static struct device_attribute *spraid_host_attrs[] = {
+ &dev_attr_csts_pp,
+ &dev_attr_csts_shst,
+ &dev_attr_csts_cfs,
+ &dev_attr_csts_rdy,
+ &dev_attr_fw_version,
+ NULL,
+};
+
+static int spraid_get_vd_info(struct spraid_dev *hdev,
+ struct spraid_vd_info *vd_info, u16 vid)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+ admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(vd_info, data_ptr, sizeof(struct spraid_vd_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_get_bgtask(struct spraid_dev *hdev,
+ struct spraid_bgtask *bgtask)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(bgtask, data_ptr, sizeof(struct spraid_bgtask));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+ ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+ vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret) {
+ vd_info->vd_status = 0;
+ vd_info->rg_id = 0xff;
+ }
+
+ ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ?
+ vd_info->vd_status : 0;
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_bgtask *bgtask;
+ struct spraid_sdev_hostdata *hostdata;
+ u8 rg_id, i, progress = 0;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+	if (!SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+		return snprintf(buf, PAGE_SIZE, "NA\n");
+
+	vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+	if (!vd_info)
+		return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ goto out;
+
+ rg_id = vd_info->rg_id;
+
+ bgtask = (struct spraid_bgtask *)vd_info;
+ ret = spraid_get_bgtask(hdev, bgtask);
+ if (ret)
+ goto out;
+ for (i = 0; i < bgtask->task_num; i++) {
+ if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+ (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+ progress = bgtask->bgtask[i].progress;
+ }
+
+out:
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+
+static struct device_attribute *spraid_dev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_raid_state,
+ &dev_attr_raid_resync,
+ NULL,
+};
+
+static struct pci_error_handlers spraid_err_handler = {
+ .error_detected = spraid_pci_error_detected,
+ .slot_reset = spraid_pci_slot_reset,
+ .reset_done = spraid_reset_done,
+};
+
+static int spraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ int ret;
+ struct spraid_dev *hdev = shost_priv(shost);
+
+ dev_info(hdev->dev, "[%s] start sysfs host reset cmd\n", __func__);
+ ret = spraid_reset_work_sync(hdev);
+ dev_info(hdev->dev, "[%s] stop sysfs host reset cmd[%d]\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static struct scsi_host_template spraid_driver_template = {
+ .module = THIS_MODULE,
+ .name = "Ramaxel Logic spraid driver",
+ .proc_name = "spraid",
+ .queuecommand = spraid_queue_command,
+ .slave_alloc = spraid_slave_alloc,
+ .slave_destroy = spraid_slave_destroy,
+ .slave_configure = spraid_slave_configure,
+ .eh_timed_out = spraid_scmd_timeout,
+ .eh_abort_handler = spraid_abort_handler,
+ .eh_target_reset_handler = spraid_tgt_reset_handler,
+ .eh_bus_reset_handler = spraid_bus_reset_handler,
+ .eh_host_reset_handler = spraid_shost_reset_handler,
+ .change_queue_depth = scsi_change_queue_depth,
+ .this_id = -1,
+ .shost_attrs = spraid_host_attrs,
+ .sdev_attrs = spraid_dev_attrs,
+ .host_reset = spraid_sysfs_host_reset,
+};
+
+static void spraid_shutdown(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ spraid_remove_io_queues(hdev);
+ spraid_disable_admin_queue(hdev, true);
+}
+
+/* bsg dispatch user command */
+static int spraid_bsg_host_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = dev_to_shost(job->dev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_bsg_request *bsg_req = job->request;
+ int ret = 0;
+
+ dev_info(hdev->dev, "[%s] msgcode[%d], msglen[%d], timeout[%d];"
+ " req_nsge[%d], req_len[%d]\n",
+ __func__, bsg_req->msgcode, job->request_len,
+ rq->timeout, job->request_payload.sg_cnt,
+ job->request_payload.payload_len);
+
+ job->reply_len = 0;
+
+ switch (bsg_req->msgcode) {
+ case SPRAID_BSG_ADM:
+ ret = spraid_user_admin_cmd(hdev, job);
+ break;
+ case SPRAID_BSG_IOQ:
+ ret = spraid_user_ioq_cmd(hdev, job);
+ break;
+ default:
+		dev_info(hdev->dev, "[%s] unsupported msgcode[%d]\n",
+ __func__, bsg_req->msgcode);
+ break;
+ }
+
+ if (ret > 0)
+ ret = ret | (ret << 8);
+
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+static inline void spraid_remove_bsg(struct spraid_dev *hdev)
+{
+ if (hdev->bsg_queue) {
+ bsg_unregister_queue(hdev->bsg_queue);
+ blk_cleanup_queue(hdev->bsg_queue);
+ }
+}
+
+static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct spraid_dev *hdev;
+ struct Scsi_Host *shost;
+ int node, ret;
+ char bsg_name[15];
+
+ shost = scsi_host_alloc(&spraid_driver_template, sizeof(*hdev));
+ if (!shost) {
+ dev_err(&pdev->dev, "Failed to allocate scsi host\n");
+ return -ENOMEM;
+ }
+ hdev = shost_priv(shost);
+ hdev->pdev = pdev;
+ hdev->dev = get_device(&pdev->dev);
+
+ node = dev_to_node(hdev->dev);
+ if (node == NUMA_NO_NODE) {
+ node = first_memory_node;
+ set_dev_node(hdev->dev, node);
+ }
+ hdev->numa_node = node;
+ hdev->shost = shost;
+ pci_set_drvdata(pdev, hdev);
+
+ ret = spraid_dev_map(hdev);
+ if (ret)
+ goto put_dev;
+
+ init_rwsem(&hdev->devices_rwsem);
+ INIT_WORK(&hdev->scan_work, spraid_scan_work);
+ INIT_WORK(&hdev->timesyn_work, spraid_timesyn_work);
+ INIT_WORK(&hdev->reset_work, spraid_reset_work);
+ INIT_WORK(&hdev->fw_act_work, spraid_fw_act_work);
+ spin_lock_init(&hdev->state_lock);
+
+ ret = spraid_alloc_resources(hdev);
+ if (ret)
+ goto dev_unmap;
+
+ ret = spraid_pci_enable(hdev);
+ if (ret)
+ goto resources_free;
+
+ ret = spraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = spraid_init_ctrl_info(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ ret = spraid_alloc_iod_ext_mem_pool(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ ret = spraid_setup_io_queues(hdev);
+ if (ret)
+ goto free_iod_mempool;
+
+ spraid_shost_init(hdev);
+
+ ret = scsi_add_host(hdev->shost, hdev->dev);
+ if (ret) {
+ dev_err(hdev->dev, "Add shost to system failed, ret: %d\n",
+ ret);
+ goto remove_io_queues;
+ }
+
+ snprintf(bsg_name, sizeof(bsg_name), "spraid%d", shost->host_no);
+ hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name,
+ spraid_bsg_host_dispatch,
+ spraid_cmd_size(hdev, true, false));
+ if (IS_ERR(hdev->bsg_queue)) {
+ dev_err(hdev->dev, "err, setup bsg failed\n");
+ hdev->bsg_queue = NULL;
+ goto remove_io_queues;
+ }
+
+ if (hdev->online_queues == SPRAID_ADMIN_QUEUE_NUM) {
+ dev_warn(hdev->dev, "warn only admin queue can be used\n");
+ return 0;
+ }
+
+ hdev->state = SPRAID_LIVE;
+
+ spraid_send_all_aen(hdev);
+
+ ret = spraid_dev_list_init(hdev);
+ if (ret)
+ goto remove_bsg;
+
+ ret = spraid_configure_timestamp(hdev);
+ if (ret)
+ dev_warn(hdev->dev, "init set timestamp failed\n");
+
+ ret = spraid_alloc_ioq_ptcmds(hdev);
+ if (ret)
+ goto remove_bsg;
+
+ scsi_scan_host(hdev->shost);
+
+ return 0;
+
+remove_bsg:
+ spraid_remove_bsg(hdev);
+remove_io_queues:
+ spraid_remove_io_queues(hdev);
+free_iod_mempool:
+ spraid_free_iod_ext_mem_pool(hdev);
+disable_admin_q:
+ spraid_disable_admin_queue(hdev, false);
+pci_disable:
+ spraid_pci_disable(hdev);
+resources_free:
+ spraid_free_resources(hdev);
+dev_unmap:
+ spraid_dev_unmap(hdev);
+put_dev:
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ return -ENODEV;
+}
+
+static void spraid_remove(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = hdev->shost;
+
+ dev_info(hdev->dev, "enter spraid remove\n");
+
+ spraid_change_host_state(hdev, SPRAID_DELETING);
+ flush_work(&hdev->reset_work);
+
+ if (!pci_device_is_present(pdev))
+ spraid_back_all_io(hdev);
+
+ spraid_remove_bsg(hdev);
+ scsi_remove_host(shost);
+ spraid_free_ioq_ptcmds(hdev);
+ kfree(hdev->devices);
+ spraid_remove_io_queues(hdev);
+ spraid_free_iod_ext_mem_pool(hdev);
+ spraid_disable_admin_queue(hdev, false);
+ spraid_pci_disable(hdev);
+ spraid_free_resources(hdev);
+ spraid_dev_unmap(hdev);
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ dev_info(hdev->dev, "exit spraid remove\n");
+}
+
+static const struct pci_device_id spraid_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC,
+ SPRAID_SERVER_DEVICE_HBA_DID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC,
+ SPRAID_SERVER_DEVICE_RAID_DID) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, spraid_id_table);
+
+static struct pci_driver spraid_driver = {
+ .name = "spraid",
+ .id_table = spraid_id_table,
+ .probe = spraid_probe,
+ .remove = spraid_remove,
+ .shutdown = spraid_shutdown,
+ .err_handler = &spraid_err_handler,
+};
+
+static int __init spraid_init(void)
+{
+ int ret;
+
+ spraid_wq = alloc_workqueue("spraid-wq", WQ_UNBOUND | WQ_MEM_RECLAIM |
+ WQ_SYSFS, 0);
+ if (!spraid_wq)
+ return -ENOMEM;
+
+ spraid_class = class_create(THIS_MODULE, "spraid");
+ if (IS_ERR(spraid_class)) {
+ ret = PTR_ERR(spraid_class);
+ goto destroy_wq;
+ }
+
+ ret = pci_register_driver(&spraid_driver);
+ if (ret < 0)
+ goto destroy_class;
+
+ return 0;
+
+destroy_class:
+ class_destroy(spraid_class);
+destroy_wq:
+ destroy_workqueue(spraid_wq);
+
+ return ret;
+}
+
+static void __exit spraid_exit(void)
+{
+ pci_unregister_driver(&spraid_driver);
+ class_destroy(spraid_class);
+ destroy_workqueue(spraid_wq);
+ ida_destroy(&spraid_instance_ida);
+}
+
+MODULE_AUTHOR("songyl(a)ramaxel.com");
+MODULE_DESCRIPTION("Ramaxel Memory Technology SPraid Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SPRAID_DRV_VERSION);
+module_init(spraid_init);
+module_exit(spraid_exit);
--
2.25.1
From: Matteo Croce <mcroce(a)microsoft.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4BULZ
CVE: NA
Reference: https://lore.kernel.org/all/20201110202746.9690-1-mcroce@linux.microsoft.co…
----------------------------------------------------------------------
The kernel cmdline reboot= option offers some control
over how the reboot is issued.
Add handles in sysfs to allow setting these reboot options, so they
can be changed on a running system rather than only at boot time.
The handles are under <sysfs>/kernel/reboot; they can be read to
get the current configuration and written to alter it.
# cd /sys/kernel/reboot/
# grep . *
cpu:0
force:0
mode:cold
type:acpi
# echo 2 >cpu
# echo yes >force
# echo soft >mode
# echo bios >type
# grep . *
cpu:2
force:1
mode:soft
type:bios
Before setting anything, check for the CAP_SYS_BOOT capability, so it's
possible to allow an unprivileged process to change these settings
simply by relaxing the handles' permissions, without opening them to
the world.
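Purely as an illustration (not part of this patch): a minimal userspace
sketch of driving the new handles from C instead of the shell transcript
above. The paths and accepted strings are the ones documented below; the
helper itself is hypothetical, and the writer still needs CAP_SYS_BOOT
plus write permission on the files.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a value to one of the /sys/kernel/reboot handles. */
static int reboot_sysfs_write(const char *handle, const char *value)
{
	char path[64];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "/sys/kernel/reboot/%s", handle);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* The kernel side rejects unknown strings with -EINVAL. */
	n = write(fd, value, strlen(value));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Same settings as the transcript above: soft reboot via BIOS on CPU 2. */
	if (reboot_sysfs_write("mode", "soft") ||
	    reboot_sysfs_write("type", "bios") ||
	    reboot_sysfs_write("cpu", "2"))
		perror("reboot sysfs");
	return 0;
}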
Signed-off-by: Matteo Croce <mcroce(a)microsoft.com>
Signed-off-by: Hongyu Li <543306408(a)qq.com>
---
Documentation/ABI/testing/sysfs-kernel-reboot | 32 +++
kernel/reboot.c | 206 ++++++++++++++++++
2 files changed, 238 insertions(+)
create mode 100644 Documentation/ABI/testing/sysfs-kernel-reboot
diff --git a/Documentation/ABI/testing/sysfs-kernel-reboot b/Documentation/ABI/testing/sysfs-kernel-reboot
new file mode 100644
index 000000000000..837330fb2511
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-reboot
@@ -0,0 +1,32 @@
+What: /sys/kernel/reboot
+Date: November 2020
+KernelVersion: 5.11
+Contact: Matteo Croce <mcroce(a)microsoft.com>
+Description: Interface to set the kernel reboot behavior, similarly to
+ what can be done via the reboot= cmdline option.
+ (see Documentation/admin-guide/kernel-parameters.txt)
+
+What: /sys/kernel/reboot/mode
+Date: November 2020
+KernelVersion: 5.11
+Contact: Matteo Croce <mcroce(a)microsoft.com>
+Description: Reboot mode. Valid values are: cold warm hard soft gpio
+
+What: /sys/kernel/reboot/type
+Date: November 2020
+KernelVersion: 5.11
+Contact: Matteo Croce <mcroce(a)microsoft.com>
+Description: Reboot type. Valid values are: bios acpi kbd triple efi pci
+
+What: /sys/kernel/reboot/cpu
+Date: November 2020
+KernelVersion: 5.11
+Contact: Matteo Croce <mcroce(a)microsoft.com>
+Description: CPU number to use to reboot.
+
+What: /sys/kernel/reboot/force
+Date: November 2020
+KernelVersion: 5.11
+Contact: Matteo Croce <mcroce(a)microsoft.com>
+Description: Don't wait for any other CPUs on reboot and
+ avoid anything that could hang.
diff --git a/kernel/reboot.c b/kernel/reboot.c
index af6f23d8bea1..778979d92dc7 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -594,3 +594,209 @@ static int __init reboot_setup(char *str)
return 1;
}
__setup("reboot=", reboot_setup);
+
+#ifdef CONFIG_SYSFS
+
+#define REBOOT_COLD_STR "cold"
+#define REBOOT_WARM_STR "warm"
+#define REBOOT_HARD_STR "hard"
+#define REBOOT_SOFT_STR "soft"
+#define REBOOT_GPIO_STR "gpio"
+#define REBOOT_UNDEFINED_STR "undefined"
+
+#define BOOT_TRIPLE_STR "triple"
+#define BOOT_KBD_STR "kbd"
+#define BOOT_BIOS_STR "bios"
+#define BOOT_ACPI_STR "acpi"
+#define BOOT_EFI_STR "efi"
+#define BOOT_CF9_FORCE_STR "cf9_force"
+#define BOOT_CF9_SAFE_STR "cf9_safe"
+
+static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ const char *val;
+
+ switch (reboot_mode) {
+ case REBOOT_COLD:
+ val = REBOOT_COLD_STR;
+ break;
+ case REBOOT_WARM:
+ val = REBOOT_WARM_STR;
+ break;
+ case REBOOT_HARD:
+ val = REBOOT_HARD_STR;
+ break;
+ case REBOOT_SOFT:
+ val = REBOOT_SOFT_STR;
+ break;
+ case REBOOT_GPIO:
+ val = REBOOT_GPIO_STR;
+ break;
+ default:
+ val = REBOOT_UNDEFINED_STR;
+ }
+
+ return sprintf(buf, "%s\n", val);
+}
+static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (!capable(CAP_SYS_BOOT))
+ return -EPERM;
+
+ if (!strncmp(buf, REBOOT_COLD_STR, strlen(REBOOT_COLD_STR)))
+ reboot_mode = REBOOT_COLD;
+ else if (!strncmp(buf, REBOOT_WARM_STR, strlen(REBOOT_WARM_STR)))
+ reboot_mode = REBOOT_WARM;
+ else if (!strncmp(buf, REBOOT_HARD_STR, strlen(REBOOT_HARD_STR)))
+ reboot_mode = REBOOT_HARD;
+ else if (!strncmp(buf, REBOOT_SOFT_STR, strlen(REBOOT_SOFT_STR)))
+ reboot_mode = REBOOT_SOFT;
+ else if (!strncmp(buf, REBOOT_GPIO_STR, strlen(REBOOT_GPIO_STR)))
+ reboot_mode = REBOOT_GPIO;
+ else
+ return -EINVAL;
+
+ return count;
+}
+static struct kobj_attribute reboot_mode_attr = __ATTR_RW(mode);
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ const char *val;
+
+ switch (reboot_type) {
+ case BOOT_TRIPLE:
+ val = BOOT_TRIPLE_STR;
+ break;
+ case BOOT_KBD:
+ val = BOOT_KBD_STR;
+ break;
+ case BOOT_BIOS:
+ val = BOOT_BIOS_STR;
+ break;
+ case BOOT_ACPI:
+ val = BOOT_ACPI_STR;
+ break;
+ case BOOT_EFI:
+ val = BOOT_EFI_STR;
+ break;
+ case BOOT_CF9_FORCE:
+ val = BOOT_CF9_FORCE_STR;
+ break;
+ case BOOT_CF9_SAFE:
+ val = BOOT_CF9_SAFE_STR;
+ break;
+ default:
+ val = REBOOT_UNDEFINED_STR;
+ }
+
+ return sprintf(buf, "%s\n", val);
+}
+static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (!capable(CAP_SYS_BOOT))
+ return -EPERM;
+
+ if (!strncmp(buf, BOOT_TRIPLE_STR, strlen(BOOT_TRIPLE_STR)))
+ reboot_mode = BOOT_TRIPLE;
+ else if (!strncmp(buf, BOOT_KBD_STR, strlen(BOOT_KBD_STR)))
+ reboot_mode = BOOT_KBD;
+ else if (!strncmp(buf, BOOT_BIOS_STR, strlen(BOOT_BIOS_STR)))
+ reboot_mode = BOOT_BIOS;
+ else if (!strncmp(buf, BOOT_ACPI_STR, strlen(BOOT_ACPI_STR)))
+ reboot_mode = BOOT_ACPI;
+ else if (!strncmp(buf, BOOT_EFI_STR, strlen(BOOT_EFI_STR)))
+ reboot_mode = BOOT_EFI;
+ else if (!strncmp(buf, BOOT_CF9_FORCE_STR, strlen(BOOT_CF9_FORCE_STR)))
+ reboot_mode = BOOT_CF9_FORCE;
+ else if (!strncmp(buf, BOOT_CF9_SAFE_STR, strlen(BOOT_CF9_SAFE_STR)))
+ reboot_mode = BOOT_CF9_SAFE;
+ else
+ return -EINVAL;
+
+ return count;
+}
+static struct kobj_attribute reboot_type_attr = __ATTR_RW(type);
+
+static ssize_t cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", reboot_cpu);
+}
+static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int cpunum;
+ int rc;
+
+ if (!capable(CAP_SYS_BOOT))
+ return -EPERM;
+
+ rc = kstrtouint(buf, 0, &cpunum);
+
+ if (rc)
+ return rc;
+
+ if (cpunum >= num_possible_cpus())
+ return -ERANGE;
+
+ reboot_cpu = cpunum;
+
+ return count;
+}
+static struct kobj_attribute reboot_cpu_attr = __ATTR_RW(cpu);
+
+static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", reboot_force);
+}
+static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ bool res;
+
+ if (!capable(CAP_SYS_BOOT))
+ return -EPERM;
+
+ if (kstrtobool(buf, &res))
+ return -EINVAL;
+
+ reboot_force = res;
+
+ return count;
+}
+static struct kobj_attribute reboot_force_attr = __ATTR_RW(force);
+
+static struct attribute *reboot_attrs[] = {
+ &reboot_mode_attr.attr,
+ &reboot_type_attr.attr,
+ &reboot_cpu_attr.attr,
+ &reboot_force_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group reboot_attr_group = {
+ .attrs = reboot_attrs,
+};
+
+static int __init reboot_ksysfs_init(void)
+{
+ struct kobject *reboot_kobj;
+ int ret;
+
+ reboot_kobj = kobject_create_and_add("reboot", kernel_kobj);
+ if (!reboot_kobj)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(reboot_kobj, &reboot_attr_group);
+ if (ret) {
+ kobject_put(reboot_kobj);
+ return ret;
+ }
+
+ return 0;
+}
+late_initcall(reboot_ksysfs_init);
+
+#endif
--
2.17.1
From: Nathan Chancellor <natechancellor(a)gmail.com>
maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4BULZ
CVE: NA
Reference: https://lore.kernel.org/all/20201112035023.974748-1-natechancellor@gmail.co…
----------------------------------------------------------------------
Clang warns:
kernel/reboot.c:707:17: warning: implicit conversion from enumeration
type 'enum reboot_type' to different enumeration type 'enum reboot_mode'
[-Wenum-conversion]
reboot_mode = BOOT_TRIPLE;
~ ^~~~~~~~~~~
kernel/reboot.c:709:17: warning: implicit conversion from enumeration
type 'enum reboot_type' to different enumeration type 'enum reboot_mode'
[-Wenum-conversion]
reboot_mode = BOOT_KBD;
~ ^~~~~~~~
kernel/reboot.c:711:17: warning: implicit conversion from enumeration
type 'enum reboot_type' to different enumeration type 'enum reboot_mode'
[-Wenum-conversion]
reboot_mode = BOOT_BIOS;
~ ^~~~~~~~~
kernel/reboot.c:713:17: warning: implicit conversion from enumeration
type 'enum reboot_type' to different enumeration type 'enum reboot_mode'
[-Wenum-conversion]
reboot_mode = BOOT_ACPI;
~ ^~~~~~~~~
kernel/reboot.c:715:17: warning: implicit conversion from enumeration
type 'enum reboot_type' to different enumeration type 'enum reboot_mode'
[-Wenum-conversion]
reboot_mode = BOOT_EFI;
~ ^~~~~~~~
kernel/reboot.c:717:17: warning: implicit conversion from enumeration
type 'enum reboot_type' to different enumeration type 'enum reboot_mode'
[-Wenum-conversion]
reboot_mode = BOOT_CF9_FORCE;
~ ^~~~~~~~~~~~~~
kernel/reboot.c:719:17: warning: implicit conversion from enumeration
type 'enum reboot_type' to different enumeration type 'enum reboot_mode'
[-Wenum-conversion]
reboot_mode = BOOT_CF9_SAFE;
~ ^~~~~~~~~~~~~
7 warnings generated.
It seems that these assignments should be to reboot_type, not
reboot_mode. Fix it so there are no more warnings and the code works
properly.
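For context, a stripped-down reproduction of the construct clang is
flagging, with simplified stand-in enum definitions (illustration only,
not the kernel's actual declarations):

/* Two distinct enum types; assigning across them triggers -Wenum-conversion. */
enum reboot_mode { REBOOT_COLD, REBOOT_WARM };
enum reboot_type { BOOT_BIOS, BOOT_ACPI };

enum reboot_mode reboot_mode;
enum reboot_type reboot_type;

void type_store_example(void)
{
	reboot_mode = BOOT_ACPI;	/* wrong variable: clang warns, and the type setting is lost */
	reboot_type = BOOT_ACPI;	/* the intended assignment, no warning */
}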
Fixes: eab8da48579d ("reboot: allow to specify reboot mode via sysfs")
Link: https://github.com/ClangBuiltLinux/linux/issues/1197
Signed-off-by: Nathan Chancellor <natechancellor(a)gmail.com>
Reviewed-and-tested-by: Matteo Croce <mcroce(a)microsoft.com>
Reviewed-by: Petr Mladek <pmladek(a)suse.com>
Signed-off-by: Hongyu Li <543306408(a)qq.com>
---
kernel/reboot.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 778979d92dc7..7a445c710f9a 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -700,19 +700,19 @@ static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,
return -EPERM;
if (!strncmp(buf, BOOT_TRIPLE_STR, strlen(BOOT_TRIPLE_STR)))
- reboot_mode = BOOT_TRIPLE;
+ reboot_type = BOOT_TRIPLE;
else if (!strncmp(buf, BOOT_KBD_STR, strlen(BOOT_KBD_STR)))
- reboot_mode = BOOT_KBD;
+ reboot_type = BOOT_KBD;
else if (!strncmp(buf, BOOT_BIOS_STR, strlen(BOOT_BIOS_STR)))
- reboot_mode = BOOT_BIOS;
+ reboot_type = BOOT_BIOS;
else if (!strncmp(buf, BOOT_ACPI_STR, strlen(BOOT_ACPI_STR)))
- reboot_mode = BOOT_ACPI;
+ reboot_type = BOOT_ACPI;
else if (!strncmp(buf, BOOT_EFI_STR, strlen(BOOT_EFI_STR)))
- reboot_mode = BOOT_EFI;
+ reboot_type = BOOT_EFI;
else if (!strncmp(buf, BOOT_CF9_FORCE_STR, strlen(BOOT_CF9_FORCE_STR)))
- reboot_mode = BOOT_CF9_FORCE;
+ reboot_type = BOOT_CF9_FORCE;
else if (!strncmp(buf, BOOT_CF9_SAFE_STR, strlen(BOOT_CF9_SAFE_STR)))
- reboot_mode = BOOT_CF9_SAFE;
+ reboot_type = BOOT_CF9_SAFE;
else
return -EINVAL;
--
2.17.1
[PATCH openEuler-1.0-LTS v2] scsi:spraid: support Ramaxel's spraid driver
by Yanling Song 22 Dec '21
22 Dec '21
Ramaxel inclusion
category: features
bugzilla: https://gitee.com/openeuler/kernel/issues/I4NJIB
CVE: NA
v1->v2:
1. Add defconfig entries for arm64 and x86
Support Ramaxel's SPRxxx RAID controllers
Signed-off-by: Yanling Song <songyl(a)ramaxel.com>
Reviewed-by: Jiang Yu <yujiang(a)ramaxel.com>
---
arch/arm64/configs/openeuler_defconfig | 1 +
arch/x86/configs/openeuler_defconfig | 1 +
drivers/scsi/Kconfig | 1 +
drivers/scsi/Makefile | 1 +
drivers/scsi/spraid/Kconfig | 13 +
drivers/scsi/spraid/Makefile | 7 +
drivers/scsi/spraid/spraid.h | 746 +++++
drivers/scsi/spraid/spraid_main.c | 3875 ++++++++++++++++++++++++
8 files changed, 4645 insertions(+)
create mode 100644 drivers/scsi/spraid/Kconfig
create mode 100644 drivers/scsi/spraid/Makefile
create mode 100644 drivers/scsi/spraid/spraid.h
create mode 100644 drivers/scsi/spraid/spraid_main.c
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index d2167c80757f..47b0931b91bf 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2143,6 +2143,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_SMARTPQI=m
+CONFIG_RAMAXEL_SPRAID=m
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_SCSI_HPTIOP is not set
CONFIG_LIBFC=m
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 7043426d9780..786186288fea 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2182,6 +2182,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_SMARTPQI=m
+CONFIG_RAMAXEL_SPRAID=m
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_BUSLOGIC is not set
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8450484184e3..63d2aaa22834 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -522,6 +522,7 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/ufs/Kconfig"
+source "drivers/scsi/spraid/Kconfig"
config SCSI_HPTIOP
tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2973693f6dcc..4056cf26e09e 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
obj-$(CONFIG_CXLFLASH) += cxlflash/
+obj-$(CONFIG_RAMAXEL_SPRAID) += spraid/
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
diff --git a/drivers/scsi/spraid/Kconfig b/drivers/scsi/spraid/Kconfig
new file mode 100644
index 000000000000..bfbba3db8db0
--- /dev/null
+++ b/drivers/scsi/spraid/Kconfig
@@ -0,0 +1,13 @@
+#
+# Ramaxel driver configuration
+#
+
+config RAMAXEL_SPRAID
+ tristate "Ramaxel spraid Adapter"
+ depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
+ depends on ARM64 || X86_64
+ help
+	  This driver supports the Ramaxel SPRxxx series
+	  RAID controllers, which attach to the host over a
+	  PCIe Gen4 interface and support SAS/SATA HDDs/SSDs.
diff --git a/drivers/scsi/spraid/Makefile b/drivers/scsi/spraid/Makefile
new file mode 100644
index 000000000000..aadc2ffd37eb
--- /dev/null
+++ b/drivers/scsi/spraid/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Ramaxel device drivers.
+#
+
+obj-$(CONFIG_RAMAXEL_SPRAID) += spraid.o
+
+spraid-objs := spraid_main.o
\ No newline at end of file
diff --git a/drivers/scsi/spraid/spraid.h b/drivers/scsi/spraid/spraid.h
new file mode 100644
index 000000000000..c1e4980e18e5
--- /dev/null
+++ b/drivers/scsi/spraid/spraid.h
@@ -0,0 +1,746 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#ifndef __SPRAID_H_
+#define __SPRAID_H_
+
+#define SPRAID_CAP_MQES(cap) ((cap) & 0xffff)
+#define SPRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define SPRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define SPRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define SPRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define SPRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff)
+
+#define SPRAID_DEFAULT_MAX_CHANNEL 4
+#define SPRAID_DEFAULT_MAX_ID 240
+#define SPRAID_DEFAULT_MAX_LUN_PER_HOST 8
+#define MAX_SECTORS 2048
+
+#define IO_SQE_SIZE sizeof(struct spraid_ioq_command)
+#define ADMIN_SQE_SIZE sizeof(struct spraid_admin_command)
+#define SQE_SIZE(qid) (((qid) > 0) ? IO_SQE_SIZE : ADMIN_SQE_SIZE)
+#define CQ_SIZE(depth) ((depth) * sizeof(struct spraid_completion))
+#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid))
+
+#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE)
+
+#define SPRAID_AQ_DEPTH 128
+#define SPRAID_NR_AEN_COMMANDS 16
+#define SPRAID_AQ_BLK_MQ_DEPTH (SPRAID_AQ_DEPTH - SPRAID_NR_AEN_COMMANDS)
+#define SPRAID_AQ_MQ_TAG_DEPTH (SPRAID_AQ_BLK_MQ_DEPTH - 1)
+
+#define SPRAID_ADMIN_QUEUE_NUM 1
+#define SPRAID_PTCMDS_PERQ 1
+#define SPRAID_IO_BLK_MQ_DEPTH (hdev->shost->can_queue)
+#define SPRAID_NR_IOQ_PTCMDS (SPRAID_PTCMDS_PERQ * hdev->shost->nr_hw_queues)
+
+#define FUA_MASK 0x08
+#define SPRAID_MINORS BIT(MINORBITS)
+
+#define COMMAND_IS_WRITE(cmd) ((cmd)->common.opcode & 1)
+
+#define SPRAID_IO_IOSQES 7
+#define SPRAID_IO_IOCQES 4
+#define PRP_ENTRY_SIZE 8
+
+#define SMALL_POOL_SIZE 256
+#define MAX_SMALL_POOL_NUM 16
+#define MAX_CMD_PER_DEV 64
+#define MAX_CDB_LEN 32
+
+#define SPRAID_UP_TO_MULTY4(x) (((x) + 4) & (~0x03))
+
+#define CQE_STATUS_SUCCESS (0x0)
+
+#define PCI_VENDOR_ID_RAMAXEL_LOGIC 0x1E81
+
+#define SPRAID_SERVER_DEVICE_HBA_DID 0x2100
+#define SPRAID_SERVER_DEVICE_RAID_DID 0x2200
+
+#define IO_6_DEFAULT_TX_LEN 256
+
+#define SPRAID_INT_PAGES 2
+#define SPRAID_INT_BYTES(hdev) (SPRAID_INT_PAGES * (hdev)->page_size)
+
+enum {
+ SPRAID_REQ_CANCELLED = (1 << 0),
+ SPRAID_REQ_USERCMD = (1 << 1),
+};
+
+enum {
+ SPRAID_SC_SUCCESS = 0x0,
+ SPRAID_SC_INVALID_OPCODE = 0x1,
+ SPRAID_SC_INVALID_FIELD = 0x2,
+
+ SPRAID_SC_ABORT_LIMIT = 0x103,
+ SPRAID_SC_ABORT_MISSING = 0x104,
+ SPRAID_SC_ASYNC_LIMIT = 0x105,
+
+ SPRAID_SC_DNR = 0x4000,
+};
+
+enum {
+ SPRAID_REG_CAP = 0x0000,
+ SPRAID_REG_CC = 0x0014,
+ SPRAID_REG_CSTS = 0x001c,
+ SPRAID_REG_AQA = 0x0024,
+ SPRAID_REG_ASQ = 0x0028,
+ SPRAID_REG_ACQ = 0x0030,
+ SPRAID_REG_DBS = 0x1000,
+};
+
+enum {
+ SPRAID_CC_ENABLE = 1 << 0,
+ SPRAID_CC_CSS_NVM = 0 << 4,
+ SPRAID_CC_MPS_SHIFT = 7,
+ SPRAID_CC_AMS_SHIFT = 11,
+ SPRAID_CC_SHN_SHIFT = 14,
+ SPRAID_CC_IOSQES_SHIFT = 16,
+ SPRAID_CC_IOCQES_SHIFT = 20,
+ SPRAID_CC_AMS_RR = 0 << SPRAID_CC_AMS_SHIFT,
+ SPRAID_CC_SHN_NONE = 0 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CC_IOSQES = SPRAID_IO_IOSQES << SPRAID_CC_IOSQES_SHIFT,
+ SPRAID_CC_IOCQES = SPRAID_IO_IOCQES << SPRAID_CC_IOCQES_SHIFT,
+ SPRAID_CC_SHN_NORMAL = 1 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CC_SHN_MASK = 3 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CSTS_CFS_SHIFT = 1,
+ SPRAID_CSTS_SHST_SHIFT = 2,
+ SPRAID_CSTS_PP_SHIFT = 5,
+ SPRAID_CSTS_RDY = 1 << 0,
+ SPRAID_CSTS_SHST_CMPLT = 2 << 2,
+ SPRAID_CSTS_SHST_MASK = 3 << 2,
+ SPRAID_CSTS_CFS_MASK = 1 << SPRAID_CSTS_CFS_SHIFT,
+ SPRAID_CSTS_PP_MASK = 1 << SPRAID_CSTS_PP_SHIFT,
+};
+
+enum {
+ SPRAID_ADMIN_DELETE_SQ = 0x00,
+ SPRAID_ADMIN_CREATE_SQ = 0x01,
+ SPRAID_ADMIN_DELETE_CQ = 0x04,
+ SPRAID_ADMIN_CREATE_CQ = 0x05,
+ SPRAID_ADMIN_ABORT_CMD = 0x08,
+ SPRAID_ADMIN_SET_FEATURES = 0x09,
+ SPRAID_ADMIN_ASYNC_EVENT = 0x0c,
+ SPRAID_ADMIN_GET_INFO = 0xc6,
+ SPRAID_ADMIN_RESET = 0xc8,
+};
+
+enum {
+ SPRAID_GET_INFO_CTRL = 0,
+ SPRAID_GET_INFO_DEV_LIST = 1,
+};
+
+enum {
+ SPRAID_RESET_TARGET = 0,
+ SPRAID_RESET_BUS = 1,
+};
+
+enum {
+ SPRAID_AEN_ERROR = 0,
+ SPRAID_AEN_NOTICE = 2,
+ SPRAID_AEN_VS = 7,
+};
+
+enum {
+ SPRAID_AEN_DEV_CHANGED = 0x00,
+ SPRAID_AEN_FW_ACT_START = 0x01,
+ SPRAID_AEN_HOST_PROBING = 0x10,
+};
+
+enum {
+ SPRAID_AEN_TIMESYN = 0x00,
+ SPRAID_AEN_FW_ACT_FINISH = 0x02,
+ SPRAID_AEN_EVENT_MIN = 0x80,
+ SPRAID_AEN_EVENT_MAX = 0xff,
+};
+
+enum {
+ SPRAID_CMD_WRITE = 0x01,
+ SPRAID_CMD_READ = 0x02,
+
+ SPRAID_CMD_NONIO_NONE = 0x80,
+ SPRAID_CMD_NONIO_TODEV = 0x81,
+ SPRAID_CMD_NONIO_FROMDEV = 0x82,
+};
+
+enum {
+ SPRAID_QUEUE_PHYS_CONTIG = (1 << 0),
+ SPRAID_CQ_IRQ_ENABLED = (1 << 1),
+
+ SPRAID_FEAT_NUM_QUEUES = 0x07,
+ SPRAID_FEAT_ASYNC_EVENT = 0x0b,
+ SPRAID_FEAT_TIMESTAMP = 0x0e,
+};
+
+enum spraid_state {
+ SPRAID_NEW,
+ SPRAID_LIVE,
+ SPRAID_RESETTING,
+ SPRAID_DELETING,
+ SPRAID_DEAD,
+};
+
+enum {
+ SPRAID_CARD_HBA,
+ SPRAID_CARD_RAID,
+};
+
+enum spraid_cmd_type {
+ SPRAID_CMD_ADM,
+ SPRAID_CMD_IOPT,
+};
+
+struct spraid_completion {
+ __le32 result;
+ union {
+ struct {
+ __u8 sense_len;
+ __u8 resv[3];
+ };
+ __le32 result1;
+ };
+ __le16 sq_head;
+ __le16 sq_id;
+ __u16 cmd_id;
+ __le16 status;
+};
+
+struct spraid_ctrl_info {
+ __le32 nd;
+ __le16 max_cmds;
+ __le16 max_channel;
+ __le32 max_tgt_id;
+ __le16 max_lun;
+ __le16 max_num_sge;
+ __le16 lun_num_in_boot;
+ __u8 mdts;
+ __u8 acl;
+ __u8 aerl;
+ __u8 card_type;
+ __u16 rsvd;
+ __u32 rtd3e;
+ __u8 sn[32];
+ __u8 fr[16];
+ __u8 rsvd1[4020];
+};
+
+struct spraid_dev {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct Scsi_Host *shost;
+ struct spraid_queue *queues;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool[MAX_SMALL_POOL_NUM];
+ mempool_t *iod_mempool;
+ void __iomem *bar;
+ u32 max_qid;
+ u32 num_vecs;
+ u32 queue_count;
+ u32 ioq_depth;
+ int db_stride;
+ u32 __iomem *dbs;
+ struct rw_semaphore devices_rwsem;
+ int numa_node;
+ u32 page_size;
+ u32 ctrl_config;
+ u32 online_queues;
+ u64 cap;
+ int instance;
+ struct spraid_ctrl_info *ctrl_info;
+ struct spraid_dev_info *devices;
+
+ struct spraid_cmd *adm_cmds;
+ struct list_head adm_cmd_list;
+ spinlock_t adm_cmd_lock;
+
+ struct spraid_cmd *ioq_ptcmds;
+ struct list_head ioq_pt_list;
+ spinlock_t ioq_pt_lock;
+
+ struct work_struct scan_work;
+ struct work_struct timesyn_work;
+ struct work_struct reset_work;
+ struct work_struct fw_act_work;
+
+ enum spraid_state state;
+ spinlock_t state_lock;
+
+ struct request_queue *bsg_queue;
+};
+
+struct spraid_sgl_desc {
+ __le64 addr;
+ __le32 length;
+ __u8 rsvd[3];
+ __u8 type;
+};
+
+union spraid_data_ptr {
+ struct {
+ __le64 prp1;
+ __le64 prp2;
+ };
+ struct spraid_sgl_desc sgl;
+};
+
+struct spraid_admin_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le32 cdw2[4];
+ union spraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+struct spraid_features {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[2];
+ union spraid_data_ptr dptr;
+ __le32 fid;
+ __le32 dword11;
+ __le32 dword12;
+ __le32 dword13;
+ __le32 dword14;
+ __le32 dword15;
+};
+
+struct spraid_create_cq {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 cqid;
+ __le16 qsize;
+ __le16 cq_flags;
+ __le16 irq_vector;
+ __u32 rsvd12[4];
+};
+
+struct spraid_create_sq {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 sqid;
+ __le16 qsize;
+ __le16 sq_flags;
+ __le16 cqid;
+ __u32 rsvd12[4];
+};
+
+struct spraid_delete_queue {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[9];
+ __le16 qid;
+ __u16 rsvd10;
+ __u32 rsvd11[5];
+};
+
+struct spraid_get_info {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u32 rsvd2[4];
+ union spraid_data_ptr dptr;
+ __u8 type;
+ __u8 rsvd10[3];
+ __le32 cdw11;
+ __u32 rsvd12[4];
+};
+
+struct spraid_usr_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ union {
+ struct {
+ __le16 subopcode;
+ __le16 rsvd1;
+ } info_0;
+ __le32 cdw2;
+ };
+ union {
+ struct {
+ __le16 data_len;
+ __le16 param_len;
+ } info_1;
+ __le32 cdw3;
+ };
+ __u64 metadata;
+ union spraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+enum {
+ SPRAID_CMD_FLAG_SGL_METABUF = (1 << 6),
+ SPRAID_CMD_FLAG_SGL_METASEG = (1 << 7),
+ SPRAID_CMD_FLAG_SGL_ALL = SPRAID_CMD_FLAG_SGL_METABUF |
+ SPRAID_CMD_FLAG_SGL_METASEG,
+};
+
+enum spraid_cmd_state {
+ SPRAID_CMD_IDLE = 0,
+ SPRAID_CMD_IN_FLIGHT = 1,
+ SPRAID_CMD_COMPLETE = 2,
+ SPRAID_CMD_TIMEOUT = 3,
+ SPRAID_CMD_TMO_COMPLETE = 4,
+};
+
+struct spraid_abort_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __le16 sqid;
+ __le16 cid;
+ __u32 rsvd11[5];
+};
+
+struct spraid_reset_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __u8 type;
+ __u8 rsvd10[3];
+ __u32 rsvd11[5];
+};
+
+struct spraid_admin_command {
+ union {
+ struct spraid_admin_common_command common;
+ struct spraid_features features;
+ struct spraid_create_cq create_cq;
+ struct spraid_create_sq create_sq;
+ struct spraid_delete_queue delete_queue;
+ struct spraid_get_info get_info;
+ struct spraid_abort_cmd abort;
+ struct spraid_reset_cmd reset;
+ struct spraid_usr_cmd usr_cmd;
+ };
+};
+
+struct spraid_ioq_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __le32 cdw3[3];
+ union spraid_data_ptr dptr;
+ __le32 cdw10[6];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __le32 cdw26[6];
+};
+
+struct spraid_rw_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union spraid_data_ptr dptr;
+ __le64 slba;
+ __le16 nlb;
+ __le16 control;
+ __u32 rsvd13[3];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct spraid_scsi_nonio {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_length;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union spraid_data_ptr dptr;
+ __u32 rsvd10[5];
+ __le32 buffer_len;
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct spraid_ioq_command {
+ union {
+ struct spraid_ioq_common_command common;
+ struct spraid_rw_command rw;
+ struct spraid_scsi_nonio scsi_nonio;
+ };
+};
+
+struct spraid_passthru_common_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 data_len;
+ __u16 param_len;
+ } info_1;
+ __u32 cdw3;
+ };
+ __u64 metadata;
+
+ __u64 addr;
+ __u64 prp2;
+
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct spraid_ioq_passthru_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 res_sense_len;
+ __u8 cdb_len;
+ __u8 rsvd0;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_1;
+ __u32 cdw3;
+ };
+ union {
+ struct {
+ __u16 rsvd;
+ __u16 param_len;
+ } info_2;
+ __u32 cdw4;
+ };
+ __u32 cdw5;
+ __u64 addr;
+ __u64 prp2;
+ union {
+ struct {
+ __u16 eid;
+ __u16 sid;
+ } info_3;
+ __u32 cdw10;
+ };
+ union {
+ struct {
+ __u16 did;
+ __u8 did_flag;
+ __u8 rsvd2;
+ } info_4;
+ __u32 cdw11;
+ };
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 data_len;
+ __u32 cdw16;
+ __u32 cdw17;
+ __u32 cdw18;
+ __u32 cdw19;
+ __u32 cdw20;
+ __u32 cdw21;
+ __u32 cdw22;
+ __u32 cdw23;
+ __u64 sense_addr;
+ __u32 cdw26[4];
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct spraid_bsg_request {
+ u32 msgcode;
+ u32 control;
+ union {
+ struct spraid_passthru_common_cmd admcmd;
+ struct spraid_ioq_passthru_cmd ioqcmd;
+ };
+};
+
+enum {
+ SPRAID_BSG_ADM,
+ SPRAID_BSG_IOQ,
+};
+
+struct spraid_cmd {
+ int qid;
+ int cid;
+ u32 result0;
+ u32 result1;
+ u16 status;
+ void *priv;
+ enum spraid_cmd_state state;
+ struct completion cmd_done;
+ struct list_head list;
+};
+
+struct spraid_queue {
+ struct spraid_dev *hdev;
+ spinlock_t sq_lock;
+
+ spinlock_t cq_lock ____cacheline_aligned_in_smp;
+
+ void *sq_cmds;
+
+ struct spraid_completion *cqes;
+
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ u32 __iomem *q_db;
+ u8 cq_phase;
+ u8 sqes;
+ u16 qid;
+ u16 sq_tail;
+ u16 cq_head;
+ u16 last_cq_head;
+ u16 q_depth;
+ s16 cq_vector;
+ void *sense;
+ dma_addr_t sense_dma_addr;
+ struct dma_pool *prp_small_pool;
+};
+
+struct spraid_iod {
+ struct spraid_queue *spraidq;
+ enum spraid_cmd_state state;
+ int npages;
+ u32 nsge;
+ u32 length;
+ bool use_sgl;
+ bool sg_drv_mgmt;
+ dma_addr_t first_dma;
+ void *sense;
+ dma_addr_t sense_dma;
+ struct scatterlist *sg;
+ struct scatterlist inline_sg[0];
+};
+
+#define SPRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
+#define SPRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
+#define SPRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
+#define SPRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
+
+#define SPRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
+#define SPRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+struct spraid_vd_info {
+ __u8 name[32];
+ __le16 id;
+ __u8 rg_id;
+ __u8 rg_level;
+ __u8 sg_num;
+ __u8 sg_disk_num;
+ __u8 vd_status;
+ __u8 vd_type;
+ __u8 rsvd1[4056];
+};
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+struct bgtask_info {
+ __u8 type;
+ __u8 progress;
+ __u8 rate;
+ __u8 rsvd0;
+ __le16 vd_id;
+ __le16 time_left;
+ __u8 rsvd1[4];
+};
+
+struct spraid_bgtask {
+ __u8 sw;
+ __u8 task_num;
+ __u8 rsvd[6];
+ struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
+struct spraid_dev_info {
+ __le32 hdid;
+ __le16 target;
+ __u8 channel;
+ __u8 lun;
+ __u8 attr;
+ __u8 flag;
+ __le16 max_io_kb;
+};
+
+#define MAX_DEV_ENTRY_PER_PAGE_4K 340
+struct spraid_dev_list {
+ __le32 dev_num;
+ __u32 rsvd0[3];
+ struct spraid_dev_info devices[MAX_DEV_ENTRY_PER_PAGE_4K];
+};
+
+struct spraid_sdev_hostdata {
+ u32 hdid;
+ u16 max_io_kb;
+ u8 attr;
+ u8 flag;
+ u8 rg_id;
+ u8 rsvd[3];
+};
+
+#endif
+
diff --git a/drivers/scsi/spraid/spraid_main.c b/drivers/scsi/spraid/spraid_main.c
new file mode 100644
index 000000000000..519b39f44e91
--- /dev/null
+++ b/drivers/scsi/spraid/spraid_main.c
@@ -0,0 +1,3875 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+/* Ramaxel SPRxxx Series RAID Linux Driver */
+
+#define pr_fmt(fmt) "spraid: " fmt
+
+#include <linux/sched/signal.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/once.h>
+#include <linux/debugfs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <asm/unaligned.h>
+#include <linux/sort.h>
+#include <target/target_core_backend.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+
+
+#include "spraid.h"
+
+static u32 admin_tmout = 60;
+module_param(admin_tmout, uint, 0644);
+MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
+
+static u32 scmd_tmout_nonpt = 180;
+module_param(scmd_tmout_nonpt, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_nonpt,
+ "scsi commands timeout for rawdisk&raid(seconds)");
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops ioq_depth_ops = {
+ .set = ioq_depth_set,
+ .get = param_get_uint,
+};
+
+static u32 io_queue_depth = 1024;
+module_param_cb(io_queue_depth, &ioq_depth_ops, &io_queue_depth, 0644);
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
+
+static int log_debug_switch_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+
+ return param_set_byte(val, kp);
+}
+
+static const struct kernel_param_ops log_debug_switch_ops = {
+ .set = log_debug_switch_set,
+ .get = param_get_byte,
+};
+
+static unsigned char log_debug_switch;
+module_param_cb(log_debug_switch, &log_debug_switch_ops,
+ &log_debug_switch, 0644);
+MODULE_PARM_DESC(log_debug_switch,
+		 "set log state, default 0, set non-zero to switch debug log on");
+
+static int small_pool_num_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+ if (n > MAX_SMALL_POOL_NUM)
+ n = MAX_SMALL_POOL_NUM;
+ if (n < 1)
+ n = 1;
+ *((u8 *)kp->arg) = n;
+
+ return 0;
+}
+
+static const struct kernel_param_ops small_pool_num_ops = {
+ .set = small_pool_num_set,
+ .get = param_get_byte,
+};
+
+/* It was found that the spinlock of a single pool is heavily
+ * contended by multiple CPUs, so multiple pools are introduced
+ * to reduce the contention.
+ */
+static unsigned char small_pool_num = 4;
+module_param_cb(small_pool_num, &small_pool_num_ops, &small_pool_num, 0644);
+MODULE_PARM_DESC(small_pool_num, "set prp small pool num, default 4, MAX 16");
+
+static void spraid_free_queue(struct spraid_queue *spraidq);
+static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result);
+static void spraid_handle_aen_vs(struct spraid_dev *hdev,
+ u32 result, u32 result1);
+
+static DEFINE_IDA(spraid_instance_ida);
+
+static struct class *spraid_class;
+
+#define SPRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2)
+
+static struct workqueue_struct *spraid_wq;
+
+#define dev_log_dbg(dev, fmt, ...) do { \
+ if (unlikely(log_debug_switch)) \
+ dev_info(dev, "[%s] [%d] " fmt, \
+ __func__, __LINE__, ##__VA_ARGS__); \
+} while (0)
+
+#define SPRAID_DRV_VERSION "1.0.0.0"
+
+#define ADMIN_TIMEOUT (admin_tmout * HZ)
+#define ADMIN_ERR_TIMEOUT 32757
+
+#define SPRAID_WAIT_ABNL_CMD_TIMEOUT (3 * 2)
+
+#define SPRAID_DMA_MSK_BIT_MAX 64
+
+enum FW_STAT_CODE {
+ FW_STAT_OK = 0,
+ FW_STAT_NEED_CHECK,
+ FW_STAT_ERROR,
+ FW_STAT_EP_PCIE_ERROR,
+ FW_STAT_NAC_DMA_ERROR,
+ FW_STAT_ABORTED,
+ FW_STAT_NEED_RETRY
+};
+
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60",
+ "NA"};
+
+static const char * const raid_states[] = {
+ "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED",
+ "FORMATTING", "SANITIZING", "INITIALIZING", "INITIALIZE_FAIL",
+ "DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0;
+ int ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static int spraid_remap_bar(struct spraid_dev *hdev, u32 size)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (size > pci_resource_len(pdev, 0)) {
+ dev_err(hdev->dev, "Input size[%u] exceed bar0 length[%llu]\n",
+ size, pci_resource_len(pdev, 0));
+ return -ENOMEM;
+ }
+
+ if (hdev->bar)
+ iounmap(hdev->bar);
+
+ hdev->bar = ioremap(pci_resource_start(pdev, 0), size);
+ if (!hdev->bar) {
+ dev_err(hdev->dev, "ioremap for bar0 failed\n");
+ return -ENOMEM;
+ }
+ hdev->dbs = hdev->bar + SPRAID_REG_DBS;
+
+ return 0;
+}
+
+static int spraid_dev_map(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ ret = pci_request_mem_regions(pdev, "spraid");
+ if (ret) {
+ dev_err(hdev->dev, "fail to request memory regions\n");
+ return ret;
+ }
+
+ ret = spraid_remap_bar(hdev, SPRAID_REG_DBS + 4096);
+ if (ret) {
+ pci_release_mem_regions(pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void spraid_dev_unmap(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (hdev->bar) {
+ iounmap(hdev->bar);
+ hdev->bar = NULL;
+ }
+ pci_release_mem_regions(pdev);
+}
+
+static int spraid_pci_enable(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = -ENOMEM;
+ u64 maskbit = SPRAID_DMA_MSK_BIT_MAX;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(hdev->dev,
+ "Enable pci device memory resources failed\n");
+ return ret;
+ }
+ pci_set_master(pdev);
+
+ if (readl(hdev->bar + SPRAID_REG_CSTS) == U32_MAX) {
+ ret = -ENODEV;
+ dev_err(hdev->dev, "Read csts register failed\n");
+ goto disable;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(hdev->dev,
+ "Allocate one IRQ for setup admin channel failed\n");
+ goto disable;
+ }
+
+ hdev->cap = lo_hi_readq(hdev->bar + SPRAID_REG_CAP);
+ hdev->ioq_depth = min_t(u32, SPRAID_CAP_MQES(hdev->cap) + 1,
+ io_queue_depth);
+ hdev->db_stride = 1 << SPRAID_CAP_STRIDE(hdev->cap);
+
+ maskbit = SPRAID_CAP_DMAMASK(hdev->cap);
+ if (maskbit < 32 || maskbit > SPRAID_DMA_MSK_BIT_MAX) {
+ dev_err(hdev->dev,
+ "err, dma mask invalid[%llu], set to default\n",
+ maskbit);
+ maskbit = SPRAID_DMA_MSK_BIT_MAX;
+ }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) {
+ dev_err(hdev->dev, "set dma mask and coherent failed\n");
+ goto disable;
+ }
+
+ dev_info(hdev->dev, "set dma mask[%llu] success\n", maskbit);
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_save_state(pdev);
+
+ return 0;
+
+disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static int spraid_npages_prp(u32 size, struct spraid_dev *hdev)
+{
+ u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
+
+ return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, PAGE_SIZE - PRP_ENTRY_SIZE);
+}
+
+static int spraid_npages_sgl(u32 nseg)
+{
+ return DIV_ROUND_UP(nseg * sizeof(struct spraid_sgl_desc), PAGE_SIZE);
+}
+
+static void **spraid_iod_list(struct spraid_iod *iod)
+{
+ return (void **)(iod->inline_sg + (iod->sg_drv_mgmt ? iod->nsge : 0));
+}
+
+static u32 spraid_iod_ext_size(struct spraid_dev *hdev, u32 size, u32 nsge,
+ bool sg_drv_mgmt, bool use_sgl)
+{
+ size_t alloc_size, sg_size;
+
+ if (use_sgl)
+ alloc_size = sizeof(__le64 *) * spraid_npages_sgl(nsge);
+ else
+ alloc_size = sizeof(__le64 *) * spraid_npages_prp(size, hdev);
+
+ sg_size = sg_drv_mgmt ? (sizeof(struct scatterlist) * nsge) : 0;
+ return sg_size + alloc_size;
+}
+
+static u32 spraid_cmd_size(struct spraid_dev *hdev,
+ bool sg_drv_mgmt, bool use_sgl)
+{
+ u32 alloc_size = spraid_iod_ext_size(hdev, SPRAID_INT_BYTES(hdev),
+ SPRAID_INT_PAGES, sg_drv_mgmt, use_sgl);
+
+ dev_info(hdev->dev, "sg_drv_mgmt: %s, use_sgl: %s, iod size: %lu;"
+ " alloc_size: %u\n", sg_drv_mgmt ? "true" : "false",
+ use_sgl ? "true" : "false",
+ sizeof(struct spraid_iod), alloc_size);
+
+ return sizeof(struct spraid_iod) + alloc_size;
+}
+
+static int spraid_setup_prps(struct spraid_dev *hdev, struct spraid_iod *iod)
+{
+ struct scatterlist *sg = iod->sg;
+ u64 dma_addr = sg_dma_address(sg);
+ int dma_len = sg_dma_len(sg);
+ __le64 *prp_list, *old_prp_list;
+ u32 page_size = hdev->page_size;
+ int offset = dma_addr & (page_size - 1);
+ void **list = spraid_iod_list(iod);
+ int length = iod->length;
+ struct dma_pool *pool;
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ length -= (page_size - offset);
+ if (length <= 0) {
+ iod->first_dma = 0;
+ return 0;
+ }
+
+ dma_len -= (page_size - offset);
+ if (dma_len) {
+ dma_addr += (page_size - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= page_size) {
+ iod->first_dma = dma_addr;
+ return 0;
+ }
+
+ nprps = DIV_ROUND_UP(length, page_size);
+ if (nprps <= (SMALL_POOL_SIZE / PRP_ENTRY_SIZE)) {
+ pool = iod->spraidq->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate first prp_list memory failed\n");
+ iod->first_dma = dma_addr;
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ i = 0;
+ for (;;) {
+ if (i == page_size / PRP_ENTRY_SIZE) {
+ old_prp_list = prp_list;
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(hdev->dev, "Allocate %dth;"
+ " prp_list memory failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= page_size;
+ dma_addr += page_size;
+ length -= page_size;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ return 0;
+
+bad_sgl:
+ dev_err(hdev->dev,
+ "Setup prps, invalid SGL for payload: %d nents: %d\n",
+ iod->length, iod->nsge);
+ return -EIO;
+}
+
+#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct spraid_sgl_desc))
+
+static void spraid_submit_cmd(struct spraid_queue *spraidq, const void *cmd)
+{
+ u32 sqes = SQE_SIZE(spraidq->qid);
+ unsigned long flags;
+ struct spraid_admin_common_command *acd =
+ (struct spraid_admin_common_command *)cmd;
+
+ spin_lock_irqsave(&spraidq->sq_lock, flags);
+ memcpy((spraidq->sq_cmds + sqes * spraidq->sq_tail), cmd, sqes);
+ if (++spraidq->sq_tail == spraidq->q_depth)
+ spraidq->sq_tail = 0;
+
+ writel(spraidq->sq_tail, spraidq->q_db);
+ spin_unlock_irqrestore(&spraidq->sq_lock, flags);
+
+ dev_log_dbg(spraidq->hdev->dev,
+ "cid[%d] qid[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n",
+ acd->command_id, spraidq->qid, acd->opcode,
+ acd->flags, le32_to_cpu(acd->hdid));
+}
+
+static u32 spraid_mod64(u64 dividend, u32 divisor)
+{
+ u64 d;
+ u32 remainder;
+
+ if (!divisor)
+ pr_err("DIVISOR is zero, in div fn\n");
+
+ d = dividend;
+ remainder = do_div(d, divisor);
+ return remainder;
+}
+
+static inline bool spraid_is_rw_scmd(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case READ_32:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case WRITE_32:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool spraid_is_prp(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd, u32 nsge)
+{
+ struct scatterlist *sg = scsi_sglist(scmd);
+ u32 page_size = hdev->page_size;
+ bool is_prp = true;
+ int i = 0;
+
+ scsi_for_each_sg(scmd, sg, nsge, i) {
+ if (i != 0 && i != nsge - 1) {
+ if (spraid_mod64(sg_dma_len(sg), page_size) ||
+ spraid_mod64(sg_dma_address(sg), page_size)) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == 0) {
+ if ((spraid_mod64((sg_dma_address(sg) + sg_dma_len(sg)),
+ page_size))) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == (nsge - 1)) {
+ if (spraid_mod64(sg_dma_address(sg), page_size)) {
+ is_prp = false;
+ break;
+ }
+ }
+ }
+
+ return is_prp;
+}
+
+enum {
+ SPRAID_SGL_FMT_DATA_DESC = 0x00,
+ SPRAID_SGL_FMT_SEG_DESC = 0x02,
+ SPRAID_SGL_FMT_LAST_SEG_DESC = 0x03,
+ SPRAID_KEY_SGL_FMT_DATA_DESC = 0x04,
+ SPRAID_TRANSPORT_SGL_DATA_DESC = 0x05
+};
+
+static void spraid_sgl_set_data(struct spraid_sgl_desc *sge,
+ struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = SPRAID_SGL_FMT_DATA_DESC << 4;
+}
+
+static void spraid_sgl_set_seg(struct spraid_sgl_desc *sge,
+ dma_addr_t dma_addr, int entries)
+{
+ sge->addr = cpu_to_le64(dma_addr);
+ if (entries <= SGES_PER_PAGE) {
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = SPRAID_SGL_FMT_LAST_SEG_DESC << 4;
+ } else {
+ sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->type = SPRAID_SGL_FMT_SEG_DESC << 4;
+ }
+}
+
+static int spraid_setup_ioq_cmd_sgl(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd,
+ struct spraid_ioq_command *ioq_cmd,
+ struct spraid_iod *iod)
+{
+ struct spraid_sgl_desc *sg_list, *link, *old_sg_list;
+ struct scatterlist *sg = scsi_sglist(scmd);
+ void **list = spraid_iod_list(iod);
+ struct dma_pool *pool;
+ int nsge = iod->nsge;
+ dma_addr_t sgl_dma;
+ int i = 0;
+
+ ioq_cmd->common.flags |= SPRAID_CMD_FLAG_SGL_METABUF;
+
+ if (nsge == 1) {
+ spraid_sgl_set_data(&ioq_cmd->common.dptr.sgl, sg);
+ return 0;
+ }
+
+ if (nsge <= (SMALL_POOL_SIZE / sizeof(struct spraid_sgl_desc))) {
+ pool = iod->spraidq->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate first sgl_list failed\n");
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+
+ list[0] = sg_list;
+ iod->first_dma = sgl_dma;
+ spraid_sgl_set_seg(&ioq_cmd->common.dptr.sgl, sgl_dma, nsge);
+ do {
+ if (i == SGES_PER_PAGE) {
+ old_sg_list = sg_list;
+ link = &old_sg_list[SGES_PER_PAGE - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate %dth sgl_list;"
+ " failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = sg_list;
+
+ i = 0;
+ memcpy(&sg_list[i++], link, sizeof(*link));
+ spraid_sgl_set_seg(link, sgl_dma, nsge);
+ }
+
+ spraid_sgl_set_data(&sg_list[i++], sg);
+ sg = sg_next(sg);
+ } while (--nsge > 0);
+
+ return 0;
+}
+
+#define SPRAID_RW_FUA BIT(14)
+
+static void spraid_setup_rw_cmd(struct spraid_dev *hdev,
+ struct spraid_rw_command *rw,
+ struct scsi_cmnd *scmd)
+{
+ u32 start_lba_lo, start_lba_hi;
+ u32 datalength = 0;
+ u16 control = 0;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ rw->opcode = SPRAID_CMD_WRITE;
+ } else if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
+ rw->opcode = SPRAID_CMD_READ;
+ } else {
+ dev_err(hdev->dev,
+ "Invalid IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ }
+
+ /* 6-byte READ(0x08) or WRITE(0x0A) cdb */
+ if (scmd->cmd_len == 6) {
+ datalength = (u32)(scmd->cmnd[4] == 0 ?
+ IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
+ start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
+
+ start_lba_lo &= 0x1FFFFF;
+ }
+
+ /* 10-byte READ(0x28) or WRITE(0x2A) cdb */
+ else if (scmd->cmd_len == 10) {
+ datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+
+ /* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
+ else if (scmd->cmd_len == 12) {
+ datalength = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+ /* 16-byte READ(0x88) or WRITE(0x8A) cdb */
+ else if (scmd->cmd_len == 16) {
+ datalength = get_unaligned_be32(&scmd->cmnd[10]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+	/* 32-byte READ(32) or WRITE(32) cdb */
+ else if (scmd->cmd_len == 32) {
+ datalength = get_unaligned_be32(&scmd->cmnd[28]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[16]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[12]);
+
+ if (scmd->cmnd[10] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+
+ if (unlikely(datalength > U16_MAX || datalength == 0)) {
+ dev_err(hdev->dev,
+ "Invalid IO for illegal transfer data length: %u\n",
+ datalength);
+ WARN_ON(1);
+ }
+
+ rw->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo);
+ /* 0base for nlb */
+ rw->nlb = cpu_to_le16((u16)(datalength - 1));
+ rw->control = cpu_to_le16(control);
+}
+
+static void spraid_setup_nonio_cmd(struct spraid_dev *hdev,
+ struct spraid_scsi_nonio *scsi_nonio,
+ struct scsi_cmnd *scmd)
+{
+ scsi_nonio->buffer_len = cpu_to_le32(scsi_bufflen(scmd));
+
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_NONE;
+ break;
+ case DMA_TO_DEVICE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_TODEV;
+ break;
+ case DMA_FROM_DEVICE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_FROMDEV;
+ break;
+ default:
+ dev_err(hdev->dev,
+ "Invalid IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ }
+}
+
+static void spraid_setup_ioq_cmd(struct spraid_dev *hdev,
+ struct spraid_ioq_command *ioq_cmd,
+ struct scsi_cmnd *scmd)
+{
+ memcpy(ioq_cmd->common.cdb, scmd->cmnd, scmd->cmd_len);
+ ioq_cmd->common.cdb_len = scmd->cmd_len;
+
+ if (spraid_is_rw_scmd(scmd))
+ spraid_setup_rw_cmd(hdev, &ioq_cmd->rw, scmd);
+ else
+ spraid_setup_nonio_cmd(hdev, &ioq_cmd->scsi_nonio, scmd);
+}
+
+static int spraid_init_iod(struct spraid_dev *hdev, struct spraid_iod *iod,
+ struct spraid_ioq_command *ioq_cmd,
+ struct scsi_cmnd *scmd)
+{
+ if (unlikely(!iod->sense)) {
+ dev_err(hdev->dev, "Allocate sense data buffer failed\n");
+ return -ENOMEM;
+ }
+ ioq_cmd->common.sense_addr = cpu_to_le64(iod->sense_dma);
+ ioq_cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+
+ iod->nsge = 0;
+ iod->npages = -1;
+ iod->use_sgl = 0;
+ iod->sg_drv_mgmt = false;
+ WRITE_ONCE(iod->state, SPRAID_CMD_IDLE);
+
+ return 0;
+}
+
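+/*
+ * Release the PRP/SGL list pages of an iod. Each list page stores the DMA
+ * address of the next page in its last entry, so the chain is walked and
+ * every page is returned to its dma_pool; a driver-managed scatterlist is
+ * given back to the iod mempool as well.
+ */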
+static void spraid_free_iod_res(struct spraid_dev *hdev, struct spraid_iod *iod)
+{
+ const int last_prp = hdev->page_size / sizeof(__le64) - 1;
+ dma_addr_t dma_addr, next_dma_addr;
+ struct spraid_sgl_desc *sg_list;
+ __le64 *prp_list;
+ void *addr;
+ int i;
+
+ dma_addr = iod->first_dma;
+ if (iod->npages == 0)
+ dma_pool_free(iod->spraidq->prp_small_pool,
+ spraid_iod_list(iod)[0], dma_addr);
+
+ for (i = 0; i < iod->npages; i++) {
+ addr = spraid_iod_list(iod)[i];
+
+ if (iod->use_sgl) {
+ sg_list = addr;
+ next_dma_addr =
+ le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+ } else {
+ prp_list = addr;
+ next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+ }
+
+ dma_pool_free(hdev->prp_page_pool, addr, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+
+ if (iod->sg_drv_mgmt && iod->sg != iod->inline_sg) {
+ iod->sg_drv_mgmt = false;
+ mempool_free(iod->sg, hdev->iod_mempool);
+ }
+
+ iod->sense = NULL;
+ iod->npages = -1;
+}
+
+static int spraid_io_map_data(struct spraid_dev *hdev, struct spraid_iod *iod,
+ struct scsi_cmnd *scmd,
+ struct spraid_ioq_command *ioq_cmd)
+{
+ int ret;
+
+ iod->nsge = scsi_dma_map(scmd);
+
+ /* No data to DMA; this may be a SCSI command without a data transfer */
+ if (unlikely(iod->nsge == 0))
+ return 0;
+
+ iod->length = scsi_bufflen(scmd);
+ iod->sg = scsi_sglist(scmd);
+ iod->use_sgl = !spraid_is_prp(hdev, scmd, iod->nsge);
+
+ if (iod->use_sgl) {
+ ret = spraid_setup_ioq_cmd_sgl(hdev, scmd, ioq_cmd, iod);
+ } else {
+ ret = spraid_setup_prps(hdev, iod);
+ ioq_cmd->common.dptr.prp1 =
+ cpu_to_le64(sg_dma_address(iod->sg));
+ ioq_cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+ }
+
+ if (ret)
+ scsi_dma_unmap(scmd);
+
+ return ret;
+}
+
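+/*
+ * Translate the firmware completion status into the SCSI midlayer result,
+ * copying sense data back into scmd when the device reports CHECK CONDITION.
+ */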
+static void spraid_map_status(struct spraid_iod *iod, struct scsi_cmnd *scmd,
+ struct spraid_completion *cqe)
+{
+ scsi_set_resid(scmd, 0);
+
+ switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) {
+ case FW_STAT_OK:
+ set_host_byte(scmd, DID_OK);
+ break;
+ case FW_STAT_NEED_CHECK:
+ set_host_byte(scmd, DID_OK);
+ scmd->result |= le16_to_cpu(cqe->status) >> 8;
+ if (scmd->result & SAM_STAT_CHECK_CONDITION) {
+ memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(scmd->sense_buffer, iod->sense,
+ SCSI_SENSE_BUFFERSIZE);
+ scmd->result =
+ (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
+ }
+ break;
+ case FW_STAT_ABORTED:
+ set_host_byte(scmd, DID_ABORT);
+ break;
+ case FW_STAT_NEED_RETRY:
+ set_host_byte(scmd, DID_REQUEUE);
+ break;
+ default:
+ set_host_byte(scmd, DID_BAD_TARGET);
+ break;
+ }
+}
+
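+/*
+ * Derive the hardware queue id and command id from the blk-mq unique tag;
+ * hwq is offset by one because queue 0 is the admin queue.
+ */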
+static inline void spraid_get_tag_from_scmd(struct scsi_cmnd *scmd,
+ u16 *qid, u16 *cid)
+{
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
+ *qid = blk_mq_unique_tag_to_hwq(tag) + 1;
+ *cid = blk_mq_unique_tag_to_tag(tag);
+}
+
+static int spraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_dev *hdev = shost_priv(shost);
+ struct scsi_device *sdev = scmd->device;
+ struct spraid_sdev_hostdata *hostdata;
+ struct spraid_ioq_command ioq_cmd;
+ struct spraid_queue *ioq;
+ unsigned long elapsed;
+ u16 hwq, cid;
+ int ret;
+
+ if (unlikely(!scmd)) {
+ dev_err(hdev->dev, "err, scmd is null\n");
+ return 0;
+ }
+
+ if (unlikely(hdev->state != SPRAID_LIVE)) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (log_debug_switch)
+ scsi_print_command(scmd);
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ hostdata = sdev->hostdata;
+ ioq = &hdev->queues[hwq];
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.rw.hdid = cpu_to_le32(hostdata->hdid);
+ ioq_cmd.rw.command_id = cid;
+
+ spraid_setup_ioq_cmd(hdev, &ioq_cmd, scmd);
+
+ ret = cid * SCSI_SENSE_BUFFERSIZE;
+ iod->sense = ioq->sense + ret;
+ iod->sense_dma = ioq->sense_dma_addr + ret;
+
+ ret = spraid_init_iod(hdev, iod, &ioq_cmd, scmd);
+ if (unlikely(ret))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ iod->spraidq = ioq;
+ ret = spraid_io_map_data(hdev, iod, scmd, &ioq_cmd);
+ if (unlikely(ret)) {
+ dev_err(hdev->dev, "spraid_io_map_data Err.\n");
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ ret = 0;
+ goto deinit_iod;
+ }
+
+ WRITE_ONCE(iod->state, SPRAID_CMD_IN_FLIGHT);
+ spraid_submit_cmd(ioq, &ioq_cmd);
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dev_log_dbg(hdev->dev,
+ "cid[%d] qid[%d] submit IO cost %3ld.%3ld seconds\n",
+ cid, hwq, elapsed / HZ, elapsed % HZ);
+ return 0;
+
+deinit_iod:
+ spraid_free_iod_res(hdev, iod);
+ return ret;
+}
+
+static int spraid_match_dev(struct spraid_dev *hdev, u16 idx,
+ struct scsi_device *sdev)
+{
+ if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[idx].flag)) {
+ if (sdev->channel == hdev->devices[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+ sdev->lun < hdev->devices[idx].lun) {
+ dev_info(hdev->dev,
+ "Match device success, channel;"
+ "target:lun[%d:%d:%d]\n",
+ hdev->devices[idx].channel,
+ hdev->devices[idx].target,
+ hdev->devices[idx].lun);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int spraid_slave_alloc(struct scsi_device *sdev)
+{
+ struct spraid_sdev_hostdata *hostdata;
+ struct spraid_dev *hdev;
+ u16 idx;
+
+ hdev = shost_priv(sdev->host);
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+ if (!hostdata) {
+ dev_err(hdev->dev, "Alloc scsi host data memory failed\n");
+ return -ENOMEM;
+ }
+
+ down_read(&hdev->devices_rwsem);
+ for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) {
+ if (spraid_match_dev(hdev, idx, sdev))
+ goto scan_host;
+ }
+ up_read(&hdev->devices_rwsem);
+
+ kfree(hostdata);
+ return -ENXIO;
+
+scan_host:
+ hostdata->hdid = le32_to_cpu(hdev->devices[idx].hdid);
+ hostdata->max_io_kb = le16_to_cpu(hdev->devices[idx].max_io_kb);
+ hostdata->attr = hdev->devices[idx].attr;
+ hostdata->flag = hdev->devices[idx].flag;
+ hostdata->rg_id = 0xff;
+ sdev->hostdata = hostdata;
+ up_read(&hdev->devices_rwsem);
+ return 0;
+}
+
+static void spraid_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+static int spraid_slave_configure(struct scsi_device *sdev)
+{
+ u16 idx;
+ unsigned int timeout = scmd_tmout_nonpt * HZ;
+ struct spraid_dev *hdev = shost_priv(sdev->host);
+ struct spraid_sdev_hostdata *hostdata = sdev->hostdata;
+ u32 max_sec = sdev->host->max_sectors;
+
+ if (hostdata) {
+ idx = hostdata->hdid - 1;
+ if (sdev->channel == hdev->devices[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+ sdev->lun < hdev->devices[idx].lun) {
+ if (SPRAID_DEV_INFO_ATTR_PT(hdev->devices[idx].attr))
+ timeout = 30 * HZ;
+ else
+ timeout = scmd_tmout_nonpt * HZ;
+ max_sec = le16_to_cpu(hdev->devices[idx].max_io_kb)
+ << 1;
+ } else {
+ dev_err(hdev->dev,
+ "[%s] err, sdev->channel:id:lun[%d:%d:%lld];"
+ "devices[%d], channel:target:lun[%d:%d:%d]\n",
+ __func__, sdev->channel, sdev->id, sdev->lun,
+ idx, hdev->devices[idx].channel,
+ hdev->devices[idx].target,
+ hdev->devices[idx].lun);
+ }
+ } else {
+ dev_err(hdev->dev, "[%s] err, sdev->hostdata is null\n",
+ __func__);
+ }
+
+ blk_queue_rq_timeout(sdev->request_queue, timeout);
+ sdev->eh_timeout = timeout;
+
+ if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
+ max_sec = sdev->host->max_sectors;
+
+ dev_info(hdev->dev,
+ "[%s] sdev->channel:id:lun[%d:%d:%lld];"
+ " scmd_timeout[%d]s, maxsec[%d]\n",
+ __func__, sdev->channel, sdev->id,
+ sdev->lun, timeout / HZ, max_sec);
+
+ return 0;
+}
+
+static void spraid_shost_init(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u8 domain, bus;
+ u32 dev_func;
+
+ domain = pci_domain_nr(pdev->bus);
+ bus = pdev->bus->number;
+ dev_func = pdev->devfn;
+
+ hdev->shost->nr_hw_queues = hdev->online_queues - 1;
+ hdev->shost->can_queue = (hdev->ioq_depth - SPRAID_PTCMDS_PERQ);
+
+ hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ /* 512B per sector */
+ hdev->shost->max_sectors =
+ (1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512;
+ hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV;
+ hdev->shost->max_channel =
+ le16_to_cpu(hdev->ctrl_info->max_channel) - 1;
+ hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id);
+ hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun);
+
+ hdev->shost->this_id = -1;
+ hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func;
+ hdev->shost->max_cmd_len = MAX_CDB_LEN;
+ hdev->shost->hostt->cmd_size = max(spraid_cmd_size(hdev, false, true),
+ spraid_cmd_size(hdev, false, false));
+}
+
+static inline void spraid_host_deinit(struct spraid_dev *hdev)
+{
+ ida_free(&spraid_instance_ida, hdev->instance);
+}
+
+static int spraid_alloc_queue(struct spraid_dev *hdev, u16 qid, u16 depth)
+{
+ struct spraid_queue *spraidq = &hdev->queues[qid];
+ int ret = 0;
+
+ if (hdev->queue_count > qid) {
+ dev_info(hdev->dev, "[%s] warn: queue[%d] is exist\n",
+ __func__, qid);
+ return 0;
+ }
+
+ spraidq->cqes = dma_alloc_coherent(hdev->dev, CQ_SIZE(depth),
+ &spraidq->cq_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!spraidq->cqes)
+ return -ENOMEM;
+
+ spraidq->sq_cmds = dma_alloc_coherent(hdev->dev, SQ_SIZE(qid, depth),
+ &spraidq->sq_dma_addr,
+ GFP_KERNEL);
+ if (!spraidq->sq_cmds) {
+ ret = -ENOMEM;
+ goto free_cqes;
+ }
+
+ spin_lock_init(&spraidq->sq_lock);
+ spin_lock_init(&spraidq->cq_lock);
+ spraidq->hdev = hdev;
+ spraidq->q_depth = depth;
+ spraidq->qid = qid;
+ spraidq->cq_vector = -1;
+ hdev->queue_count++;
+
+ /* alloc sense buffer */
+ spraidq->sense = dma_alloc_coherent(hdev->dev, SENSE_SIZE(depth),
+ &spraidq->sense_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!spraidq->sense) {
+ ret = -ENOMEM;
+ goto free_sq_cmds;
+ }
+
+ return 0;
+
+free_sq_cmds:
+ dma_free_coherent(hdev->dev, SQ_SIZE(qid, depth),
+ (void *)spraidq->sq_cmds, spraidq->sq_dma_addr);
+free_cqes:
+ dma_free_coherent(hdev->dev, CQ_SIZE(depth), (void *)spraidq->cqes,
+ spraidq->cq_dma_addr);
+ return ret;
+}
+
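+/*
+ * Poll CSTS.RDY until it matches the expected state or the timeout
+ * advertised in the CAP register expires.
+ */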
+static int spraid_wait_ready(struct spraid_dev *hdev, u64 cap, bool enabled)
+{
+ unsigned long timeout =
+ ((SPRAID_CAP_TIMEOUT(cap) + 1) * SPRAID_CAP_TIMEOUT_UNIT_MS) + jiffies;
+ u32 bit = enabled ? SPRAID_CSTS_RDY : 0;
+
+ while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY) != bit) {
+ usleep_range(1000, 2000);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev, "Device not ready; aborting %s\n",
+ enabled ? "initialisation" : "reset");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int spraid_shutdown_ctrl(struct spraid_dev *hdev)
+{
+ unsigned long timeout = hdev->ctrl_info->rtd3e + jiffies;
+
+ hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK;
+ hdev->ctrl_config |= SPRAID_CC_SHN_NORMAL;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_SHST_MASK) !=
+ SPRAID_CSTS_SHST_CMPLT) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev,
+ "Device shutdown incomplete; abort shutdown\n");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int spraid_disable_ctrl(struct spraid_dev *hdev)
+{
+ hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK;
+ hdev->ctrl_config &= ~SPRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ return spraid_wait_ready(hdev, hdev->cap, false);
+}
+
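+/*
+ * Validate the host page size against the CAP limits, program CC with the
+ * page size, arbitration and queue entry sizes, set the enable bit and wait
+ * for the controller to report ready.
+ */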
+static int spraid_enable_ctrl(struct spraid_dev *hdev)
+{
+ u64 cap = hdev->cap;
+ u32 dev_page_min = SPRAID_CAP_MPSMIN(cap) + 12;
+ u32 page_shift = PAGE_SHIFT;
+
+ if (page_shift < dev_page_min) {
+ dev_err(hdev->dev,
+ "Minimum device page size[%u], too large for host[%u]\n",
+ 1U << dev_page_min, 1U << page_shift);
+ return -ENODEV;
+ }
+
+ page_shift = min_t(unsigned int, SPRAID_CAP_MPSMAX(cap) + 12,
+ PAGE_SHIFT);
+ hdev->page_size = 1U << page_shift;
+
+ hdev->ctrl_config = SPRAID_CC_CSS_NVM;
+ hdev->ctrl_config |= (page_shift - 12) << SPRAID_CC_MPS_SHIFT;
+ hdev->ctrl_config |= SPRAID_CC_AMS_RR | SPRAID_CC_SHN_NONE;
+ hdev->ctrl_config |= SPRAID_CC_IOSQES | SPRAID_CC_IOCQES;
+ hdev->ctrl_config |= SPRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ return spraid_wait_ready(hdev, cap, true);
+}
+
+static void spraid_init_queue(struct spraid_queue *spraidq, u16 qid)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ memset((void *)spraidq->cqes, 0, CQ_SIZE(spraidq->q_depth));
+
+ spraidq->sq_tail = 0;
+ spraidq->cq_head = 0;
+ spraidq->cq_phase = 1;
+ spraidq->q_db = &hdev->dbs[qid * 2 * hdev->db_stride];
+ spraidq->prp_small_pool = hdev->prp_small_pool[qid % small_pool_num];
+ hdev->online_queues++;
+}
+
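+/* A CQ entry is new when its phase bit matches the queue's current phase. */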
+static inline bool spraid_cqe_pending(struct spraid_queue *spraidq)
+{
+ return (le16_to_cpu(spraidq->cqes[spraidq->cq_head].status) & 1) ==
+ spraidq->cq_phase;
+}
+
+static void spraid_sata_report_zone_handle(struct scsi_cmnd *scmd,
+ struct spraid_iod *iod)
+{
+ int i = 0;
+ unsigned int bytes = 0;
+ struct scatterlist *sg = scsi_sglist(scmd);
+
+ scsi_for_each_sg(scmd, sg, iod->nsge, i) {
+ unsigned int offset = 0;
+
+ if (bytes == 0) {
+ char *hdr;
+ u32 list_length;
+ u64 max_lba, opt_lba;
+ u16 same;
+
+ hdr = sg_virt(sg);
+
+ list_length = get_unaligned_le32(&hdr[0]);
+ same = get_unaligned_le16(&hdr[4]);
+ max_lba = get_unaligned_le64(&hdr[8]);
+ opt_lba = get_unaligned_le64(&hdr[16]);
+ put_unaligned_be32(list_length, &hdr[0]);
+ hdr[4] = same & 0xf;
+ put_unaligned_be64(max_lba, &hdr[8]);
+ put_unaligned_be64(opt_lba, &hdr[16]);
+ offset += 64;
+ bytes += 64;
+ }
+ while (offset < sg_dma_len(sg)) {
+ char *rec;
+ u8 cond, type, non_seq, reset;
+ u64 size, start, wp;
+
+ rec = sg_virt(sg) + offset;
+ type = rec[0] & 0xf;
+ cond = (rec[1] >> 4) & 0xf;
+ non_seq = (rec[1] & 2);
+ reset = (rec[1] & 1);
+ size = get_unaligned_le64(&rec[8]);
+ start = get_unaligned_le64(&rec[16]);
+ wp = get_unaligned_le64(&rec[24]);
+ rec[0] = type;
+ rec[1] = (cond << 4) | non_seq | reset;
+ put_unaligned_be64(size, &rec[8]);
+ put_unaligned_be64(start, &rec[16]);
+ put_unaligned_be64(wp, &rec[24]);
+ WARN_ON(offset + 64 > sg_dma_len(sg));
+ offset += 64;
+ bytes += 64;
+ }
+ }
+}
+
+static inline void spraid_handle_ata_cmd(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd,
+ struct spraid_iod *iod)
+{
+ if (hdev->ctrl_info->card_type != SPRAID_CARD_HBA)
+ return;
+
+ switch (scmd->cmnd[0]) {
+ case ZBC_IN:
+ dev_info(hdev->dev, "[%s] process report zone\n", __func__);
+ spraid_sata_report_zone_handle(scmd, iod);
+ break;
+ default:
+ break;
+ }
+}
+
+static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct spraid_iod *iod;
+ struct request *req;
+ unsigned long elapsed;
+
+ tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+ req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
+ if (unlikely(!req || !blk_mq_request_started(req))) {
+ dev_warn(hdev->dev, "Invalid id %d completed on queue %d\n",
+ cqe->cmd_id, ioq->qid);
+ return;
+ }
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dev_log_dbg(hdev->dev,
+ "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n",
+ cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
+
+ if (cmpxchg(&iod->state, SPRAID_CMD_IN_FLIGHT, SPRAID_CMD_COMPLETE) !=
+ SPRAID_CMD_IN_FLIGHT) {
+ dev_warn(hdev->dev,
+ "cid[%d] qid[%d] enters abnormal handler;"
+ " cost %3ld.%3ld seconds\n",
+ cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
+ WRITE_ONCE(iod->state, SPRAID_CMD_TMO_COMPLETE);
+
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ spraid_free_iod_res(hdev, iod);
+
+ return;
+ }
+
+ spraid_handle_ata_cmd(hdev, scmd, iod);
+
+ spraid_map_status(iod, scmd, cqe);
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ spraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+}
+
+static void spraid_complete_adminq_cmnd(struct spraid_queue *adminq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = adminq->hdev;
+ struct spraid_cmd *adm_cmd;
+
+ adm_cmd = hdev->adm_cmds + cqe->cmd_id;
+ if (unlikely(adm_cmd->state == SPRAID_CMD_IDLE)) {
+ dev_warn(adminq->hdev->dev,
+ "Invalid id %d completed on queue %d\n",
+ cqe->cmd_id, le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+ adm_cmd->result0 = le32_to_cpu(cqe->result);
+ adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&adm_cmd->cmd_done);
+}
+
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid);
+
+static void spraid_complete_aen(struct spraid_queue *spraidq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+ u32 result = le32_to_cpu(cqe->result);
+
+ dev_info(hdev->dev, "rcv aen, cid[%d], status[0x%x], result[0x%x]\n",
+ cqe->cmd_id, le16_to_cpu(cqe->status) >> 1, result);
+
+ spraid_send_aen(hdev, cqe->cmd_id);
+
+ if ((le16_to_cpu(cqe->status) >> 1) != SPRAID_SC_SUCCESS)
+ return;
+ switch (result & 0x7) {
+ case SPRAID_AEN_NOTICE:
+ spraid_handle_aen_notice(hdev, result);
+ break;
+ case SPRAID_AEN_VS:
+ spraid_handle_aen_vs(hdev, result, le32_to_cpu(cqe->result1));
+ break;
+ default:
+ dev_warn(hdev->dev, "Unsupported async event type: %u\n",
+ result & 0x7);
+ break;
+ }
+}
+
+static void spraid_complete_ioq_sync_cmnd(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct spraid_cmd *ptcmd;
+
+ ptcmd = hdev->ioq_ptcmds + (ioq->qid - 1) * SPRAID_PTCMDS_PERQ +
+ cqe->cmd_id - SPRAID_IO_BLK_MQ_DEPTH;
+
+ ptcmd->status = le16_to_cpu(cqe->status) >> 1;
+ ptcmd->result0 = le32_to_cpu(cqe->result);
+ ptcmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&ptcmd->cmd_done);
+}
+
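+/*
+ * Dispatch one completion entry: command ids above the blk-mq depth belong
+ * to async events (admin queue) or synchronous passthrough commands (I/O
+ * queues); everything else completes a regular admin or SCSI I/O command.
+ */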
+static inline void spraid_handle_cqe(struct spraid_queue *spraidq, u16 idx)
+{
+ struct spraid_completion *cqe = &spraidq->cqes[idx];
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ if (unlikely(cqe->cmd_id >= spraidq->q_depth)) {
+ dev_err(hdev->dev,
+ "Invalid command id[%d] completed on queue %d\n",
+ cqe->cmd_id, cqe->sq_id);
+ return;
+ }
+
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d];"
+ " result[0x%x], sq_id[%d], status[0x%x]\n",
+ cqe->cmd_id, spraidq->qid, le32_to_cpu(cqe->result),
+ le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
+
+ if (unlikely(spraidq->qid == 0
+ && cqe->cmd_id >= SPRAID_AQ_BLK_MQ_DEPTH)) {
+ spraid_complete_aen(spraidq, cqe);
+ return;
+ }
+
+ if (unlikely(spraidq->qid && cqe->cmd_id >= SPRAID_IO_BLK_MQ_DEPTH)) {
+ spraid_complete_ioq_sync_cmnd(spraidq, cqe);
+ return;
+ }
+
+ if (spraidq->qid)
+ spraid_complete_ioq_cmnd(spraidq, cqe);
+ else
+ spraid_complete_adminq_cmnd(spraidq, cqe);
+}
+
+static void spraid_complete_cqes(struct spraid_queue *spraidq,
+ u16 start, u16 end)
+{
+ while (start != end) {
+ spraid_handle_cqe(spraidq, start);
+ if (++start == spraidq->q_depth)
+ start = 0;
+ }
+}
+
+static inline void spraid_update_cq_head(struct spraid_queue *spraidq)
+{
+ if (++spraidq->cq_head == spraidq->q_depth) {
+ spraidq->cq_head = 0;
+ spraidq->cq_phase = !spraidq->cq_phase;
+ }
+}
+
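+/*
+ * Consume pending CQ entries, advancing cq_head and flipping the phase on
+ * wrap, stopping early when the requested tag is seen. The new head is
+ * written to the CQ doorbell and [start, end) is handed back so completions
+ * can be processed outside the cq_lock.
+ */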
+static inline bool spraid_process_cq(struct spraid_queue *spraidq,
+ u16 *start, u16 *end, int tag)
+{
+ bool found = false;
+
+ *start = spraidq->cq_head;
+ while (!found && spraid_cqe_pending(spraidq)) {
+ if (spraidq->cqes[spraidq->cq_head].cmd_id == tag)
+ found = true;
+ spraid_update_cq_head(spraidq);
+ }
+ *end = spraidq->cq_head;
+
+ if (*start != *end)
+ writel(spraidq->cq_head,
+ spraidq->q_db + spraidq->hdev->db_stride);
+
+ return found;
+}
+
+static bool spraid_poll_cq(struct spraid_queue *spraidq, int cid)
+{
+ u16 start, end;
+ bool found;
+
+ if (!spraid_cqe_pending(spraidq))
+ return 0;
+
+ spin_lock_irq(&spraidq->cq_lock);
+ found = spraid_process_cq(spraidq, &start, &end, cid);
+ spin_unlock_irq(&spraidq->cq_lock);
+
+ spraid_complete_cqes(spraidq, start, end);
+ return found;
+}
+
+static irqreturn_t spraid_irq(int irq, void *data)
+{
+ struct spraid_queue *spraidq = data;
+ irqreturn_t ret = IRQ_NONE;
+ u16 start, end;
+
+ spin_lock(&spraidq->cq_lock);
+ if (spraidq->cq_head != spraidq->last_cq_head)
+ ret = IRQ_HANDLED;
+
+ spraid_process_cq(spraidq, &start, &end, -1);
+ spraidq->last_cq_head = spraidq->cq_head;
+ spin_unlock(&spraidq->cq_lock);
+
+ if (start != end) {
+ spraid_complete_cqes(spraidq, start, end);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+static int spraid_setup_admin_queue(struct spraid_dev *hdev)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u32 aqa;
+ int ret;
+
+ dev_info(hdev->dev, "[%s] start disable ctrl\n", __func__);
+
+ ret = spraid_disable_ctrl(hdev);
+ if (ret)
+ return ret;
+
+ ret = spraid_alloc_queue(hdev, 0, SPRAID_AQ_DEPTH);
+ if (ret)
+ return ret;
+
+ aqa = adminq->q_depth - 1;
+ aqa |= aqa << 16;
+ writel(aqa, hdev->bar + SPRAID_REG_AQA);
+ lo_hi_writeq(adminq->sq_dma_addr, hdev->bar + SPRAID_REG_ASQ);
+ lo_hi_writeq(adminq->cq_dma_addr, hdev->bar + SPRAID_REG_ACQ);
+
+ dev_info(hdev->dev, "[%s] start enable ctrl\n", __func__);
+
+ ret = spraid_enable_ctrl(hdev);
+ if (ret) {
+ ret = -ENODEV;
+ goto free_queue;
+ }
+
+ adminq->cq_vector = 0;
+ spraid_init_queue(adminq, 0);
+ ret = pci_request_irq(hdev->pdev, adminq->cq_vector, spraid_irq, NULL,
+ adminq, "spraid%d_q%d",
+ hdev->instance, adminq->qid);
+
+ if (ret) {
+ adminq->cq_vector = -1;
+ hdev->online_queues--;
+ goto free_queue;
+ }
+
+ dev_info(hdev->dev, "[%s] success, queuecount:[%d], onlinequeue:[%d]\n",
+ __func__, hdev->queue_count, hdev->online_queues);
+
+ return 0;
+
+free_queue:
+ spraid_free_queue(adminq);
+ return ret;
+}
+
+static u32 spraid_bar_size(struct spraid_dev *hdev, u32 nr_ioqs)
+{
+ return (SPRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride));
+}
+
+static int spraid_alloc_admin_cmds(struct spraid_dev *hdev)
+{
+ int i;
+
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+ spin_lock_init(&hdev->adm_cmd_lock);
+
+ hdev->adm_cmds = kcalloc_node(SPRAID_AQ_BLK_MQ_DEPTH,
+ sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->adm_cmds) {
+ dev_err(hdev->dev, "Alloc admin cmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SPRAID_AQ_BLK_MQ_DEPTH; i++) {
+ hdev->adm_cmds[i].qid = 0;
+ hdev->adm_cmds[i].cid = i;
+ list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
+ }
+
+ dev_info(hdev->dev, "Alloc admin cmds success, num[%d]\n",
+ SPRAID_AQ_BLK_MQ_DEPTH);
+
+ return 0;
+}
+
+static void spraid_free_admin_cmds(struct spraid_dev *hdev)
+{
+ kfree(hdev->adm_cmds);
+ hdev->adm_cmds = NULL;
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
+static struct spraid_cmd *spraid_get_cmd(struct spraid_dev *hdev,
+ enum spraid_cmd_type type)
+{
+ struct spraid_cmd *cmd = NULL;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(slock, flags);
+ dev_err(hdev->dev, "err, cmd[%d] list empty\n", type);
+ return NULL;
+ }
+ cmd = list_entry(head->next, struct spraid_cmd, list);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(slock, flags);
+
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IN_FLIGHT);
+
+ return cmd;
+}
+
+static void spraid_put_cmd(struct spraid_dev *hdev, struct spraid_cmd *cmd,
+ enum spraid_cmd_type type)
+{
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IDLE);
+ list_add_tail(&cmd->list, head);
+ spin_unlock_irqrestore(slock, flags);
+}
+
+
+static int spraid_submit_admin_sync_cmd(struct spraid_dev *hdev,
+ struct spraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout)
+{
+ struct spraid_cmd *adm_cmd = spraid_get_cmd(hdev, SPRAID_CMD_ADM);
+
+ if (!adm_cmd) {
+ dev_err(hdev->dev, "err, get admin cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&adm_cmd->cmd_done);
+
+ cmd->common.command_id = adm_cmd->cid;
+ spraid_submit_cmd(&hdev->queues[0], cmd);
+
+ if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout;"
+ " opcode[0x%x] subopcode[0x%x]\n",
+ __func__, adm_cmd->cid, adm_cmd->qid,
+ cmd->usr_cmd.opcode, cmd->usr_cmd.info_0.subopcode);
+ WRITE_ONCE(adm_cmd->state, SPRAID_CMD_TIMEOUT);
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+ return -EINVAL;
+ }
+
+ if (result0)
+ *result0 = adm_cmd->result0;
+ if (result1)
+ *result1 = adm_cmd->result1;
+
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+
+ return adm_cmd->status;
+}
+
+static int spraid_create_cq(struct spraid_dev *hdev, u16 qid,
+ struct spraid_queue *spraidq, u16 cq_vector)
+{
+ struct spraid_admin_command admin_cmd;
+ int flags = SPRAID_QUEUE_PHYS_CONTIG | SPRAID_CQ_IRQ_ENABLED;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_cq.opcode = SPRAID_ADMIN_CREATE_CQ;
+ admin_cmd.create_cq.prp1 = cpu_to_le64(spraidq->cq_dma_addr);
+ admin_cmd.create_cq.cqid = cpu_to_le16(qid);
+ admin_cmd.create_cq.qsize = cpu_to_le16(spraidq->q_depth - 1);
+ admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
+ admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static int spraid_create_sq(struct spraid_dev *hdev, u16 qid,
+ struct spraid_queue *spraidq)
+{
+ struct spraid_admin_command admin_cmd;
+ int flags = SPRAID_QUEUE_PHYS_CONTIG;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_sq.opcode = SPRAID_ADMIN_CREATE_SQ;
+ admin_cmd.create_sq.prp1 = cpu_to_le64(spraidq->sq_dma_addr);
+ admin_cmd.create_sq.sqid = cpu_to_le16(qid);
+ admin_cmd.create_sq.qsize = cpu_to_le16(spraidq->q_depth - 1);
+ admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
+ admin_cmd.create_sq.cqid = cpu_to_le16(qid);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static void spraid_free_queue(struct spraid_queue *spraidq)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ hdev->queue_count--;
+ dma_free_coherent(hdev->dev, CQ_SIZE(spraidq->q_depth),
+ (void *)spraidq->cqes, spraidq->cq_dma_addr);
+ dma_free_coherent(hdev->dev, SQ_SIZE(spraidq->qid, spraidq->q_depth),
+ spraidq->sq_cmds, spraidq->sq_dma_addr);
+ dma_free_coherent(hdev->dev, SENSE_SIZE(spraidq->q_depth),
+ spraidq->sense, spraidq->sense_dma_addr);
+}
+
+static void spraid_free_admin_queue(struct spraid_dev *hdev)
+{
+ spraid_free_queue(&hdev->queues[0]);
+}
+
+static void spraid_free_io_queues(struct spraid_dev *hdev)
+{
+ int i;
+
+ for (i = hdev->queue_count - 1; i >= 1; i--)
+ spraid_free_queue(&hdev->queues[i]);
+}
+
+static int spraid_delete_queue(struct spraid_dev *hdev, u8 op, u16 id)
+{
+ struct spraid_admin_command admin_cmd;
+ int ret;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.delete_queue.opcode = op;
+ admin_cmd.delete_queue.qid = cpu_to_le16(id);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (ret)
+ dev_err(hdev->dev, "Delete %s:[%d] failed\n",
+ (op == SPRAID_ADMIN_DELETE_CQ) ? "cq" : "sq", id);
+
+ return ret;
+}
+
+static int spraid_delete_cq(struct spraid_dev *hdev, u16 cqid)
+{
+ return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_CQ, cqid);
+}
+
+static int spraid_delete_sq(struct spraid_dev *hdev, u16 sqid)
+{
+ return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_SQ, sqid);
+}
+
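+/*
+ * Bring an I/O queue online: create the completion queue first, then the
+ * submission queue bound to it, and finally request the per-queue interrupt;
+ * on failure the queues created so far are deleted again.
+ */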
+static int spraid_create_queue(struct spraid_queue *spraidq, u16 qid)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+ u16 cq_vector;
+ int ret;
+
+ cq_vector = (hdev->num_vecs == 1) ? 0 : qid;
+ ret = spraid_create_cq(hdev, qid, spraidq, cq_vector);
+ if (ret)
+ return ret;
+
+ ret = spraid_create_sq(hdev, qid, spraidq);
+ if (ret)
+ goto delete_cq;
+
+ spraid_init_queue(spraidq, qid);
+ spraidq->cq_vector = cq_vector;
+
+ ret = pci_request_irq(hdev->pdev, cq_vector, spraid_irq, NULL,
+ spraidq, "spraid%d_q%d", hdev->instance, qid);
+
+ if (ret) {
+ dev_err(hdev->dev, "Request queue[%d] irq failed\n", qid);
+ goto delete_sq;
+ }
+
+ return 0;
+
+delete_sq:
+ spraidq->cq_vector = -1;
+ hdev->online_queues--;
+ spraid_delete_sq(hdev, qid);
+delete_cq:
+ spraid_delete_cq(hdev, qid);
+
+ return ret;
+}
+
+static int spraid_create_io_queues(struct spraid_dev *hdev)
+{
+ u32 i, max;
+ int ret = 0;
+
+ max = min(hdev->max_qid, hdev->queue_count - 1);
+ for (i = hdev->online_queues; i <= max; i++) {
+ ret = spraid_create_queue(&hdev->queues[i], i);
+ if (ret) {
+ dev_err(hdev->dev, "Create queue[%d] failed\n", i);
+ break;
+ }
+ }
+
+ dev_info(hdev->dev, "[%s] queue_count[%d], online_queue[%d]",
+ __func__, hdev->queue_count, hdev->online_queues);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+static int spraid_set_features(struct spraid_dev *hdev, u32 fid,
+ u32 dword11, void *buffer,
+ size_t buflen, u32 *result)
+{
+ struct spraid_admin_command admin_cmd;
+ int ret;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+
+ if (buffer && buflen) {
+ data_ptr = dma_alloc_coherent(hdev->dev, buflen,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memcpy(data_ptr, buffer, buflen);
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.features.opcode = SPRAID_ADMIN_SET_FEATURES;
+ admin_cmd.features.fid = cpu_to_le32(fid);
+ admin_cmd.features.dword11 = cpu_to_le32(dword11);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, result, NULL, 0);
+
+ if (data_ptr)
+ dma_free_coherent(hdev->dev, buflen, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_configure_timestamp(struct spraid_dev *hdev)
+{
+ __le64 ts;
+ int ret;
+
+ ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
+ ret = spraid_set_features(hdev, SPRAID_FEAT_TIMESTAMP,
+ 0, &ts, sizeof(ts), NULL);
+
+ if (ret)
+ dev_err(hdev->dev, "set timestamp failed: %d\n", ret);
+ return ret;
+}
+
+static int spraid_set_queue_cnt(struct spraid_dev *hdev, u32 *cnt)
+{
+ u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16);
+ u32 nr_ioqs, result;
+ int status;
+
+ status = spraid_set_features(hdev, SPRAID_FEAT_NUM_QUEUES,
+ q_cnt, NULL, 0, &result);
+ if (status) {
+ dev_err(hdev->dev, "Set queue count failed, status: %d\n",
+ status);
+ return -EIO;
+ }
+
+ nr_ioqs = min(result & 0xffff, result >> 16) + 1;
+ *cnt = min(*cnt, nr_ioqs);
+ if (*cnt == 0) {
+ dev_err(hdev->dev, "Illegal queue count: zero\n");
+ return -EIO;
+ }
+ return 0;
+}
+
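+/*
+ * Negotiate the number of I/O queues with the controller, remap the BAR so
+ * all doorbells are visible, reallocate MSI/MSI-X vectors with affinity
+ * (vector 0 stays with the admin queue), then allocate and create the I/O
+ * queues.
+ */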
+static int spraid_setup_io_queues(struct spraid_dev *hdev)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ struct pci_dev *pdev = hdev->pdev;
+ u32 nr_ioqs = num_online_cpus();
+ u32 i, size;
+ int ret;
+
+ struct irq_affinity affd = {
+ .pre_vectors = 1
+ };
+
+ ret = spraid_set_queue_cnt(hdev, &nr_ioqs);
+ if (ret < 0)
+ return ret;
+
+ size = spraid_bar_size(hdev, nr_ioqs);
+ ret = spraid_remap_bar(hdev, size);
+ if (ret)
+ return -ENOMEM;
+
+ adminq->q_db = hdev->dbs;
+
+ pci_free_irq(pdev, 0, adminq);
+ pci_free_irq_vectors(pdev);
+
+ ret = pci_alloc_irq_vectors_affinity(pdev, 1, (nr_ioqs + 1),
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ if (ret <= 0)
+ return -EIO;
+
+ hdev->num_vecs = ret;
+
+ hdev->max_qid = max(ret - 1, 1);
+
+ ret = pci_request_irq(pdev, adminq->cq_vector, spraid_irq, NULL,
+ adminq, "spraid%d_q%d",
+ hdev->instance, adminq->qid);
+ if (ret) {
+ dev_err(hdev->dev, "Request admin irq failed\n");
+ adminq->cq_vector = -1;
+ return ret;
+ }
+
+ for (i = hdev->queue_count; i <= hdev->max_qid; i++) {
+ ret = spraid_alloc_queue(hdev, i, hdev->ioq_depth);
+ if (ret)
+ break;
+ }
+ dev_info(hdev->dev, "[%s] max_qid: %d, queue_count: %d;"
+ " online_queue: %d, ioq_depth: %d\n",
+ __func__, hdev->max_qid, hdev->queue_count,
+ hdev->online_queues, hdev->ioq_depth);
+
+ return spraid_create_io_queues(hdev);
+}
+
+static void spraid_delete_io_queues(struct spraid_dev *hdev)
+{
+ u16 queues = hdev->online_queues - 1;
+ u8 opcode = SPRAID_ADMIN_DELETE_SQ;
+ u16 i, pass;
+
+ if (!pci_device_is_present(hdev->pdev)) {
+ dev_err(hdev->dev,
+ "pci_device is not present, skip disable io queues\n");
+ return;
+ }
+
+ if (hdev->online_queues < 2) {
+ dev_err(hdev->dev, "[%s] err, io queue has been delete\n",
+ __func__);
+ return;
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ for (i = queues; i > 0; i--)
+ if (spraid_delete_queue(hdev, opcode, i))
+ break;
+
+ opcode = SPRAID_ADMIN_DELETE_CQ;
+ }
+}
+
+static void spraid_remove_io_queues(struct spraid_dev *hdev)
+{
+ spraid_delete_io_queues(hdev);
+ spraid_free_io_queues(hdev);
+}
+
+static void spraid_pci_disable(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u32 i;
+
+ for (i = 0; i < hdev->online_queues; i++)
+ pci_free_irq(pdev, hdev->queues[i].cq_vector, &hdev->queues[i]);
+ pci_free_irq_vectors(pdev);
+ if (pci_is_enabled(pdev)) {
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
+ hdev->online_queues = 0;
+}
+
+static void spraid_disable_admin_queue(struct spraid_dev *hdev, bool shutdown)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ spraid_shutdown_ctrl(hdev);
+ else
+ spraid_disable_ctrl(hdev);
+ }
+
+ if (hdev->queue_count == 0) {
+ dev_err(hdev->dev, "[%s] err, admin queue has been delete\n",
+ __func__);
+ return;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ spraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+
+ spraid_complete_cqes(adminq, start, end);
+ spraid_free_admin_queue(hdev);
+}
+
+static int spraid_create_dma_pools(struct spraid_dev *hdev)
+{
+ int i;
+ char poolname[20] = { 0 };
+
+ hdev->prp_page_pool = dma_pool_create("prp list page", hdev->dev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+
+ if (!hdev->prp_page_pool) {
+ dev_err(hdev->dev, "create prp_page_pool failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < small_pool_num; i++) {
+ sprintf(poolname, "prp_list_256_%d", i);
+ hdev->prp_small_pool[i] =
+ dma_pool_create(poolname, hdev->dev, SMALL_POOL_SIZE,
+ SMALL_POOL_SIZE, 0);
+
+ if (!hdev->prp_small_pool[i]) {
+ dev_err(hdev->dev, "create prp_small_pool %d failed\n",
+ i);
+ goto destroy_prp_small_pool;
+ }
+ }
+
+ return 0;
+
+destroy_prp_small_pool:
+ while (i > 0)
+ dma_pool_destroy(hdev->prp_small_pool[--i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+
+ return -ENOMEM;
+}
+
+static void spraid_destroy_dma_pools(struct spraid_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < small_pool_num; i++)
+ dma_pool_destroy(hdev->prp_small_pool[i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+}
+
+static int spraid_get_dev_list(struct spraid_dev *hdev,
+ struct spraid_dev_info *devices)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ struct spraid_admin_command admin_cmd;
+ struct spraid_dev_list *list_buf;
+ dma_addr_t data_dma = 0;
+ u32 i, idx, hdid, ndev;
+ int ret = 0;
+
+ list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!list_buf)
+ return -ENOMEM;
+
+ for (idx = 0; idx < nd;) {
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SPRAID_GET_INFO_DEV_LIST;
+ admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd,
+ NULL, NULL, 0);
+
+ if (ret) {
+ dev_err(hdev->dev, "Get device list failed, nd: %u;"
+ "idx: %u, ret: %d\n",
+ nd, idx, ret);
+ goto out;
+ }
+ ndev = le32_to_cpu(list_buf->dev_num);
+
+ dev_info(hdev->dev, "ndev numbers: %u\n", ndev);
+
+ for (i = 0; i < ndev; i++) {
+ hdid = le32_to_cpu(list_buf->devices[i].hdid);
+ dev_info(hdev->dev, "list_buf->devices[%d], hdid: %u;"
+ "target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ i, hdid,
+ le16_to_cpu(list_buf->devices[i].target),
+ list_buf->devices[i].channel,
+ list_buf->devices[i].lun,
+ list_buf->devices[i].attr);
+ if (hdid > nd || hdid == 0) {
+ dev_err(hdev->dev, "err, hdid[%d] invalid\n",
+ hdid);
+ continue;
+ }
+ memcpy(&devices[hdid - 1], &list_buf->devices[i],
+ sizeof(struct spraid_dev_info));
+ }
+ idx += ndev;
+
+ if (idx < MAX_DEV_ENTRY_PER_PAGE_4K)
+ break;
+ }
+
+out:
+ dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, data_dma);
+ return ret;
+}
+
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = SPRAID_ADMIN_ASYNC_EVENT;
+ admin_cmd.common.command_id = cid;
+
+ spraid_submit_cmd(adminq, &admin_cmd);
+ dev_info(hdev->dev, "send aen, cid[%d]\n", cid);
+}
+
+static inline void spraid_send_all_aen(struct spraid_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->ctrl_info->aerl; i++)
+ spraid_send_aen(hdev, i + SPRAID_AQ_BLK_MQ_DEPTH);
+}
+
+static int spraid_add_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "add device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel,
+ le16_to_cpu(device->target), 0);
+ if (sdev) {
+ dev_warn(hdev->dev, "Device is already exist, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ scsi_device_put(sdev);
+ return -EEXIST;
+ }
+ scsi_add_device(shost, device->channel, le16_to_cpu(device->target), 0);
+ return 0;
+}
+
+static int spraid_rescan_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "rescan device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel,
+ le16_to_cpu(device->target), 0);
+ if (!sdev) {
+ dev_warn(hdev->dev, "device is not exit rescan it, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int spraid_remove_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *org_device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "remove device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(org_device->hdid), le16_to_cpu(org_device->target),
+ org_device->channel, org_device->lun, org_device->attr);
+
+ sdev = scsi_device_lookup(shost, org_device->channel,
+ le16_to_cpu(org_device->target), 0);
+ if (!sdev) {
+ dev_warn(hdev->dev, "device is not exit remove it, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ org_device->channel,
+ le16_to_cpu(org_device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int spraid_dev_list_init(struct spraid_dev *hdev)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ int i, ret;
+
+ hdev->devices = kzalloc_node(nd * sizeof(struct spraid_dev_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->devices)
+ return -ENOMEM;
+
+ ret = spraid_get_dev_list(hdev, hdev->devices);
+ if (ret) {
+ dev_err(hdev->dev,
+ "Ignore failure of getting device list;"
+ " within initialization\n");
+ return 0;
+ }
+
+ for (i = 0; i < nd; i++) {
+ if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[i].flag) &&
+ SPRAID_DEV_INFO_ATTR_BOOT(hdev->devices[i].attr)) {
+ spraid_add_device(hdev, &hdev->devices[i]);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int luntarget_cmp_func(const void *l, const void *r)
+{
+ const struct spraid_dev_info *ln = l;
+ const struct spraid_dev_info *rn = r;
+
+ if (ln->channel == rn->channel)
+ return le16_to_cpu(ln->target) - le16_to_cpu(rn->target);
+
+ return ln->channel - rn->channel;
+}
+
+static void spraid_scan_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, scan_work);
+ struct spraid_dev_info *devices, *org_devices;
+ struct spraid_dev_info *sortdevice;
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ u8 flag, org_flag;
+ int i, ret;
+ int count = 0;
+
+ devices = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+ if (!devices)
+ return;
+
+ sortdevice = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+ if (!sortdevice)
+ goto free_list;
+
+ ret = spraid_get_dev_list(hdev, devices);
+ if (ret)
+ goto free_all;
+ org_devices = hdev->devices;
+ for (i = 0; i < nd; i++) {
+ org_flag = org_devices[i].flag;
+ flag = devices[i].flag;
+
+ dev_log_dbg(hdev->dev, "i: %d, org_flag: 0x%x, flag: 0x%x\n",
+ i, org_flag, flag);
+
+ if (SPRAID_DEV_INFO_FLAG_VALID(flag)) {
+ if (!SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ memcpy(&org_devices[i], &devices[i],
+ sizeof(struct spraid_dev_info));
+ memcpy(&sortdevice[count++], &devices[i],
+ sizeof(struct spraid_dev_info));
+ up_write(&hdev->devices_rwsem);
+ } else if (SPRAID_DEV_INFO_FLAG_CHANGE(flag)) {
+ spraid_rescan_device(hdev, &devices[i]);
+ }
+ } else {
+ if (SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ org_devices[i].flag &= 0xfe;
+ up_write(&hdev->devices_rwsem);
+ spraid_remove_device(hdev, &org_devices[i]);
+ }
+ }
+ }
+
+ dev_info(hdev->dev, "scan work add device count = %d\n", count);
+
+ sort(sortdevice, count, sizeof(sortdevice[0]),
+ luntarget_cmp_func, NULL);
+
+ for (i = 0; i < count; i++)
+ spraid_add_device(hdev, &sortdevice[i]);
+
+free_all:
+ kfree(sortdevice);
+free_list:
+ kfree(devices);
+}
+
+static void spraid_timesyn_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, timesyn_work);
+
+ spraid_configure_timestamp(hdev);
+}
+
+static int spraid_init_ctrl_info(struct spraid_dev *hdev);
+static void spraid_fw_act_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, fw_act_work);
+
+ if (spraid_init_ctrl_info(hdev))
+ dev_err(hdev->dev, "get ctrl info failed after fw act\n");
+}
+
+static void spraid_queue_scan(struct spraid_dev *hdev)
+{
+ queue_work(spraid_wq, &hdev->scan_work);
+}
+
+static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SPRAID_AEN_DEV_CHANGED:
+ spraid_queue_scan(hdev);
+ break;
+ case SPRAID_AEN_FW_ACT_START:
+ dev_info(hdev->dev, "fw activation starting\n");
+ break;
+ case SPRAID_AEN_HOST_PROBING:
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result %08x\n", result);
+ }
+}
+
+static void spraid_handle_aen_vs(struct spraid_dev *hdev,
+ u32 result, u32 result1)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SPRAID_AEN_TIMESYN:
+ queue_work(spraid_wq, &hdev->timesyn_work);
+ break;
+ case SPRAID_AEN_FW_ACT_FINISH:
+ dev_info(hdev->dev, "fw activation finish\n");
+ queue_work(spraid_wq, &hdev->fw_act_work);
+ break;
+ case SPRAID_AEN_EVENT_MIN ... SPRAID_AEN_EVENT_MAX:
+ dev_info(hdev->dev, "rcv card event[%d];"
+ " param1[0x%x] param2[0x%x]\n",
+ (result & 0xff00) >> 8, result, result1);
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result: 0x%x\n", result);
+ }
+}
+
+static int spraid_alloc_resources(struct spraid_dev *hdev)
+{
+ int ret, nqueue;
+
+ ret = ida_alloc(&spraid_instance_ida, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(hdev->dev, "Get instance id failed\n");
+ return ret;
+ }
+ hdev->instance = ret;
+
+ hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->ctrl_info) {
+ ret = -ENOMEM;
+ goto release_instance;
+ }
+
+ ret = spraid_create_dma_pools(hdev);
+ if (ret)
+ goto free_ctrl_info;
+ nqueue = num_possible_cpus() + 1;
+ hdev->queues = kcalloc_node(nqueue, sizeof(struct spraid_queue),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->queues) {
+ ret = -ENOMEM;
+ goto destroy_dma_pools;
+ }
+
+ ret = spraid_alloc_admin_cmds(hdev);
+ if (ret)
+ goto free_queues;
+
+ dev_info(hdev->dev, "[%s] queues num: %d\n", __func__, nqueue);
+
+ return 0;
+
+free_queues:
+ kfree(hdev->queues);
+destroy_dma_pools:
+ spraid_destroy_dma_pools(hdev);
+free_ctrl_info:
+ kfree(hdev->ctrl_info);
+release_instance:
+ ida_free(&spraid_instance_ida, hdev->instance);
+ return ret;
+}
+
+static void spraid_free_resources(struct spraid_dev *hdev)
+{
+ spraid_free_admin_cmds(hdev);
+ kfree(hdev->queues);
+ spraid_destroy_dma_pools(hdev);
+ kfree(hdev->ctrl_info);
+ ida_free(&spraid_instance_ida, hdev->instance);
+}
+
+static void spraid_bsg_unmap_data(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir =
+ rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ if (iod->nsge)
+ dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
+
+ spraid_free_iod_res(hdev, iod);
+}
+
+static int spraid_bsg_map_data(struct spraid_dev *hdev, struct bsg_job *job,
+ struct spraid_admin_command *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir =
+ rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ iod->sg = job->request_payload.sg_list;
+ iod->nsge = job->request_payload.sg_cnt;
+ iod->length = job->request_payload.payload_len;
+ iod->use_sgl = false;
+ iod->npages = -1;
+ iod->sg_drv_mgmt = false;
+
+ if (!iod->nsge)
+ goto out;
+
+ ret = dma_map_sg_attrs(hdev->dev, iod->sg, iod->nsge,
+ dma_dir, DMA_ATTR_NO_WARN);
+ if (!ret)
+ goto out;
+
+ ret = spraid_setup_prps(hdev, iod);
+ if (ret)
+ goto unmap;
+
+ cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+
+ return 0;
+
+unmap:
+ dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
+out:
+ return ret;
+}
+
+static int spraid_get_ctrl_info(struct spraid_dev *hdev,
+ struct spraid_ctrl_info *ctrl_info)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SPRAID_GET_INFO_CTRL;
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(ctrl_info, data_ptr, sizeof(struct spraid_ctrl_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_init_ctrl_info(struct spraid_dev *hdev)
+{
+ int ret;
+
+ hdev->ctrl_info->nd = cpu_to_le32(240);
+ hdev->ctrl_info->mdts = 8;
+ hdev->ctrl_info->max_cmds = cpu_to_le16(4096);
+ hdev->ctrl_info->max_num_sge = cpu_to_le16(128);
+ hdev->ctrl_info->max_channel = cpu_to_le16(4);
+ hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239);
+ hdev->ctrl_info->max_lun = cpu_to_le16(2);
+
+ ret = spraid_get_ctrl_info(hdev, hdev->ctrl_info);
+ if (ret)
+ dev_err(hdev->dev, "get controller info failed: %d\n", ret);
+
+ dev_info(hdev->dev, "[%s]nd = %d\n", __func__, hdev->ctrl_info->nd);
+ dev_info(hdev->dev, "[%s]max_cmd = %d\n",
+ __func__, hdev->ctrl_info->max_cmds);
+ dev_info(hdev->dev, "[%s]max_channel = %d\n",
+ __func__, hdev->ctrl_info->max_channel);
+ dev_info(hdev->dev, "[%s]max_tgt_id = %d\n",
+ __func__, hdev->ctrl_info->max_tgt_id);
+ dev_info(hdev->dev, "[%s]max_lun = %d\n",
+ __func__, hdev->ctrl_info->max_lun);
+ dev_info(hdev->dev, "[%s]max_num_sge = %d\n",
+ __func__, hdev->ctrl_info->max_num_sge);
+ dev_info(hdev->dev, "[%s]lun_num_boot = %d\n",
+ __func__, hdev->ctrl_info->lun_num_in_boot);
+ dev_info(hdev->dev, "[%s]mdts = %d\n", __func__, hdev->ctrl_info->mdts);
+ dev_info(hdev->dev, "[%s]acl = %d\n", __func__, hdev->ctrl_info->acl);
+ dev_info(hdev->dev, "[%s]aer1 = %d\n", __func__, hdev->ctrl_info->aerl);
+ dev_info(hdev->dev, "[%s]card_type = %d\n",
+ __func__, hdev->ctrl_info->card_type);
+ dev_info(hdev->dev, "[%s]rtd3e = %d\n",
+ __func__, hdev->ctrl_info->rtd3e);
+ dev_info(hdev->dev, "[%s]sn = %s\n", __func__, hdev->ctrl_info->sn);
+ dev_info(hdev->dev, "[%s]fr = %s\n", __func__, hdev->ctrl_info->fr);
+
+ if (!hdev->ctrl_info->aerl)
+ hdev->ctrl_info->aerl = 1;
+ if (hdev->ctrl_info->aerl > SPRAID_NR_AEN_COMMANDS)
+ hdev->ctrl_info->aerl = SPRAID_NR_AEN_COMMANDS;
+
+ return 0;
+}
+
+#define SPRAID_MAX_ADMIN_PAYLOAD_SIZE BIT(16)
+static int spraid_alloc_iod_ext_mem_pool(struct spraid_dev *hdev)
+{
+ u16 max_sge = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ size_t alloc_size;
+
+ alloc_size = spraid_iod_ext_size(hdev, SPRAID_MAX_ADMIN_PAYLOAD_SIZE,
+ max_sge, true, false);
+ if (alloc_size > PAGE_SIZE)
+ dev_warn(hdev->dev, "It is unreasonable ;"
+ " sg allocation more than one page\n");
+ hdev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+ mempool_kfree,
+ (void *)alloc_size, GFP_KERNEL,
+ hdev->numa_node);
+ if (!hdev->iod_mempool) {
+ dev_err(hdev->dev, "Create iod extension memory pool failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void spraid_free_iod_ext_mem_pool(struct spraid_dev *hdev)
+{
+ mempool_destroy(hdev->iod_mempool);
+}
+
+static int spraid_user_admin_cmd(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct spraid_bsg_request *bsg_req = job->request;
+ struct spraid_passthru_common_cmd *cmd = &(bsg_req->admcmd);
+ struct spraid_admin_command admin_cmd;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+ u32 result[2] = {0};
+ int status;
+
+ if (hdev->state >= SPRAID_RESETTING) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not right\n",
+ __func__, hdev->state);
+ return -EBUSY;
+ }
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init\n",
+ __func__, cmd->opcode, cmd->info_0.subopcode);
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = cmd->opcode;
+ admin_cmd.common.flags = cmd->flags;
+ admin_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ admin_cmd.common.cdw2[0] = cpu_to_le32(cmd->cdw2);
+ admin_cmd.common.cdw2[1] = cpu_to_le32(cmd->cdw3);
+ admin_cmd.common.cdw10 = cpu_to_le32(cmd->cdw10);
+ admin_cmd.common.cdw11 = cpu_to_le32(cmd->cdw11);
+ admin_cmd.common.cdw12 = cpu_to_le32(cmd->cdw12);
+ admin_cmd.common.cdw13 = cpu_to_le32(cmd->cdw13);
+ admin_cmd.common.cdw14 = cpu_to_le32(cmd->cdw14);
+ admin_cmd.common.cdw15 = cpu_to_le32(cmd->cdw15);
+
+ status = spraid_bsg_map_data(hdev, job, &admin_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
+ }
+
+ status = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, &result[0],
+ &result[1], timeout);
+ if (status >= 0) {
+ job->reply_len = sizeof(result);
+ memcpy(job->reply, result, sizeof(result));
+ }
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x];"
+ " status[0x%x] result0[0x%x] result1[0x%x]\n",
+ __func__, cmd->opcode, cmd->info_0.subopcode,
+ status, result[0], result[1]);
+
+ spraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
+static int spraid_alloc_ioq_ptcmds(struct spraid_dev *hdev)
+{
+ int i;
+ int ptnum = SPRAID_NR_IOQ_PTCMDS;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+ spin_lock_init(&hdev->ioq_pt_lock);
+
+ hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->ioq_ptcmds) {
+ dev_err(hdev->dev, "Alloc ioq_ptcmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ptnum; i++) {
+ hdev->ioq_ptcmds[i].qid = i / SPRAID_PTCMDS_PERQ + 1;
+ hdev->ioq_ptcmds[i].cid = i % SPRAID_PTCMDS_PERQ
+ + SPRAID_IO_BLK_MQ_DEPTH;
+ list_add_tail(&(hdev->ioq_ptcmds[i].list), &hdev->ioq_pt_list);
+ }
+
+ dev_info(hdev->dev, "Alloc ioq_ptcmds success, ptnum[%d]\n", ptnum);
+
+ return 0;
+}
+
+static void spraid_free_ioq_ptcmds(struct spraid_dev *hdev)
+{
+ kfree(hdev->ioq_ptcmds);
+ hdev->ioq_ptcmds = NULL;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+}
+
+static int spraid_submit_ioq_sync_cmd(struct spraid_dev *hdev,
+ struct spraid_ioq_command *cmd,
+ u32 *result, u32 *reslen, u32 timeout)
+{
+ int ret;
+ dma_addr_t sense_dma;
+ struct spraid_queue *ioq;
+ void *sense_addr = NULL;
+ struct spraid_cmd *pt_cmd = spraid_get_cmd(hdev, SPRAID_CMD_IOPT);
+
+ if (!pt_cmd) {
+ dev_err(hdev->dev, "err, get ioq cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&pt_cmd->cmd_done);
+
+ ioq = &hdev->queues[pt_cmd->qid];
+ ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
+ sense_addr = ioq->sense + ret;
+ sense_dma = ioq->sense_dma_addr + ret;
+
+ cmd->common.sense_addr = cpu_to_le64(sense_dma);
+ cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+ cmd->common.command_id = pt_cmd->cid;
+
+ spraid_submit_cmd(ioq, cmd);
+
+ if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout;"
+ " opcode[0x%x] subopcode[0x%x]\n",
+ __func__, pt_cmd->cid, pt_cmd->qid, cmd->common.opcode,
+ (le32_to_cpu(cmd->common.cdw3[0]) & 0xffff));
+ WRITE_ONCE(pt_cmd->state, SPRAID_CMD_TIMEOUT);
+ spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
+ return -EINVAL;
+ }
+
+ if (result && reslen) {
+ if ((pt_cmd->status & 0x17f) == 0x101) {
+ memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+ *reslen = SCSI_SENSE_BUFFERSIZE;
+ }
+ }
+
+ spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
+
+ return pt_cmd->status;
+}
+
+static int spraid_user_ioq_cmd(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct spraid_bsg_request *bsg_req =
+ (struct spraid_bsg_request *)(job->request);
+ struct spraid_ioq_passthru_cmd *cmd = &(bsg_req->ioqcmd);
+ struct spraid_ioq_command ioq_cmd;
+ int status = 0;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+
+ if (cmd->data_len > PAGE_SIZE) {
+ dev_err(hdev->dev, "[%s] data len bigger than 4k\n", __func__);
+ return -EFAULT;
+ }
+
+ if (hdev->state != SPRAID_LIVE) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not live\n",
+ __func__, hdev->state);
+ return -EBUSY;
+ }
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init;"
+ " datalen[%d]\n",
+ __func__, cmd->opcode, cmd->info_1.subopcode, cmd->data_len);
+
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.common.opcode = cmd->opcode;
+ ioq_cmd.common.flags = cmd->flags;
+ ioq_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ ioq_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+ ioq_cmd.common.cdb_len = cmd->info_0.cdb_len;
+ ioq_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+ ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+ ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+ ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+ ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+ ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+ ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+ ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+ ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+ ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+ memcpy(ioq_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+ ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+ ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+ ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+ ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+ status = spraid_bsg_map_data(hdev, job,
+ (struct spraid_admin_command *)&ioq_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
+ }
+
+ status = spraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, job->reply,
+ &job->reply_len, timeout);
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x], status[0x%x];"
+ " reply_len[%d]\n",
+ __func__, cmd->opcode, cmd->info_1.subopcode,
+ status, job->reply_len);
+
+ spraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
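+/*
+ * Error-handling helper: returns true if the command has already completed,
+ * either according to its iod state or by polling the completion queue.
+ */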
+static bool spraid_check_scmd_completed(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_queue *spraidq;
+ u16 hwq, cid;
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ spraidq = &hdev->queues[hwq];
+ if (READ_ONCE(iod->state) == SPRAID_CMD_COMPLETE
+ || spraid_poll_cq(spraidq, cid)) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n",
+ cid, spraidq->qid);
+ return true;
+ }
+ return false;
+}
+
+static enum blk_eh_timer_return spraid_scmd_timeout(struct scsi_cmnd *scmd)
+{
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ unsigned int timeout = scmd->device->request_queue->rq_timeout;
+
+ if (spraid_check_scmd_completed(scmd))
+ goto out;
+
+ if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) {
+ if (cmpxchg(&iod->state,
+ SPRAID_CMD_IN_FLIGHT,
+ SPRAID_CMD_TIMEOUT) == SPRAID_CMD_IN_FLIGHT) {
+ return BLK_EH_DONE;
+ }
+ }
+out:
+ return BLK_EH_RESET_TIMER;
+}
+
+/* send abort command by admin queue temporarily */
+static int spraid_send_abort_cmd(struct spraid_dev *hdev,
+ u32 hdid, u16 qid, u16 cid)
+{
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.abort.opcode = SPRAID_ADMIN_ABORT_CMD;
+ admin_cmd.abort.hdid = cpu_to_le32(hdid);
+ admin_cmd.abort.sqid = cpu_to_le16(qid);
+ admin_cmd.abort.cid = cpu_to_le16(cid);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/* send reset command by admin queue temporarily */
+static int spraid_send_reset_cmd(struct spraid_dev *hdev, int type, u32 hdid)
+{
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.reset.opcode = SPRAID_ADMIN_RESET;
+ admin_cmd.reset.hdid = cpu_to_le32(hdid);
+ admin_cmd.reset.type = type;
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static bool spraid_change_host_state(struct spraid_dev *hdev,
+ enum spraid_state newstate)
+{
+ unsigned long flags;
+ enum spraid_state oldstate;
+ bool change = false;
+
+ spin_lock_irqsave(&hdev->state_lock, flags);
+
+ oldstate = hdev->state;
+ switch (newstate) {
+ case SPRAID_LIVE:
+ switch (oldstate) {
+ case SPRAID_NEW:
+ case SPRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SPRAID_RESETTING:
+ switch (oldstate) {
+ case SPRAID_LIVE:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SPRAID_DELETING:
+ if (oldstate != SPRAID_DELETING)
+ change = true;
+ break;
+ case SPRAID_DEAD:
+ switch (oldstate) {
+ case SPRAID_NEW:
+ case SPRAID_LIVE:
+ case SPRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (change)
+ hdev->state = newstate;
+ spin_unlock_irqrestore(&hdev->state_lock, flags);
+
+ dev_info(hdev->dev, "[%s][%d]->[%d], change[%d]\n",
+ __func__, oldstate, newstate, change);
+
+ return change;
+}
+
+static void spraid_back_fault_cqe(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct spraid_iod *iod;
+ struct request *req;
+
+ tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+ req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
+ if (unlikely(!req || !blk_mq_request_started(req)))
+ return;
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ set_host_byte(scmd, DID_NO_CONNECT);
+ if (iod->nsge)
+ scsi_dma_unmap(scmd);
+ spraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+ dev_warn(hdev->dev, "Back fault CQE, cid[%d] qid[%d]\n",
+ cqe->cmd_id, ioq->qid);
+}
+
+static void spraid_back_all_io(struct spraid_dev *hdev)
+{
+ int i, j;
+ struct spraid_queue *ioq;
+ struct spraid_completion cqe = { 0 };
+
+ scsi_block_requests(hdev->shost);
+
+ for (i = 1; i <= hdev->shost->nr_hw_queues; i++) {
+ ioq = &hdev->queues[i];
+ for (j = 0; j < hdev->shost->can_queue; j++) {
+ cqe.cmd_id = j;
+ spraid_back_fault_cqe(ioq, &cqe);
+ }
+ }
+
+ scsi_unblock_requests(hdev->shost);
+}
+
+static void spraid_dev_disable(struct spraid_dev *hdev, bool shutdown)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+ unsigned long timeout = jiffies + 600 * HZ;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ spraid_shutdown_ctrl(hdev);
+ else
+ spraid_disable_ctrl(hdev);
+ }
+
+ while (!time_after(jiffies, timeout)) {
+ if (!pci_device_is_present(hdev->pdev)) {
+ dev_info(hdev->dev, "[%s] pci_device not present;"
+ " skip wait\n", __func__);
+ break;
+ }
+ if (!spraid_wait_ready(hdev, hdev->cap, false)) {
+ dev_info(hdev->dev,
+ "[%s] wait ready success after reset\n",
+ __func__);
+ break;
+ }
+ dev_info(hdev->dev, "[%s] waiting csts_rdy ready\n", __func__);
+ }
+
+ if (hdev->queue_count == 0) {
+ dev_err(hdev->dev, "[%s] warn, queue has been deleted\n",
+ __func__);
+ return;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ spraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+ spraid_complete_cqes(adminq, start, end);
+
+ spraid_pci_disable(hdev);
+
+ spraid_back_all_io(hdev);
+}
+
+static void spraid_reset_work(struct work_struct *work)
+{
+ int ret;
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, reset_work);
+
+ if (hdev->state != SPRAID_RESETTING) {
+ dev_err(hdev->dev, "[%s] err, host is not in reset state\n",
+ __func__);
+ return;
+ }
+
+ dev_info(hdev->dev, "[%s] enter host reset\n", __func__);
+
+ if (hdev->ctrl_config & SPRAID_CC_ENABLE) {
+ dev_info(hdev->dev, "[%s] start dev_disable\n", __func__);
+ spraid_dev_disable(hdev, false);
+ }
+
+ ret = spraid_pci_enable(hdev);
+ if (ret)
+ goto out;
+
+ ret = spraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = spraid_setup_io_queues(hdev);
+ if (ret || hdev->online_queues <= hdev->shost->nr_hw_queues)
+ goto pci_disable;
+
+ spraid_change_host_state(hdev, SPRAID_LIVE);
+
+ spraid_send_all_aen(hdev);
+
+ return;
+
+pci_disable:
+ spraid_pci_disable(hdev);
+out:
+ spraid_change_host_state(hdev, SPRAID_DEAD);
+ dev_err(hdev->dev, "[%s] err, host reset failed\n", __func__);
+}
+
+static int spraid_reset_work_sync(struct spraid_dev *hdev)
+{
+ if (!spraid_change_host_state(hdev, SPRAID_RESETTING)) {
+ dev_info(hdev->dev, "[%s] can't change to reset state\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ if (!queue_work(spraid_wq, &hdev->reset_work)) {
+ dev_err(hdev->dev, "[%s] err, host is already in reset state\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ flush_work(&hdev->reset_work);
+ if (hdev->state != SPRAID_LIVE)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int spraid_wait_abnl_cmd_done(struct spraid_iod *iod)
+{
+ u16 times = 0;
+
+ do {
+ if (READ_ONCE(iod->state) == SPRAID_CMD_TMO_COMPLETE)
+ break;
+ msleep(500);
+ times++;
+ } while (times <= SPRAID_WAIT_ABNL_CMD_TIMEOUT);
+
+ /* command completion wait timed out after abort/reset success */
+ if (times >= SPRAID_WAIT_ABNL_CMD_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int spraid_abort_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, aborting\n", cid, hwq);
+ ret = spraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
+ if (ret != ADMIN_ERR_TIMEOUT) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed;"
+ " not found\n", cid, hwq);
+ return FAILED;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort succ\n", cid, hwq);
+ return SUCCESS;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, timeout\n",
+ cid, hwq);
+ return FAILED;
+}
+
+static int spraid_tgt_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, target reset\n",
+ cid, hwq);
+ ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_TARGET, hostdata->hdid);
+ if (ret == 0) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ dev_warn(hdev->dev,
+ "cid[%d] qid[%d] target reset failed;"
+ " not found\n", cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] target reset success\n",
+ cid, hwq);
+ return SUCCESS;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] target reset failed\n",
+ cid, hwq, ret);
+ return FAILED;
+}
+
+static int spraid_bus_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, bus reset\n", cid, hwq);
+ ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_BUS, hostdata->hdid);
+ if (ret == 0) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ dev_warn(hdev->dev,
+ "cid[%d] qid[%d] bus reset failed;"
+ " not found\n", cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] bus reset succ\n",
+ cid, hwq);
+ return SUCCESS;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] bus reset failed\n",
+ cid, hwq, ret);
+ return FAILED;
+}
+
+static int spraid_shost_reset_handler(struct scsi_cmnd *scmd)
+{
+ u16 hwq, cid;
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+
+ scsi_print_command(scmd);
+ if (hdev->state != SPRAID_LIVE || spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset\n", cid, hwq);
+
+ if (spraid_reset_work_sync(hdev)) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset failed\n",
+ cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset success\n", cid, hwq);
+
+ return SUCCESS;
+}
+
+static pci_ers_result_t spraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter pci error detect, state:%d\n", state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ dev_warn(hdev->dev, "channel is normal, do nothing\n");
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ dev_warn(hdev->dev,
+ "channel io frozen, need reset controller\n");
+
+ scsi_block_requests(hdev->shost);
+
+ spraid_change_host_state(hdev, SPRAID_RESETTING);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_warn(hdev->dev, "channel io failure, request disconnect\n");
+
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t spraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "restart after slot reset\n");
+
+ pci_restore_state(pdev);
+
+ if (!queue_work(spraid_wq, &hdev->reset_work)) {
+ dev_err(hdev->dev, "[%s] err, the device is already in reset state\n",
+ __func__);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ flush_work(&hdev->reset_work);
+
+ scsi_unblock_requests(hdev->shost);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void spraid_reset_done(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter spraid reset done\n");
+}
+
+static ssize_t csts_pp_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_PP_MASK);
+ ret >>= SPRAID_CSTS_PP_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_shst_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_SHST_MASK);
+ ret >>= SPRAID_CSTS_SHST_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_cfs_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_CFS_MASK);
+ ret >>= SPRAID_CSTS_CFS_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_rdy_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev))
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t fw_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fr);
+}
+
+static DEVICE_ATTR_RO(csts_pp);
+static DEVICE_ATTR_RO(csts_shst);
+static DEVICE_ATTR_RO(csts_cfs);
+static DEVICE_ATTR_RO(csts_rdy);
+static DEVICE_ATTR_RO(fw_version);
+
+static struct device_attribute *spraid_host_attrs[] = {
+ &dev_attr_csts_pp,
+ &dev_attr_csts_shst,
+ &dev_attr_csts_cfs,
+ &dev_attr_csts_rdy,
+ &dev_attr_fw_version,
+ NULL,
+};
+
+static int spraid_get_vd_info(struct spraid_dev *hdev,
+ struct spraid_vd_info *vd_info, u16 vid)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+ admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(vd_info, data_ptr, sizeof(struct spraid_vd_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_get_bgtask(struct spraid_dev *hdev,
+ struct spraid_bgtask *bgtask)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(bgtask, data_ptr, sizeof(struct spraid_bgtask));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+ ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+ vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret) {
+ vd_info->vd_status = 0;
+ vd_info->rg_id = 0xff;
+ }
+
+ ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ?
+ vd_info->vd_status : 0;
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_bgtask *bgtask;
+ struct spraid_sdev_hostdata *hostdata;
+ u8 rg_id, i, progress = 0;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ goto out;
+
+ rg_id = vd_info->rg_id;
+
+ bgtask = (struct spraid_bgtask *)vd_info;
+ ret = spraid_get_bgtask(hdev, bgtask);
+ if (ret)
+ goto out;
+ for (i = 0; i < bgtask->task_num; i++) {
+ if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+ (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+ progress = bgtask->bgtask[i].progress;
+ }
+
+out:
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+
+static struct device_attribute *spraid_dev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_raid_state,
+ &dev_attr_raid_resync,
+ NULL,
+};
+
+static struct pci_error_handlers spraid_err_handler = {
+ .error_detected = spraid_pci_error_detected,
+ .slot_reset = spraid_pci_slot_reset,
+ .reset_done = spraid_reset_done,
+};
+
+static int spraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ int ret;
+ struct spraid_dev *hdev = shost_priv(shost);
+
+ dev_info(hdev->dev, "[%s] start sysfs host reset cmd\n", __func__);
+ ret = spraid_reset_work_sync(hdev);
+ dev_info(hdev->dev, "[%s] stop sysfs host reset cmd[%d]\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static struct scsi_host_template spraid_driver_template = {
+ .module = THIS_MODULE,
+ .name = "Ramaxel Logic spraid driver",
+ .proc_name = "spraid",
+ .queuecommand = spraid_queue_command,
+ .slave_alloc = spraid_slave_alloc,
+ .slave_destroy = spraid_slave_destroy,
+ .slave_configure = spraid_slave_configure,
+ .eh_timed_out = spraid_scmd_timeout,
+ .eh_abort_handler = spraid_abort_handler,
+ .eh_target_reset_handler = spraid_tgt_reset_handler,
+ .eh_bus_reset_handler = spraid_bus_reset_handler,
+ .eh_host_reset_handler = spraid_shost_reset_handler,
+ .change_queue_depth = scsi_change_queue_depth,
+ .this_id = -1,
+ .shost_attrs = spraid_host_attrs,
+ .sdev_attrs = spraid_dev_attrs,
+ .host_reset = spraid_sysfs_host_reset,
+};
+
+static void spraid_shutdown(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ spraid_remove_io_queues(hdev);
+ spraid_disable_admin_queue(hdev, true);
+}
+
+/* bsg dispatch user command */
+static int spraid_bsg_host_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = dev_to_shost(job->dev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_bsg_request *bsg_req = job->request;
+ int ret = 0;
+
+ dev_info(hdev->dev, "[%s] msgcode[%d], msglen[%d], timeout[%d];"
+ " req_nsge[%d], req_len[%d]\n",
+ __func__, bsg_req->msgcode, job->request_len,
+ rq->timeout, job->request_payload.sg_cnt,
+ job->request_payload.payload_len);
+
+ job->reply_len = 0;
+
+ switch (bsg_req->msgcode) {
+ case SPRAID_BSG_ADM:
+ ret = spraid_user_admin_cmd(hdev, job);
+ break;
+ case SPRAID_BSG_IOQ:
+ ret = spraid_user_ioq_cmd(hdev, job);
+ break;
+ default:
+ dev_info(hdev->dev, "[%s] unsupported msgcode[%d]\n",
+ __func__, bsg_req->msgcode);
+ break;
+ }
+
+ if (ret > 0)
+ ret = ret | (ret << 8);
+
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+static inline void spraid_remove_bsg(struct spraid_dev *hdev)
+{
+ if (hdev->bsg_queue) {
+ bsg_unregister_queue(hdev->bsg_queue);
+ blk_cleanup_queue(hdev->bsg_queue);
+ }
+}
+static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct spraid_dev *hdev;
+ struct Scsi_Host *shost;
+ int node, ret;
+ char bsg_name[15];
+
+ shost = scsi_host_alloc(&spraid_driver_template, sizeof(*hdev));
+ if (!shost) {
+ dev_err(&pdev->dev, "Failed to allocate scsi host\n");
+ return -ENOMEM;
+ }
+ hdev = shost_priv(shost);
+ hdev->pdev = pdev;
+ hdev->dev = get_device(&pdev->dev);
+
+ node = dev_to_node(hdev->dev);
+ if (node == NUMA_NO_NODE) {
+ node = first_memory_node;
+ set_dev_node(hdev->dev, node);
+ }
+ hdev->numa_node = node;
+ hdev->shost = shost;
+ pci_set_drvdata(pdev, hdev);
+
+ ret = spraid_dev_map(hdev);
+ if (ret)
+ goto put_dev;
+
+ init_rwsem(&hdev->devices_rwsem);
+ INIT_WORK(&hdev->scan_work, spraid_scan_work);
+ INIT_WORK(&hdev->timesyn_work, spraid_timesyn_work);
+ INIT_WORK(&hdev->reset_work, spraid_reset_work);
+ INIT_WORK(&hdev->fw_act_work, spraid_fw_act_work);
+ spin_lock_init(&hdev->state_lock);
+
+ ret = spraid_alloc_resources(hdev);
+ if (ret)
+ goto dev_unmap;
+
+ ret = spraid_pci_enable(hdev);
+ if (ret)
+ goto resources_free;
+
+ ret = spraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = spraid_init_ctrl_info(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ ret = spraid_alloc_iod_ext_mem_pool(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ ret = spraid_setup_io_queues(hdev);
+ if (ret)
+ goto free_iod_mempool;
+
+ spraid_shost_init(hdev);
+
+ ret = scsi_add_host(hdev->shost, hdev->dev);
+ if (ret) {
+ dev_err(hdev->dev, "Add shost to system failed, ret: %d\n",
+ ret);
+ goto remove_io_queues;
+ }
+
+ snprintf(bsg_name, sizeof(bsg_name), "spraid%d", shost->host_no);
+ hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name,
+ spraid_bsg_host_dispatch,
+ spraid_cmd_size(hdev, true, false));
+ if (IS_ERR(hdev->bsg_queue)) {
+ dev_err(hdev->dev, "err, setup bsg failed\n");
+ hdev->bsg_queue = NULL;
+ goto remove_io_queues;
+ }
+
+ if (hdev->online_queues == SPRAID_ADMIN_QUEUE_NUM) {
+ dev_warn(hdev->dev, "warn, only admin queue can be used\n");
+ return 0;
+ }
+
+ hdev->state = SPRAID_LIVE;
+
+ spraid_send_all_aen(hdev);
+
+ ret = spraid_dev_list_init(hdev);
+ if (ret)
+ goto remove_bsg;
+
+ ret = spraid_configure_timestamp(hdev);
+ if (ret)
+ dev_warn(hdev->dev, "init set timestamp failed\n");
+
+ ret = spraid_alloc_ioq_ptcmds(hdev);
+ if (ret)
+ goto remove_bsg;
+
+ scsi_scan_host(hdev->shost);
+
+ return 0;
+
+remove_bsg:
+ spraid_remove_bsg(hdev);
+remove_io_queues:
+ spraid_remove_io_queues(hdev);
+free_iod_mempool:
+ spraid_free_iod_ext_mem_pool(hdev);
+disable_admin_q:
+ spraid_disable_admin_queue(hdev, false);
+pci_disable:
+ spraid_pci_disable(hdev);
+resources_free:
+ spraid_free_resources(hdev);
+dev_unmap:
+ spraid_dev_unmap(hdev);
+put_dev:
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ return -ENODEV;
+}
+
+static void spraid_remove(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = hdev->shost;
+
+ dev_info(hdev->dev, "enter spraid remove\n");
+
+ spraid_change_host_state(hdev, SPRAID_DELETING);
+ flush_work(&hdev->reset_work);
+
+ if (!pci_device_is_present(pdev))
+ spraid_back_all_io(hdev);
+
+ spraid_remove_bsg(hdev);
+ scsi_remove_host(shost);
+ spraid_free_ioq_ptcmds(hdev);
+ kfree(hdev->devices);
+ spraid_remove_io_queues(hdev);
+ spraid_free_iod_ext_mem_pool(hdev);
+ spraid_disable_admin_queue(hdev, false);
+ spraid_pci_disable(hdev);
+ spraid_free_resources(hdev);
+ spraid_dev_unmap(hdev);
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ dev_info(hdev->dev, "exit spraid remove\n");
+}
+
+static const struct pci_device_id spraid_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC,
+ SPRAID_SERVER_DEVICE_HBA_DID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC,
+ SPRAID_SERVER_DEVICE_RAID_DID) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, spraid_id_table);
+
+static struct pci_driver spraid_driver = {
+ .name = "spraid",
+ .id_table = spraid_id_table,
+ .probe = spraid_probe,
+ .remove = spraid_remove,
+ .shutdown = spraid_shutdown,
+ .err_handler = &spraid_err_handler,
+};
+
+static int __init spraid_init(void)
+{
+ int ret;
+
+ spraid_wq = alloc_workqueue("spraid-wq", WQ_UNBOUND | WQ_MEM_RECLAIM |
+ WQ_SYSFS, 0);
+ if (!spraid_wq)
+ return -ENOMEM;
+
+ spraid_class = class_create(THIS_MODULE, "spraid");
+ if (IS_ERR(spraid_class)) {
+ ret = PTR_ERR(spraid_class);
+ goto destroy_wq;
+ }
+
+ ret = pci_register_driver(&spraid_driver);
+ if (ret < 0)
+ goto destroy_class;
+
+ return 0;
+
+destroy_class:
+ class_destroy(spraid_class);
+destroy_wq:
+ destroy_workqueue(spraid_wq);
+
+ return ret;
+}
+
+static void __exit spraid_exit(void)
+{
+ pci_unregister_driver(&spraid_driver);
+ class_destroy(spraid_class);
+ destroy_workqueue(spraid_wq);
+ ida_destroy(&spraid_instance_ida);
+}
+
+MODULE_AUTHOR("songyl(a)ramaxel.com");
+MODULE_DESCRIPTION("Ramaxel Memory Technology SPraid Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SPRAID_DRV_VERSION);
+module_init(spraid_init);
+module_exit(spraid_exit);
--
2.27.0

[PATCH openEuler-1.0-LTS 1/2] USB: gadget: detect too-big endpoint 0 requests
by Yang Yingliang 22 Dec '21
22 Dec '21
From: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
mainline inclusion
from mainline-v5.16-rc5
commit 153a2d7e3350cc89d406ba2d35be8793a64c2038
category: bugfix
bugzilla: NA
CVE: CVE-2021-39685
--------------------------------
Sometimes USB hosts can ask for buffers that are too large from endpoint
0, which should not be allowed. If this happens for OUT requests, stall
the endpoint, but for IN requests, trim the request size to the endpoint
buffer size.
Co-developed-by: Szymon Heidrich <szymon.heidrich(a)gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/usb/gadget/composite.c | 12 ++++++++++++
drivers/usb/gadget/legacy/dbgp.c | 13 +++++++++++++
drivers/usb/gadget/legacy/inode.c | 16 +++++++++++++++-
3 files changed, 40 insertions(+), 1 deletion(-)
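Each hunk below adds the same guard at the top of the ep0 setup path described in the commit message above; schematically it looks like the following sketch (illustrative only, with EP0_BUFSIZ standing in for the per-driver limit: USB_COMP_EP0_BUFSIZ, DBGP_REQ_LEN or RBUF_SIZE):
	if (w_length > EP0_BUFSIZ) {
		if (ctrl->bRequestType == USB_DIR_OUT) {
			/* too-big OUT request: bail out so ep0 gets stalled */
			goto done;
		} else {
			/* too-big IN request: trim wLength down to the ep0 buffer size */
			__le16 *temp = (__le16 *)&ctrl->wLength;	/* cast away const on purpose */

			*temp = cpu_to_le16(EP0_BUFSIZ);
			w_length = EP0_BUFSIZ;
		}
	}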
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index af0702cfa92ff..3813249387b3d 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1569,6 +1569,18 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
struct usb_function *f = NULL;
u8 endp;
+ if (w_length > USB_COMP_EP0_BUFSIZ) {
+ if (ctrl->bRequestType == USB_DIR_OUT) {
+ goto done;
+ } else {
+ /* Cast away the const, we are going to overwrite on purpose. */
+ __le16 *temp = (__le16 *)&ctrl->wLength;
+
+ *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
+ w_length = USB_COMP_EP0_BUFSIZ;
+ }
+ }
+
/* partial re-init of the response message; the function or the
* gadget might need to intercept e.g. a control-OUT completion
* when we delegate to it.
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index e1d566c9918ae..e567afcb2794c 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -345,6 +345,19 @@ static int dbgp_setup(struct usb_gadget *gadget,
void *data = NULL;
u16 len = 0;
+ if (length > DBGP_REQ_LEN) {
+ if (ctrl->bRequestType == USB_DIR_OUT) {
+ return err;
+ } else {
+ /* Cast away the const, we are going to overwrite on purpose. */
+ __le16 *temp = (__le16 *)&ctrl->wLength;
+
+ *temp = cpu_to_le16(DBGP_REQ_LEN);
+ length = DBGP_REQ_LEN;
+ }
+ }
+
+
if (request == USB_REQ_GET_DESCRIPTOR) {
switch (value>>8) {
case USB_DT_DEVICE:
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 37ca0e669bd85..83fd1b55d497a 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -109,6 +109,8 @@ enum ep0_state {
/* enough for the whole queue: most events invalidate others */
#define N_EVENT 5
+#define RBUF_SIZE 256
+
struct dev_data {
spinlock_t lock;
refcount_t count;
@@ -143,7 +145,7 @@ struct dev_data {
struct dentry *dentry;
/* except this scratch i/o buffer for ep0 */
- u8 rbuf [256];
+ u8 rbuf[RBUF_SIZE];
};
static inline void get_dev (struct dev_data *data)
@@ -1332,6 +1334,18 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
+ if (w_length > RBUF_SIZE) {
+ if (ctrl->bRequestType == USB_DIR_OUT) {
+ return value;
+ } else {
+ /* Cast away the const, we are going to overwrite on purpose. */
+ __le16 *temp = (__le16 *)&ctrl->wLength;
+
+ *temp = cpu_to_le16(RBUF_SIZE);
+ w_length = RBUF_SIZE;
+ }
+ }
+
spin_lock (&dev->lock);
dev->setup_abort = 0;
if (dev->state == STATE_DEV_UNCONNECTED) {
--
2.25.1

21 Dec '21
Ramaxel inclusion
category: features
bugzilla: https://gitee.com/openeuler/kernel/issues/I4NJIB
CVE: NA
Support Ramaxel's SPRxxx Raid controller
Signed-off-by: Yanling Song <songyl(a)ramaxel.com>
Reviewed-by: Jiang Yu<yujiang(a)ramaxel.com>
---
drivers/scsi/Kconfig | 1 +
drivers/scsi/Makefile | 1 +
drivers/scsi/spraid/Kconfig | 13 +
drivers/scsi/spraid/Makefile | 7 +
drivers/scsi/spraid/spraid.h | 746 ++++++
drivers/scsi/spraid/spraid_main.c | 3875 +++++++++++++++++++++++++++++
6 files changed, 4643 insertions(+)
create mode 100644 drivers/scsi/spraid/Kconfig
create mode 100644 drivers/scsi/spraid/Makefile
create mode 100644 drivers/scsi/spraid/spraid.h
create mode 100644 drivers/scsi/spraid/spraid_main.c
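As a quick usage sketch (not part of the patch): with the Kconfig entry added below, the driver is built as a module through a config fragment along the lines of
	CONFIG_PCI=y
	CONFIG_SCSI=y
	CONFIG_RAMAXEL_SPRAID=m
after which "modprobe spraid" loads the module, registers the SCSI host (proc name "spraid") and sets up a bsg queue named spraid<host_no> for admin/ioq passthrough commands.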
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8450484184e3..63d2aaa22834 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -522,6 +522,7 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/ufs/Kconfig"
+source "drivers/scsi/spraid/Kconfig"
config SCSI_HPTIOP
tristate "HighPoint RocketRAID 3xxx/4xxx Controller support"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 2973693f6dcc..4056cf26e09e 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -94,6 +94,7 @@ obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
obj-$(CONFIG_CXLFLASH) += cxlflash/
+obj-$(CONFIG_RAMAXEL_SPRAID) += spraid/
obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
diff --git a/drivers/scsi/spraid/Kconfig b/drivers/scsi/spraid/Kconfig
new file mode 100644
index 000000000000..bfbba3db8db0
--- /dev/null
+++ b/drivers/scsi/spraid/Kconfig
@@ -0,0 +1,13 @@
+#
+# Ramaxel driver configuration
+#
+
+config RAMAXEL_SPRAID
+ tristate "Ramaxel spraid Adapter"
+ depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
+ depends on ARM64 || X86_64
+ help
+ This driver supports the Ramaxel SPRxxx series
+ RAID controllers, which have a PCIe Gen4 host
+ interface and support SAS/SATA HDDs and SSDs.
diff --git a/drivers/scsi/spraid/Makefile b/drivers/scsi/spraid/Makefile
new file mode 100644
index 000000000000..aadc2ffd37eb
--- /dev/null
+++ b/drivers/scsi/spraid/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Ramaxel device drivers.
+#
+
+obj-$(CONFIG_RAMAXEL_SPRAID) += spraid.o
+
+spraid-objs := spraid_main.o
\ No newline at end of file
diff --git a/drivers/scsi/spraid/spraid.h b/drivers/scsi/spraid/spraid.h
new file mode 100644
index 000000000000..c1e4980e18e5
--- /dev/null
+++ b/drivers/scsi/spraid/spraid.h
@@ -0,0 +1,746 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+#ifndef __SPRAID_H_
+#define __SPRAID_H_
+
+#define SPRAID_CAP_MQES(cap) ((cap) & 0xffff)
+#define SPRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define SPRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define SPRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define SPRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define SPRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff)
+
+#define SPRAID_DEFAULT_MAX_CHANNEL 4
+#define SPRAID_DEFAULT_MAX_ID 240
+#define SPRAID_DEFAULT_MAX_LUN_PER_HOST 8
+#define MAX_SECTORS 2048
+
+#define IO_SQE_SIZE sizeof(struct spraid_ioq_command)
+#define ADMIN_SQE_SIZE sizeof(struct spraid_admin_command)
+#define SQE_SIZE(qid) (((qid) > 0) ? IO_SQE_SIZE : ADMIN_SQE_SIZE)
+#define CQ_SIZE(depth) ((depth) * sizeof(struct spraid_completion))
+#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid))
+
+#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE)
+
+#define SPRAID_AQ_DEPTH 128
+#define SPRAID_NR_AEN_COMMANDS 16
+#define SPRAID_AQ_BLK_MQ_DEPTH (SPRAID_AQ_DEPTH - SPRAID_NR_AEN_COMMANDS)
+#define SPRAID_AQ_MQ_TAG_DEPTH (SPRAID_AQ_BLK_MQ_DEPTH - 1)
+
+#define SPRAID_ADMIN_QUEUE_NUM 1
+#define SPRAID_PTCMDS_PERQ 1
+#define SPRAID_IO_BLK_MQ_DEPTH (hdev->shost->can_queue)
+#define SPRAID_NR_IOQ_PTCMDS (SPRAID_PTCMDS_PERQ * hdev->shost->nr_hw_queues)
+
+#define FUA_MASK 0x08
+#define SPRAID_MINORS BIT(MINORBITS)
+
+#define COMMAND_IS_WRITE(cmd) ((cmd)->common.opcode & 1)
+
+#define SPRAID_IO_IOSQES 7
+#define SPRAID_IO_IOCQES 4
+#define PRP_ENTRY_SIZE 8
+
+#define SMALL_POOL_SIZE 256
+#define MAX_SMALL_POOL_NUM 16
+#define MAX_CMD_PER_DEV 64
+#define MAX_CDB_LEN 32
+
+#define SPRAID_UP_TO_MULTY4(x) (((x) + 4) & (~0x03))
+
+#define CQE_STATUS_SUCCESS (0x0)
+
+#define PCI_VENDOR_ID_RAMAXEL_LOGIC 0x1E81
+
+#define SPRAID_SERVER_DEVICE_HBA_DID 0x2100
+#define SPRAID_SERVER_DEVICE_RAID_DID 0x2200
+
+#define IO_6_DEFAULT_TX_LEN 256
+
+#define SPRAID_INT_PAGES 2
+#define SPRAID_INT_BYTES(hdev) (SPRAID_INT_PAGES * (hdev)->page_size)
+
+enum {
+ SPRAID_REQ_CANCELLED = (1 << 0),
+ SPRAID_REQ_USERCMD = (1 << 1),
+};
+
+enum {
+ SPRAID_SC_SUCCESS = 0x0,
+ SPRAID_SC_INVALID_OPCODE = 0x1,
+ SPRAID_SC_INVALID_FIELD = 0x2,
+
+ SPRAID_SC_ABORT_LIMIT = 0x103,
+ SPRAID_SC_ABORT_MISSING = 0x104,
+ SPRAID_SC_ASYNC_LIMIT = 0x105,
+
+ SPRAID_SC_DNR = 0x4000,
+};
+
+enum {
+ SPRAID_REG_CAP = 0x0000,
+ SPRAID_REG_CC = 0x0014,
+ SPRAID_REG_CSTS = 0x001c,
+ SPRAID_REG_AQA = 0x0024,
+ SPRAID_REG_ASQ = 0x0028,
+ SPRAID_REG_ACQ = 0x0030,
+ SPRAID_REG_DBS = 0x1000,
+};
+
+enum {
+ SPRAID_CC_ENABLE = 1 << 0,
+ SPRAID_CC_CSS_NVM = 0 << 4,
+ SPRAID_CC_MPS_SHIFT = 7,
+ SPRAID_CC_AMS_SHIFT = 11,
+ SPRAID_CC_SHN_SHIFT = 14,
+ SPRAID_CC_IOSQES_SHIFT = 16,
+ SPRAID_CC_IOCQES_SHIFT = 20,
+ SPRAID_CC_AMS_RR = 0 << SPRAID_CC_AMS_SHIFT,
+ SPRAID_CC_SHN_NONE = 0 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CC_IOSQES = SPRAID_IO_IOSQES << SPRAID_CC_IOSQES_SHIFT,
+ SPRAID_CC_IOCQES = SPRAID_IO_IOCQES << SPRAID_CC_IOCQES_SHIFT,
+ SPRAID_CC_SHN_NORMAL = 1 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CC_SHN_MASK = 3 << SPRAID_CC_SHN_SHIFT,
+ SPRAID_CSTS_CFS_SHIFT = 1,
+ SPRAID_CSTS_SHST_SHIFT = 2,
+ SPRAID_CSTS_PP_SHIFT = 5,
+ SPRAID_CSTS_RDY = 1 << 0,
+ SPRAID_CSTS_SHST_CMPLT = 2 << 2,
+ SPRAID_CSTS_SHST_MASK = 3 << 2,
+ SPRAID_CSTS_CFS_MASK = 1 << SPRAID_CSTS_CFS_SHIFT,
+ SPRAID_CSTS_PP_MASK = 1 << SPRAID_CSTS_PP_SHIFT,
+};
+
+enum {
+ SPRAID_ADMIN_DELETE_SQ = 0x00,
+ SPRAID_ADMIN_CREATE_SQ = 0x01,
+ SPRAID_ADMIN_DELETE_CQ = 0x04,
+ SPRAID_ADMIN_CREATE_CQ = 0x05,
+ SPRAID_ADMIN_ABORT_CMD = 0x08,
+ SPRAID_ADMIN_SET_FEATURES = 0x09,
+ SPRAID_ADMIN_ASYNC_EVENT = 0x0c,
+ SPRAID_ADMIN_GET_INFO = 0xc6,
+ SPRAID_ADMIN_RESET = 0xc8,
+};
+
+enum {
+ SPRAID_GET_INFO_CTRL = 0,
+ SPRAID_GET_INFO_DEV_LIST = 1,
+};
+
+enum {
+ SPRAID_RESET_TARGET = 0,
+ SPRAID_RESET_BUS = 1,
+};
+
+enum {
+ SPRAID_AEN_ERROR = 0,
+ SPRAID_AEN_NOTICE = 2,
+ SPRAID_AEN_VS = 7,
+};
+
+enum {
+ SPRAID_AEN_DEV_CHANGED = 0x00,
+ SPRAID_AEN_FW_ACT_START = 0x01,
+ SPRAID_AEN_HOST_PROBING = 0x10,
+};
+
+enum {
+ SPRAID_AEN_TIMESYN = 0x00,
+ SPRAID_AEN_FW_ACT_FINISH = 0x02,
+ SPRAID_AEN_EVENT_MIN = 0x80,
+ SPRAID_AEN_EVENT_MAX = 0xff,
+};
+
+enum {
+ SPRAID_CMD_WRITE = 0x01,
+ SPRAID_CMD_READ = 0x02,
+
+ SPRAID_CMD_NONIO_NONE = 0x80,
+ SPRAID_CMD_NONIO_TODEV = 0x81,
+ SPRAID_CMD_NONIO_FROMDEV = 0x82,
+};
+
+enum {
+ SPRAID_QUEUE_PHYS_CONTIG = (1 << 0),
+ SPRAID_CQ_IRQ_ENABLED = (1 << 1),
+
+ SPRAID_FEAT_NUM_QUEUES = 0x07,
+ SPRAID_FEAT_ASYNC_EVENT = 0x0b,
+ SPRAID_FEAT_TIMESTAMP = 0x0e,
+};
+
+enum spraid_state {
+ SPRAID_NEW,
+ SPRAID_LIVE,
+ SPRAID_RESETTING,
+ SPRAID_DELETING,
+ SPRAID_DEAD,
+};
+
+enum {
+ SPRAID_CARD_HBA,
+ SPRAID_CARD_RAID,
+};
+
+enum spraid_cmd_type {
+ SPRAID_CMD_ADM,
+ SPRAID_CMD_IOPT,
+};
+
+struct spraid_completion {
+ __le32 result;
+ union {
+ struct {
+ __u8 sense_len;
+ __u8 resv[3];
+ };
+ __le32 result1;
+ };
+ __le16 sq_head;
+ __le16 sq_id;
+ __u16 cmd_id;
+ __le16 status;
+};
+
+struct spraid_ctrl_info {
+ __le32 nd;
+ __le16 max_cmds;
+ __le16 max_channel;
+ __le32 max_tgt_id;
+ __le16 max_lun;
+ __le16 max_num_sge;
+ __le16 lun_num_in_boot;
+ __u8 mdts;
+ __u8 acl;
+ __u8 aerl;
+ __u8 card_type;
+ __u16 rsvd;
+ __u32 rtd3e;
+ __u8 sn[32];
+ __u8 fr[16];
+ __u8 rsvd1[4020];
+};
+
+struct spraid_dev {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct Scsi_Host *shost;
+ struct spraid_queue *queues;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool[MAX_SMALL_POOL_NUM];
+ mempool_t *iod_mempool;
+ void __iomem *bar;
+ u32 max_qid;
+ u32 num_vecs;
+ u32 queue_count;
+ u32 ioq_depth;
+ int db_stride;
+ u32 __iomem *dbs;
+ struct rw_semaphore devices_rwsem;
+ int numa_node;
+ u32 page_size;
+ u32 ctrl_config;
+ u32 online_queues;
+ u64 cap;
+ int instance;
+ struct spraid_ctrl_info *ctrl_info;
+ struct spraid_dev_info *devices;
+
+ struct spraid_cmd *adm_cmds;
+ struct list_head adm_cmd_list;
+ spinlock_t adm_cmd_lock;
+
+ struct spraid_cmd *ioq_ptcmds;
+ struct list_head ioq_pt_list;
+ spinlock_t ioq_pt_lock;
+
+ struct work_struct scan_work;
+ struct work_struct timesyn_work;
+ struct work_struct reset_work;
+ struct work_struct fw_act_work;
+
+ enum spraid_state state;
+ spinlock_t state_lock;
+
+ struct request_queue *bsg_queue;
+};
+
+struct spraid_sgl_desc {
+ __le64 addr;
+ __le32 length;
+ __u8 rsvd[3];
+ __u8 type;
+};
+
+union spraid_data_ptr {
+ struct {
+ __le64 prp1;
+ __le64 prp2;
+ };
+ struct spraid_sgl_desc sgl;
+};
+
+struct spraid_admin_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le32 cdw2[4];
+ union spraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+struct spraid_features {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[2];
+ union spraid_data_ptr dptr;
+ __le32 fid;
+ __le32 dword11;
+ __le32 dword12;
+ __le32 dword13;
+ __le32 dword14;
+ __le32 dword15;
+};
+
+struct spraid_create_cq {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 cqid;
+ __le16 qsize;
+ __le16 cq_flags;
+ __le16 irq_vector;
+ __u32 rsvd12[4];
+};
+
+struct spraid_create_sq {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 sqid;
+ __le16 qsize;
+ __le16 sq_flags;
+ __le16 cqid;
+ __u32 rsvd12[4];
+};
+
+struct spraid_delete_queue {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[9];
+ __le16 qid;
+ __u16 rsvd10;
+ __u32 rsvd11[5];
+};
+
+struct spraid_get_info {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u32 rsvd2[4];
+ union spraid_data_ptr dptr;
+ __u8 type;
+ __u8 rsvd10[3];
+ __le32 cdw11;
+ __u32 rsvd12[4];
+};
+
+struct spraid_usr_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ union {
+ struct {
+ __le16 subopcode;
+ __le16 rsvd1;
+ } info_0;
+ __le32 cdw2;
+ };
+ union {
+ struct {
+ __le16 data_len;
+ __le16 param_len;
+ } info_1;
+ __le32 cdw3;
+ };
+ __u64 metadata;
+ union spraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+enum {
+ SPRAID_CMD_FLAG_SGL_METABUF = (1 << 6),
+ SPRAID_CMD_FLAG_SGL_METASEG = (1 << 7),
+ SPRAID_CMD_FLAG_SGL_ALL = SPRAID_CMD_FLAG_SGL_METABUF |
+ SPRAID_CMD_FLAG_SGL_METASEG,
+};
+
+enum spraid_cmd_state {
+ SPRAID_CMD_IDLE = 0,
+ SPRAID_CMD_IN_FLIGHT = 1,
+ SPRAID_CMD_COMPLETE = 2,
+ SPRAID_CMD_TIMEOUT = 3,
+ SPRAID_CMD_TMO_COMPLETE = 4,
+};
+
+struct spraid_abort_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __le16 sqid;
+ __le16 cid;
+ __u32 rsvd11[5];
+};
+
+struct spraid_reset_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __u8 type;
+ __u8 rsvd10[3];
+ __u32 rsvd11[5];
+};
+
+struct spraid_admin_command {
+ union {
+ struct spraid_admin_common_command common;
+ struct spraid_features features;
+ struct spraid_create_cq create_cq;
+ struct spraid_create_sq create_sq;
+ struct spraid_delete_queue delete_queue;
+ struct spraid_get_info get_info;
+ struct spraid_abort_cmd abort;
+ struct spraid_reset_cmd reset;
+ struct spraid_usr_cmd usr_cmd;
+ };
+};
+
+struct spraid_ioq_common_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __le32 cdw3[3];
+ union spraid_data_ptr dptr;
+ __le32 cdw10[6];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __le32 cdw26[6];
+};
+
+struct spraid_rw_command {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union spraid_data_ptr dptr;
+ __le64 slba;
+ __le16 nlb;
+ __le16 control;
+ __u32 rsvd13[3];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct spraid_scsi_nonio {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_length;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union spraid_data_ptr dptr;
+ __u32 rsvd10[5];
+ __le32 buffer_len;
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct spraid_ioq_command {
+ union {
+ struct spraid_ioq_common_command common;
+ struct spraid_rw_command rw;
+ struct spraid_scsi_nonio scsi_nonio;
+ };
+};
+
+struct spraid_passthru_common_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 data_len;
+ __u16 param_len;
+ } info_1;
+ __u32 cdw3;
+ };
+ __u64 metadata;
+
+ __u64 addr;
+ __u64 prp2;
+
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct spraid_ioq_passthru_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 res_sense_len;
+ __u8 cdb_len;
+ __u8 rsvd0;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_1;
+ __u32 cdw3;
+ };
+ union {
+ struct {
+ __u16 rsvd;
+ __u16 param_len;
+ } info_2;
+ __u32 cdw4;
+ };
+ __u32 cdw5;
+ __u64 addr;
+ __u64 prp2;
+ union {
+ struct {
+ __u16 eid;
+ __u16 sid;
+ } info_3;
+ __u32 cdw10;
+ };
+ union {
+ struct {
+ __u16 did;
+ __u8 did_flag;
+ __u8 rsvd2;
+ } info_4;
+ __u32 cdw11;
+ };
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 data_len;
+ __u32 cdw16;
+ __u32 cdw17;
+ __u32 cdw18;
+ __u32 cdw19;
+ __u32 cdw20;
+ __u32 cdw21;
+ __u32 cdw22;
+ __u32 cdw23;
+ __u64 sense_addr;
+ __u32 cdw26[4];
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct spraid_bsg_request {
+ u32 msgcode;
+ u32 control;
+ union {
+ struct spraid_passthru_common_cmd admcmd;
+ struct spraid_ioq_passthru_cmd ioqcmd;
+ };
+};
+
+enum {
+ SPRAID_BSG_ADM,
+ SPRAID_BSG_IOQ,
+};
+
+struct spraid_cmd {
+ int qid;
+ int cid;
+ u32 result0;
+ u32 result1;
+ u16 status;
+ void *priv;
+ enum spraid_cmd_state state;
+ struct completion cmd_done;
+ struct list_head list;
+};
+
+struct spraid_queue {
+ struct spraid_dev *hdev;
+ spinlock_t sq_lock;
+
+ spinlock_t cq_lock ____cacheline_aligned_in_smp;
+
+ void *sq_cmds;
+
+ struct spraid_completion *cqes;
+
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ u32 __iomem *q_db;
+ u8 cq_phase;
+ u8 sqes;
+ u16 qid;
+ u16 sq_tail;
+ u16 cq_head;
+ u16 last_cq_head;
+ u16 q_depth;
+ s16 cq_vector;
+ void *sense;
+ dma_addr_t sense_dma_addr;
+ struct dma_pool *prp_small_pool;
+};
+
+struct spraid_iod {
+ struct spraid_queue *spraidq;
+ enum spraid_cmd_state state;
+ int npages;
+ u32 nsge;
+ u32 length;
+ bool use_sgl;
+ bool sg_drv_mgmt;
+ dma_addr_t first_dma;
+ void *sense;
+ dma_addr_t sense_dma;
+ struct scatterlist *sg;
+ struct scatterlist inline_sg[0];
+};
+
+#define SPRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
+#define SPRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
+#define SPRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
+#define SPRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
+
+#define SPRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
+#define SPRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+struct spraid_vd_info {
+ __u8 name[32];
+ __le16 id;
+ __u8 rg_id;
+ __u8 rg_level;
+ __u8 sg_num;
+ __u8 sg_disk_num;
+ __u8 vd_status;
+ __u8 vd_type;
+ __u8 rsvd1[4056];
+};
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+struct bgtask_info {
+ __u8 type;
+ __u8 progress;
+ __u8 rate;
+ __u8 rsvd0;
+ __le16 vd_id;
+ __le16 time_left;
+ __u8 rsvd1[4];
+};
+
+struct spraid_bgtask {
+ __u8 sw;
+ __u8 task_num;
+ __u8 rsvd[6];
+ struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
+struct spraid_dev_info {
+ __le32 hdid;
+ __le16 target;
+ __u8 channel;
+ __u8 lun;
+ __u8 attr;
+ __u8 flag;
+ __le16 max_io_kb;
+};
+
+#define MAX_DEV_ENTRY_PER_PAGE_4K 340
+struct spraid_dev_list {
+ __le32 dev_num;
+ __u32 rsvd0[3];
+ struct spraid_dev_info devices[MAX_DEV_ENTRY_PER_PAGE_4K];
+};
+
+struct spraid_sdev_hostdata {
+ u32 hdid;
+ u16 max_io_kb;
+ u8 attr;
+ u8 flag;
+ u8 rg_id;
+ u8 rsvd[3];
+};
+
+#endif
+
diff --git a/drivers/scsi/spraid/spraid_main.c b/drivers/scsi/spraid/spraid_main.c
new file mode 100644
index 000000000000..519b39f44e91
--- /dev/null
+++ b/drivers/scsi/spraid/spraid_main.c
@@ -0,0 +1,3875 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+/* Ramaxel Raid SPXXX Series Linux Driver */
+
+#define pr_fmt(fmt) "spraid: " fmt
+
+#include <linux/sched/signal.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/once.h>
+#include <linux/debugfs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <asm/unaligned.h>
+#include <linux/sort.h>
+#include <target/target_core_backend.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+
+
+#include "spraid.h"
+
+static u32 admin_tmout = 60;
+module_param(admin_tmout, uint, 0644);
+MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
+
+static u32 scmd_tmout_nonpt = 180;
+module_param(scmd_tmout_nonpt, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_nonpt,
+ "scsi commands timeout for rawdisk&raid(seconds)");
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops ioq_depth_ops = {
+ .set = ioq_depth_set,
+ .get = param_get_uint,
+};
+
+static u32 io_queue_depth = 1024;
+module_param_cb(io_queue_depth, &ioq_depth_ops, &io_queue_depth, 0644);
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");
+
+static int log_debug_switch_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+
+ return param_set_byte(val, kp);
+}
+
+static const struct kernel_param_ops log_debug_switch_ops = {
+ .set = log_debug_switch_set,
+ .get = param_get_byte,
+};
+
+static unsigned char log_debug_switch;
+module_param_cb(log_debug_switch, &log_debug_switch_ops,
+ &log_debug_switch, 0644);
+MODULE_PARM_DESC(log_debug_switch,
+ "set log state, non-zero to switch on debug log");
+
+static int small_pool_num_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+ if (n > MAX_SMALL_POOL_NUM)
+ n = MAX_SMALL_POOL_NUM;
+ if (n < 1)
+ n = 1;
+ *((u8 *)kp->arg) = n;
+
+ return 0;
+}
+
+static const struct kernel_param_ops small_pool_num_ops = {
+ .set = small_pool_num_set,
+ .get = param_get_byte,
+};
+
+/* It was found that the spinlock of a single pool is heavily
+ * contended by multiple CPUs, so multiple pools are introduced
+ * to reduce the contention.
+ */
+static unsigned char small_pool_num = 4;
+module_param_cb(small_pool_num, &small_pool_num_ops, &small_pool_num, 0644);
+MODULE_PARM_DESC(small_pool_num, "set prp small pool num, default 4, MAX 16");
+
+static void spraid_free_queue(struct spraid_queue *spraidq);
+static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result);
+static void spraid_handle_aen_vs(struct spraid_dev *hdev,
+ u32 result, u32 result1);
+
+static DEFINE_IDA(spraid_instance_ida);
+
+static struct class *spraid_class;
+
+#define SPRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2)
+
+static struct workqueue_struct *spraid_wq;
+
+#define dev_log_dbg(dev, fmt, ...) do { \
+ if (unlikely(log_debug_switch)) \
+ dev_info(dev, "[%s] [%d] " fmt, \
+ __func__, __LINE__, ##__VA_ARGS__); \
+} while (0)
+
+#define SPRAID_DRV_VERSION "1.0.0.0"
+
+#define ADMIN_TIMEOUT (admin_tmout * HZ)
+#define ADMIN_ERR_TIMEOUT 32757
+
+#define SPRAID_WAIT_ABNL_CMD_TIMEOUT (3 * 2)
+
+#define SPRAID_DMA_MSK_BIT_MAX 64
+
+enum FW_STAT_CODE {
+ FW_STAT_OK = 0,
+ FW_STAT_NEED_CHECK,
+ FW_STAT_ERROR,
+ FW_STAT_EP_PCIE_ERROR,
+ FW_STAT_NAC_DMA_ERROR,
+ FW_STAT_ABORTED,
+ FW_STAT_NEED_RETRY
+};
+
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60",
+ "NA"};
+
+static const char * const raid_states[] = {
+ "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED",
+ "FORMATTING", "SANITIZING", "INITIALIZING", "INITIALIZE_FAIL",
+ "DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
+static int ioq_depth_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0;
+ int ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static int spraid_remap_bar(struct spraid_dev *hdev, u32 size)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (size > pci_resource_len(pdev, 0)) {
+ dev_err(hdev->dev, "Input size[%u] exceeds bar0 length[%llu]\n",
+ size, pci_resource_len(pdev, 0));
+ return -ENOMEM;
+ }
+
+ if (hdev->bar)
+ iounmap(hdev->bar);
+
+ hdev->bar = ioremap(pci_resource_start(pdev, 0), size);
+ if (!hdev->bar) {
+ dev_err(hdev->dev, "ioremap for bar0 failed\n");
+ return -ENOMEM;
+ }
+ hdev->dbs = hdev->bar + SPRAID_REG_DBS;
+
+ return 0;
+}
+
+static int spraid_dev_map(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ ret = pci_request_mem_regions(pdev, "spraid");
+ if (ret) {
+ dev_err(hdev->dev, "fail to request memory regions\n");
+ return ret;
+ }
+
+ ret = spraid_remap_bar(hdev, SPRAID_REG_DBS + 4096);
+ if (ret) {
+ pci_release_mem_regions(pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void spraid_dev_unmap(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (hdev->bar) {
+ iounmap(hdev->bar);
+ hdev->bar = NULL;
+ }
+ pci_release_mem_regions(pdev);
+}
+
+static int spraid_pci_enable(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = -ENOMEM;
+ u64 maskbit = SPRAID_DMA_MSK_BIT_MAX;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(hdev->dev,
+ "Enable pci device memory resources failed\n");
+ return ret;
+ }
+ pci_set_master(pdev);
+
+ if (readl(hdev->bar + SPRAID_REG_CSTS) == U32_MAX) {
+ ret = -ENODEV;
+ dev_err(hdev->dev, "Read csts register failed\n");
+ goto disable;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(hdev->dev,
+ "Allocate one IRQ for setup admin channel failed\n");
+ goto disable;
+ }
+
+ hdev->cap = lo_hi_readq(hdev->bar + SPRAID_REG_CAP);
+ hdev->ioq_depth = min_t(u32, SPRAID_CAP_MQES(hdev->cap) + 1,
+ io_queue_depth);
+ hdev->db_stride = 1 << SPRAID_CAP_STRIDE(hdev->cap);
+
+ maskbit = SPRAID_CAP_DMAMASK(hdev->cap);
+ if (maskbit < 32 || maskbit > SPRAID_DMA_MSK_BIT_MAX) {
+ dev_err(hdev->dev,
+ "err, dma mask invalid[%llu], set to default\n",
+ maskbit);
+ maskbit = SPRAID_DMA_MSK_BIT_MAX;
+ }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) {
+ dev_err(hdev->dev, "set dma mask and coherent failed\n");
+ goto disable;
+ }
+
+ dev_info(hdev->dev, "set dma mask[%llu] success\n", maskbit);
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_save_state(pdev);
+
+ return 0;
+
+disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static int spraid_npages_prp(u32 size, struct spraid_dev *hdev)
+{
+ u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
+
+ return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, PAGE_SIZE - PRP_ENTRY_SIZE);
+}
+
+static int spraid_npages_sgl(u32 nseg)
+{
+ return DIV_ROUND_UP(nseg * sizeof(struct spraid_sgl_desc), PAGE_SIZE);
+}
+
+static void **spraid_iod_list(struct spraid_iod *iod)
+{
+ return (void **)(iod->inline_sg + (iod->sg_drv_mgmt ? iod->nsge : 0));
+}
+
+static u32 spraid_iod_ext_size(struct spraid_dev *hdev, u32 size, u32 nsge,
+ bool sg_drv_mgmt, bool use_sgl)
+{
+ size_t alloc_size, sg_size;
+
+ if (use_sgl)
+ alloc_size = sizeof(__le64 *) * spraid_npages_sgl(nsge);
+ else
+ alloc_size = sizeof(__le64 *) * spraid_npages_prp(size, hdev);
+
+ sg_size = sg_drv_mgmt ? (sizeof(struct scatterlist) * nsge) : 0;
+ return sg_size + alloc_size;
+}
+
+static u32 spraid_cmd_size(struct spraid_dev *hdev,
+ bool sg_drv_mgmt, bool use_sgl)
+{
+ u32 alloc_size = spraid_iod_ext_size(hdev, SPRAID_INT_BYTES(hdev),
+ SPRAID_INT_PAGES, sg_drv_mgmt, use_sgl);
+
+ dev_info(hdev->dev, "sg_drv_mgmt: %s, use_sgl: %s, iod size: %lu;"
+ " alloc_size: %u\n", sg_drv_mgmt ? "true" : "false",
+ use_sgl ? "true" : "false",
+ sizeof(struct spraid_iod), alloc_size);
+
+ return sizeof(struct spraid_iod) + alloc_size;
+}
+
+static int spraid_setup_prps(struct spraid_dev *hdev, struct spraid_iod *iod)
+{
+ struct scatterlist *sg = iod->sg;
+ u64 dma_addr = sg_dma_address(sg);
+ int dma_len = sg_dma_len(sg);
+ __le64 *prp_list, *old_prp_list;
+ u32 page_size = hdev->page_size;
+ int offset = dma_addr & (page_size - 1);
+ void **list = spraid_iod_list(iod);
+ int length = iod->length;
+ struct dma_pool *pool;
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ length -= (page_size - offset);
+ if (length <= 0) {
+ iod->first_dma = 0;
+ return 0;
+ }
+
+ dma_len -= (page_size - offset);
+ if (dma_len) {
+ dma_addr += (page_size - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= page_size) {
+ iod->first_dma = dma_addr;
+ return 0;
+ }
+
+ nprps = DIV_ROUND_UP(length, page_size);
+ if (nprps <= (SMALL_POOL_SIZE / PRP_ENTRY_SIZE)) {
+ pool = iod->spraidq->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate first prp_list memory failed\n");
+ iod->first_dma = dma_addr;
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ i = 0;
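+ /*
+ * Walk the remaining scatterlist and fill one PRP entry per device page.
+ * When the current PRP page runs out of entries, its last slot is turned
+ * into a chain pointer to a freshly allocated PRP page and the displaced
+ * entry is carried over as the first entry of the new page.
+ */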
+ for (;;) {
+ if (i == page_size / PRP_ENTRY_SIZE) {
+ old_prp_list = prp_list;
+
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+ if (!prp_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate %dth prp_list memory failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= page_size;
+ dma_addr += page_size;
+ length -= page_size;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ if (unlikely(dma_len < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ return 0;
+
+bad_sgl:
+ dev_err(hdev->dev,
+ "Setup prps, invalid SGL for payload: %d nents: %d\n",
+ iod->length, iod->nsge);
+ return -EIO;
+}
+
+#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct spraid_sgl_desc))
+
+static void spraid_submit_cmd(struct spraid_queue *spraidq, const void *cmd)
+{
+ u32 sqes = SQE_SIZE(spraidq->qid);
+ unsigned long flags;
+ struct spraid_admin_common_command *acd =
+ (struct spraid_admin_common_command *)cmd;
+
+ spin_lock_irqsave(&spraidq->sq_lock, flags);
+ memcpy((spraidq->sq_cmds + sqes * spraidq->sq_tail), cmd, sqes);
+ if (++spraidq->sq_tail == spraidq->q_depth)
+ spraidq->sq_tail = 0;
+
+ writel(spraidq->sq_tail, spraidq->q_db);
+ spin_unlock_irqrestore(&spraidq->sq_lock, flags);
+
+ dev_log_dbg(spraidq->hdev->dev,
+ "cid[%d] qid[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n",
+ acd->command_id, spraidq->qid, acd->opcode,
+ acd->flags, le32_to_cpu(acd->hdid));
+}
+
+static u32 spraid_mod64(u64 dividend, u32 divisor)
+{
+ u64 d;
+ u32 remainder;
+
+ if (!divisor)
+ pr_err("DIVISOR is zero, in div fn\n");
+
+ d = dividend;
+ remainder = do_div(d, divisor);
+ return remainder;
+}
+
+static inline bool spraid_is_rw_scmd(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case READ_32:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case WRITE_32:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool spraid_is_prp(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd, u32 nsge)
+{
+ struct scatterlist *sg = scsi_sglist(scmd);
+ u32 page_size = hdev->page_size;
+ bool is_prp = true;
+ int i = 0;
+
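+ /*
+ * PRP addressing only works when every middle element is page aligned in
+ * both address and length, the first element ends on a page boundary and
+ * the last element starts on one; anything else forces SGL mode.
+ */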
+ scsi_for_each_sg(scmd, sg, nsge, i) {
+ if (i != 0 && i != nsge - 1) {
+ if (spraid_mod64(sg_dma_len(sg), page_size) ||
+ spraid_mod64(sg_dma_address(sg), page_size)) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == 0) {
+ if ((spraid_mod64((sg_dma_address(sg) + sg_dma_len(sg)),
+ page_size))) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ if (nsge > 1 && i == (nsge - 1)) {
+ if (spraid_mod64(sg_dma_address(sg), page_size)) {
+ is_prp = false;
+ break;
+ }
+ }
+ }
+
+ return is_prp;
+}
+
+enum {
+ SPRAID_SGL_FMT_DATA_DESC = 0x00,
+ SPRAID_SGL_FMT_SEG_DESC = 0x02,
+ SPRAID_SGL_FMT_LAST_SEG_DESC = 0x03,
+ SPRAID_KEY_SGL_FMT_DATA_DESC = 0x04,
+ SPRAID_TRANSPORT_SGL_DATA_DESC = 0x05
+};
+
+static void spraid_sgl_set_data(struct spraid_sgl_desc *sge,
+ struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = SPRAID_SGL_FMT_DATA_DESC << 4;
+}
+
+static void spraid_sgl_set_seg(struct spraid_sgl_desc *sge,
+ dma_addr_t dma_addr, int entries)
+{
+ sge->addr = cpu_to_le64(dma_addr);
+ if (entries <= SGES_PER_PAGE) {
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = SPRAID_SGL_FMT_LAST_SEG_DESC << 4;
+ } else {
+ sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->type = SPRAID_SGL_FMT_SEG_DESC << 4;
+ }
+}
+
+static int spraid_setup_ioq_cmd_sgl(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd,
+ struct spraid_ioq_command *ioq_cmd,
+ struct spraid_iod *iod)
+{
+ struct spraid_sgl_desc *sg_list, *link, *old_sg_list;
+ struct scatterlist *sg = scsi_sglist(scmd);
+ void **list = spraid_iod_list(iod);
+ struct dma_pool *pool;
+ int nsge = iod->nsge;
+ dma_addr_t sgl_dma;
+ int i = 0;
+
+ ioq_cmd->common.flags |= SPRAID_CMD_FLAG_SGL_METABUF;
+
+ if (nsge == 1) {
+ spraid_sgl_set_data(&ioq_cmd->common.dptr.sgl, sg);
+ return 0;
+ }
+
+ if (nsge <= (SMALL_POOL_SIZE / sizeof(struct spraid_sgl_desc))) {
+ pool = iod->spraidq->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate first sgl_list failed\n");
+ iod->npages = -1;
+ return -ENOMEM;
+ }
+
+ list[0] = sg_list;
+ iod->first_dma = sgl_dma;
+ spraid_sgl_set_seg(&ioq_cmd->common.dptr.sgl, sgl_dma, nsge);
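+ /*
+ * Fill data descriptors page by page. When a descriptor page fills up,
+ * its last slot is rewritten as a segment descriptor pointing at the next
+ * page, and the data descriptor it used to hold is moved to that page.
+ */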
+ do {
+ if (i == SGES_PER_PAGE) {
+ old_sg_list = sg_list;
+ link = &old_sg_list[SGES_PER_PAGE - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev,
+ "Allocate %dth sgl_list;"
+ " failed\n",
+ iod->npages + 1);
+ return -ENOMEM;
+ }
+ list[iod->npages++] = sg_list;
+
+ i = 0;
+ memcpy(&sg_list[i++], link, sizeof(*link));
+ spraid_sgl_set_seg(link, sgl_dma, nsge);
+ }
+
+ spraid_sgl_set_data(&sg_list[i++], sg);
+ sg = sg_next(sg);
+ } while (--nsge > 0);
+
+ return 0;
+}
+
+#define SPRAID_RW_FUA BIT(14)
+
+static void spraid_setup_rw_cmd(struct spraid_dev *hdev,
+ struct spraid_rw_command *rw,
+ struct scsi_cmnd *scmd)
+{
+ u32 start_lba_lo, start_lba_hi;
+ u32 datalength = 0;
+ u16 control = 0;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ rw->opcode = SPRAID_CMD_WRITE;
+ } else if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
+ rw->opcode = SPRAID_CMD_READ;
+ } else {
+ dev_err(hdev->dev,
+ "Invalid IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ }
+
+ /* 6-byte READ(0x08) or WRITE(0x0A) cdb */
+ if (scmd->cmd_len == 6) {
+ datalength = (u32)(scmd->cmnd[4] == 0 ?
+ IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
+ start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
+
+ start_lba_lo &= 0x1FFFFF;
+ }
+
+ /* 10-byte READ(0x28) or WRITE(0x2A) cdb */
+ else if (scmd->cmd_len == 10) {
+ datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+
+ /* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
+ else if (scmd->cmd_len == 12) {
+ datalength = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+ /* 16-byte READ(0x88) or WRITE(0x8A) cdb */
+ else if (scmd->cmd_len == 16) {
+ datalength = get_unaligned_be32(&scmd->cmnd[10]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+ /* 32-byte variable-length READ(32) or WRITE(32) cdb */
+ else if (scmd->cmd_len == 32) {
+ datalength = get_unaligned_be32(&scmd->cmnd[28]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[16]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[12]);
+
+ if (scmd->cmnd[10] & FUA_MASK)
+ control |= SPRAID_RW_FUA;
+ }
+
+ if (unlikely(datalength > U16_MAX || datalength == 0)) {
+ dev_err(hdev->dev,
+ "Invalid IO for illegal transfer data length: %u\n",
+ datalength);
+ WARN_ON(1);
+ }
+
+ rw->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo);
+ /* nlb is a 0-based block count */
+ rw->nlb = cpu_to_le16((u16)(datalength - 1));
+ rw->control = cpu_to_le16(control);
+}
+
+static void spraid_setup_nonio_cmd(struct spraid_dev *hdev,
+ struct spraid_scsi_nonio *scsi_nonio,
+ struct scsi_cmnd *scmd)
+{
+ scsi_nonio->buffer_len = cpu_to_le32(scsi_bufflen(scmd));
+
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_NONE;
+ break;
+ case DMA_TO_DEVICE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_TODEV;
+ break;
+ case DMA_FROM_DEVICE:
+ scsi_nonio->opcode = SPRAID_CMD_NONIO_FROMDEV;
+ break;
+ default:
+ dev_err(hdev->dev,
+ "Invalid IO for unsupported data direction: %d\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ }
+}
+
+static void spraid_setup_ioq_cmd(struct spraid_dev *hdev,
+ struct spraid_ioq_command *ioq_cmd,
+ struct scsi_cmnd *scmd)
+{
+ memcpy(ioq_cmd->common.cdb, scmd->cmnd, scmd->cmd_len);
+ ioq_cmd->common.cdb_len = scmd->cmd_len;
+
+ if (spraid_is_rw_scmd(scmd))
+ spraid_setup_rw_cmd(hdev, &ioq_cmd->rw, scmd);
+ else
+ spraid_setup_nonio_cmd(hdev, &ioq_cmd->scsi_nonio, scmd);
+}
+
+static int spraid_init_iod(struct spraid_dev *hdev, struct spraid_iod *iod,
+ struct spraid_ioq_command *ioq_cmd,
+ struct scsi_cmnd *scmd)
+{
+ if (unlikely(!iod->sense)) {
+ dev_err(hdev->dev, "Allocate sense data buffer failed\n");
+ return -ENOMEM;
+ }
+ ioq_cmd->common.sense_addr = cpu_to_le64(iod->sense_dma);
+ ioq_cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+
+ iod->nsge = 0;
+ iod->npages = -1;
+ iod->use_sgl = 0;
+ iod->sg_drv_mgmt = false;
+ WRITE_ONCE(iod->state, SPRAID_CMD_IDLE);
+
+ return 0;
+}
+
+static void spraid_free_iod_res(struct spraid_dev *hdev, struct spraid_iod *iod)
+{
+ const int last_prp = hdev->page_size / sizeof(__le64) - 1;
+ dma_addr_t dma_addr, next_dma_addr;
+ struct spraid_sgl_desc *sg_list;
+ __le64 *prp_list;
+ void *addr;
+ int i;
+
+ dma_addr = iod->first_dma;
+ if (iod->npages == 0)
+ dma_pool_free(iod->spraidq->prp_small_pool,
+ spraid_iod_list(iod)[0], dma_addr);
+
+ for (i = 0; i < iod->npages; i++) {
+ addr = spraid_iod_list(iod)[i];
+
+ if (iod->use_sgl) {
+ sg_list = addr;
+ next_dma_addr =
+ le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+ } else {
+ prp_list = addr;
+ next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+ }
+
+ dma_pool_free(hdev->prp_page_pool, addr, dma_addr);
+ dma_addr = next_dma_addr;
+ }
+
+ if (iod->sg_drv_mgmt && iod->sg != iod->inline_sg) {
+ iod->sg_drv_mgmt = false;
+ mempool_free(iod->sg, hdev->iod_mempool);
+ }
+
+ iod->sense = NULL;
+ iod->npages = -1;
+}
+
+static int spraid_io_map_data(struct spraid_dev *hdev, struct spraid_iod *iod,
+ struct scsi_cmnd *scmd,
+ struct spraid_ioq_command *ioq_cmd)
+{
+ int ret;
+
+ iod->nsge = scsi_dma_map(scmd);
+
+ /* No data to DMA; it may be a SCSI command without a data transfer */
+ if (unlikely(iod->nsge == 0))
+ return 0;
+
+ iod->length = scsi_bufflen(scmd);
+ iod->sg = scsi_sglist(scmd);
+ iod->use_sgl = !spraid_is_prp(hdev, scmd, iod->nsge);
+
+ if (iod->use_sgl) {
+ ret = spraid_setup_ioq_cmd_sgl(hdev, scmd, ioq_cmd, iod);
+ } else {
+ ret = spraid_setup_prps(hdev, iod);
+ ioq_cmd->common.dptr.prp1 =
+ cpu_to_le64(sg_dma_address(iod->sg));
+ ioq_cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+ }
+
+ if (ret)
+ scsi_dma_unmap(scmd);
+
+ return ret;
+}
+
+static void spraid_map_status(struct spraid_iod *iod, struct scsi_cmnd *scmd,
+ struct spraid_completion *cqe)
+{
+ scsi_set_resid(scmd, 0);
+
+ switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) {
+ case FW_STAT_OK:
+ set_host_byte(scmd, DID_OK);
+ break;
+ case FW_STAT_NEED_CHECK:
+ set_host_byte(scmd, DID_OK);
+ scmd->result |= le16_to_cpu(cqe->status) >> 8;
+ if (scmd->result & SAM_STAT_CHECK_CONDITION) {
+ memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(scmd->sense_buffer, iod->sense,
+ SCSI_SENSE_BUFFERSIZE);
+ scmd->result =
+ (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
+ }
+ break;
+ case FW_STAT_ABORTED:
+ set_host_byte(scmd, DID_ABORT);
+ break;
+ case FW_STAT_NEED_RETRY:
+ set_host_byte(scmd, DID_REQUEUE);
+ break;
+ default:
+ set_host_byte(scmd, DID_BAD_TARGET);
+ break;
+ }
+}
+
+static inline void spraid_get_tag_from_scmd(struct scsi_cmnd *scmd,
+ u16 *qid, u16 *cid)
+{
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
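+ /* hw queue index is 0-based but queue 0 is the admin queue, so I/O queues start at 1 */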
+ *qid = blk_mq_unique_tag_to_hwq(tag) + 1;
+ *cid = blk_mq_unique_tag_to_tag(tag);
+}
+
+static int spraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_dev *hdev = shost_priv(shost);
+ struct scsi_device *sdev = scmd->device;
+ struct spraid_sdev_hostdata *hostdata;
+ struct spraid_ioq_command ioq_cmd;
+ struct spraid_queue *ioq;
+ unsigned long elapsed;
+ u16 hwq, cid;
+ int ret;
+
+ if (unlikely(!scmd)) {
+ dev_err(hdev->dev, "err, scmd is null\n");
+ return 0;
+ }
+
+ if (unlikely(hdev->state != SPRAID_LIVE)) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (log_debug_switch)
+ scsi_print_command(scmd);
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ hostdata = sdev->hostdata;
+ ioq = &hdev->queues[hwq];
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.rw.hdid = cpu_to_le32(hostdata->hdid);
+ ioq_cmd.rw.command_id = cid;
+
+ spraid_setup_ioq_cmd(hdev, &ioq_cmd, scmd);
+
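+ /* each command id owns a fixed SCSI_SENSE_BUFFERSIZE slot in the per-queue sense buffer */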
+ ret = cid * SCSI_SENSE_BUFFERSIZE;
+ iod->sense = ioq->sense + ret;
+ iod->sense_dma = ioq->sense_dma_addr + ret;
+
+ ret = spraid_init_iod(hdev, iod, &ioq_cmd, scmd);
+ if (unlikely(ret))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ iod->spraidq = ioq;
+ ret = spraid_io_map_data(hdev, iod, scmd, &ioq_cmd);
+ if (unlikely(ret)) {
+ dev_err(hdev->dev, "spraid_io_map_data Err.\n");
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ ret = 0;
+ goto deinit_iod;
+ }
+
+ WRITE_ONCE(iod->state, SPRAID_CMD_IN_FLIGHT);
+ spraid_submit_cmd(ioq, &ioq_cmd);
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dev_log_dbg(hdev->dev,
+ "cid[%d] qid[%d] submit IO cost %3ld.%3ld seconds\n",
+ cid, hwq, elapsed / HZ, elapsed % HZ);
+ return 0;
+
+deinit_iod:
+ spraid_free_iod_res(hdev, iod);
+ return ret;
+}
+
+static int spraid_match_dev(struct spraid_dev *hdev, u16 idx,
+ struct scsi_device *sdev)
+{
+ if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[idx].flag)) {
+ if (sdev->channel == hdev->devices[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+ sdev->lun < hdev->devices[idx].lun) {
+ dev_info(hdev->dev,
+ "Match device success, channel;"
+ "target:lun[%d:%d:%d]\n",
+ hdev->devices[idx].channel,
+ hdev->devices[idx].target,
+ hdev->devices[idx].lun);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int spraid_slave_alloc(struct scsi_device *sdev)
+{
+ struct spraid_sdev_hostdata *hostdata;
+ struct spraid_dev *hdev;
+ u16 idx;
+
+ hdev = shost_priv(sdev->host);
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+ if (!hostdata) {
+ dev_err(hdev->dev, "Alloc scsi host data memory failed\n");
+ return -ENOMEM;
+ }
+
+ down_read(&hdev->devices_rwsem);
+ for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) {
+ if (spraid_match_dev(hdev, idx, sdev))
+ goto scan_host;
+ }
+ up_read(&hdev->devices_rwsem);
+
+ kfree(hostdata);
+ return -ENXIO;
+
+scan_host:
+ hostdata->hdid = le32_to_cpu(hdev->devices[idx].hdid);
+ hostdata->max_io_kb = le16_to_cpu(hdev->devices[idx].max_io_kb);
+ hostdata->attr = hdev->devices[idx].attr;
+ hostdata->flag = hdev->devices[idx].flag;
+ hostdata->rg_id = 0xff;
+ sdev->hostdata = hostdata;
+ up_read(&hdev->devices_rwsem);
+ return 0;
+}
+
+static void spraid_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+static int spraid_slave_configure(struct scsi_device *sdev)
+{
+ u16 idx;
+ unsigned int timeout = scmd_tmout_nonpt * HZ;
+ struct spraid_dev *hdev = shost_priv(sdev->host);
+ struct spraid_sdev_hostdata *hostdata = sdev->hostdata;
+ u32 max_sec = sdev->host->max_sectors;
+
+ if (hostdata) {
+ idx = hostdata->hdid - 1;
+ if (sdev->channel == hdev->devices[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
+ sdev->lun < hdev->devices[idx].lun) {
+ if (SPRAID_DEV_INFO_ATTR_PT(hdev->devices[idx].attr))
+ timeout = 30 * HZ;
+ else
+ timeout = scmd_tmout_nonpt * HZ;
+ max_sec = le16_to_cpu(hdev->devices[idx].max_io_kb)
+ << 1;
+ } else {
+ dev_err(hdev->dev,
+ "[%s] err, sdev->channel:id:lun[%d:%d:%lld];"
+ "devices[%d], channel:target:lun[%d:%d:%d]\n",
+ __func__, sdev->channel, sdev->id, sdev->lun,
+ idx, hdev->devices[idx].channel,
+ hdev->devices[idx].target,
+ hdev->devices[idx].lun);
+ }
+ } else {
+ dev_err(hdev->dev, "[%s] err, sdev->hostdata is null\n",
+ __func__);
+ }
+
+ blk_queue_rq_timeout(sdev->request_queue, timeout);
+ sdev->eh_timeout = timeout;
+
+ if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
+ max_sec = sdev->host->max_sectors;
+
+ dev_info(hdev->dev,
+ "[%s] sdev->channel:id:lun[%d:%d:%lld];"
+ " scmd_timeout[%d]s, maxsec[%d]\n",
+ __func__, sdev->channel, sdev->id,
+ sdev->lun, timeout / HZ, max_sec);
+
+ return 0;
+}
+
+static void spraid_shost_init(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u8 domain, bus;
+ u32 dev_func;
+
+ domain = pci_domain_nr(pdev->bus);
+ bus = pdev->bus->number;
+ dev_func = pdev->devfn;
+
+ hdev->shost->nr_hw_queues = hdev->online_queues - 1;
+ hdev->shost->can_queue = (hdev->ioq_depth - SPRAID_PTCMDS_PERQ);
+
+ hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ /* max transfer is (1 << mdts) 4KB units, expressed in 512-byte sectors */
+ hdev->shost->max_sectors =
+ (1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512;
+ hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV;
+ hdev->shost->max_channel =
+ le16_to_cpu(hdev->ctrl_info->max_channel) - 1;
+ hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id);
+ hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun);
+
+ hdev->shost->this_id = -1;
+ hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func;
+ hdev->shost->max_cmd_len = MAX_CDB_LEN;
+ hdev->shost->hostt->cmd_size = max(spraid_cmd_size(hdev, false, true),
+ spraid_cmd_size(hdev, false, false));
+}
+
+static inline void spraid_host_deinit(struct spraid_dev *hdev)
+{
+ ida_free(&spraid_instance_ida, hdev->instance);
+}
+
+static int spraid_alloc_queue(struct spraid_dev *hdev, u16 qid, u16 depth)
+{
+ struct spraid_queue *spraidq = &hdev->queues[qid];
+ int ret = 0;
+
+ if (hdev->queue_count > qid) {
+ dev_info(hdev->dev, "[%s] warn: queue[%d] is exist\n",
+ __func__, qid);
+ return 0;
+ }
+
+ spraidq->cqes = dma_alloc_coherent(hdev->dev, CQ_SIZE(depth),
+ &spraidq->cq_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!spraidq->cqes)
+ return -ENOMEM;
+
+ spraidq->sq_cmds = dma_alloc_coherent(hdev->dev, SQ_SIZE(qid, depth),
+ &spraidq->sq_dma_addr,
+ GFP_KERNEL);
+ if (!spraidq->sq_cmds) {
+ ret = -ENOMEM;
+ goto free_cqes;
+ }
+
+ spin_lock_init(&spraidq->sq_lock);
+ spin_lock_init(&spraidq->cq_lock);
+ spraidq->hdev = hdev;
+ spraidq->q_depth = depth;
+ spraidq->qid = qid;
+ spraidq->cq_vector = -1;
+ hdev->queue_count++;
+
+ /* alloc sense buffer */
+ spraidq->sense = dma_alloc_coherent(hdev->dev, SENSE_SIZE(depth),
+ &spraidq->sense_dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!spraidq->sense) {
+ ret = -ENOMEM;
+ goto free_sq_cmds;
+ }
+
+ return 0;
+
+free_sq_cmds:
+ dma_free_coherent(hdev->dev, SQ_SIZE(qid, depth),
+ (void *)spraidq->sq_cmds, spraidq->sq_dma_addr);
+free_cqes:
+ dma_free_coherent(hdev->dev, CQ_SIZE(depth), (void *)spraidq->cqes,
+ spraidq->cq_dma_addr);
+ return ret;
+}
+
+static int spraid_wait_ready(struct spraid_dev *hdev, u64 cap, bool enabled)
+{
+ unsigned long timeout =
+ ((SPRAID_CAP_TIMEOUT(cap) + 1) * SPRAID_CAP_TIMEOUT_UNIT_MS) + jiffies;
+ u32 bit = enabled ? SPRAID_CSTS_RDY : 0;
+
+ while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY) != bit) {
+ usleep_range(1000, 2000);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev, "Device not ready; aborting %s\n",
+ enabled ? "initialisation" : "reset");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int spraid_shutdown_ctrl(struct spraid_dev *hdev)
+{
+ unsigned long timeout = hdev->ctrl_info->rtd3e + jiffies;
+
+ hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK;
+ hdev->ctrl_config |= SPRAID_CC_SHN_NORMAL;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ while ((readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_SHST_MASK) !=
+ SPRAID_CSTS_SHST_CMPLT) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev,
+ "Device shutdown incomplete; abort shutdown\n");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int spraid_disable_ctrl(struct spraid_dev *hdev)
+{
+ hdev->ctrl_config &= ~SPRAID_CC_SHN_MASK;
+ hdev->ctrl_config &= ~SPRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ return spraid_wait_ready(hdev, hdev->cap, false);
+}
+
+static int spraid_enable_ctrl(struct spraid_dev *hdev)
+{
+ u64 cap = hdev->cap;
+ u32 dev_page_min = SPRAID_CAP_MPSMIN(cap) + 12;
+ u32 page_shift = PAGE_SHIFT;
+
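+ /*
+ * Negotiate the memory page size: the host page must not be smaller than
+ * the controller minimum, and the negotiated size is capped at the
+ * controller maximum.
+ */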
+ if (page_shift < dev_page_min) {
+ dev_err(hdev->dev,
+ "Minimum device page size[%u], too large for host[%u]\n",
+ 1U << dev_page_min, 1U << page_shift);
+ return -ENODEV;
+ }
+
+ page_shift = min_t(unsigned int, SPRAID_CAP_MPSMAX(cap) + 12,
+ PAGE_SHIFT);
+ hdev->page_size = 1U << page_shift;
+
+ hdev->ctrl_config = SPRAID_CC_CSS_NVM;
+ hdev->ctrl_config |= (page_shift - 12) << SPRAID_CC_MPS_SHIFT;
+ hdev->ctrl_config |= SPRAID_CC_AMS_RR | SPRAID_CC_SHN_NONE;
+ hdev->ctrl_config |= SPRAID_CC_IOSQES | SPRAID_CC_IOCQES;
+ hdev->ctrl_config |= SPRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + SPRAID_REG_CC);
+
+ return spraid_wait_ready(hdev, cap, true);
+}
+
+static void spraid_init_queue(struct spraid_queue *spraidq, u16 qid)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ memset((void *)spraidq->cqes, 0, CQ_SIZE(spraidq->q_depth));
+
+ spraidq->sq_tail = 0;
+ spraidq->cq_head = 0;
+ spraidq->cq_phase = 1;
+ spraidq->q_db = &hdev->dbs[qid * 2 * hdev->db_stride];
+ spraidq->prp_small_pool = hdev->prp_small_pool[qid % small_pool_num];
+ hdev->online_queues++;
+}
+
+static inline bool spraid_cqe_pending(struct spraid_queue *spraidq)
+{
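+ /* a CQ entry is new when its phase bit matches the queue's current phase */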
+ return (le16_to_cpu(spraidq->cqes[spraidq->cq_head].status) & 1) ==
+ spraidq->cq_phase;
+}
+
+static void spraid_sata_report_zone_handle(struct scsi_cmnd *scmd,
+ struct spraid_iod *iod)
+{
+ int i = 0;
+ unsigned int bytes = 0;
+ struct scatterlist *sg = scsi_sglist(scmd);
+
+ scsi_for_each_sg(scmd, sg, iod->nsge, i) {
+ unsigned int offset = 0;
+
+ if (bytes == 0) {
+ char *hdr;
+ u32 list_length;
+ u64 max_lba, opt_lba;
+ u16 same;
+
+ hdr = sg_virt(sg);
+
+ list_length = get_unaligned_le32(&hdr[0]);
+ same = get_unaligned_le16(&hdr[4]);
+ max_lba = get_unaligned_le64(&hdr[8]);
+ opt_lba = get_unaligned_le64(&hdr[16]);
+ put_unaligned_be32(list_length, &hdr[0]);
+ hdr[4] = same & 0xf;
+ put_unaligned_be64(max_lba, &hdr[8]);
+ put_unaligned_be64(opt_lba, &hdr[16]);
+ offset += 64;
+ bytes += 64;
+ }
+ while (offset < sg_dma_len(sg)) {
+ char *rec;
+ u8 cond, type, non_seq, reset;
+ u64 size, start, wp;
+
+ rec = sg_virt(sg) + offset;
+ type = rec[0] & 0xf;
+ cond = (rec[1] >> 4) & 0xf;
+ non_seq = (rec[1] & 2);
+ reset = (rec[1] & 1);
+ size = get_unaligned_le64(&rec[8]);
+ start = get_unaligned_le64(&rec[16]);
+ wp = get_unaligned_le64(&rec[24]);
+ rec[0] = type;
+ rec[1] = (cond << 4) | non_seq | reset;
+ put_unaligned_be64(size, &rec[8]);
+ put_unaligned_be64(start, &rec[16]);
+ put_unaligned_be64(wp, &rec[24]);
+ WARN_ON(offset + 64 > sg_dma_len(sg));
+ offset += 64;
+ bytes += 64;
+ }
+ }
+}
+
+static inline void spraid_handle_ata_cmd(struct spraid_dev *hdev,
+ struct scsi_cmnd *scmd,
+ struct spraid_iod *iod)
+{
+ if (hdev->ctrl_info->card_type != SPRAID_CARD_HBA)
+ return;
+
+ switch (scmd->cmnd[0]) {
+ case ZBC_IN:
+ dev_info(hdev->dev, "[%s] process report zone\n", __func__);
+ spraid_sata_report_zone_handle(scmd, iod);
+ break;
+ default:
+ break;
+ }
+}
+
+static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct spraid_iod *iod;
+ struct request *req;
+ unsigned long elapsed;
+
+ tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+ req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
+ if (unlikely(!req || !blk_mq_request_started(req))) {
+ dev_warn(hdev->dev, "Invalid id %d completed on queue %d\n",
+ cqe->cmd_id, ioq->qid);
+ return;
+ }
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dev_log_dbg(hdev->dev,
+ "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n",
+ cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
+
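+ /*
+ * If the command is no longer marked in flight (another path, e.g. the
+ * timeout handler, already claimed it), only release its DMA resources
+ * here and do not complete it a second time.
+ */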
+ if (cmpxchg(&iod->state, SPRAID_CMD_IN_FLIGHT, SPRAID_CMD_COMPLETE) !=
+ SPRAID_CMD_IN_FLIGHT) {
+ dev_warn(hdev->dev,
+ "cid[%d] qid[%d] enters abnormal handler;"
+ " cost %3ld.%3ld seconds\n",
+ cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
+ WRITE_ONCE(iod->state, SPRAID_CMD_TMO_COMPLETE);
+
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ spraid_free_iod_res(hdev, iod);
+
+ return;
+ }
+
+ spraid_handle_ata_cmd(hdev, scmd, iod);
+
+ spraid_map_status(iod, scmd, cqe);
+ if (iod->nsge) {
+ iod->nsge = 0;
+ scsi_dma_unmap(scmd);
+ }
+ spraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+}
+
+static void spraid_complete_adminq_cmnd(struct spraid_queue *adminq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = adminq->hdev;
+ struct spraid_cmd *adm_cmd;
+
+ adm_cmd = hdev->adm_cmds + cqe->cmd_id;
+ if (unlikely(adm_cmd->state == SPRAID_CMD_IDLE)) {
+ dev_warn(adminq->hdev->dev,
+ "Invalid id %d completed on queue %d\n",
+ cqe->cmd_id, le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+ adm_cmd->result0 = le32_to_cpu(cqe->result);
+ adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&adm_cmd->cmd_done);
+}
+
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid);
+
+static void spraid_complete_aen(struct spraid_queue *spraidq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+ u32 result = le32_to_cpu(cqe->result);
+
+ dev_info(hdev->dev, "rcv aen, cid[%d], status[0x%x], result[0x%x]\n",
+ cqe->cmd_id, le16_to_cpu(cqe->status) >> 1, result);
+
+ spraid_send_aen(hdev, cqe->cmd_id);
+
+ if ((le16_to_cpu(cqe->status) >> 1) != SPRAID_SC_SUCCESS)
+ return;
+ switch (result & 0x7) {
+ case SPRAID_AEN_NOTICE:
+ spraid_handle_aen_notice(hdev, result);
+ break;
+ case SPRAID_AEN_VS:
+ spraid_handle_aen_vs(hdev, result, le32_to_cpu(cqe->result1));
+ break;
+ default:
+ dev_warn(hdev->dev, "Unsupported async event type: %u\n",
+ result & 0x7);
+ break;
+ }
+}
+
+static void spraid_complete_ioq_sync_cmnd(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct spraid_cmd *ptcmd;
+
+ ptcmd = hdev->ioq_ptcmds + (ioq->qid - 1) * SPRAID_PTCMDS_PERQ +
+ cqe->cmd_id - SPRAID_IO_BLK_MQ_DEPTH;
+
+ ptcmd->status = le16_to_cpu(cqe->status) >> 1;
+ ptcmd->result0 = le32_to_cpu(cqe->result);
+ ptcmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&ptcmd->cmd_done);
+}
+
+static inline void spraid_handle_cqe(struct spraid_queue *spraidq, u16 idx)
+{
+ struct spraid_completion *cqe = &spraidq->cqes[idx];
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ if (unlikely(cqe->cmd_id >= spraidq->q_depth)) {
+ dev_err(hdev->dev,
+ "Invalid command id[%d] completed on queue %d\n",
+ cqe->cmd_id, cqe->sq_id);
+ return;
+ }
+
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d];"
+ " result[0x%x], sq_id[%d], status[0x%x]\n",
+ cqe->cmd_id, spraidq->qid, le32_to_cpu(cqe->result),
+ le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
+
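+ /*
+ * The command id tells us who owns this completion: admin-queue ids at or
+ * above SPRAID_AQ_BLK_MQ_DEPTH belong to async event notifications,
+ * I/O-queue ids at or above SPRAID_IO_BLK_MQ_DEPTH belong to synchronous
+ * passthrough commands, everything else is a normal admin or SCSI command.
+ */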
+ if (unlikely(spraidq->qid == 0
+ && cqe->cmd_id >= SPRAID_AQ_BLK_MQ_DEPTH)) {
+ spraid_complete_aen(spraidq, cqe);
+ return;
+ }
+
+ if (unlikely(spraidq->qid && cqe->cmd_id >= SPRAID_IO_BLK_MQ_DEPTH)) {
+ spraid_complete_ioq_sync_cmnd(spraidq, cqe);
+ return;
+ }
+
+ if (spraidq->qid)
+ spraid_complete_ioq_cmnd(spraidq, cqe);
+ else
+ spraid_complete_adminq_cmnd(spraidq, cqe);
+}
+
+static void spraid_complete_cqes(struct spraid_queue *spraidq,
+ u16 start, u16 end)
+{
+ while (start != end) {
+ spraid_handle_cqe(spraidq, start);
+ if (++start == spraidq->q_depth)
+ start = 0;
+ }
+}
+
+static inline void spraid_update_cq_head(struct spraid_queue *spraidq)
+{
+ if (++spraidq->cq_head == spraidq->q_depth) {
+ spraidq->cq_head = 0;
+ spraidq->cq_phase = !spraidq->cq_phase;
+ }
+}
+
+static inline bool spraid_process_cq(struct spraid_queue *spraidq,
+ u16 *start, u16 *end, int tag)
+{
+ bool found = false;
+
+ *start = spraidq->cq_head;
+ while (!found && spraid_cqe_pending(spraidq)) {
+ if (spraidq->cqes[spraidq->cq_head].cmd_id == tag)
+ found = true;
+ spraid_update_cq_head(spraidq);
+ }
+ *end = spraidq->cq_head;
+
+ if (*start != *end)
+ writel(spraidq->cq_head,
+ spraidq->q_db + spraidq->hdev->db_stride);
+
+ return found;
+}
+
+static bool spraid_poll_cq(struct spraid_queue *spraidq, int cid)
+{
+ u16 start, end;
+ bool found;
+
+ if (!spraid_cqe_pending(spraidq))
+ return false;
+
+ spin_lock_irq(&spraidq->cq_lock);
+ found = spraid_process_cq(spraidq, &start, &end, cid);
+ spin_unlock_irq(&spraidq->cq_lock);
+
+ spraid_complete_cqes(spraidq, start, end);
+ return found;
+}
+
+static irqreturn_t spraid_irq(int irq, void *data)
+{
+ struct spraid_queue *spraidq = data;
+ irqreturn_t ret = IRQ_NONE;
+ u16 start, end;
+
+ spin_lock(&spraidq->cq_lock);
+ if (spraidq->cq_head != spraidq->last_cq_head)
+ ret = IRQ_HANDLED;
+
+ spraid_process_cq(spraidq, &start, &end, -1);
+ spraidq->last_cq_head = spraidq->cq_head;
+ spin_unlock(&spraidq->cq_lock);
+
+ if (start != end) {
+ spraid_complete_cqes(spraidq, start, end);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+static int spraid_setup_admin_queue(struct spraid_dev *hdev)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u32 aqa;
+ int ret;
+
+ dev_info(hdev->dev, "[%s] start disable ctrl\n", __func__);
+
+ ret = spraid_disable_ctrl(hdev);
+ if (ret)
+ return ret;
+
+ ret = spraid_alloc_queue(hdev, 0, SPRAID_AQ_DEPTH);
+ if (ret)
+ return ret;
+
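+ /* both 16-bit fields of the AQA register take the same 0-based admin queue depth */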
+ aqa = adminq->q_depth - 1;
+ aqa |= aqa << 16;
+ writel(aqa, hdev->bar + SPRAID_REG_AQA);
+ lo_hi_writeq(adminq->sq_dma_addr, hdev->bar + SPRAID_REG_ASQ);
+ lo_hi_writeq(adminq->cq_dma_addr, hdev->bar + SPRAID_REG_ACQ);
+
+ dev_info(hdev->dev, "[%s] start enable ctrl\n", __func__);
+
+ ret = spraid_enable_ctrl(hdev);
+ if (ret) {
+ ret = -ENODEV;
+ goto free_queue;
+ }
+
+ adminq->cq_vector = 0;
+ spraid_init_queue(adminq, 0);
+ ret = pci_request_irq(hdev->pdev, adminq->cq_vector, spraid_irq, NULL,
+ adminq, "spraid%d_q%d",
+ hdev->instance, adminq->qid);
+
+ if (ret) {
+ adminq->cq_vector = -1;
+ hdev->online_queues--;
+ goto free_queue;
+ }
+
+ dev_info(hdev->dev, "[%s] success, queuecount:[%d], onlinequeue:[%d]\n",
+ __func__, hdev->queue_count, hdev->online_queues);
+
+ return 0;
+
+free_queue:
+ spraid_free_queue(adminq);
+ return ret;
+}
+
+static u32 spraid_bar_size(struct spraid_dev *hdev, u32 nr_ioqs)
+{
+ return (SPRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride));
+}
+
+static int spraid_alloc_admin_cmds(struct spraid_dev *hdev)
+{
+ int i;
+
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+ spin_lock_init(&hdev->adm_cmd_lock);
+
+ hdev->adm_cmds = kcalloc_node(SPRAID_AQ_BLK_MQ_DEPTH,
+ sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->adm_cmds) {
+ dev_err(hdev->dev, "Alloc admin cmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SPRAID_AQ_BLK_MQ_DEPTH; i++) {
+ hdev->adm_cmds[i].qid = 0;
+ hdev->adm_cmds[i].cid = i;
+ list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
+ }
+
+ dev_info(hdev->dev, "Alloc admin cmds success, num[%d]\n",
+ SPRAID_AQ_BLK_MQ_DEPTH);
+
+ return 0;
+}
+
+static void spraid_free_admin_cmds(struct spraid_dev *hdev)
+{
+ kfree(hdev->adm_cmds);
+ hdev->adm_cmds = NULL;
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
+static struct spraid_cmd *spraid_get_cmd(struct spraid_dev *hdev,
+ enum spraid_cmd_type type)
+{
+ struct spraid_cmd *cmd = NULL;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(slock, flags);
+ dev_err(hdev->dev, "err, cmd[%d] list empty\n", type);
+ return NULL;
+ }
+ cmd = list_entry(head->next, struct spraid_cmd, list);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(slock, flags);
+
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IN_FLIGHT);
+
+ return cmd;
+}
+
+static void spraid_put_cmd(struct spraid_dev *hdev, struct spraid_cmd *cmd,
+ enum spraid_cmd_type type)
+{
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IDLE);
+ list_add_tail(&cmd->list, head);
+ spin_unlock_irqrestore(slock, flags);
+}
+
+static int spraid_submit_admin_sync_cmd(struct spraid_dev *hdev,
+ struct spraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout)
+{
+ struct spraid_cmd *adm_cmd = spraid_get_cmd(hdev, SPRAID_CMD_ADM);
+
+ if (!adm_cmd) {
+ dev_err(hdev->dev, "err, get admin cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&adm_cmd->cmd_done);
+
+ cmd->common.command_id = adm_cmd->cid;
+ spraid_submit_cmd(&hdev->queues[0], cmd);
+
+ if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout;"
+ " opcode[0x%x] subopcode[0x%x]\n",
+ __func__, adm_cmd->cid, adm_cmd->qid,
+ cmd->usr_cmd.opcode, cmd->usr_cmd.info_0.subopcode);
+ WRITE_ONCE(adm_cmd->state, SPRAID_CMD_TIMEOUT);
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+ return -EINVAL;
+ }
+
+ if (result0)
+ *result0 = adm_cmd->result0;
+ if (result1)
+ *result1 = adm_cmd->result1;
+
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+
+ return adm_cmd->status;
+}
+
+static int spraid_create_cq(struct spraid_dev *hdev, u16 qid,
+ struct spraid_queue *spraidq, u16 cq_vector)
+{
+ struct spraid_admin_command admin_cmd;
+ int flags = SPRAID_QUEUE_PHYS_CONTIG | SPRAID_CQ_IRQ_ENABLED;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_cq.opcode = SPRAID_ADMIN_CREATE_CQ;
+ admin_cmd.create_cq.prp1 = cpu_to_le64(spraidq->cq_dma_addr);
+ admin_cmd.create_cq.cqid = cpu_to_le16(qid);
+ admin_cmd.create_cq.qsize = cpu_to_le16(spraidq->q_depth - 1);
+ admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
+ admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static int spraid_create_sq(struct spraid_dev *hdev, u16 qid,
+ struct spraid_queue *spraidq)
+{
+ struct spraid_admin_command admin_cmd;
+ int flags = SPRAID_QUEUE_PHYS_CONTIG;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_sq.opcode = SPRAID_ADMIN_CREATE_SQ;
+ admin_cmd.create_sq.prp1 = cpu_to_le64(spraidq->sq_dma_addr);
+ admin_cmd.create_sq.sqid = cpu_to_le16(qid);
+ admin_cmd.create_sq.qsize = cpu_to_le16(spraidq->q_depth - 1);
+ admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
+ admin_cmd.create_sq.cqid = cpu_to_le16(qid);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static void spraid_free_queue(struct spraid_queue *spraidq)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+
+ hdev->queue_count--;
+ dma_free_coherent(hdev->dev, CQ_SIZE(spraidq->q_depth),
+ (void *)spraidq->cqes, spraidq->cq_dma_addr);
+ dma_free_coherent(hdev->dev, SQ_SIZE(spraidq->qid, spraidq->q_depth),
+ spraidq->sq_cmds, spraidq->sq_dma_addr);
+ dma_free_coherent(hdev->dev, SENSE_SIZE(spraidq->q_depth),
+ spraidq->sense, spraidq->sense_dma_addr);
+}
+
+static void spraid_free_admin_queue(struct spraid_dev *hdev)
+{
+ spraid_free_queue(&hdev->queues[0]);
+}
+
+static void spraid_free_io_queues(struct spraid_dev *hdev)
+{
+ int i;
+
+ for (i = hdev->queue_count - 1; i >= 1; i--)
+ spraid_free_queue(&hdev->queues[i]);
+}
+
+static int spraid_delete_queue(struct spraid_dev *hdev, u8 op, u16 id)
+{
+ struct spraid_admin_command admin_cmd;
+ int ret;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.delete_queue.opcode = op;
+ admin_cmd.delete_queue.qid = cpu_to_le16(id);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (ret)
+ dev_err(hdev->dev, "Delete %s:[%d] failed\n",
+ (op == SPRAID_ADMIN_DELETE_CQ) ? "cq" : "sq", id);
+
+ return ret;
+}
+
+static int spraid_delete_cq(struct spraid_dev *hdev, u16 cqid)
+{
+ return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_CQ, cqid);
+}
+
+static int spraid_delete_sq(struct spraid_dev *hdev, u16 sqid)
+{
+ return spraid_delete_queue(hdev, SPRAID_ADMIN_DELETE_SQ, sqid);
+}
+
+static int spraid_create_queue(struct spraid_queue *spraidq, u16 qid)
+{
+ struct spraid_dev *hdev = spraidq->hdev;
+ u16 cq_vector;
+ int ret;
+
+ cq_vector = (hdev->num_vecs == 1) ? 0 : qid;
+ ret = spraid_create_cq(hdev, qid, spraidq, cq_vector);
+ if (ret)
+ return ret;
+
+ ret = spraid_create_sq(hdev, qid, spraidq);
+ if (ret)
+ goto delete_cq;
+
+ spraid_init_queue(spraidq, qid);
+ spraidq->cq_vector = cq_vector;
+
+ ret = pci_request_irq(hdev->pdev, cq_vector, spraid_irq, NULL,
+ spraidq, "spraid%d_q%d", hdev->instance, qid);
+
+ if (ret) {
+ dev_err(hdev->dev, "Request queue[%d] irq failed\n", qid);
+ goto delete_sq;
+ }
+
+ return 0;
+
+delete_sq:
+ spraidq->cq_vector = -1;
+ hdev->online_queues--;
+ spraid_delete_sq(hdev, qid);
+delete_cq:
+ spraid_delete_cq(hdev, qid);
+
+ return ret;
+}
+
+static int spraid_create_io_queues(struct spraid_dev *hdev)
+{
+ u32 i, max;
+ int ret = 0;
+
+ max = min(hdev->max_qid, hdev->queue_count - 1);
+ for (i = hdev->online_queues; i <= max; i++) {
+ ret = spraid_create_queue(&hdev->queues[i], i);
+ if (ret) {
+ dev_err(hdev->dev, "Create queue[%d] failed\n", i);
+ break;
+ }
+ }
+
+ dev_info(hdev->dev, "[%s] queue_count[%d], online_queue[%d]",
+ __func__, hdev->queue_count, hdev->online_queues);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+static int spraid_set_features(struct spraid_dev *hdev, u32 fid,
+ u32 dword11, void *buffer,
+ size_t buflen, u32 *result)
+{
+ struct spraid_admin_command admin_cmd;
+ int ret;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+
+ if (buffer && buflen) {
+ data_ptr = dma_alloc_coherent(hdev->dev, buflen,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memcpy(data_ptr, buffer, buflen);
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.features.opcode = SPRAID_ADMIN_SET_FEATURES;
+ admin_cmd.features.fid = cpu_to_le32(fid);
+ admin_cmd.features.dword11 = cpu_to_le32(dword11);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, result, NULL, 0);
+
+ if (data_ptr)
+ dma_free_coherent(hdev->dev, buflen, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_configure_timestamp(struct spraid_dev *hdev)
+{
+ __le64 ts;
+ int ret;
+
+ ts = cpu_to_le64(ktime_to_ms(ktime_get_real()));
+ ret = spraid_set_features(hdev, SPRAID_FEAT_TIMESTAMP,
+ 0, &ts, sizeof(ts), NULL);
+
+ if (ret)
+ dev_err(hdev->dev, "set timestamp failed: %d\n", ret);
+ return ret;
+}
+
+static int spraid_set_queue_cnt(struct spraid_dev *hdev, u32 *cnt)
+{
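+ /* the two 16-bit queue-count fields in dword11 are encoded 0-based */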
+ u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16);
+ u32 nr_ioqs, result;
+ int status;
+
+ status = spraid_set_features(hdev, SPRAID_FEAT_NUM_QUEUES,
+ q_cnt, NULL, 0, &result);
+ if (status) {
+ dev_err(hdev->dev, "Set queue count failed, status: %d\n",
+ status);
+ return -EIO;
+ }
+
+ nr_ioqs = min(result & 0xffff, result >> 16) + 1;
+ *cnt = min(*cnt, nr_ioqs);
+ if (*cnt == 0) {
+ dev_err(hdev->dev, "Illegal queue count: zero\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int spraid_setup_io_queues(struct spraid_dev *hdev)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ struct pci_dev *pdev = hdev->pdev;
+ u32 nr_ioqs = num_online_cpus();
+ u32 i, size;
+ int ret;
+
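+ /* reserve vector 0 for the admin queue; the rest are spread across CPUs for I/O queues */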
+ struct irq_affinity affd = {
+ .pre_vectors = 1
+ };
+
+ ret = spraid_set_queue_cnt(hdev, &nr_ioqs);
+ if (ret < 0)
+ return ret;
+
+ size = spraid_bar_size(hdev, nr_ioqs);
+ ret = spraid_remap_bar(hdev, size);
+ if (ret)
+ return -ENOMEM;
+
+ adminq->q_db = hdev->dbs;
+
+ pci_free_irq(pdev, 0, adminq);
+ pci_free_irq_vectors(pdev);
+
+ ret = pci_alloc_irq_vectors_affinity(pdev, 1, (nr_ioqs + 1),
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ if (ret <= 0)
+ return -EIO;
+
+ hdev->num_vecs = ret;
+
+ hdev->max_qid = max(ret - 1, 1);
+
+ ret = pci_request_irq(pdev, adminq->cq_vector, spraid_irq, NULL,
+ adminq, "spraid%d_q%d",
+ hdev->instance, adminq->qid);
+ if (ret) {
+ dev_err(hdev->dev, "Request admin irq failed\n");
+ adminq->cq_vector = -1;
+ return ret;
+ }
+
+ for (i = hdev->queue_count; i <= hdev->max_qid; i++) {
+ ret = spraid_alloc_queue(hdev, i, hdev->ioq_depth);
+ if (ret)
+ break;
+ }
+ dev_info(hdev->dev, "[%s] max_qid: %d, queue_count: %d;"
+ " online_queue: %d, ioq_depth: %d\n",
+ __func__, hdev->max_qid, hdev->queue_count,
+ hdev->online_queues, hdev->ioq_depth);
+
+ return spraid_create_io_queues(hdev);
+}
+
+static void spraid_delete_io_queues(struct spraid_dev *hdev)
+{
+ u16 queues = hdev->online_queues - 1;
+ u8 opcode = SPRAID_ADMIN_DELETE_SQ;
+ u16 i, pass;
+
+ if (!pci_device_is_present(hdev->pdev)) {
+ dev_err(hdev->dev,
+ "pci_device is not present, skip disable io queues\n");
+ return;
+ }
+
+ if (hdev->online_queues < 2) {
+ dev_err(hdev->dev, "[%s] err, io queue has been delete\n",
+ __func__);
+ return;
+ }
+
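+ /* first pass deletes the submission queues, second pass the completion queues */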
+ for (pass = 0; pass < 2; pass++) {
+ for (i = queues; i > 0; i--)
+ if (spraid_delete_queue(hdev, opcode, i))
+ break;
+
+ opcode = SPRAID_ADMIN_DELETE_CQ;
+ }
+}
+
+static void spraid_remove_io_queues(struct spraid_dev *hdev)
+{
+ spraid_delete_io_queues(hdev);
+ spraid_free_io_queues(hdev);
+}
+
+static void spraid_pci_disable(struct spraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u32 i;
+
+ for (i = 0; i < hdev->online_queues; i++)
+ pci_free_irq(pdev, hdev->queues[i].cq_vector, &hdev->queues[i]);
+ pci_free_irq_vectors(pdev);
+ if (pci_is_enabled(pdev)) {
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
+ hdev->online_queues = 0;
+}
+
+static void spraid_disable_admin_queue(struct spraid_dev *hdev, bool shutdown)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ spraid_shutdown_ctrl(hdev);
+ else
+ spraid_disable_ctrl(hdev);
+ }
+
+ if (hdev->queue_count == 0) {
+ dev_err(hdev->dev, "[%s] err, admin queue has been delete\n",
+ __func__);
+ return;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ spraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+
+ spraid_complete_cqes(adminq, start, end);
+ spraid_free_admin_queue(hdev);
+}
+
+static int spraid_create_dma_pools(struct spraid_dev *hdev)
+{
+ int i;
+ char poolname[20] = { 0 };
+
+ hdev->prp_page_pool = dma_pool_create("prp list page", hdev->dev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+
+ if (!hdev->prp_page_pool) {
+ dev_err(hdev->dev, "create prp_page_pool failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < small_pool_num; i++) {
+ sprintf(poolname, "prp_list_256_%d", i);
+ hdev->prp_small_pool[i] =
+ dma_pool_create(poolname, hdev->dev, SMALL_POOL_SIZE,
+ SMALL_POOL_SIZE, 0);
+
+ if (!hdev->prp_small_pool[i]) {
+ dev_err(hdev->dev, "create prp_small_pool %d failed\n",
+ i);
+ goto destroy_prp_small_pool;
+ }
+ }
+
+ return 0;
+
+destroy_prp_small_pool:
+ while (i > 0)
+ dma_pool_destroy(hdev->prp_small_pool[--i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+
+ return -ENOMEM;
+}
+
+static void spraid_destroy_dma_pools(struct spraid_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < small_pool_num; i++)
+ dma_pool_destroy(hdev->prp_small_pool[i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+}
+
+static int spraid_get_dev_list(struct spraid_dev *hdev,
+ struct spraid_dev_info *devices)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ struct spraid_admin_command admin_cmd;
+ struct spraid_dev_list *list_buf;
+ dma_addr_t data_dma = 0;
+ u32 i, idx, hdid, ndev;
+ int ret = 0;
+
+ list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!list_buf)
+ return -ENOMEM;
+
+ for (idx = 0; idx < nd;) {
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SPRAID_GET_INFO_DEV_LIST;
+ admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd,
+ NULL, NULL, 0);
+
+ if (ret) {
+ dev_err(hdev->dev, "Get device list failed, nd: %u;"
+ "idx: %u, ret: %d\n",
+ nd, idx, ret);
+ goto out;
+ }
+ ndev = le32_to_cpu(list_buf->dev_num);
+
+ dev_info(hdev->dev, "ndev numbers: %u\n", ndev);
+
+ for (i = 0; i < ndev; i++) {
+ hdid = le32_to_cpu(list_buf->devices[i].hdid);
+ dev_info(hdev->dev, "list_buf->devices[%d], hdid: %u;"
+ "target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ i, hdid,
+ le16_to_cpu(list_buf->devices[i].target),
+ list_buf->devices[i].channel,
+ list_buf->devices[i].lun,
+ list_buf->devices[i].attr);
+ if (hdid > nd || hdid == 0) {
+ dev_err(hdev->dev, "err, hdid[%d] invalid\n",
+ hdid);
+ continue;
+ }
+ memcpy(&devices[hdid - 1], &list_buf->devices[i],
+ sizeof(struct spraid_dev_info));
+ }
+ idx += ndev;
+
+ if (idx < MAX_DEV_ENTRY_PER_PAGE_4K)
+ break;
+ }
+
+out:
+ dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, data_dma);
+ return ret;
+}
+
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = SPRAID_ADMIN_ASYNC_EVENT;
+ admin_cmd.common.command_id = cid;
+
+ spraid_submit_cmd(adminq, &admin_cmd);
+ dev_info(hdev->dev, "send aen, cid[%d]\n", cid);
+}
+
+static inline void spraid_send_all_aen(struct spraid_dev *hdev)
+{
+ u16 i;
+
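+ /*
+ * AEN commands use command ids starting at SPRAID_AQ_BLK_MQ_DEPTH so
+ * their completions can be told apart from regular admin commands.
+ */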
+ for (i = 0; i < hdev->ctrl_info->aerl; i++)
+ spraid_send_aen(hdev, i + SPRAID_AQ_BLK_MQ_DEPTH);
+}
+
+static int spraid_add_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "add device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel,
+ le16_to_cpu(device->target), 0);
+ if (sdev) {
+ dev_warn(hdev->dev, "Device is already exist, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ scsi_device_put(sdev);
+ return -EEXIST;
+ }
+ scsi_add_device(shost, device->channel, le16_to_cpu(device->target), 0);
+ return 0;
+}
+
+static int spraid_rescan_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "rescan device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
+ sdev = scsi_device_lookup(shost, device->channel,
+ le16_to_cpu(device->target), 0);
+ if (!sdev) {
+ dev_warn(hdev->dev, "device is not exit rescan it, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ device->channel, le16_to_cpu(device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int spraid_remove_device(struct spraid_dev *hdev,
+ struct spraid_dev_info *org_device)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "remove device, hdid: %u target: %d, channel: %d;"
+ " lun: %d, attr[0x%x]\n",
+ le32_to_cpu(org_device->hdid), le16_to_cpu(org_device->target),
+ org_device->channel, org_device->lun, org_device->attr);
+
+ sdev = scsi_device_lookup(shost, org_device->channel,
+ le16_to_cpu(org_device->target), 0);
+ if (!sdev) {
+ dev_warn(hdev->dev, "device is not exit remove it, channel: %d;"
+ " target_id: %d, lun: %d\n",
+ org_device->channel,
+ le16_to_cpu(org_device->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int spraid_dev_list_init(struct spraid_dev *hdev)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ int i, ret;
+
+ hdev->devices = kzalloc_node(nd * sizeof(struct spraid_dev_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->devices)
+ return -ENOMEM;
+
+ ret = spraid_get_dev_list(hdev, hdev->devices);
+ if (ret) {
+ dev_err(hdev->dev,
+ "Ignore failure of getting device list;"
+ " within initialization\n");
+ return 0;
+ }
+
+ for (i = 0; i < nd; i++) {
+ if (SPRAID_DEV_INFO_FLAG_VALID(hdev->devices[i].flag) &&
+ SPRAID_DEV_INFO_ATTR_BOOT(hdev->devices[i].attr)) {
+ spraid_add_device(hdev, &hdev->devices[i]);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int luntarget_cmp_func(const void *l, const void *r)
+{
+ const struct spraid_dev_info *ln = l;
+ const struct spraid_dev_info *rn = r;
+
+ if (ln->channel == rn->channel)
+ return le16_to_cpu(ln->target) - le16_to_cpu(rn->target);
+
+ return ln->channel - rn->channel;
+}
+
+static void spraid_scan_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, scan_work);
+ struct spraid_dev_info *devices, *org_devices;
+ struct spraid_dev_info *sortdevice;
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ u8 flag, org_flag;
+ int i, ret;
+ int count = 0;
+
+ devices = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+ if (!devices)
+ return;
+
+ sortdevice = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+ if (!sortdevice)
+ goto free_list;
+
+ ret = spraid_get_dev_list(hdev, devices);
+ if (ret)
+ goto free_all;
+ org_devices = hdev->devices;
+ for (i = 0; i < nd; i++) {
+ org_flag = org_devices[i].flag;
+ flag = devices[i].flag;
+
+ dev_log_dbg(hdev->dev, "i: %d, org_flag: 0x%x, flag: 0x%x\n",
+ i, org_flag, flag);
+
+ if (SPRAID_DEV_INFO_FLAG_VALID(flag)) {
+ if (!SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ memcpy(&org_devices[i], &devices[i],
+ sizeof(struct spraid_dev_info));
+ memcpy(&sortdevice[count++], &devices[i],
+ sizeof(struct spraid_dev_info));
+ up_write(&hdev->devices_rwsem);
+ } else if (SPRAID_DEV_INFO_FLAG_CHANGE(flag)) {
+ spraid_rescan_device(hdev, &devices[i]);
+ }
+ } else {
+ if (SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->devices_rwsem);
+ org_devices[i].flag &= 0xfe;
+ up_write(&hdev->devices_rwsem);
+ spraid_remove_device(hdev, &org_devices[i]);
+ }
+ }
+ }
+
+ dev_info(hdev->dev, "scan work add device count = %d\n", count);
+
+ sort(sortdevice, count, sizeof(sortdevice[0]),
+ luntarget_cmp_func, NULL);
+
+ for (i = 0; i < count; i++)
+ spraid_add_device(hdev, &sortdevice[i]);
+
+free_all:
+ kfree(sortdevice);
+free_list:
+ kfree(devices);
+}
+
+static void spraid_timesyn_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, timesyn_work);
+
+ spraid_configure_timestamp(hdev);
+}
+
+static int spraid_init_ctrl_info(struct spraid_dev *hdev);
+static void spraid_fw_act_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, fw_act_work);
+
+ if (spraid_init_ctrl_info(hdev))
+ dev_err(hdev->dev, "get ctrl info failed after fw act\n");
+}
+
+static void spraid_queue_scan(struct spraid_dev *hdev)
+{
+ queue_work(spraid_wq, &hdev->scan_work);
+}
+
+static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SPRAID_AEN_DEV_CHANGED:
+ spraid_queue_scan(hdev);
+ break;
+ case SPRAID_AEN_FW_ACT_START:
+ dev_info(hdev->dev, "fw activation starting\n");
+ break;
+ case SPRAID_AEN_HOST_PROBING:
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result %08x\n", result);
+ }
+}
+
+static void spraid_handle_aen_vs(struct spraid_dev *hdev,
+ u32 result, u32 result1)
+{
+ switch ((result & 0xff00) >> 8) {
+ case SPRAID_AEN_TIMESYN:
+ queue_work(spraid_wq, &hdev->timesyn_work);
+ break;
+ case SPRAID_AEN_FW_ACT_FINISH:
+ dev_info(hdev->dev, "fw activation finish\n");
+ queue_work(spraid_wq, &hdev->fw_act_work);
+ break;
+ case SPRAID_AEN_EVENT_MIN ... SPRAID_AEN_EVENT_MAX:
+ dev_info(hdev->dev, "rcv card event[%d];"
+ " param1[0x%x] param2[0x%x]\n",
+ (result & 0xff00) >> 8, result, result1);
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result: 0x%x\n", result);
+ }
+}
+
+static int spraid_alloc_resources(struct spraid_dev *hdev)
+{
+ int ret, nqueue;
+
+ ret = ida_alloc(&spraid_instance_ida, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(hdev->dev, "Get instance id failed\n");
+ return ret;
+ }
+ hdev->instance = ret;
+
+ hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->ctrl_info) {
+ ret = -ENOMEM;
+ goto release_instance;
+ }
+
+ ret = spraid_create_dma_pools(hdev);
+ if (ret)
+ goto free_ctrl_info;
+ nqueue = num_possible_cpus() + 1;
+ hdev->queues = kcalloc_node(nqueue, sizeof(struct spraid_queue),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->queues) {
+ ret = -ENOMEM;
+ goto destroy_dma_pools;
+ }
+
+ ret = spraid_alloc_admin_cmds(hdev);
+ if (ret)
+ goto free_queues;
+
+ dev_info(hdev->dev, "[%s] queues num: %d\n", __func__, nqueue);
+
+ return 0;
+
+free_queues:
+ kfree(hdev->queues);
+destroy_dma_pools:
+ spraid_destroy_dma_pools(hdev);
+free_ctrl_info:
+ kfree(hdev->ctrl_info);
+release_instance:
+ ida_free(&spraid_instance_ida, hdev->instance);
+ return ret;
+}
+
+static void spraid_free_resources(struct spraid_dev *hdev)
+{
+ spraid_free_admin_cmds(hdev);
+ kfree(hdev->queues);
+ spraid_destroy_dma_pools(hdev);
+ kfree(hdev->ctrl_info);
+ ida_free(&spraid_instance_ida, hdev->instance);
+}
+
+static void spraid_bsg_unmap_data(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir =
+ rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ if (iod->nsge)
+ dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
+
+ spraid_free_iod_res(hdev, iod);
+}
+
+static int spraid_bsg_map_data(struct spraid_dev *hdev, struct bsg_job *job,
+ struct spraid_admin_command *cmd)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir =
+ rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ iod->sg = job->request_payload.sg_list;
+ iod->nsge = job->request_payload.sg_cnt;
+ iod->length = job->request_payload.payload_len;
+ iod->use_sgl = false;
+ iod->npages = -1;
+ iod->sg_drv_mgmt = false;
+
+ if (!iod->nsge)
+ goto out;
+
+ ret = dma_map_sg_attrs(hdev->dev, iod->sg, iod->nsge,
+ dma_dir, DMA_ATTR_NO_WARN);
+ if (!ret)
+ goto out;
+
+ ret = spraid_setup_prps(hdev, iod);
+ if (ret)
+ goto unmap;
+
+ cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+ cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
+
+ return 0;
+
+unmap:
+ dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
+out:
+ return ret;
+}
+
+static int spraid_get_ctrl_info(struct spraid_dev *hdev,
+ struct spraid_ctrl_info *ctrl_info)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = SPRAID_GET_INFO_CTRL;
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(ctrl_info, data_ptr, sizeof(struct spraid_ctrl_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_init_ctrl_info(struct spraid_dev *hdev)
+{
+ int ret;
+
+ hdev->ctrl_info->nd = cpu_to_le32(240);
+ hdev->ctrl_info->mdts = 8;
+ hdev->ctrl_info->max_cmds = cpu_to_le16(4096);
+ hdev->ctrl_info->max_num_sge = cpu_to_le16(128);
+ hdev->ctrl_info->max_channel = cpu_to_le16(4);
+ hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239);
+ hdev->ctrl_info->max_lun = cpu_to_le16(2);
+
+ ret = spraid_get_ctrl_info(hdev, hdev->ctrl_info);
+ if (ret)
+ dev_err(hdev->dev, "get controller info failed: %d\n", ret);
+
+ dev_info(hdev->dev, "[%s]nd = %d\n", __func__, hdev->ctrl_info->nd);
+ dev_info(hdev->dev, "[%s]max_cmd = %d\n",
+ __func__, hdev->ctrl_info->max_cmds);
+ dev_info(hdev->dev, "[%s]max_channel = %d\n",
+ __func__, hdev->ctrl_info->max_channel);
+ dev_info(hdev->dev, "[%s]max_tgt_id = %d\n",
+ __func__, hdev->ctrl_info->max_tgt_id);
+ dev_info(hdev->dev, "[%s]max_lun = %d\n",
+ __func__, hdev->ctrl_info->max_lun);
+ dev_info(hdev->dev, "[%s]max_num_sge = %d\n",
+ __func__, hdev->ctrl_info->max_num_sge);
+ dev_info(hdev->dev, "[%s]lun_num_boot = %d\n",
+ __func__, hdev->ctrl_info->lun_num_in_boot);
+ dev_info(hdev->dev, "[%s]mdts = %d\n", __func__, hdev->ctrl_info->mdts);
+ dev_info(hdev->dev, "[%s]acl = %d\n", __func__, hdev->ctrl_info->acl);
+	dev_info(hdev->dev, "[%s]aerl = %d\n", __func__, hdev->ctrl_info->aerl);
+ dev_info(hdev->dev, "[%s]card_type = %d\n",
+ __func__, hdev->ctrl_info->card_type);
+ dev_info(hdev->dev, "[%s]rtd3e = %d\n",
+ __func__, hdev->ctrl_info->rtd3e);
+ dev_info(hdev->dev, "[%s]sn = %s\n", __func__, hdev->ctrl_info->sn);
+ dev_info(hdev->dev, "[%s]fr = %s\n", __func__, hdev->ctrl_info->fr);
+
+ if (!hdev->ctrl_info->aerl)
+ hdev->ctrl_info->aerl = 1;
+ if (hdev->ctrl_info->aerl > SPRAID_NR_AEN_COMMANDS)
+ hdev->ctrl_info->aerl = SPRAID_NR_AEN_COMMANDS;
+
+ return 0;
+}
+
+#define SPRAID_MAX_ADMIN_PAYLOAD_SIZE BIT(16)
+static int spraid_alloc_iod_ext_mem_pool(struct spraid_dev *hdev)
+{
+ u16 max_sge = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ size_t alloc_size;
+
+ alloc_size = spraid_iod_ext_size(hdev, SPRAID_MAX_ADMIN_PAYLOAD_SIZE,
+ max_sge, true, false);
+ if (alloc_size > PAGE_SIZE)
+		dev_warn(hdev->dev,
+			 "unreasonable sg allocation, larger than one page\n");
+ hdev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+ mempool_kfree,
+ (void *)alloc_size, GFP_KERNEL,
+ hdev->numa_node);
+ if (!hdev->iod_mempool) {
+ dev_err(hdev->dev, "Create iod extension memory pool failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void spraid_free_iod_ext_mem_pool(struct spraid_dev *hdev)
+{
+ mempool_destroy(hdev->iod_mempool);
+}
+
+static int spraid_user_admin_cmd(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct spraid_bsg_request *bsg_req = job->request;
+ struct spraid_passthru_common_cmd *cmd = &(bsg_req->admcmd);
+ struct spraid_admin_command admin_cmd;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+ u32 result[2] = {0};
+ int status;
+
+ if (hdev->state >= SPRAID_RESETTING) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not right\n",
+ __func__, hdev->state);
+ return -EBUSY;
+ }
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init\n",
+ __func__, cmd->opcode, cmd->info_0.subopcode);
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = cmd->opcode;
+ admin_cmd.common.flags = cmd->flags;
+ admin_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ admin_cmd.common.cdw2[0] = cpu_to_le32(cmd->cdw2);
+ admin_cmd.common.cdw2[1] = cpu_to_le32(cmd->cdw3);
+ admin_cmd.common.cdw10 = cpu_to_le32(cmd->cdw10);
+ admin_cmd.common.cdw11 = cpu_to_le32(cmd->cdw11);
+ admin_cmd.common.cdw12 = cpu_to_le32(cmd->cdw12);
+ admin_cmd.common.cdw13 = cpu_to_le32(cmd->cdw13);
+ admin_cmd.common.cdw14 = cpu_to_le32(cmd->cdw14);
+ admin_cmd.common.cdw15 = cpu_to_le32(cmd->cdw15);
+
+ status = spraid_bsg_map_data(hdev, job, &admin_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
+ }
+
+ status = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, &result[0],
+ &result[1], timeout);
+ if (status >= 0) {
+ job->reply_len = sizeof(result);
+ memcpy(job->reply, result, sizeof(result));
+ }
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x];"
+ " status[0x%x] result0[0x%x] result1[0x%x]\n",
+ __func__, cmd->opcode, cmd->info_0.subopcode,
+ status, result[0], result[1]);
+
+ spraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
+static int spraid_alloc_ioq_ptcmds(struct spraid_dev *hdev)
+{
+ int i;
+ int ptnum = SPRAID_NR_IOQ_PTCMDS;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+ spin_lock_init(&hdev->ioq_pt_lock);
+
+ hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->ioq_ptcmds) {
+ dev_err(hdev->dev, "Alloc ioq_ptcmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ptnum; i++) {
+ hdev->ioq_ptcmds[i].qid = i / SPRAID_PTCMDS_PERQ + 1;
+ hdev->ioq_ptcmds[i].cid = i % SPRAID_PTCMDS_PERQ
+ + SPRAID_IO_BLK_MQ_DEPTH;
+ list_add_tail(&(hdev->ioq_ptcmds[i].list), &hdev->ioq_pt_list);
+ }
+
+ dev_info(hdev->dev, "Alloc ioq_ptcmds success, ptnum[%d]\n", ptnum);
+
+ return 0;
+}
+
+static void spraid_free_ioq_ptcmds(struct spraid_dev *hdev)
+{
+ kfree(hdev->ioq_ptcmds);
+ hdev->ioq_ptcmds = NULL;
+
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
+}
+
+static int spraid_submit_ioq_sync_cmd(struct spraid_dev *hdev,
+ struct spraid_ioq_command *cmd,
+ u32 *result, u32 *reslen, u32 timeout)
+{
+ int ret;
+ dma_addr_t sense_dma;
+ struct spraid_queue *ioq;
+ void *sense_addr = NULL;
+ struct spraid_cmd *pt_cmd = spraid_get_cmd(hdev, SPRAID_CMD_IOPT);
+
+ if (!pt_cmd) {
+ dev_err(hdev->dev, "err, get ioq cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&pt_cmd->cmd_done);
+
+ ioq = &hdev->queues[pt_cmd->qid];
+ ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
+ sense_addr = ioq->sense + ret;
+ sense_dma = ioq->sense_dma_addr + ret;
+
+ cmd->common.sense_addr = cpu_to_le64(sense_dma);
+ cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+ cmd->common.command_id = pt_cmd->cid;
+
+ spraid_submit_cmd(ioq, cmd);
+
+ if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout;"
+ " opcode[0x%x] subopcode[0x%x]\n",
+ __func__, pt_cmd->cid, pt_cmd->qid, cmd->common.opcode,
+ (le32_to_cpu(cmd->common.cdw3[0]) & 0xffff));
+ WRITE_ONCE(pt_cmd->state, SPRAID_CMD_TIMEOUT);
+ spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
+ return -EINVAL;
+ }
+
+ if (result && reslen) {
+ if ((pt_cmd->status & 0x17f) == 0x101) {
+ memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+ *reslen = SCSI_SENSE_BUFFERSIZE;
+ }
+ }
+
+ spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
+
+ return pt_cmd->status;
+}
+
+static int spraid_user_ioq_cmd(struct spraid_dev *hdev, struct bsg_job *job)
+{
+ struct spraid_bsg_request *bsg_req =
+ (struct spraid_bsg_request *)(job->request);
+ struct spraid_ioq_passthru_cmd *cmd = &(bsg_req->ioqcmd);
+ struct spraid_ioq_command ioq_cmd;
+ int status = 0;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+
+ if (cmd->data_len > PAGE_SIZE) {
+ dev_err(hdev->dev, "[%s] data len bigger than 4k\n", __func__);
+ return -EFAULT;
+ }
+
+ if (hdev->state != SPRAID_LIVE) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not live\n",
+ __func__, hdev->state);
+ return -EBUSY;
+ }
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init;"
+ " datalen[%d]\n",
+ __func__, cmd->opcode, cmd->info_1.subopcode, cmd->data_len);
+
+ memset(&ioq_cmd, 0, sizeof(ioq_cmd));
+ ioq_cmd.common.opcode = cmd->opcode;
+ ioq_cmd.common.flags = cmd->flags;
+ ioq_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ ioq_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+ ioq_cmd.common.cdb_len = cmd->info_0.cdb_len;
+ ioq_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+ ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+ ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+ ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+ ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+ ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+ ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+ ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+ ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+ ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+ memcpy(ioq_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+ ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+ ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+ ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+ ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+ status = spraid_bsg_map_data(hdev, job,
+ (struct spraid_admin_command *)&ioq_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
+ }
+
+ status = spraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, job->reply,
+ &job->reply_len, timeout);
+
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x], status[0x%x];"
+ " reply_len[%d]\n",
+ __func__, cmd->opcode, cmd->info_1.subopcode,
+ status, job->reply_len);
+
+ spraid_bsg_unmap_data(hdev, job);
+
+ return status;
+}
+
+static bool spraid_check_scmd_completed(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_queue *spraidq;
+ u16 hwq, cid;
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ spraidq = &hdev->queues[hwq];
+ if (READ_ONCE(iod->state) == SPRAID_CMD_COMPLETE
+ || spraid_poll_cq(spraidq, cid)) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n",
+ cid, spraidq->qid);
+ return true;
+ }
+ return false;
+}
+
+static enum blk_eh_timer_return spraid_scmd_timeout(struct scsi_cmnd *scmd)
+{
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ unsigned int timeout = scmd->device->request_queue->rq_timeout;
+
+ if (spraid_check_scmd_completed(scmd))
+ goto out;
+
+ if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) {
+ if (cmpxchg(&iod->state,
+ SPRAID_CMD_IN_FLIGHT,
+ SPRAID_CMD_TIMEOUT) == SPRAID_CMD_IN_FLIGHT) {
+ return BLK_EH_DONE;
+ }
+ }
+out:
+ return BLK_EH_RESET_TIMER;
+}
+
+/* temporarily send the abort command via the admin queue */
+static int spraid_send_abort_cmd(struct spraid_dev *hdev,
+ u32 hdid, u16 qid, u16 cid)
+{
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.abort.opcode = SPRAID_ADMIN_ABORT_CMD;
+ admin_cmd.abort.hdid = cpu_to_le32(hdid);
+ admin_cmd.abort.sqid = cpu_to_le16(qid);
+ admin_cmd.abort.cid = cpu_to_le16(cid);
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/* temporarily send the reset command via the admin queue */
+static int spraid_send_reset_cmd(struct spraid_dev *hdev, int type, u32 hdid)
+{
+ struct spraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.reset.opcode = SPRAID_ADMIN_RESET;
+ admin_cmd.reset.hdid = cpu_to_le32(hdid);
+ admin_cmd.reset.type = type;
+
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static bool spraid_change_host_state(struct spraid_dev *hdev,
+ enum spraid_state newstate)
+{
+ unsigned long flags;
+ enum spraid_state oldstate;
+ bool change = false;
+
+ spin_lock_irqsave(&hdev->state_lock, flags);
+
+ oldstate = hdev->state;
+ switch (newstate) {
+ case SPRAID_LIVE:
+ switch (oldstate) {
+ case SPRAID_NEW:
+ case SPRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SPRAID_RESETTING:
+ switch (oldstate) {
+ case SPRAID_LIVE:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case SPRAID_DELETING:
+ if (oldstate != SPRAID_DELETING)
+ change = true;
+ break;
+ case SPRAID_DEAD:
+ switch (oldstate) {
+ case SPRAID_NEW:
+ case SPRAID_LIVE:
+ case SPRAID_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (change)
+ hdev->state = newstate;
+ spin_unlock_irqrestore(&hdev->state_lock, flags);
+
+ dev_info(hdev->dev, "[%s][%d]->[%d], change[%d]\n",
+ __func__, oldstate, newstate, change);
+
+ return change;
+}
+
+static void spraid_back_fault_cqe(struct spraid_queue *ioq,
+ struct spraid_completion *cqe)
+{
+ struct spraid_dev *hdev = ioq->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct spraid_iod *iod;
+ struct request *req;
+
+ tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+ req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
+ if (unlikely(!req || !blk_mq_request_started(req)))
+ return;
+
+ scmd = blk_mq_rq_to_pdu(req);
+ iod = scsi_cmd_priv(scmd);
+
+ set_host_byte(scmd, DID_NO_CONNECT);
+ if (iod->nsge)
+ scsi_dma_unmap(scmd);
+ spraid_free_iod_res(hdev, iod);
+ scmd->scsi_done(scmd);
+ dev_warn(hdev->dev, "Back fault CQE, cid[%d] qid[%d]\n",
+ cqe->cmd_id, ioq->qid);
+}
+
+static void spraid_back_all_io(struct spraid_dev *hdev)
+{
+ int i, j;
+ struct spraid_queue *ioq;
+ struct spraid_completion cqe = { 0 };
+
+ scsi_block_requests(hdev->shost);
+
+ for (i = 1; i <= hdev->shost->nr_hw_queues; i++) {
+ ioq = &hdev->queues[i];
+ for (j = 0; j < hdev->shost->can_queue; j++) {
+ cqe.cmd_id = j;
+ spraid_back_fault_cqe(ioq, &cqe);
+ }
+ }
+
+ scsi_unblock_requests(hdev->shost);
+}
+
+static void spraid_dev_disable(struct spraid_dev *hdev, bool shutdown)
+{
+ struct spraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+ unsigned long timeout = jiffies + 600 * HZ;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ spraid_shutdown_ctrl(hdev);
+ else
+ spraid_disable_ctrl(hdev);
+ }
+
+ while (!time_after(jiffies, timeout)) {
+ if (!pci_device_is_present(hdev->pdev)) {
+ dev_info(hdev->dev, "[%s] pci_device not present;"
+ " skip wait\n", __func__);
+ break;
+ }
+ if (!spraid_wait_ready(hdev, hdev->cap, false)) {
+ dev_info(hdev->dev,
+ "[%s] wait ready success after reset\n",
+ __func__);
+ break;
+ }
+ dev_info(hdev->dev, "[%s] waiting csts_rdy ready\n", __func__);
+ }
+
+ if (hdev->queue_count == 0) {
+		dev_err(hdev->dev, "[%s] warn, queue has been deleted\n",
+ __func__);
+ return;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ spraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+ spraid_complete_cqes(adminq, start, end);
+
+ spraid_pci_disable(hdev);
+
+ spraid_back_all_io(hdev);
+}
+
+static void spraid_reset_work(struct work_struct *work)
+{
+ int ret;
+ struct spraid_dev *hdev =
+ container_of(work, struct spraid_dev, reset_work);
+
+ if (hdev->state != SPRAID_RESETTING) {
+ dev_err(hdev->dev, "[%s] err, host is not reset state\n",
+ __func__);
+ return;
+ }
+
+ dev_info(hdev->dev, "[%s] enter host reset\n", __func__);
+
+ if (hdev->ctrl_config & SPRAID_CC_ENABLE) {
+ dev_info(hdev->dev, "[%s] start dev_disable\n", __func__);
+ spraid_dev_disable(hdev, false);
+ }
+
+ ret = spraid_pci_enable(hdev);
+ if (ret)
+ goto out;
+
+ ret = spraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = spraid_setup_io_queues(hdev);
+ if (ret || hdev->online_queues <= hdev->shost->nr_hw_queues)
+ goto pci_disable;
+
+ spraid_change_host_state(hdev, SPRAID_LIVE);
+
+ spraid_send_all_aen(hdev);
+
+ return;
+
+pci_disable:
+ spraid_pci_disable(hdev);
+out:
+ spraid_change_host_state(hdev, SPRAID_DEAD);
+ dev_err(hdev->dev, "[%s] err, host reset failed\n", __func__);
+}
+
+static int spraid_reset_work_sync(struct spraid_dev *hdev)
+{
+ if (!spraid_change_host_state(hdev, SPRAID_RESETTING)) {
+ dev_info(hdev->dev, "[%s] can't change to reset state\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ if (!queue_work(spraid_wq, &hdev->reset_work)) {
+ dev_err(hdev->dev, "[%s] err, host is already in reset state\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ flush_work(&hdev->reset_work);
+ if (hdev->state != SPRAID_LIVE)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int spraid_wait_abnl_cmd_done(struct spraid_iod *iod)
+{
+ u16 times = 0;
+
+ do {
+ if (READ_ONCE(iod->state) == SPRAID_CMD_TMO_COMPLETE)
+ break;
+ msleep(500);
+ times++;
+ } while (times <= SPRAID_WAIT_ABNL_CMD_TIMEOUT);
+
+ /* wait command completion timeout after abort/reset success */
+ if (times >= SPRAID_WAIT_ABNL_CMD_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int spraid_abort_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, aborting\n", cid, hwq);
+ ret = spraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
+ if (ret != ADMIN_ERR_TIMEOUT) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed;"
+ " not found\n", cid, hwq);
+ return FAILED;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort succ\n", cid, hwq);
+ return SUCCESS;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, timeout\n",
+ cid, hwq);
+ return FAILED;
+}
+
+static int spraid_tgt_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, target reset\n",
+ cid, hwq);
+ ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_TARGET, hostdata->hdid);
+ if (ret == 0) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ dev_warn(hdev->dev,
+ "cid[%d] qid[%d]target reset failed;"
+ " not found\n", cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] target reset success\n",
+ cid, hwq);
+ return SUCCESS;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] target reset failed\n",
+ cid, hwq, ret);
+ return FAILED;
+}
+
+static int spraid_bus_reset_handler(struct scsi_cmnd *scmd)
+{
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+ struct spraid_iod *iod = scsi_cmd_priv(scmd);
+ struct spraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, bus reset\n", cid, hwq);
+ ret = spraid_send_reset_cmd(hdev, SPRAID_RESET_BUS, hostdata->hdid);
+ if (ret == 0) {
+ ret = spraid_wait_abnl_cmd_done(iod);
+ if (ret) {
+ dev_warn(hdev->dev,
+ "cid[%d] qid[%d] bus reset failed;"
+ " not found\n", cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] bus reset succ\n",
+ cid, hwq);
+ return SUCCESS;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] ret[%d] bus reset failed\n",
+ cid, hwq, ret);
+ return FAILED;
+}
+
+static int spraid_shost_reset_handler(struct scsi_cmnd *scmd)
+{
+ u16 hwq, cid;
+ struct spraid_dev *hdev = shost_priv(scmd->device->host);
+
+ scsi_print_command(scmd);
+ if (hdev->state != SPRAID_LIVE || spraid_check_scmd_completed(scmd))
+ return SUCCESS;
+
+ spraid_get_tag_from_scmd(scmd, &hwq, &cid);
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset\n", cid, hwq);
+
+ if (spraid_reset_work_sync(hdev)) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset failed\n",
+ cid, hwq);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] host reset success\n", cid, hwq);
+
+ return SUCCESS;
+}
+
+static pci_ers_result_t spraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter pci error detect, state:%d\n", state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ dev_warn(hdev->dev, "channel is normal, do nothing\n");
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ dev_warn(hdev->dev,
+ "channel io frozen, need reset controller\n");
+
+ scsi_block_requests(hdev->shost);
+
+ spraid_change_host_state(hdev, SPRAID_RESETTING);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_warn(hdev->dev, "channel io failure, request disconnect\n");
+
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t spraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "restart after slot reset\n");
+
+ pci_restore_state(pdev);
+
+ if (!queue_work(spraid_wq, &hdev->reset_work)) {
+ dev_err(hdev->dev, "[%s] err, the device is resetting state\n",
+ __func__);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ flush_work(&hdev->reset_work);
+
+ scsi_unblock_requests(hdev->shost);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void spraid_reset_done(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter spraid reset done\n");
+}
+
+static ssize_t csts_pp_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_PP_MASK);
+ ret >>= SPRAID_CSTS_PP_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_shst_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_SHST_MASK);
+ ret >>= SPRAID_CSTS_SHST_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_cfs_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS)
+ & SPRAID_CSTS_CFS_MASK);
+ ret >>= SPRAID_CSTS_CFS_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_rdy_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev))
+ ret = (readl(hdev->bar + SPRAID_REG_CSTS) & SPRAID_CSTS_RDY);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t fw_version_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct spraid_dev *hdev = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fr);
+}
+
+static DEVICE_ATTR_RO(csts_pp);
+static DEVICE_ATTR_RO(csts_shst);
+static DEVICE_ATTR_RO(csts_cfs);
+static DEVICE_ATTR_RO(csts_rdy);
+static DEVICE_ATTR_RO(fw_version);
+
+static struct device_attribute *spraid_host_attrs[] = {
+ &dev_attr_csts_pp,
+ &dev_attr_csts_shst,
+ &dev_attr_csts_cfs,
+ &dev_attr_csts_rdy,
+ &dev_attr_fw_version,
+ NULL,
+};
+
+static int spraid_get_vd_info(struct spraid_dev *hdev,
+ struct spraid_vd_info *vd_info, u16 vid)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+ admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(vd_info, data_ptr, sizeof(struct spraid_vd_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_get_bgtask(struct spraid_dev *hdev,
+ struct spraid_bgtask *bgtask)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE,
+ &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(bgtask, data_ptr, sizeof(struct spraid_bgtask));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+ ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+ vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret) {
+ vd_info->vd_status = 0;
+ vd_info->rg_id = 0xff;
+ }
+
+ ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ?
+ vd_info->vd_status : 0;
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_bgtask *bgtask;
+ struct spraid_sdev_hostdata *hostdata;
+ u8 rg_id, i, progress = 0;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ goto out;
+
+ rg_id = vd_info->rg_id;
+
+ bgtask = (struct spraid_bgtask *)vd_info;
+ ret = spraid_get_bgtask(hdev, bgtask);
+ if (ret)
+ goto out;
+ for (i = 0; i < bgtask->task_num; i++) {
+ if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+ (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+ progress = bgtask->bgtask[i].progress;
+ }
+
+out:
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+
+static struct device_attribute *spraid_dev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_raid_state,
+ &dev_attr_raid_resync,
+ NULL,
+};
+
+static struct pci_error_handlers spraid_err_handler = {
+ .error_detected = spraid_pci_error_detected,
+ .slot_reset = spraid_pci_slot_reset,
+ .reset_done = spraid_reset_done,
+};
+
+static int spraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ int ret;
+ struct spraid_dev *hdev = shost_priv(shost);
+
+ dev_info(hdev->dev, "[%s] start sysfs host reset cmd\n", __func__);
+ ret = spraid_reset_work_sync(hdev);
+ dev_info(hdev->dev, "[%s] stop sysfs host reset cmd[%d]\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static struct scsi_host_template spraid_driver_template = {
+ .module = THIS_MODULE,
+ .name = "Ramaxel Logic spraid driver",
+ .proc_name = "spraid",
+ .queuecommand = spraid_queue_command,
+ .slave_alloc = spraid_slave_alloc,
+ .slave_destroy = spraid_slave_destroy,
+ .slave_configure = spraid_slave_configure,
+ .eh_timed_out = spraid_scmd_timeout,
+ .eh_abort_handler = spraid_abort_handler,
+ .eh_target_reset_handler = spraid_tgt_reset_handler,
+ .eh_bus_reset_handler = spraid_bus_reset_handler,
+ .eh_host_reset_handler = spraid_shost_reset_handler,
+ .change_queue_depth = scsi_change_queue_depth,
+ .this_id = -1,
+ .shost_attrs = spraid_host_attrs,
+ .sdev_attrs = spraid_dev_attrs,
+ .host_reset = spraid_sysfs_host_reset,
+};
+
+static void spraid_shutdown(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ spraid_remove_io_queues(hdev);
+ spraid_disable_admin_queue(hdev, true);
+}
+
+/* bsg dispatch user command */
+static int spraid_bsg_host_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = dev_to_shost(job->dev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_bsg_request *bsg_req = job->request;
+ int ret = 0;
+
+ dev_info(hdev->dev, "[%s] msgcode[%d], msglen[%d], timeout[%d];"
+ " req_nsge[%d], req_len[%d]\n",
+ __func__, bsg_req->msgcode, job->request_len,
+ rq->timeout, job->request_payload.sg_cnt,
+ job->request_payload.payload_len);
+
+ job->reply_len = 0;
+
+ switch (bsg_req->msgcode) {
+ case SPRAID_BSG_ADM:
+ ret = spraid_user_admin_cmd(hdev, job);
+ break;
+ case SPRAID_BSG_IOQ:
+ ret = spraid_user_ioq_cmd(hdev, job);
+ break;
+ default:
+		dev_info(hdev->dev, "[%s] unsupported msgcode[%d]\n",
+ __func__, bsg_req->msgcode);
+ break;
+ }
+
+ if (ret > 0)
+ ret = ret | (ret << 8);
+
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+static inline void spraid_remove_bsg(struct spraid_dev *hdev)
+{
+ if (hdev->bsg_queue) {
+ bsg_unregister_queue(hdev->bsg_queue);
+ blk_cleanup_queue(hdev->bsg_queue);
+ }
+}
+static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct spraid_dev *hdev;
+ struct Scsi_Host *shost;
+ int node, ret;
+ char bsg_name[15];
+
+ shost = scsi_host_alloc(&spraid_driver_template, sizeof(*hdev));
+ if (!shost) {
+ dev_err(&pdev->dev, "Failed to allocate scsi host\n");
+ return -ENOMEM;
+ }
+ hdev = shost_priv(shost);
+ hdev->pdev = pdev;
+ hdev->dev = get_device(&pdev->dev);
+
+ node = dev_to_node(hdev->dev);
+ if (node == NUMA_NO_NODE) {
+ node = first_memory_node;
+ set_dev_node(hdev->dev, node);
+ }
+ hdev->numa_node = node;
+ hdev->shost = shost;
+ pci_set_drvdata(pdev, hdev);
+
+ ret = spraid_dev_map(hdev);
+ if (ret)
+ goto put_dev;
+
+ init_rwsem(&hdev->devices_rwsem);
+ INIT_WORK(&hdev->scan_work, spraid_scan_work);
+ INIT_WORK(&hdev->timesyn_work, spraid_timesyn_work);
+ INIT_WORK(&hdev->reset_work, spraid_reset_work);
+ INIT_WORK(&hdev->fw_act_work, spraid_fw_act_work);
+ spin_lock_init(&hdev->state_lock);
+
+ ret = spraid_alloc_resources(hdev);
+ if (ret)
+ goto dev_unmap;
+
+ ret = spraid_pci_enable(hdev);
+ if (ret)
+ goto resources_free;
+
+ ret = spraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = spraid_init_ctrl_info(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ ret = spraid_alloc_iod_ext_mem_pool(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ ret = spraid_setup_io_queues(hdev);
+ if (ret)
+ goto free_iod_mempool;
+
+ spraid_shost_init(hdev);
+
+ ret = scsi_add_host(hdev->shost, hdev->dev);
+ if (ret) {
+ dev_err(hdev->dev, "Add shost to system failed, ret: %d\n",
+ ret);
+ goto remove_io_queues;
+ }
+
+ snprintf(bsg_name, sizeof(bsg_name), "spraid%d", shost->host_no);
+ hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name,
+ spraid_bsg_host_dispatch,
+ spraid_cmd_size(hdev, true, false));
+ if (IS_ERR(hdev->bsg_queue)) {
+ dev_err(hdev->dev, "err, setup bsg failed\n");
+ hdev->bsg_queue = NULL;
+ goto remove_io_queues;
+ }
+
+ if (hdev->online_queues == SPRAID_ADMIN_QUEUE_NUM) {
+ dev_warn(hdev->dev, "warn only admin queue can be used\n");
+ return 0;
+ }
+
+ hdev->state = SPRAID_LIVE;
+
+ spraid_send_all_aen(hdev);
+
+ ret = spraid_dev_list_init(hdev);
+ if (ret)
+ goto remove_bsg;
+
+ ret = spraid_configure_timestamp(hdev);
+ if (ret)
+ dev_warn(hdev->dev, "init set timestamp failed\n");
+
+ ret = spraid_alloc_ioq_ptcmds(hdev);
+ if (ret)
+ goto remove_bsg;
+
+ scsi_scan_host(hdev->shost);
+
+ return 0;
+
+remove_bsg:
+ spraid_remove_bsg(hdev);
+remove_io_queues:
+ spraid_remove_io_queues(hdev);
+free_iod_mempool:
+ spraid_free_iod_ext_mem_pool(hdev);
+disable_admin_q:
+ spraid_disable_admin_queue(hdev, false);
+pci_disable:
+ spraid_pci_disable(hdev);
+resources_free:
+ spraid_free_resources(hdev);
+dev_unmap:
+ spraid_dev_unmap(hdev);
+put_dev:
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ return -ENODEV;
+}
+
+static void spraid_remove(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = hdev->shost;
+
+ dev_info(hdev->dev, "enter spraid remove\n");
+
+ spraid_change_host_state(hdev, SPRAID_DELETING);
+ flush_work(&hdev->reset_work);
+
+ if (!pci_device_is_present(pdev))
+ spraid_back_all_io(hdev);
+
+ spraid_remove_bsg(hdev);
+ scsi_remove_host(shost);
+ spraid_free_ioq_ptcmds(hdev);
+ kfree(hdev->devices);
+ spraid_remove_io_queues(hdev);
+ spraid_free_iod_ext_mem_pool(hdev);
+ spraid_disable_admin_queue(hdev, false);
+ spraid_pci_disable(hdev);
+ spraid_free_resources(hdev);
+ spraid_dev_unmap(hdev);
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ dev_info(hdev->dev, "exit spraid remove\n");
+}
+
+static const struct pci_device_id spraid_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC,
+ SPRAID_SERVER_DEVICE_HBA_DID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC,
+ SPRAID_SERVER_DEVICE_RAID_DID) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, spraid_id_table);
+
+static struct pci_driver spraid_driver = {
+ .name = "spraid",
+ .id_table = spraid_id_table,
+ .probe = spraid_probe,
+ .remove = spraid_remove,
+ .shutdown = spraid_shutdown,
+ .err_handler = &spraid_err_handler,
+};
+
+static int __init spraid_init(void)
+{
+ int ret;
+
+ spraid_wq = alloc_workqueue("spraid-wq", WQ_UNBOUND | WQ_MEM_RECLAIM |
+ WQ_SYSFS, 0);
+ if (!spraid_wq)
+ return -ENOMEM;
+
+ spraid_class = class_create(THIS_MODULE, "spraid");
+ if (IS_ERR(spraid_class)) {
+ ret = PTR_ERR(spraid_class);
+ goto destroy_wq;
+ }
+
+ ret = pci_register_driver(&spraid_driver);
+ if (ret < 0)
+ goto destroy_class;
+
+ return 0;
+
+destroy_class:
+ class_destroy(spraid_class);
+destroy_wq:
+ destroy_workqueue(spraid_wq);
+
+ return ret;
+}
+
+static void __exit spraid_exit(void)
+{
+ pci_unregister_driver(&spraid_driver);
+ class_destroy(spraid_class);
+ destroy_workqueue(spraid_wq);
+ ida_destroy(&spraid_instance_ida);
+}
+
+MODULE_AUTHOR("songyl(a)ramaxel.com");
+MODULE_DESCRIPTION("Ramaxel Memory Technology SPraid Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(SPRAID_DRV_VERSION);
+module_init(spraid_init);
+module_exit(spraid_exit);
--
2.27.0

[PATCH openEuler-1.0-LTS 1/2] blk-mq: fix abnormal free in single queue process
by Yang Yingliang 20 Dec '21
20 Dec '21
From: Zhang Wensheng <zhangwensheng5(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 185922, https://gitee.com/openeuler/kernel/issues/I4NDPF
CVE: NA
--------------------------------
Fix the problem introduced by commit 80086651acbd ("blk: Fix
lock inversion between ioc lock and bfqd lock"), which led to
blk_queue_bio() abnormally freeing requests through blk_mq_free_request().
Fix it by using __blk_put_request() instead; a hedged sketch of the two
free paths follows.
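A minimal sketch of the two free paths, assuming the 4.19-era block layer
where the legacy request_fn path still coexists with blk-mq; the helper
name and the mq_ops check below are illustrative, not part of the patch:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static inline void put_request_compat(struct request *rq)
{
	/* requests allocated by blk-mq go back through blk_mq_free_request() */
	if (rq->q->mq_ops)
		blk_mq_free_request(rq);
	else
		/* requests from the legacy blk_queue_bio() path must use
		 * __blk_put_request(); locking requirements are assumed to
		 * match the caller's context, as in the patched helper
		 */
		__blk_put_request(rq->q, rq);
}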
Fixes: 80086651acbdb ("blk: Fix lock inversion between ioc lock and bfqd lock")
Signed-off-by: Zhang Wensheng <zhangwensheng5(a)huawei.com>
Reviewed-by: Laibin Qiu <qiulaibin(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
block/blk-mq.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index bb8aabbc5c579..b3540d62c541e 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -228,7 +228,7 @@ static inline void blk_mq_free_requests(struct list_head *list)
struct request *rq = list_entry_rq(list->next);
list_del_init(&rq->queuelist);
- blk_mq_free_request(rq);
+ __blk_put_request(rq->q, rq);
}
}
--
2.25.1

[PATCH openEuler-1.0-LTS] scsi: hisi_sas: Add support for sata disk I/O errors report to libsas
by Yang Yingliang 20 Dec '21
20 Dec '21
From: Xingui Yang <yangxingui(a)huawei.com>
driver inclusion
category: feature
bugzilla: NA
CVE: NA
---------------------------
If the response frame has been written back to memory and carries the
disk error and status when a SATA I/O completes abnormally, then set the
task stat to SAS_PROTO_RESPONSE and let libsas handle it (a brief
restatement of the check follows).
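A compact restatement of the new completion check, using the mask names from
the diff below; the helper function itself is only an illustration:

/* The D2H FIS was written back (RSPNS_XFRD) and its status field carries an
 * error bit, so the response should be handed to libsas as SAS_PROTO_RESPONSE
 * instead of being treated as a device-gone condition.
 */
static bool sata_error_fis_received(u32 dw0, u16 sipc_rx_err_type)
{
	return (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
	       (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK);
}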
Signed-off-by: Xingui Yang <yangxingui(a)huawei.com>
Reviewed-by: Kangfenglong <kangfenglong(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 17 +++++++++++++----
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 4508c4a2f02fc..3c662fa5f6c0c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -470,6 +470,9 @@ struct hisi_sas_err_record_v3 {
#define RX_DATA_LEN_UNDERFLOW_OFF 6
#define RX_DATA_LEN_UNDERFLOW_MSK (1 << RX_DATA_LEN_UNDERFLOW_OFF)
+#define RX_FIS_STATUS_ERR_OFF 0
+#define RX_FIS_STATUS_ERR_MSK (1 << RX_FIS_STATUS_ERR_OFF)
+
#define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
#define HISI_SAS_MSI_COUNT_V3_HW 32
@@ -2241,17 +2244,20 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
hisi_sas_status_buf_addr_mem(slot);
u32 dma_rx_err_type = record->dma_rx_err_type;
u32 trans_tx_fail_type = record->trans_tx_fail_type;
+ u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type);
+ u32 dw0 = le32_to_cpu(complete_hdr->dw0);
+ u32 dw3 = le32_to_cpu(complete_hdr->dw3);
switch (task->task_proto) {
case SAS_PROTOCOL_SSP:
if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
ts->residual = trans_tx_fail_type;
ts->stat = SAS_DATA_UNDERRUN;
- if ((!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_GOOD_MSK)) &&
- (complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)) {
+ if (!(dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) &&
+ (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)) {
hisi_sas_set_sense_data(task, slot);
}
- } else if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
+ } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
ts->stat = SAS_QUEUE_FULL;
slot->abort = 1;
} else {
@@ -2262,7 +2268,10 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- if (complete_hdr->dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
+ if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) &&
+ (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) {
+ ts->stat = SAS_PROTO_RESPONSE;
+ } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
ts->stat = SAS_PHY_DOWN;
slot->abort = 1;
} else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
--
2.25.1

20 Dec '21
Ramaxel inclusion
category: features
bugzilla: https://gitee.com/openeuler/kernel/issues/I4JXCG
CVE: NA
Changes from v1:
1. Add more debug info.
2. Remove some unnecessary module parameters.
3. Report disks in the order of channel/target id.
4. Add a host_reset handler.
5. Use get_unaligned_be24 (see the sketch below).
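A hedged sketch of item 5: get_unaligned_be24() from <asm/unaligned.h>
replaces the open-coded byte shifts for the 6-byte CDB LBA; the wrapper
function below is hypothetical and only illustrates the call:

#include <asm/unaligned.h>

/* READ(6)/WRITE(6) carry a 21-bit LBA in big-endian bytes 1..3 of the CDB */
static u32 cdb6_lba(const u8 *cdb)
{
	return get_unaligned_be24(&cdb[1]) & 0x1FFFFF;
}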
Signed-off-by: Yanling Song <songyl(a)ramaxel.com>
Reviewed-by: Jiang Yu<yujiang(a)ramaxel.com>
---
drivers/scsi/spraid/Kconfig | 6 +-
drivers/scsi/spraid/spraid.h | 142 ++-
drivers/scsi/spraid/spraid_main.c | 1571 +++++++++++++++--------------
3 files changed, 938 insertions(+), 781 deletions(-)
diff --git a/drivers/scsi/spraid/Kconfig b/drivers/scsi/spraid/Kconfig
index 83962efaab07..bfbba3db8db0 100644
--- a/drivers/scsi/spraid/Kconfig
+++ b/drivers/scsi/spraid/Kconfig
@@ -5,7 +5,9 @@
config RAMAXEL_SPRAID
tristate "Ramaxel spraid Adapter"
depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
depends on ARM64 || X86_64
- default m
help
- This driver supports Ramaxel spraid driver.
+	  This driver supports the Ramaxel SPRxxx series
+	  RAID controllers, which have a PCIe Gen4 host
+	  interface and support SAS/SATA HDDs/SSDs.
diff --git a/drivers/scsi/spraid/spraid.h b/drivers/scsi/spraid/spraid.h
index da46d8e1b4b6..983d7af2faa8 100644
--- a/drivers/scsi/spraid/spraid.h
+++ b/drivers/scsi/spraid/spraid.h
@@ -1,4 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
#ifndef __SPRAID_H_
#define __SPRAID_H_
@@ -24,7 +25,7 @@
#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE)
#define SPRAID_AQ_DEPTH 128
-#define SPRAID_NR_AEN_COMMANDS 1
+#define SPRAID_NR_AEN_COMMANDS 16
#define SPRAID_AQ_BLK_MQ_DEPTH (SPRAID_AQ_DEPTH - SPRAID_NR_AEN_COMMANDS)
#define SPRAID_AQ_MQ_TAG_DEPTH (SPRAID_AQ_BLK_MQ_DEPTH - 1)
@@ -44,7 +45,7 @@
#define SMALL_POOL_SIZE 256
#define MAX_SMALL_POOL_NUM 16
-#define MAX_CMD_PER_DEV 32
+#define MAX_CMD_PER_DEV 64
#define MAX_CDB_LEN 32
#define SPRAID_UP_TO_MULTY4(x) (((x) + 4) & (~0x03))
@@ -53,7 +54,7 @@
#define PCI_VENDOR_ID_RAMAXEL_LOGIC 0x1E81
-#define SPRAID_SERVER_DEVICE_HAB_DID 0x2100
+#define SPRAID_SERVER_DEVICE_HBA_DID 0x2100
#define SPRAID_SERVER_DEVICE_RAID_DID 0x2200
#define IO_6_DEFAULT_TX_LEN 256
@@ -142,11 +143,15 @@ enum {
enum {
SPRAID_AEN_DEV_CHANGED = 0x00,
+ SPRAID_AEN_FW_ACT_START = 0x01,
SPRAID_AEN_HOST_PROBING = 0x10,
};
enum {
- SPRAID_AEN_TIMESYN = 0x07
+ SPRAID_AEN_TIMESYN = 0x00,
+ SPRAID_AEN_FW_ACT_FINISH = 0x02,
+ SPRAID_AEN_EVENT_MIN = 0x80,
+ SPRAID_AEN_EVENT_MAX = 0xff,
};
enum {
@@ -175,6 +180,16 @@ enum spraid_state {
SPRAID_DEAD,
};
+enum {
+ SPRAID_CARD_HBA,
+ SPRAID_CARD_RAID,
+};
+
+enum spraid_cmd_type {
+ SPRAID_CMD_ADM,
+ SPRAID_CMD_IOPT,
+};
+
struct spraid_completion {
__le32 result;
union {
@@ -217,8 +232,6 @@ struct spraid_dev {
struct dma_pool *prp_page_pool;
struct dma_pool *prp_small_pool[MAX_SMALL_POOL_NUM];
mempool_t *iod_mempool;
- struct blk_mq_tag_set admin_tagset;
- struct request_queue *admin_q;
void __iomem *bar;
u32 max_qid;
u32 num_vecs;
@@ -232,23 +245,27 @@ struct spraid_dev {
u32 ctrl_config;
u32 online_queues;
u64 cap;
- struct device ctrl_device;
- struct cdev cdev;
int instance;
struct spraid_ctrl_info *ctrl_info;
struct spraid_dev_info *devices;
- struct spraid_ioq_ptcmd *ioq_ptcmds;
+ struct spraid_cmd *adm_cmds;
+ struct list_head adm_cmd_list;
+ spinlock_t adm_cmd_lock;
+
+ struct spraid_cmd *ioq_ptcmds;
struct list_head ioq_pt_list;
spinlock_t ioq_pt_lock;
- struct work_struct aen_work;
struct work_struct scan_work;
struct work_struct timesyn_work;
struct work_struct reset_work;
+ struct work_struct fw_act_work;
enum spraid_state state;
spinlock_t state_lock;
+
+ struct request_queue *bsg_queue;
};
struct spraid_sgl_desc {
@@ -347,6 +364,35 @@ struct spraid_get_info {
__u32 rsvd12[4];
};
+struct spraid_usr_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __le32 hdid;
+ union {
+ struct {
+ __le16 subopcode;
+ __le16 rsvd1;
+ } info_0;
+ __le32 cdw2;
+ };
+ union {
+ struct {
+ __le16 data_len;
+ __le16 param_len;
+ } info_1;
+ __le32 cdw3;
+ };
+ __u64 metadata;
+ union spraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
enum {
SPRAID_CMD_FLAG_SGL_METABUF = (1 << 6),
SPRAID_CMD_FLAG_SGL_METASEG = (1 << 7),
@@ -393,6 +439,7 @@ struct spraid_admin_command {
struct spraid_get_info get_info;
struct spraid_abort_cmd abort;
struct spraid_reset_cmd reset;
+ struct spraid_usr_cmd usr_cmd;
};
};
@@ -456,9 +503,6 @@ struct spraid_ioq_command {
};
};
-#define SPRAID_IOCTL_RESET_CMD _IOWR('N', 0x80, struct spraid_passthru_common_cmd)
-#define SPRAID_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct spraid_passthru_common_cmd)
-
struct spraid_passthru_common_cmd {
__u8 opcode;
__u8 flags;
@@ -494,8 +538,6 @@ struct spraid_passthru_common_cmd {
__u32 result1;
};
-#define SPRAID_IOCTL_IOQ_CMD _IOWR('N', 0x42, struct spraid_ioq_passthru_cmd)
-
struct spraid_ioq_passthru_cmd {
__u8 opcode;
__u8 flags;
@@ -560,7 +602,21 @@ struct spraid_ioq_passthru_cmd {
__u32 result1;
};
-struct spraid_ioq_ptcmd {
+struct spraid_bsg_request {
+ u32 msgcode;
+ u32 control;
+ union {
+ struct spraid_passthru_common_cmd admcmd;
+ struct spraid_ioq_passthru_cmd ioqcmd;
+ };
+};
+
+enum {
+ SPRAID_BSG_ADM,
+ SPRAID_BSG_IOQ,
+};
+
+struct spraid_cmd {
int qid;
int cid;
u32 result0;
@@ -572,14 +628,6 @@ struct spraid_ioq_ptcmd {
struct list_head list;
};
-struct spraid_admin_request {
- struct spraid_admin_command *cmd;
- u32 result0;
- u32 result1;
- u16 flags;
- u16 status;
-};
-
struct spraid_queue {
struct spraid_dev *hdev;
spinlock_t sq_lock; /* spinlock for lock handling */
@@ -607,7 +655,6 @@ struct spraid_queue {
};
struct spraid_iod {
- struct spraid_admin_request req;
struct spraid_queue *spraidq;
enum spraid_cmd_state state;
int npages;
@@ -623,13 +670,51 @@ struct spraid_iod {
};
#define SPRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
-#define SPRAID_DEV_INFO_ATTR_HDD(attr) ((attr) & 0x02)
+#define SPRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
#define SPRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
#define SPRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
#define SPRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
#define SPRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+struct spraid_vd_info {
+ __u8 name[32];
+ __le16 id;
+ __u8 rg_id;
+ __u8 rg_level;
+ __u8 sg_num;
+ __u8 sg_disk_num;
+ __u8 vd_status;
+ __u8 vd_type;
+ __u8 rsvd1[4056];
+};
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+struct bgtask_info {
+ __u8 type;
+ __u8 progress;
+ __u8 rate;
+ __u8 rsvd0;
+ __le16 vd_id;
+ __le16 time_left;
+ __u8 rsvd1[4];
+};
+
+struct spraid_bgtask {
+ __u8 sw;
+ __u8 task_num;
+ __u8 rsvd[6];
+ struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
struct spraid_dev_info {
__le32 hdid;
__le16 target;
@@ -649,6 +734,11 @@ struct spraid_dev_list {
struct spraid_sdev_hostdata {
u32 hdid;
+ u16 max_io_kb;
+ u8 attr;
+ u8 flag;
+ u8 rg_id;
+ u8 rsvd[3];
};
#endif
diff --git a/drivers/scsi/spraid/spraid_main.c b/drivers/scsi/spraid/spraid_main.c
index a0a75ecb0027..976b7ded336a 100644
--- a/drivers/scsi/spraid/spraid_main.c
+++ b/drivers/scsi/spraid/spraid_main.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Linux spraid device driver
- * Copyright(c) 2021 Ramaxel Memory Technology, Ltd
- */
+/* Copyright(c) 2021 Ramaxel Memory Technology, Ltd */
+
+/* Ramaxel Raid SPXXX Series Linux Driver */
+
#define pr_fmt(fmt) "spraid: " fmt
#include <linux/sched/signal.h>
@@ -23,6 +23,9 @@
#include <linux/debugfs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <asm/unaligned.h>
+#include <linux/sort.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -31,28 +34,17 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_dbg.h>
+
#include "spraid.h"
static u32 admin_tmout = 60;
module_param(admin_tmout, uint, 0644);
MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
-static u32 scmd_tmout_pt = 30;
-module_param(scmd_tmout_pt, uint, 0644);
-MODULE_PARM_DESC(scmd_tmout_pt, "scsi commands timeout for passthrough(seconds)");
-
static u32 scmd_tmout_nonpt = 180;
module_param(scmd_tmout_nonpt, uint, 0644);
MODULE_PARM_DESC(scmd_tmout_nonpt, "scsi commands timeout for rawdisk&raid(seconds)");
-static u32 wait_abl_tmout = 3;
-module_param(wait_abl_tmout, uint, 0644);
-MODULE_PARM_DESC(wait_abl_tmout, "wait abnormal io timeout(seconds)");
-
-static bool use_sgl_force;
-module_param(use_sgl_force, bool, 0644);
-MODULE_PARM_DESC(use_sgl_force, "force IO use sgl format, default false");
-
static int ioq_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops ioq_depth_ops = {
.set = ioq_depth_set,
@@ -106,16 +98,20 @@ static const struct kernel_param_ops small_pool_num_ops = {
.get = param_get_byte,
};
+/* It was found that the spinlock of a single pool is heavily
+ * contended by multiple CPUs, so multiple pools are introduced
+ * to reduce the contention.
+ */
static unsigned char small_pool_num = 4;
module_param_cb(small_pool_num, &small_pool_num_ops, &small_pool_num, 0644);
MODULE_PARM_DESC(small_pool_num, "set prp small pool num, default 4, MAX 16");
static void spraid_free_queue(struct spraid_queue *spraidq);
static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result);
-static void spraid_handle_aen_vs(struct spraid_dev *hdev, u32 result);
+static void spraid_handle_aen_vs(struct spraid_dev *hdev, u32 result, u32 result1);
static DEFINE_IDA(spraid_instance_ida);
-static dev_t spraid_chr_devt;
+
static struct class *spraid_class;
#define SPRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2)
@@ -133,7 +129,7 @@ static struct workqueue_struct *spraid_wq;
#define ADMIN_TIMEOUT (admin_tmout * HZ)
#define ADMIN_ERR_TIMEOUT 32757
-#define SPRAID_WAIT_ABNL_CMD_TIMEOUT (wait_abl_tmout * 2)
+#define SPRAID_WAIT_ABNL_CMD_TIMEOUT (3 * 2)
#define SPRAID_DMA_MSK_BIT_MAX 64
@@ -147,6 +143,13 @@ enum FW_STAT_CODE {
FW_STAT_NEED_RETRY
};
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60", "NA"};
+
+static const char * const raid_states[] = {
+ "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED", "FORMATTING", "SANITIZING",
+ "INITIALIZING", "INITIALIZE_FAIL", "DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
static int ioq_depth_set(const char *val, const struct kernel_param *kp)
{
int n = 0;
@@ -263,12 +266,6 @@ static int spraid_pci_enable(struct spraid_dev *hdev)
return ret;
}
-static inline
-struct spraid_admin_request *spraid_admin_req(struct request *req)
-{
- return blk_mq_rq_to_pdu(req);
-}
-
static int spraid_npages_prp(u32 size, struct spraid_dev *hdev)
{
u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
@@ -419,7 +416,7 @@ static void spraid_submit_cmd(struct spraid_queue *spraidq, const void *cmd)
writel(spraidq->sq_tail, spraidq->q_db);
spin_unlock_irqrestore(&spraidq->sq_lock, flags);
- dev_log_dbg(spraidq->hdev->dev, "cid[%d], qid[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n",
+ dev_log_dbg(spraidq->hdev->dev, "cid[%d] qid[%d], opcode[0x%x], flags[0x%x], hdid[%u]\n",
acd->command_id, spraidq->qid, acd->opcode, acd->flags, le32_to_cpu(acd->hdid));
}
@@ -605,18 +602,15 @@ static void spraid_setup_rw_cmd(struct spraid_dev *hdev,
if (scmd->cmd_len == 6) {
datalength = (u32)(scmd->cmnd[4] == 0 ?
IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
- start_lba_lo = ((u32)scmd->cmnd[1] << 16) |
- ((u32)scmd->cmnd[2] << 8) | (u32)scmd->cmnd[3];
+ start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
start_lba_lo &= 0x1FFFFF;
}
/* 10-byte READ(0x28) or WRITE(0x2A) cdb */
else if (scmd->cmd_len == 10) {
- datalength = (u32)scmd->cmnd[8] | ((u32)scmd->cmnd[7] << 8);
- start_lba_lo = ((u32)scmd->cmnd[2] << 24) |
- ((u32)scmd->cmnd[3] << 16) |
- ((u32)scmd->cmnd[4] << 8) | (u32)scmd->cmnd[5];
+ datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
if (scmd->cmnd[1] & FUA_MASK)
control |= SPRAID_RW_FUA;
@@ -624,42 +618,26 @@ static void spraid_setup_rw_cmd(struct spraid_dev *hdev,
/* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
else if (scmd->cmd_len == 12) {
- datalength = ((u32)scmd->cmnd[6] << 24) |
- ((u32)scmd->cmnd[7] << 16) |
- ((u32)scmd->cmnd[8] << 8) | (u32)scmd->cmnd[9];
- start_lba_lo = ((u32)scmd->cmnd[2] << 24) |
- ((u32)scmd->cmnd[3] << 16) |
- ((u32)scmd->cmnd[4] << 8) | (u32)scmd->cmnd[5];
+ datalength = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
if (scmd->cmnd[1] & FUA_MASK)
control |= SPRAID_RW_FUA;
}
/* 16-byte READ(0x88) or WRITE(0x8A) cdb */
else if (scmd->cmd_len == 16) {
- datalength = ((u32)scmd->cmnd[10] << 24) |
- ((u32)scmd->cmnd[11] << 16) |
- ((u32)scmd->cmnd[12] << 8) | (u32)scmd->cmnd[13];
- start_lba_lo = ((u32)scmd->cmnd[6] << 24) |
- ((u32)scmd->cmnd[7] << 16) |
- ((u32)scmd->cmnd[8] << 8) | (u32)scmd->cmnd[9];
- start_lba_hi = ((u32)scmd->cmnd[2] << 24) |
- ((u32)scmd->cmnd[3] << 16) |
- ((u32)scmd->cmnd[4] << 8) | (u32)scmd->cmnd[5];
+ datalength = get_unaligned_be32(&scmd->cmnd[10]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
if (scmd->cmnd[1] & FUA_MASK)
control |= SPRAID_RW_FUA;
}
/* 32-byte READ(0x88) or WRITE(0x8A) cdb */
else if (scmd->cmd_len == 32) {
- datalength = ((u32)scmd->cmnd[28] << 24) |
- ((u32)scmd->cmnd[29] << 16) |
- ((u32)scmd->cmnd[30] << 8) | (u32)scmd->cmnd[31];
- start_lba_lo = ((u32)scmd->cmnd[16] << 24) |
- ((u32)scmd->cmnd[17] << 16) |
- ((u32)scmd->cmnd[18] << 8) | (u32)scmd->cmnd[19];
- start_lba_hi = ((u32)scmd->cmnd[12] << 24) |
- ((u32)scmd->cmnd[13] << 16) |
- ((u32)scmd->cmnd[14] << 8) | (u32)scmd->cmnd[15];
+ datalength = get_unaligned_be32(&scmd->cmnd[28]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[16]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[12]);
if (scmd->cmnd[10] & FUA_MASK)
control |= SPRAID_RW_FUA;
@@ -814,7 +792,7 @@ static void spraid_map_status(struct spraid_iod *iod, struct scsi_cmnd *scmd,
if (scmd->result & SAM_STAT_CHECK_CONDITION) {
memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
memcpy(scmd->sense_buffer, iod->sense, SCSI_SENSE_BUFFERSIZE);
- set_driver_byte(scmd, DRIVER_SENSE);
+ scmd->result = (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
}
break;
case FW_STAT_ABORTED:
@@ -850,14 +828,13 @@ static int spraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
int ret;
if (unlikely(!scmd)) {
- dev_err(hdev->dev, "err, scmd is null, return 0\n");
+ dev_err(hdev->dev, "err, scmd is null\n");
return 0;
}
if (unlikely(hdev->state != SPRAID_LIVE)) {
set_host_byte(scmd, DID_NO_CONNECT);
scmd->scsi_done(scmd);
- dev_err(hdev->dev, "[%s] err, hdev state is not live\n", __func__);
return 0;
}
@@ -894,7 +871,7 @@ static int spraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
WRITE_ONCE(iod->state, SPRAID_CMD_IN_FLIGHT);
spraid_submit_cmd(ioq, &ioq_cmd);
elapsed = jiffies - scmd->jiffies_at_alloc;
- dev_log_dbg(hdev->dev, "cid[%d], qid[%d] submit IO cost %3ld.%3ld seconds\n",
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d] submit IO cost %3ld.%3ld seconds\n",
cid, hwq, elapsed / HZ, elapsed % HZ);
return 0;
@@ -945,6 +922,10 @@ static int spraid_slave_alloc(struct scsi_device *sdev)
scan_host:
hostdata->hdid = le32_to_cpu(hdev->devices[idx].hdid);
+ hostdata->max_io_kb = le16_to_cpu(hdev->devices[idx].max_io_kb);
+ hostdata->attr = hdev->devices[idx].attr;
+ hostdata->flag = hdev->devices[idx].flag;
+ hostdata->rg_id = 0xff;
sdev->hostdata = hostdata;
up_read(&hdev->devices_rwsem);
return 0;
@@ -964,13 +945,13 @@ static int spraid_slave_configure(struct scsi_device *sdev)
struct spraid_sdev_hostdata *hostdata = sdev->hostdata;
u32 max_sec = sdev->host->max_sectors;
- if (!hostdata) {
+ if (hostdata) {
idx = hostdata->hdid - 1;
if (sdev->channel == hdev->devices[idx].channel &&
sdev->id == le16_to_cpu(hdev->devices[idx].target) &&
sdev->lun < hdev->devices[idx].lun) {
if (SPRAID_DEV_INFO_ATTR_PT(hdev->devices[idx].attr))
- timeout = scmd_tmout_pt * HZ;
+ timeout = 30 * HZ;
else
timeout = scmd_tmout_nonpt * HZ;
max_sec = le16_to_cpu(hdev->devices[idx].max_io_kb) << 1;
@@ -991,7 +972,6 @@ static int spraid_slave_configure(struct scsi_device *sdev)
if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
max_sec = sdev->host->max_sectors;
- blk_queue_max_hw_sectors(sdev->request_queue, max_sec);
dev_info(hdev->dev, "[%s] sdev->channel:id:lun[%d:%d:%lld], scmd_timeout[%d]s, maxsec[%d]\n",
__func__, sdev->channel, sdev->id, sdev->lun, timeout / HZ, max_sec);
@@ -1176,6 +1156,75 @@ static inline bool spraid_cqe_pending(struct spraid_queue *spraidq)
spraidq->cq_phase;
}
+static void spraid_sata_report_zone_handle(struct scsi_cmnd *scmd, struct spraid_iod *iod)
+{
+ int i = 0;
+ unsigned int bytes = 0;
+ struct scatterlist *sg = scsi_sglist(scmd);
+
+ scsi_for_each_sg(scmd, sg, iod->nsge, i) {
+ unsigned int offset = 0;
+
+ if (bytes == 0) {
+ char *hdr;
+ u32 list_length;
+ u64 max_lba, opt_lba;
+ u16 same;
+
+ hdr = sg_virt(sg);
+
+ list_length = get_unaligned_le32(&hdr[0]);
+ same = get_unaligned_le16(&hdr[4]);
+ max_lba = get_unaligned_le64(&hdr[8]);
+ opt_lba = get_unaligned_le64(&hdr[16]);
+ put_unaligned_be32(list_length, &hdr[0]);
+ hdr[4] = same & 0xf;
+ put_unaligned_be64(max_lba, &hdr[8]);
+ put_unaligned_be64(opt_lba, &hdr[16]);
+ offset += 64;
+ bytes += 64;
+ }
+ while (offset < sg_dma_len(sg)) {
+ char *rec;
+ u8 cond, type, non_seq, reset;
+ u64 size, start, wp;
+
+ rec = sg_virt(sg) + offset;
+ type = rec[0] & 0xf;
+ cond = (rec[1] >> 4) & 0xf;
+ non_seq = (rec[1] & 2);
+ reset = (rec[1] & 1);
+ size = get_unaligned_le64(&rec[8]);
+ start = get_unaligned_le64(&rec[16]);
+ wp = get_unaligned_le64(&rec[24]);
+ rec[0] = type;
+ rec[1] = (cond << 4) | non_seq | reset;
+ put_unaligned_be64(size, &rec[8]);
+ put_unaligned_be64(start, &rec[16]);
+ put_unaligned_be64(wp, &rec[24]);
+ WARN_ON(offset + 64 > sg_dma_len(sg));
+ offset += 64;
+ bytes += 64;
+ }
+ }
+}
+
+static inline void spraid_handle_ata_cmd(struct spraid_dev *hdev, struct scsi_cmnd *scmd,
+ struct spraid_iod *iod)
+{
+ if (hdev->ctrl_info->card_type != SPRAID_CARD_HBA)
+ return;
+
+ switch (scmd->cmnd[0]) {
+ case ZBC_IN:
+ dev_info(hdev->dev, "[%s] process report zone\n", __func__);
+ spraid_sata_report_zone_handle(scmd, iod);
+ break;
+ default:
+ break;
+ }
+}
+
static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq, struct spraid_completion *cqe)
{
struct spraid_dev *hdev = ioq->hdev;
@@ -1197,12 +1246,12 @@ static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq, struct spraid_com
iod = scsi_cmd_priv(scmd);
elapsed = jiffies - scmd->jiffies_at_alloc;
- dev_log_dbg(hdev->dev, "cid[%d], qid[%d] finish IO cost %3ld.%3ld seconds\n",
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n",
cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
if (cmpxchg(&iod->state, SPRAID_CMD_IN_FLIGHT, SPRAID_CMD_COMPLETE) !=
SPRAID_CMD_IN_FLIGHT) {
- dev_warn(hdev->dev, "cid[%d], qid[%d] enters abnormal handler, cost %3ld.%3ld seconds\n",
+ dev_warn(hdev->dev, "cid[%d] qid[%d] enters abnormal handler, cost %3ld.%3ld seconds\n",
cqe->cmd_id, ioq->qid, elapsed / HZ, elapsed % HZ);
WRITE_ONCE(iod->state, SPRAID_CMD_TMO_COMPLETE);
@@ -1215,6 +1264,8 @@ static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq, struct spraid_com
return;
}
+ spraid_handle_ata_cmd(hdev, scmd, iod);
+
spraid_map_status(iod, scmd, cqe);
if (iod->nsge) {
iod->nsge = 0;
@@ -1224,38 +1275,36 @@ static void spraid_complete_ioq_cmnd(struct spraid_queue *ioq, struct spraid_com
scmd->scsi_done(scmd);
}
-static inline void spraid_end_admin_request(struct request *req, __le16 status,
- __le32 result0, __le32 result1)
-{
- struct spraid_admin_request *rq = spraid_admin_req(req);
-
- rq->status = le16_to_cpu(status) >> 1;
- rq->result0 = le32_to_cpu(result0);
- rq->result1 = le32_to_cpu(result1);
- blk_mq_complete_request(req);
-}
-
static void spraid_complete_adminq_cmnd(struct spraid_queue *adminq, struct spraid_completion *cqe)
{
- struct blk_mq_tags *tags = adminq->hdev->admin_tagset.tags[0];
- struct request *req;
+ struct spraid_dev *hdev = adminq->hdev;
+ struct spraid_cmd *adm_cmd;
- req = blk_mq_tag_to_rq(tags, cqe->cmd_id);
- if (unlikely(!req)) {
+ adm_cmd = hdev->adm_cmds + cqe->cmd_id;
+ if (unlikely(adm_cmd->state == SPRAID_CMD_IDLE)) {
dev_warn(adminq->hdev->dev, "Invalid id %d completed on queue %d\n",
cqe->cmd_id, le16_to_cpu(cqe->sq_id));
return;
}
- spraid_end_admin_request(req, cqe->status, cqe->result, cqe->result1);
+
+ adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+ adm_cmd->result0 = le32_to_cpu(cqe->result);
+ adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&adm_cmd->cmd_done);
}
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid);
+
static void spraid_complete_aen(struct spraid_queue *spraidq, struct spraid_completion *cqe)
{
struct spraid_dev *hdev = spraidq->hdev;
u32 result = le32_to_cpu(cqe->result);
- dev_info(hdev->dev, "rcv aen, status[%x], result[%x]\n",
- le16_to_cpu(cqe->status) >> 1, result);
+ dev_info(hdev->dev, "rcv aen, cid[%d], status[0x%x], result[0x%x]\n",
+ cqe->cmd_id, le16_to_cpu(cqe->status) >> 1, result);
+
+ spraid_send_aen(hdev, cqe->cmd_id);
if ((le16_to_cpu(cqe->status) >> 1) != SPRAID_SC_SUCCESS)
return;
@@ -1264,22 +1313,19 @@ static void spraid_complete_aen(struct spraid_queue *spraidq, struct spraid_comp
spraid_handle_aen_notice(hdev, result);
break;
case SPRAID_AEN_VS:
- spraid_handle_aen_vs(hdev, result);
+ spraid_handle_aen_vs(hdev, result, le32_to_cpu(cqe->result1));
break;
default:
dev_warn(hdev->dev, "Unsupported async event type: %u\n",
result & 0x7);
break;
}
- queue_work(spraid_wq, &hdev->aen_work);
}
-static void spraid_put_ioq_ptcmd(struct spraid_dev *hdev, struct spraid_ioq_ptcmd *cmd);
-
static void spraid_complete_ioq_sync_cmnd(struct spraid_queue *ioq, struct spraid_completion *cqe)
{
struct spraid_dev *hdev = ioq->hdev;
- struct spraid_ioq_ptcmd *ptcmd;
+ struct spraid_cmd *ptcmd;
ptcmd = hdev->ioq_ptcmds + (ioq->qid - 1) * SPRAID_PTCMDS_PERQ +
cqe->cmd_id - SPRAID_IO_BLK_MQ_DEPTH;
@@ -1289,8 +1335,6 @@ static void spraid_complete_ioq_sync_cmnd(struct spraid_queue *ioq, struct sprai
ptcmd->result1 = le32_to_cpu(cqe->result1);
complete(&ptcmd->cmd_done);
-
- spraid_put_ioq_ptcmd(hdev, ptcmd);
}
static inline void spraid_handle_cqe(struct spraid_queue *spraidq, u16 idx)
@@ -1304,7 +1348,7 @@ static inline void spraid_handle_cqe(struct spraid_queue *spraidq, u16 idx)
return;
}
- dev_log_dbg(hdev->dev, "cid[%d], qid[%d], result[0x%x], sq_id[%d], status[0x%x]\n",
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d], result[0x%x], sq_id[%d], status[0x%x]\n",
cqe->cmd_id, spraidq->qid, le32_to_cpu(cqe->result),
le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
@@ -1452,62 +1496,119 @@ static u32 spraid_bar_size(struct spraid_dev *hdev, u32 nr_ioqs)
return (SPRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride));
}
-static inline void spraid_clear_spraid_request(struct request *req)
+static int spraid_alloc_admin_cmds(struct spraid_dev *hdev)
{
- if (!(req->rq_flags & RQF_DONTPREP)) {
- spraid_admin_req(req)->flags = 0;
- req->rq_flags |= RQF_DONTPREP;
+ int i;
+
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+ spin_lock_init(&hdev->adm_cmd_lock);
+
+ hdev->adm_cmds = kcalloc_node(SPRAID_AQ_BLK_MQ_DEPTH, sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->adm_cmds) {
+ dev_err(hdev->dev, "Alloc admin cmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SPRAID_AQ_BLK_MQ_DEPTH; i++) {
+ hdev->adm_cmds[i].qid = 0;
+ hdev->adm_cmds[i].cid = i;
+ list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
}
+
+ dev_info(hdev->dev, "Alloc admin cmds success, num[%d]\n", SPRAID_AQ_BLK_MQ_DEPTH);
+
+ return 0;
}
-static struct request *spraid_alloc_admin_request(struct request_queue *q,
- struct spraid_admin_command *cmd,
- blk_mq_req_flags_t flags)
+static void spraid_free_admin_cmds(struct spraid_dev *hdev)
{
- u32 op = COMMAND_IS_WRITE(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
- struct request *req;
+ kfree(hdev->adm_cmds);
+ hdev->adm_cmds = NULL;
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
+static struct spraid_cmd *spraid_get_cmd(struct spraid_dev *hdev, enum spraid_cmd_type type)
+{
+ struct spraid_cmd *cmd = NULL;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
- req = blk_mq_alloc_request(q, op, flags);
- if (IS_ERR(req))
- return req;
- req->cmd_flags |= REQ_FAILFAST_DRIVER;
- spraid_clear_spraid_request(req);
- spraid_admin_req(req)->cmd = cmd;
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(slock, flags);
+ dev_err(hdev->dev, "err, cmd[%d] list empty\n", type);
+ return NULL;
+ }
+ cmd = list_entry(head->next, struct spraid_cmd, list);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(slock, flags);
+
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IN_FLIGHT);
- return req;
+ return cmd;
}
-static int spraid_submit_admin_sync_cmd(struct request_queue *q,
- struct spraid_admin_command *cmd,
- u32 *result, void *buffer,
- u32 bufflen, u32 timeout, int at_head, blk_mq_req_flags_t flags)
+static void spraid_put_cmd(struct spraid_dev *hdev, struct spraid_cmd *cmd,
+ enum spraid_cmd_type type)
{
- struct request *req;
- int ret;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
- req = spraid_alloc_admin_request(q, cmd, flags);
- if (IS_ERR(req))
- return PTR_ERR(req);
+ if (type == SPRAID_CMD_IOPT) {
+ head = &hdev->ioq_pt_list;
+ slock = &hdev->ioq_pt_lock;
+ }
- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
- if (buffer && bufflen) {
- ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
- if (ret)
- goto out;
+ spin_lock_irqsave(slock, flags);
+ WRITE_ONCE(cmd->state, SPRAID_CMD_IDLE);
+ list_add_tail(&cmd->list, head);
+ spin_unlock_irqrestore(slock, flags);
+}
+
+
+static int spraid_submit_admin_sync_cmd(struct spraid_dev *hdev, struct spraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout)
+{
+ struct spraid_cmd *adm_cmd = spraid_get_cmd(hdev, SPRAID_CMD_ADM);
+
+ if (!adm_cmd) {
+ dev_err(hdev->dev, "err, get admin cmd failed\n");
+ return -EFAULT;
}
- blk_execute_rq(req->q, NULL, req, at_head);
- if (result)
- *result = spraid_admin_req(req)->result0;
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
- if (spraid_admin_req(req)->flags & SPRAID_REQ_CANCELLED)
- ret = -EINTR;
- else
- ret = spraid_admin_req(req)->status;
+ init_completion(&adm_cmd->cmd_done);
-out:
- blk_mq_free_request(req);
- return ret;
+ cmd->common.command_id = adm_cmd->cid;
+ spraid_submit_cmd(&hdev->queues[0], cmd);
+
+ if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ __func__, adm_cmd->cid, adm_cmd->qid, cmd->usr_cmd.opcode,
+ cmd->usr_cmd.info_0.subopcode);
+ WRITE_ONCE(adm_cmd->state, SPRAID_CMD_TIMEOUT);
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+ return -EINVAL;
+ }
+
+ if (result0)
+ *result0 = adm_cmd->result0;
+ if (result1)
+ *result1 = adm_cmd->result1;
+
+ spraid_put_cmd(hdev, adm_cmd, SPRAID_CMD_ADM);
+
+ return adm_cmd->status;
}
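The new admin path above drops the blk-mq tag set entirely: a command slot is taken from a driver-managed free list, a completion is initialised, the command is posted to the hardware queue, and the submitter waits on that completion with a timeout before returning the slot. A minimal user-space sketch of the same wait/complete-with-timeout pattern (hypothetical names, pthread primitives standing in for kernel completions, not the driver's actual API) looks like this:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for struct spraid_cmd: one slot per in-flight command. */
struct demo_cmd {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	bool            completed;
	int             status;
};

/* Completion side, analogous to spraid_complete_adminq_cmnd() signalling cmd_done. */
static void demo_complete(struct demo_cmd *cmd, int status)
{
	pthread_mutex_lock(&cmd->lock);
	cmd->status = status;
	cmd->completed = true;
	pthread_cond_signal(&cmd->done);
	pthread_mutex_unlock(&cmd->lock);
}

/* Submit side: wait for the completion or give up after timeout_sec seconds. */
static int demo_submit_sync(struct demo_cmd *cmd, unsigned int timeout_sec)
{
	struct timespec ts;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_sec;

	pthread_mutex_lock(&cmd->lock);
	while (!cmd->completed && err == 0)
		err = pthread_cond_timedwait(&cmd->done, &cmd->lock, &ts);
	pthread_mutex_unlock(&cmd->lock);

	return cmd->completed ? cmd->status : -1;	/* -1 means timed out */
}

static void *firmware_thread(void *arg)
{
	demo_complete(arg, 0);	/* pretend the controller finished the command */
	return NULL;
}

int main(void)
{
	struct demo_cmd cmd = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, firmware_thread, &cmd);
	printf("status = %d\n", demo_submit_sync(&cmd, 5));
	pthread_join(t, NULL);
	return 0;
}

In the driver, a timed-out slot is marked and returned to the free list, so a completion that arrives afterwards is detected as invalid in spraid_complete_adminq_cmnd() and ignored.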
static int spraid_create_cq(struct spraid_dev *hdev, u16 qid,
@@ -1524,8 +1625,7 @@ static int spraid_create_cq(struct spraid_dev *hdev, u16 qid,
admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector);
- return spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- NULL, 0, 0, 0, 0);
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
}
static int spraid_create_sq(struct spraid_dev *hdev, u16 qid,
@@ -1542,8 +1642,7 @@ static int spraid_create_sq(struct spraid_dev *hdev, u16 qid,
admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
admin_cmd.create_sq.cqid = cpu_to_le16(qid);
- return spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- NULL, 0, 0, 0, 0);
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
}
static void spraid_free_queue(struct spraid_queue *spraidq)
@@ -1581,8 +1680,7 @@ static int spraid_delete_queue(struct spraid_dev *hdev, u8 op, u16 id)
admin_cmd.delete_queue.opcode = op;
admin_cmd.delete_queue.qid = cpu_to_le16(id);
- ret = spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- NULL, 0, 0, 0, 0);
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
if (ret)
dev_err(hdev->dev, "Delete %s:[%d] failed\n",
@@ -1663,19 +1761,28 @@ static int spraid_set_features(struct spraid_dev *hdev, u32 fid, u32 dword11, vo
size_t buflen, u32 *result)
{
struct spraid_admin_command admin_cmd;
- u32 res;
int ret;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+
+ if (buffer && buflen) {
+ data_ptr = dma_alloc_coherent(hdev->dev, buflen, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memcpy(data_ptr, buffer, buflen);
+ }
memset(&admin_cmd, 0, sizeof(admin_cmd));
admin_cmd.features.opcode = SPRAID_ADMIN_SET_FEATURES;
admin_cmd.features.fid = cpu_to_le32(fid);
admin_cmd.features.dword11 = cpu_to_le32(dword11);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
- ret = spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, &res,
- buffer, buflen, 0, 0, 0);
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, result, NULL, 0);
- if (!ret && result)
- *result = res;
+ if (data_ptr)
+ dma_free_coherent(hdev->dev, buflen, data_ptr, data_dma);
return ret;
}
@@ -1764,8 +1871,7 @@ static int spraid_setup_io_queues(struct spraid_dev *hdev)
break;
}
dev_info(hdev->dev, "[%s] max_qid: %d, queue_count: %d, online_queue: %d, ioq_depth: %d\n",
- __func__, hdev->max_qid, hdev->queue_count,
- hdev->online_queues, hdev->ioq_depth);
+ __func__, hdev->max_qid, hdev->queue_count, hdev->online_queues, hdev->ioq_depth);
return spraid_create_io_queues(hdev);
}
@@ -1889,10 +1995,11 @@ static int spraid_get_dev_list(struct spraid_dev *hdev, struct spraid_dev_info *
u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
struct spraid_admin_command admin_cmd;
struct spraid_dev_list *list_buf;
+ dma_addr_t data_dma = 0;
u32 i, idx, hdid, ndev;
int ret = 0;
- list_buf = kmalloc(sizeof(*list_buf), GFP_KERNEL);
+ list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
if (!list_buf)
return -ENOMEM;
@@ -1901,9 +2008,9 @@ static int spraid_get_dev_list(struct spraid_dev *hdev, struct spraid_dev_info *
admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
admin_cmd.get_info.type = SPRAID_GET_INFO_DEV_LIST;
admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
- ret = spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL, list_buf,
- sizeof(*list_buf), 0, 0, 0);
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
if (ret) {
dev_err(hdev->dev, "Get device list failed, nd: %u, idx: %u, ret: %d\n",
@@ -1916,12 +2023,11 @@ static int spraid_get_dev_list(struct spraid_dev *hdev, struct spraid_dev_info *
for (i = 0; i < ndev; i++) {
hdid = le32_to_cpu(list_buf->devices[i].hdid);
- dev_info(hdev->dev, "list_buf->devices[%d], hdid: %u target: %d, channel: %d, lun: %d, attr[%x]\n",
- i, hdid,
- le16_to_cpu(list_buf->devices[i].target),
- list_buf->devices[i].channel,
- list_buf->devices[i].lun,
- list_buf->devices[i].attr);
+ dev_info(hdev->dev, "list_buf->devices[%d], hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ i, hdid, le16_to_cpu(list_buf->devices[i].target),
+ list_buf->devices[i].channel,
+ list_buf->devices[i].lun,
+ list_buf->devices[i].attr);
if (hdid > nd || hdid == 0) {
dev_err(hdev->dev, "err, hdid[%d] invalid\n", hdid);
continue;
@@ -1936,21 +2042,29 @@ static int spraid_get_dev_list(struct spraid_dev *hdev, struct spraid_dev_info *
}
out:
- kfree(list_buf);
+ dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, data_dma);
return ret;
}
-static void spraid_send_aen(struct spraid_dev *hdev)
+static void spraid_send_aen(struct spraid_dev *hdev, u16 cid)
{
struct spraid_queue *adminq = &hdev->queues[0];
struct spraid_admin_command admin_cmd;
memset(&admin_cmd, 0, sizeof(admin_cmd));
admin_cmd.common.opcode = SPRAID_ADMIN_ASYNC_EVENT;
- admin_cmd.common.command_id = SPRAID_AQ_BLK_MQ_DEPTH;
+ admin_cmd.common.command_id = cid;
spraid_submit_cmd(adminq, &admin_cmd);
- dev_info(hdev->dev, "send aen, cid[%d]\n", SPRAID_AQ_BLK_MQ_DEPTH);
+ dev_info(hdev->dev, "send aen, cid[%d]\n", cid);
+}
+
+static inline void spraid_send_all_aen(struct spraid_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->ctrl_info->aerl; i++)
+ spraid_send_aen(hdev, i + SPRAID_AQ_BLK_MQ_DEPTH);
}
static int spraid_add_device(struct spraid_dev *hdev, struct spraid_dev_info *device)
@@ -1958,6 +2072,10 @@ static int spraid_add_device(struct spraid_dev *hdev, struct spraid_dev_info *de
struct Scsi_Host *shost = hdev->shost;
struct scsi_device *sdev;
+ dev_info(hdev->dev, "add device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
sdev = scsi_device_lookup(shost, device->channel, le16_to_cpu(device->target), 0);
if (sdev) {
dev_warn(hdev->dev, "Device is already exist, channel: %d, target_id: %d, lun: %d\n",
@@ -1974,9 +2092,13 @@ static int spraid_rescan_device(struct spraid_dev *hdev, struct spraid_dev_info
struct Scsi_Host *shost = hdev->shost;
struct scsi_device *sdev;
+ dev_info(hdev->dev, "rescan device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(device->hdid), le16_to_cpu(device->target),
+ device->channel, device->lun, device->attr);
+
sdev = scsi_device_lookup(shost, device->channel, le16_to_cpu(device->target), 0);
if (!sdev) {
- dev_warn(hdev->dev, "Device is not exit, channel: %d, target_id: %d, lun: %d\n",
+ dev_warn(hdev->dev, "device is not exit rescan it, channel: %d, target_id: %d, lun: %d\n",
device->channel, le16_to_cpu(device->target), 0);
return -ENODEV;
}
@@ -1991,9 +2113,13 @@ static int spraid_remove_device(struct spraid_dev *hdev, struct spraid_dev_info
struct Scsi_Host *shost = hdev->shost;
struct scsi_device *sdev;
+ dev_info(hdev->dev, "remove device, hdid: %u target: %d, channel: %d, lun: %d, attr[0x%x]\n",
+ le32_to_cpu(org_device->hdid), le16_to_cpu(org_device->target),
+ org_device->channel, org_device->lun, org_device->attr);
+
sdev = scsi_device_lookup(shost, org_device->channel, le16_to_cpu(org_device->target), 0);
if (!sdev) {
- dev_warn(hdev->dev, "Device is not exit, channel: %d, target_id: %d, lun: %d\n",
+ dev_warn(hdev->dev, "device is not exit remove it, channel: %d, target_id: %d, lun: %d\n",
org_device->channel, le16_to_cpu(org_device->target), 0);
return -ENODEV;
}
@@ -2029,36 +2155,54 @@ static int spraid_dev_list_init(struct spraid_dev *hdev)
return 0;
}
+static int luntarget_cmp_func(const void *l, const void *r)
+{
+ const struct spraid_dev_info *ln = l;
+ const struct spraid_dev_info *rn = r;
+
+ if (ln->channel == rn->channel)
+ return ln->target - rn->target;
+
+ return ln->channel - rn->channel;
+}
+
static void spraid_scan_work(struct work_struct *work)
{
struct spraid_dev *hdev =
container_of(work, struct spraid_dev, scan_work);
struct spraid_dev_info *devices, *org_devices;
+ struct spraid_dev_info *sortdevice;
u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
u8 flag, org_flag;
int i, ret;
+ int count = 0;
devices = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
if (!devices)
return;
+
+ sortdevice = kcalloc(nd, sizeof(struct spraid_dev_info), GFP_KERNEL);
+ if (!sortdevice)
+ goto free_list;
+
ret = spraid_get_dev_list(hdev, devices);
if (ret)
- goto free_list;
+ goto free_all;
org_devices = hdev->devices;
for (i = 0; i < nd; i++) {
org_flag = org_devices[i].flag;
flag = devices[i].flag;
- dev_log_dbg(hdev->dev, "i: %d, org_flag: 0x%x, flag: 0x%x\n",
- i, org_flag, flag);
+ dev_log_dbg(hdev->dev, "i: %d, org_flag: 0x%x, flag: 0x%x\n", i, org_flag, flag);
if (SPRAID_DEV_INFO_FLAG_VALID(flag)) {
if (!SPRAID_DEV_INFO_FLAG_VALID(org_flag)) {
down_write(&hdev->devices_rwsem);
memcpy(&org_devices[i], &devices[i],
- sizeof(struct spraid_dev_info));
+ sizeof(struct spraid_dev_info));
+ memcpy(&sortdevice[count++], &devices[i],
+ sizeof(struct spraid_dev_info));
up_write(&hdev->devices_rwsem);
- spraid_add_device(hdev, &devices[i]);
} else if (SPRAID_DEV_INFO_FLAG_CHANGE(flag)) {
spraid_rescan_device(hdev, &devices[i]);
}
@@ -2071,6 +2215,16 @@ static void spraid_scan_work(struct work_struct *work)
}
}
}
+
+ dev_info(hdev->dev, "scan work add device count = %d\n", count);
+
+ sort(sortdevice, count, sizeof(sortdevice[0]), luntarget_cmp_func, NULL);
+
+ for (i = 0; i < count; i++)
+ spraid_add_device(hdev, &sortdevice[i]);
+
+free_all:
+ kfree(sortdevice);
free_list:
kfree(devices);
}
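The reworked scan path now stages newly valid entries in sortdevice[], orders them by channel and then target with luntarget_cmp_func(), and only then registers them, so targets show up in a stable order. A small stand-alone illustration of that comparator (made-up struct and sample values, qsort in place of the kernel's sort()):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Hypothetical reduced view of spraid_dev_info: just the two sort keys. */
struct demo_dev {
	uint8_t  channel;
	uint16_t target;
};

/* Same ordering as luntarget_cmp_func(): channel first, then target. */
static int luntarget_cmp(const void *l, const void *r)
{
	const struct demo_dev *ln = l, *rn = r;

	if (ln->channel == rn->channel)
		return ln->target - rn->target;
	return ln->channel - rn->channel;
}

int main(void)
{
	struct demo_dev devs[] = { {1, 3}, {0, 7}, {1, 0}, {0, 2} };
	size_t i, n = sizeof(devs) / sizeof(devs[0]);

	qsort(devs, n, sizeof(devs[0]), luntarget_cmp);
	for (i = 0; i < n; i++)
		printf("channel %u target %u\n",
		       (unsigned int)devs[i].channel, (unsigned int)devs[i].target);
	return 0;
}

Only the registration order changes; hdev->devices[] itself is still updated in place under devices_rwsem as before.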
@@ -2083,6 +2237,15 @@ static void spraid_timesyn_work(struct work_struct *work)
spraid_configure_timestamp(hdev);
}
+static int spraid_init_ctrl_info(struct spraid_dev *hdev);
+static void spraid_fw_act_work(struct work_struct *work)
+{
+ struct spraid_dev *hdev = container_of(work, struct spraid_dev, fw_act_work);
+
+ if (spraid_init_ctrl_info(hdev))
+ dev_err(hdev->dev, "get ctrl info failed after fw act\n");
+}
+
static void spraid_queue_scan(struct spraid_dev *hdev)
{
queue_work(spraid_wq, &hdev->scan_work);
@@ -2094,6 +2257,9 @@ static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result)
case SPRAID_AEN_DEV_CHANGED:
spraid_queue_scan(hdev);
break;
+ case SPRAID_AEN_FW_ACT_START:
+ dev_info(hdev->dev, "fw activation starting\n");
+ break;
case SPRAID_AEN_HOST_PROBING:
break;
default:
@@ -2101,25 +2267,25 @@ static void spraid_handle_aen_notice(struct spraid_dev *hdev, u32 result)
}
}
-static void spraid_handle_aen_vs(struct spraid_dev *hdev, u32 result)
+static void spraid_handle_aen_vs(struct spraid_dev *hdev, u32 result, u32 result1)
{
- switch (result) {
+ switch ((result & 0xff00) >> 8) {
case SPRAID_AEN_TIMESYN:
queue_work(spraid_wq, &hdev->timesyn_work);
break;
+ case SPRAID_AEN_FW_ACT_FINISH:
+ dev_info(hdev->dev, "fw activation finish\n");
+ queue_work(spraid_wq, &hdev->fw_act_work);
+ break;
+ case SPRAID_AEN_EVENT_MIN ... SPRAID_AEN_EVENT_MAX:
+ dev_info(hdev->dev, "rcv card event[%d], param1[0x%x] param2[0x%x]\n",
+ (result & 0xff00) >> 8, result, result1);
+ break;
default:
- dev_warn(hdev->dev, "async event result: %x\n", result);
+ dev_warn(hdev->dev, "async event result: 0x%x\n", result);
}
}
-static void spraid_async_event_work(struct work_struct *work)
-{
- struct spraid_dev *hdev =
- container_of(work, struct spraid_dev, aen_work);
-
- spraid_send_aen(hdev);
-}
-
static int spraid_alloc_resources(struct spraid_dev *hdev)
{
int ret, nqueue;
@@ -2149,10 +2315,16 @@ static int spraid_alloc_resources(struct spraid_dev *hdev)
goto destroy_dma_pools;
}
+ ret = spraid_alloc_admin_cmds(hdev);
+ if (ret)
+ goto free_queues;
+
dev_info(hdev->dev, "[%s] queues num: %d\n", __func__, nqueue);
return 0;
+free_queues:
+ kfree(hdev->queues);
destroy_dma_pools:
spraid_destroy_dma_pools(hdev);
free_ctrl_info:
@@ -2164,50 +2336,18 @@ static int spraid_alloc_resources(struct spraid_dev *hdev)
static void spraid_free_resources(struct spraid_dev *hdev)
{
+ spraid_free_admin_cmds(hdev);
kfree(hdev->queues);
spraid_destroy_dma_pools(hdev);
kfree(hdev->ctrl_info);
ida_free(&spraid_instance_ida, hdev->instance);
}
-static void spraid_setup_passthrough(struct request *req, struct spraid_admin_command *cmd)
-{
- memcpy(cmd, spraid_admin_req(req)->cmd, sizeof(*cmd));
- cmd->common.flags &= ~SPRAID_CMD_FLAG_SGL_ALL;
-}
-
-static inline void spraid_clear_hreq(struct request *req)
-{
- if (!(req->rq_flags & RQF_DONTPREP)) {
- spraid_admin_req(req)->flags = 0;
- req->rq_flags |= RQF_DONTPREP;
- }
-}
-
-static blk_status_t spraid_setup_admin_cmd(struct request *req, struct spraid_admin_command *cmd)
+static void spraid_bsg_unmap_data(struct spraid_dev *hdev, struct bsg_job *job)
{
- spraid_clear_hreq(req);
-
- memset(cmd, 0, sizeof(*cmd));
- switch (req_op(req)) {
- case REQ_OP_DRV_IN:
- case REQ_OP_DRV_OUT:
- spraid_setup_passthrough(req, cmd);
- break;
- default:
- WARN_ON_ONCE(1);
- return BLK_STS_IOERR;
- }
-
- cmd->common.command_id = req->tag;
- return BLK_STS_OK;
-}
-
-static void spraid_unmap_data(struct spraid_dev *hdev, struct request *req)
-{
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- enum dma_data_direction dma_dir = rq_data_dir(req) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
if (iod->nsge)
dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
@@ -2215,36 +2355,36 @@ static void spraid_unmap_data(struct spraid_dev *hdev, struct request *req)
spraid_free_iod_res(hdev, iod);
}
-static blk_status_t spraid_admin_map_data(struct spraid_dev *hdev, struct request *req,
- struct spraid_admin_command *cmd)
+static int spraid_bsg_map_data(struct spraid_dev *hdev, struct bsg_job *job,
+ struct spraid_admin_command *cmd)
{
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- struct request_queue *admin_q = req->q;
- enum dma_data_direction dma_dir = rq_data_dir(req) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE;
- blk_status_t ret = BLK_STS_IOERR;
- int nr_mapped;
- int res;
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_iod *iod = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ iod->sg = job->request_payload.sg_list;
+ iod->nsge = job->request_payload.sg_cnt;
+ iod->length = job->request_payload.payload_len;
+ iod->use_sgl = false;
+ iod->npages = -1;
+ iod->sg_drv_mgmt = false;
- sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
- iod->nsge = blk_rq_map_sg(admin_q, req, iod->sg);
if (!iod->nsge)
goto out;
- dev_info(hdev->dev, "nseg: %u, nsge: %u\n",
- blk_rq_nr_phys_segments(req), iod->nsge);
-
- ret = BLK_STS_RESOURCE;
- nr_mapped = dma_map_sg_attrs(hdev->dev, iod->sg, iod->nsge, dma_dir, DMA_ATTR_NO_WARN);
- if (!nr_mapped)
+ ret = dma_map_sg_attrs(hdev->dev, iod->sg, iod->nsge, dma_dir, DMA_ATTR_NO_WARN);
+ if (!ret)
goto out;
- res = spraid_setup_prps(hdev, iod);
- if (res)
+ ret = spraid_setup_prps(hdev, iod);
+ if (ret)
goto unmap;
+
cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
cmd->common.dptr.prp2 = cpu_to_le64(iod->first_dma);
- return BLK_STS_OK;
+
+ return 0;
unmap:
dma_unmap_sg(hdev->dev, iod->sg, iod->nsge, dma_dir);
@@ -2252,137 +2392,29 @@ static blk_status_t spraid_admin_map_data(struct spraid_dev *hdev, struct reques
return ret;
}
-static blk_status_t spraid_init_admin_iod(struct request *rq, struct spraid_dev *hdev)
-{
- struct spraid_iod *iod = blk_mq_rq_to_pdu(rq);
- int nents = blk_rq_nr_phys_segments(rq);
- unsigned int size = blk_rq_payload_bytes(rq);
-
- if (nents > SPRAID_INT_PAGES || size > SPRAID_INT_BYTES(hdev)) {
- iod->sg = mempool_alloc(hdev->iod_mempool, GFP_ATOMIC);
- if (!iod->sg)
- return BLK_STS_RESOURCE;
- } else {
- iod->sg = iod->inline_sg;
- }
-
- iod->nsge = 0;
- iod->use_sgl = false;
- iod->npages = -1;
- iod->length = size;
- iod->sg_drv_mgmt = true;
-
- return BLK_STS_OK;
-}
-
-static blk_status_t spraid_queue_admin_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct spraid_queue *adminq = hctx->driver_data;
- struct spraid_dev *hdev = adminq->hdev;
- struct request *req = bd->rq;
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- struct spraid_admin_command cmd;
- blk_status_t ret;
-
- ret = spraid_setup_admin_cmd(req, &cmd);
- if (ret)
- goto out;
-
- ret = spraid_init_admin_iod(req, hdev);
- if (ret)
- goto out;
-
- if (blk_rq_nr_phys_segments(req)) {
- ret = spraid_admin_map_data(hdev, req, &cmd);
- if (ret)
- goto cleanup_iod;
- }
-
- blk_mq_start_request(req);
- spraid_submit_cmd(adminq, &cmd);
- return BLK_STS_OK;
-
-cleanup_iod:
- spraid_free_iod_res(hdev, iod);
-out:
- return ret;
-}
-
-static blk_status_t spraid_error_status(struct request *req)
-{
- switch (spraid_admin_req(req)->status & 0x7ff) {
- case SPRAID_SC_SUCCESS:
- return BLK_STS_OK;
- default:
- return BLK_STS_IOERR;
- }
-}
-
-static void spraid_complete_admin_rq(struct request *req)
-{
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- struct spraid_dev *hdev = iod->spraidq->hdev;
-
- if (blk_rq_nr_phys_segments(req))
- spraid_unmap_data(hdev, req);
- blk_mq_end_request(req, spraid_error_status(req));
-}
-
-static int spraid_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
-{
- struct spraid_dev *hdev = data;
- struct spraid_queue *adminq = &hdev->queues[0];
-
- WARN_ON(hctx_idx != 0);
- WARN_ON(hdev->admin_tagset.tags[0] != hctx->tags);
-
- hctx->driver_data = adminq;
- return 0;
-}
-
-static int spraid_admin_init_request(struct blk_mq_tag_set *set, struct request *req,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct spraid_dev *hdev = set->driver_data;
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- struct spraid_queue *adminq = &hdev->queues[0];
-
- WARN_ON(!adminq);
- iod->spraidq = adminq;
- return 0;
-}
-
-static enum blk_eh_timer_return
-spraid_admin_timeout(struct request *req, bool reserved)
-{
- struct spraid_iod *iod = blk_mq_rq_to_pdu(req);
- struct spraid_queue *spraidq = iod->spraidq;
- struct spraid_dev *hdev = spraidq->hdev;
-
- dev_err(hdev->dev, "Admin cid[%d] qid[%d] timeout\n",
- req->tag, spraidq->qid);
-
- if (spraid_poll_cq(spraidq, req->tag)) {
- dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, completion polled\n",
- req->tag, spraidq->qid);
- return BLK_EH_DONE;
- }
-
- spraid_end_admin_request(req, cpu_to_le16(-EINVAL), 0, 0);
- return BLK_EH_DONE;
-}
-
static int spraid_get_ctrl_info(struct spraid_dev *hdev, struct spraid_ctrl_info *ctrl_info)
{
struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
memset(&admin_cmd, 0, sizeof(admin_cmd));
admin_cmd.get_info.opcode = SPRAID_ADMIN_GET_INFO;
admin_cmd.get_info.type = SPRAID_GET_INFO_CTRL;
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(ctrl_info, data_ptr, sizeof(struct spraid_ctrl_info));
- return spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- ctrl_info, sizeof(struct spraid_ctrl_info), 0, 0, 0);
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
}
static int spraid_init_ctrl_info(struct spraid_dev *hdev)
@@ -2416,6 +2448,11 @@ static int spraid_init_ctrl_info(struct spraid_dev *hdev)
dev_info(hdev->dev, "[%s]sn = %s\n", __func__, hdev->ctrl_info->sn);
dev_info(hdev->dev, "[%s]fr = %s\n", __func__, hdev->ctrl_info->fr);
+ if (!hdev->ctrl_info->aerl)
+ hdev->ctrl_info->aerl = 1;
+ if (hdev->ctrl_info->aerl > SPRAID_NR_AEN_COMMANDS)
+ hdev->ctrl_info->aerl = SPRAID_NR_AEN_COMMANDS;
+
return 0;
}
@@ -2444,99 +2481,54 @@ static void spraid_free_iod_ext_mem_pool(struct spraid_dev *hdev)
mempool_destroy(hdev->iod_mempool);
}
-static int spraid_submit_user_cmd(struct request_queue *q, struct spraid_admin_command *cmd,
- void __user *ubuffer, unsigned int bufflen, u32 *result,
- unsigned int timeout)
-{
- struct request *req;
- struct bio *bio = NULL;
- int ret;
-
- req = spraid_alloc_admin_request(q, cmd, 0);
- if (IS_ERR(req))
- return PTR_ERR(req);
-
- req->timeout = timeout ? timeout : ADMIN_TIMEOUT;
- spraid_admin_req(req)->flags |= SPRAID_REQ_USERCMD;
-
- if (ubuffer && bufflen) {
- ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, GFP_KERNEL);
- if (ret)
- goto out;
- bio = req->bio;
- }
- blk_execute_rq(req->q, NULL, req, 0);
- if (spraid_admin_req(req)->flags & SPRAID_REQ_CANCELLED)
- ret = -EINTR;
- else
- ret = spraid_admin_req(req)->status;
- if (result) {
- result[0] = spraid_admin_req(req)->result0;
- result[1] = spraid_admin_req(req)->result1;
- }
- if (bio)
- blk_rq_unmap_user(bio);
-out:
- blk_mq_free_request(req);
- return ret;
-}
-
-static int spraid_user_admin_cmd(struct spraid_dev *hdev,
- struct spraid_passthru_common_cmd __user *ucmd)
+static int spraid_user_admin_cmd(struct spraid_dev *hdev, struct bsg_job *job)
{
- struct spraid_passthru_common_cmd cmd;
+ struct spraid_bsg_request *bsg_req = job->request;
+ struct spraid_passthru_common_cmd *cmd = &(bsg_req->admcmd);
struct spraid_admin_command admin_cmd;
- u32 timeout = 0;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+ u32 result[2] = {0};
int status;
- if (!capable(CAP_SYS_ADMIN)) {
- dev_err(hdev->dev, "Current user hasn't administrator right, reject service\n");
- return -EACCES;
- }
-
- if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
- dev_err(hdev->dev, "Copy command from user space to kernel space failed\n");
- return -EFAULT;
- }
-
- if (cmd.flags) {
- dev_err(hdev->dev, "Invalid flags in user command\n");
- return -EINVAL;
+ if (hdev->state >= SPRAID_RESETTING) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not right\n",
+ __func__, hdev->state);
+ return -EBUSY;
}
- dev_info(hdev->dev, "user_admin_cmd opcode: 0x%x, subopcode: 0x%x\n",
- cmd.opcode, cmd.cdw2 & 0x7ff);
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init\n",
+ __func__, cmd->opcode, cmd->info_0.subopcode);
memset(&admin_cmd, 0, sizeof(admin_cmd));
- admin_cmd.common.opcode = cmd.opcode;
- admin_cmd.common.flags = cmd.flags;
- admin_cmd.common.hdid = cpu_to_le32(cmd.nsid);
- admin_cmd.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
- admin_cmd.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
- admin_cmd.common.cdw10 = cpu_to_le32(cmd.cdw10);
- admin_cmd.common.cdw11 = cpu_to_le32(cmd.cdw11);
- admin_cmd.common.cdw12 = cpu_to_le32(cmd.cdw12);
- admin_cmd.common.cdw13 = cpu_to_le32(cmd.cdw13);
- admin_cmd.common.cdw14 = cpu_to_le32(cmd.cdw14);
- admin_cmd.common.cdw15 = cpu_to_le32(cmd.cdw15);
-
- if (cmd.timeout_ms)
- timeout = msecs_to_jiffies(cmd.timeout_ms);
-
- status = spraid_submit_user_cmd(hdev->admin_q, &admin_cmd,
- (void __user *)(uintptr_t)cmd.addr, cmd.info_1.data_len,
- &cmd.result0, timeout);
-
- dev_info(hdev->dev, "user_admin_cmd status: 0x%x, result0: 0x%x, result1: 0x%x\n",
- status, cmd.result0, cmd.result1);
+ admin_cmd.common.opcode = cmd->opcode;
+ admin_cmd.common.flags = cmd->flags;
+ admin_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ admin_cmd.common.cdw2[0] = cpu_to_le32(cmd->cdw2);
+ admin_cmd.common.cdw2[1] = cpu_to_le32(cmd->cdw3);
+ admin_cmd.common.cdw10 = cpu_to_le32(cmd->cdw10);
+ admin_cmd.common.cdw11 = cpu_to_le32(cmd->cdw11);
+ admin_cmd.common.cdw12 = cpu_to_le32(cmd->cdw12);
+ admin_cmd.common.cdw13 = cpu_to_le32(cmd->cdw13);
+ admin_cmd.common.cdw14 = cpu_to_le32(cmd->cdw14);
+ admin_cmd.common.cdw15 = cpu_to_le32(cmd->cdw15);
+
+ status = spraid_bsg_map_data(hdev, job, &admin_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
+ }
+ status = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, &result[0], &result[1], timeout);
if (status >= 0) {
- if (put_user(cmd.result0, &ucmd->result0))
- return -EFAULT;
- if (put_user(cmd.result1, &ucmd->result1))
- return -EFAULT;
+ job->reply_len = sizeof(result);
+ memcpy(job->reply, result, sizeof(result));
}
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x], status[0x%x] result0[0x%x] result1[0x%x]\n",
+ __func__, cmd->opcode, cmd->info_0.subopcode, status, result[0], result[1]);
+
+ spraid_bsg_unmap_data(hdev, job);
+
return status;
}
@@ -2548,8 +2540,8 @@ static int spraid_alloc_ioq_ptcmds(struct spraid_dev *hdev)
INIT_LIST_HEAD(&hdev->ioq_pt_list);
spin_lock_init(&hdev->ioq_pt_lock);
- hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct spraid_ioq_ptcmd),
- GFP_KERNEL, hdev->numa_node);
+ hdev->ioq_ptcmds = kcalloc_node(ptnum, sizeof(struct spraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
if (!hdev->ioq_ptcmds) {
dev_err(hdev->dev, "Alloc ioq_ptcmds failed\n");
@@ -2567,55 +2559,35 @@ static int spraid_alloc_ioq_ptcmds(struct spraid_dev *hdev)
return 0;
}
-static struct spraid_ioq_ptcmd *spraid_get_ioq_ptcmd(struct spraid_dev *hdev)
+static void spraid_free_ioq_ptcmds(struct spraid_dev *hdev)
{
- struct spraid_ioq_ptcmd *cmd = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&hdev->ioq_pt_lock, flags);
- if (list_empty(&hdev->ioq_pt_list)) {
- spin_unlock_irqrestore(&hdev->ioq_pt_lock, flags);
- dev_err(hdev->dev, "err, ioq ptcmd list empty\n");
- return NULL;
- }
- cmd = list_entry((&hdev->ioq_pt_list)->next, struct spraid_ioq_ptcmd, list);
- list_del_init(&cmd->list);
- spin_unlock_irqrestore(&hdev->ioq_pt_lock, flags);
-
- WRITE_ONCE(cmd->state, SPRAID_CMD_IDLE);
-
- return cmd;
-}
-
-static void spraid_put_ioq_ptcmd(struct spraid_dev *hdev, struct spraid_ioq_ptcmd *cmd)
-{
- unsigned long flags;
+ kfree(hdev->ioq_ptcmds);
+ hdev->ioq_ptcmds = NULL;
- spin_lock_irqsave(&hdev->ioq_pt_lock, flags);
- list_add(&cmd->list, (&hdev->ioq_pt_list)->next);
- spin_unlock_irqrestore(&hdev->ioq_pt_lock, flags);
+ INIT_LIST_HEAD(&hdev->ioq_pt_list);
}
static int spraid_submit_ioq_sync_cmd(struct spraid_dev *hdev, struct spraid_ioq_command *cmd,
- u32 *result, void **sense, u32 timeout)
+ u32 *result, u32 *reslen, u32 timeout)
{
- struct spraid_queue *ioq;
int ret;
dma_addr_t sense_dma;
- struct spraid_ioq_ptcmd *pt_cmd = spraid_get_ioq_ptcmd(hdev);
-
- *sense = NULL;
+ struct spraid_queue *ioq;
+ void *sense_addr = NULL;
+ struct spraid_cmd *pt_cmd = spraid_get_cmd(hdev, SPRAID_CMD_IOPT);
- if (!pt_cmd)
+ if (!pt_cmd) {
+ dev_err(hdev->dev, "err, get ioq cmd failed\n");
return -EFAULT;
+ }
- dev_info(hdev->dev, "[%s] ptcmd, cid[%d], qid[%d]\n", __func__, pt_cmd->cid, pt_cmd->qid);
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
init_completion(&pt_cmd->cmd_done);
ioq = &hdev->queues[pt_cmd->qid];
ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
- pt_cmd->priv = ioq->sense + ret;
+ sense_addr = ioq->sense + ret;
sense_dma = ioq->sense_dma_addr + ret;
cmd->common.sense_addr = cpu_to_le64(sense_dma);
@@ -2625,260 +2597,87 @@ static int spraid_submit_ioq_sync_cmd(struct spraid_dev *hdev, struct spraid_ioq
spraid_submit_cmd(ioq, cmd);
if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
- dev_err(hdev->dev, "[%s] cid[%d], qid[%d] timeout\n",
- __func__, pt_cmd->cid, pt_cmd->qid);
+ dev_err(hdev->dev, "[%s] cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ __func__, pt_cmd->cid, pt_cmd->qid, cmd->common.opcode,
+ (le32_to_cpu(cmd->common.cdw3[0]) & 0xffff));
WRITE_ONCE(pt_cmd->state, SPRAID_CMD_TIMEOUT);
+ spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
return -EINVAL;
}
- if (result) {
- result[0] = pt_cmd->result0;
- result[1] = pt_cmd->result1;
+ if (result && reslen) {
+ if ((pt_cmd->status & 0x17f) == 0x101) {
+ memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+ *reslen = SCSI_SENSE_BUFFERSIZE;
+ }
}
- if ((pt_cmd->status & 0x17f) == 0x101)
- *sense = pt_cmd->priv;
+ spraid_put_cmd(hdev, pt_cmd, SPRAID_CMD_IOPT);
return pt_cmd->status;
}
-static int spraid_user_ioq_cmd(struct spraid_dev *hdev,
- struct spraid_ioq_passthru_cmd __user *ucmd)
+static int spraid_user_ioq_cmd(struct spraid_dev *hdev, struct bsg_job *job)
{
- struct spraid_ioq_passthru_cmd cmd;
+ struct spraid_bsg_request *bsg_req = (struct spraid_bsg_request *)(job->request);
+ struct spraid_ioq_passthru_cmd *cmd = &(bsg_req->ioqcmd);
struct spraid_ioq_command ioq_cmd;
- u32 timeout = 0;
int status = 0;
- u8 *data_ptr = NULL;
- dma_addr_t data_dma;
- enum dma_data_direction dma_dir = DMA_NONE;
- void *sense = NULL;
-
- if (!capable(CAP_SYS_ADMIN)) {
- dev_err(hdev->dev, "Current user hasn't administrator right, reject service\n");
- return -EACCES;
- }
-
- if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
- dev_err(hdev->dev, "Copy command from user space to kernel space failed\n");
- return -EFAULT;
- }
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
- if (cmd.data_len > PAGE_SIZE) {
+ if (cmd->data_len > PAGE_SIZE) {
dev_err(hdev->dev, "[%s] data len bigger than 4k\n", __func__);
return -EFAULT;
}
- dev_info(hdev->dev, "[%s] opcode: 0x%x, subopcode: 0x%x, datalen: %d\n",
- __func__, cmd.opcode, cmd.info_1.subopcode, cmd.data_len);
-
- if (cmd.addr && cmd.data_len) {
- data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
- if (!data_ptr)
- return -ENOMEM;
-
- dma_dir = (cmd.opcode & 1) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ if (hdev->state != SPRAID_LIVE) {
+ dev_err(hdev->dev, "[%s] err, host state:[%d] is not live\n",
+ __func__, hdev->state);
+ return -EBUSY;
}
- if (dma_dir == DMA_TO_DEVICE) {
- if (copy_from_user(data_ptr, (void __user *)(uintptr_t)cmd.addr, cmd.data_len)) {
- dev_err(hdev->dev, "[%s] copy user data failed\n", __func__);
- status = -EFAULT;
- goto free_dma_mem;
- }
- }
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x] init, datalen[%d]\n",
+ __func__, cmd->opcode, cmd->info_1.subopcode, cmd->data_len);
memset(&ioq_cmd, 0, sizeof(ioq_cmd));
- ioq_cmd.common.opcode = cmd.opcode;
- ioq_cmd.common.flags = cmd.flags;
- ioq_cmd.common.hdid = cpu_to_le32(cmd.nsid);
- ioq_cmd.common.sense_len = cpu_to_le16(cmd.info_0.res_sense_len);
- ioq_cmd.common.cdb_len = cmd.info_0.cdb_len;
- ioq_cmd.common.rsvd2 = cmd.info_0.rsvd0;
- ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd.cdw3);
- ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd.cdw4);
- ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd.cdw5);
- ioq_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
-
- ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
- ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
- ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
- ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
- ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
- ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd.data_len);
-
- memcpy(ioq_cmd.common.cdb, &cmd.cdw16, cmd.info_0.cdb_len);
-
- ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd.cdw26[0]);
- ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd.cdw26[1]);
- ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd.cdw26[2]);
- ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd.cdw26[3]);
-
- if (cmd.timeout_ms)
- timeout = msecs_to_jiffies(cmd.timeout_ms);
- timeout = timeout ? timeout : ADMIN_TIMEOUT;
-
- status = spraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, &cmd.result0, &sense, timeout);
-
- if (status >= 0) {
- if (put_user(cmd.result0, &ucmd->result0)) {
- status = -EFAULT;
- goto free_dma_mem;
- }
- if (put_user(cmd.result1, &ucmd->result1)) {
- status = -EFAULT;
- goto free_dma_mem;
- }
- if (dma_dir == DMA_FROM_DEVICE &&
- copy_to_user((void __user *)(uintptr_t)cmd.addr, data_ptr, cmd.data_len)) {
- status = -EFAULT;
- goto free_dma_mem;
- }
- }
-
- if (sense) {
- if (copy_to_user((void *__user *)(uintptr_t)cmd.sense_addr,
- sense, cmd.info_0.res_sense_len)) {
- status = -EFAULT;
- goto free_dma_mem;
- }
- }
-
-free_dma_mem:
- if (data_ptr)
- dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
-
- return status;
-
-}
-
-static int spraid_reset_work_sync(struct spraid_dev *hdev);
-
-static int spraid_user_reset_cmd(struct spraid_dev *hdev)
-{
- int ret;
-
- dev_info(hdev->dev, "[%s] start user reset cmd\n", __func__);
- ret = spraid_reset_work_sync(hdev);
- dev_info(hdev->dev, "[%s] stop user reset cmd[%d]\n", __func__, ret);
-
- return ret;
-}
-
-static int hdev_open(struct inode *inode, struct file *file)
-{
- struct spraid_dev *hdev =
- container_of(inode->i_cdev, struct spraid_dev, cdev);
- file->private_data = hdev;
- return 0;
-}
-
-static long hdev_ioctl(struct file *file, u32 cmd, unsigned long arg)
-{
- struct spraid_dev *hdev = file->private_data;
- void __user *argp = (void __user *)arg;
-
- switch (cmd) {
- case SPRAID_IOCTL_ADMIN_CMD:
- return spraid_user_admin_cmd(hdev, argp);
- case SPRAID_IOCTL_IOQ_CMD:
- return spraid_user_ioq_cmd(hdev, argp);
- case SPRAID_IOCTL_RESET_CMD:
- return spraid_user_reset_cmd(hdev);
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations spraid_dev_fops = {
- .owner = THIS_MODULE,
- .open = hdev_open,
- .unlocked_ioctl = hdev_ioctl,
- .compat_ioctl = hdev_ioctl,
-};
-
-static int spraid_create_cdev(struct spraid_dev *hdev)
-{
- int ret;
-
- device_initialize(&hdev->ctrl_device);
- hdev->ctrl_device.devt = MKDEV(MAJOR(spraid_chr_devt), hdev->instance);
- hdev->ctrl_device.class = spraid_class;
- hdev->ctrl_device.parent = hdev->dev;
- dev_set_drvdata(&hdev->ctrl_device, hdev);
- ret = dev_set_name(&hdev->ctrl_device, "spraid%d", hdev->instance);
- if (ret)
- return ret;
- cdev_init(&hdev->cdev, &spraid_dev_fops);
- hdev->cdev.owner = THIS_MODULE;
- ret = cdev_device_add(&hdev->cdev, &hdev->ctrl_device);
- if (ret) {
- dev_err(hdev->dev, "Add cdev failed, ret: %d", ret);
- put_device(&hdev->ctrl_device);
- kfree_const(hdev->ctrl_device.kobj.name);
- return ret;
- }
-
- return 0;
-}
-
-static inline void spraid_remove_cdev(struct spraid_dev *hdev)
-{
- cdev_device_del(&hdev->cdev, &hdev->ctrl_device);
-}
-
-static const struct blk_mq_ops spraid_admin_mq_ops = {
- .queue_rq = spraid_queue_admin_rq,
- .complete = spraid_complete_admin_rq,
- .init_hctx = spraid_admin_init_hctx,
- .init_request = spraid_admin_init_request,
- .timeout = spraid_admin_timeout,
-};
-
-static void spraid_remove_admin_tagset(struct spraid_dev *hdev)
-{
- if (hdev->admin_q && !blk_queue_dying(hdev->admin_q)) {
- blk_mq_unquiesce_queue(hdev->admin_q);
- blk_cleanup_queue(hdev->admin_q);
- blk_mq_free_tag_set(&hdev->admin_tagset);
+ ioq_cmd.common.opcode = cmd->opcode;
+ ioq_cmd.common.flags = cmd->flags;
+ ioq_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ ioq_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+ ioq_cmd.common.cdb_len = cmd->info_0.cdb_len;
+ ioq_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+ ioq_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+ ioq_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+ ioq_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+ ioq_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+ ioq_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+ ioq_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+ ioq_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+ ioq_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+ ioq_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+ memcpy(ioq_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+ ioq_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+ ioq_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+ ioq_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+ ioq_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+ status = spraid_bsg_map_data(hdev, job, (struct spraid_admin_command *)&ioq_cmd);
+ if (status) {
+ dev_err(hdev->dev, "[%s] err, map data failed\n", __func__);
+ return status;
}
-}
-static int spraid_alloc_admin_tags(struct spraid_dev *hdev)
-{
- if (!hdev->admin_q) {
- hdev->admin_tagset.ops = &spraid_admin_mq_ops;
- hdev->admin_tagset.nr_hw_queues = 1;
+ status = spraid_submit_ioq_sync_cmd(hdev, &ioq_cmd, job->reply, &job->reply_len, timeout);
- hdev->admin_tagset.queue_depth = SPRAID_AQ_MQ_TAG_DEPTH;
- hdev->admin_tagset.timeout = ADMIN_TIMEOUT;
- hdev->admin_tagset.numa_node = hdev->numa_node;
- hdev->admin_tagset.cmd_size =
- spraid_cmd_size(hdev, true, false);
- hdev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
- hdev->admin_tagset.driver_data = hdev;
+ dev_info(hdev->dev, "[%s] opcode[0x%x] subopcode[0x%x], status[0x%x], reply_len[%d]\n",
+ __func__, cmd->opcode, cmd->info_1.subopcode, status, job->reply_len);
- if (blk_mq_alloc_tag_set(&hdev->admin_tagset)) {
- dev_err(hdev->dev, "Allocate admin tagset failed\n");
- return -ENOMEM;
- }
+ spraid_bsg_unmap_data(hdev, job);
- hdev->admin_q = blk_mq_init_queue(&hdev->admin_tagset);
- if (IS_ERR(hdev->admin_q)) {
- dev_err(hdev->dev, "Initialize admin request queue failed\n");
- blk_mq_free_tag_set(&hdev->admin_tagset);
- return -ENOMEM;
- }
- if (!blk_get_queue(hdev->admin_q)) {
- dev_err(hdev->dev, "Get admin request queue failed\n");
- spraid_remove_admin_tagset(hdev);
- hdev->admin_q = NULL;
- return -ENODEV;
- }
- } else {
- blk_mq_unquiesce_queue(hdev->admin_q);
- }
- return 0;
+ return status;
}
static bool spraid_check_scmd_completed(struct scsi_cmnd *scmd)
@@ -2891,7 +2690,7 @@ static bool spraid_check_scmd_completed(struct scsi_cmnd *scmd)
spraid_get_tag_from_scmd(scmd, &hwq, &cid);
spraidq = &hdev->queues[hwq];
if (READ_ONCE(iod->state) == SPRAID_CMD_COMPLETE || spraid_poll_cq(spraidq, cid)) {
- dev_warn(hdev->dev, "cid[%d], qid[%d] has been completed\n",
+ dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n",
cid, spraidq->qid);
return true;
}
@@ -2927,8 +2726,7 @@ static int spraid_send_abort_cmd(struct spraid_dev *hdev, u32 hdid, u16 qid, u16
admin_cmd.abort.sqid = cpu_to_le16(qid);
admin_cmd.abort.cid = cpu_to_le16(cid);
- return spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- NULL, 0, 0, 0, 0);
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
}
/* send reset command by admin queue temporarily */
@@ -2941,8 +2739,7 @@ static int spraid_send_reset_cmd(struct spraid_dev *hdev, int type, u32 hdid)
admin_cmd.reset.hdid = cpu_to_le32(hdid);
admin_cmd.reset.type = type;
- return spraid_submit_admin_sync_cmd(hdev->admin_q, &admin_cmd, NULL,
- NULL, 0, 0, 0, 0);
+ return spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
}
static bool spraid_change_host_state(struct spraid_dev *hdev, enum spraid_state newstate)
@@ -3022,7 +2819,7 @@ static void spraid_back_fault_cqe(struct spraid_queue *ioq, struct spraid_comple
scsi_dma_unmap(scmd);
spraid_free_iod_res(hdev, iod);
scmd->scsi_done(scmd);
- dev_warn(hdev->dev, "Back fault CQE, cid[%d], qid[%d]\n",
+ dev_warn(hdev->dev, "Back fault CQE, cid[%d] qid[%d]\n",
cqe->cmd_id, ioq->qid);
}
@@ -3032,6 +2829,8 @@ static void spraid_back_all_io(struct spraid_dev *hdev)
struct spraid_queue *ioq;
struct spraid_completion cqe = { 0 };
+ scsi_block_requests(hdev->shost);
+
for (i = 1; i <= hdev->shost->nr_hw_queues; i++) {
ioq = &hdev->queues[i];
for (j = 0; j < hdev->shost->can_queue; j++) {
@@ -3039,6 +2838,8 @@ static void spraid_back_all_io(struct spraid_dev *hdev)
spraid_back_fault_cqe(ioq, &cqe);
}
}
+
+ scsi_unblock_requests(hdev->shost);
}
static void spraid_dev_disable(struct spraid_dev *hdev, bool shutdown)
@@ -3106,17 +2907,13 @@ static void spraid_reset_work(struct work_struct *work)
if (ret)
goto pci_disable;
- ret = spraid_alloc_admin_tags(hdev);
- if (ret)
- goto pci_disable;
-
ret = spraid_setup_io_queues(hdev);
if (ret || hdev->online_queues <= hdev->shost->nr_hw_queues)
goto pci_disable;
spraid_change_host_state(hdev, SPRAID_LIVE);
- spraid_send_aen(hdev);
+ spraid_send_all_aen(hdev);
return;
@@ -3174,8 +2971,8 @@ static int spraid_abort_handler(struct scsi_cmnd *scmd)
scsi_print_command(scmd);
- if (!spraid_wait_abnl_cmd_done(iod) || spraid_check_scmd_completed(scmd) ||
- hdev->state != SPRAID_LIVE)
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
return SUCCESS;
hostdata = scmd->device->hostdata;
@@ -3206,8 +3003,8 @@ static int spraid_tgt_reset_handler(struct scsi_cmnd *scmd)
scsi_print_command(scmd);
- if (!spraid_wait_abnl_cmd_done(iod) || spraid_check_scmd_completed(scmd) ||
- hdev->state != SPRAID_LIVE)
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
return SUCCESS;
hostdata = scmd->device->hostdata;
@@ -3241,8 +3038,8 @@ static int spraid_bus_reset_handler(struct scsi_cmnd *scmd)
scsi_print_command(scmd);
- if (!spraid_wait_abnl_cmd_done(iod) || spraid_check_scmd_completed(scmd) ||
- hdev->state != SPRAID_LIVE)
+ if (hdev->state != SPRAID_LIVE || !spraid_wait_abnl_cmd_done(iod) ||
+ spraid_check_scmd_completed(scmd))
return SUCCESS;
hostdata = scmd->device->hostdata;
@@ -3272,7 +3069,7 @@ static int spraid_shost_reset_handler(struct scsi_cmnd *scmd)
struct spraid_dev *hdev = shost_priv(scmd->device->host);
scsi_print_command(scmd);
- if (spraid_check_scmd_completed(scmd) || hdev->state != SPRAID_LIVE)
+ if (hdev->state != SPRAID_LIVE || spraid_check_scmd_completed(scmd))
return SUCCESS;
spraid_get_tag_from_scmd(scmd, &hwq, &cid);
@@ -3288,6 +3085,62 @@ static int spraid_shost_reset_handler(struct scsi_cmnd *scmd)
return SUCCESS;
}
+static pci_ers_result_t spraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter pci error detect, state:%d\n", state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ dev_warn(hdev->dev, "channel is normal, do nothing\n");
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ dev_warn(hdev->dev, "channel io frozen, need reset controller\n");
+
+ scsi_block_requests(hdev->shost);
+
+ spraid_change_host_state(hdev, SPRAID_RESETTING);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_warn(hdev->dev, "channel io failure, request disconnect\n");
+
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t spraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "restart after slot reset\n");
+
+ pci_restore_state(pdev);
+
+ if (!queue_work(spraid_wq, &hdev->reset_work)) {
+ dev_err(hdev->dev, "[%s] err, the device is resetting state\n", __func__);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ flush_work(&hdev->reset_work);
+
+ scsi_unblock_requests(hdev->shost);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void spraid_reset_done(struct pci_dev *pdev)
+{
+ struct spraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter spraid reset done\n");
+}
+
static ssize_t csts_pp_show(struct device *cdev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(cdev);
@@ -3347,7 +3200,7 @@ static ssize_t fw_version_show(struct device *cdev, struct device_attribute *att
struct Scsi_Host *shost = class_to_shost(cdev);
struct spraid_dev *hdev = shost_priv(shost);
- return snprintf(buf, sizeof(hdev->ctrl_info->fr), "%s\n", hdev->ctrl_info->fr);
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fr);
}
static DEVICE_ATTR_RO(csts_pp);
@@ -3365,6 +3218,185 @@ static struct device_attribute *spraid_host_attrs[] = {
NULL,
};
+static int spraid_get_vd_info(struct spraid_dev *hdev, struct spraid_vd_info *vd_info, u16 vid)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+ admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(vd_info, data_ptr, sizeof(struct spraid_vd_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static int spraid_get_bgtask(struct spraid_dev *hdev, struct spraid_bgtask *bgtask)
+{
+ struct spraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t data_dma = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &data_dma, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(data_dma);
+
+ ret = spraid_submit_admin_sync_cmd(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(bgtask, data_ptr, sizeof(struct spraid_bgtask));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, data_dma);
+
+ return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+ ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+ vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret) {
+ vd_info->vd_status = 0;
+ vd_info->rg_id = 0xff;
+ }
+
+ ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ? vd_info->vd_status : 0;
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct spraid_dev *hdev;
+ struct spraid_vd_info *vd_info;
+ struct spraid_bgtask *bgtask;
+ struct spraid_sdev_hostdata *hostdata;
+ u8 rg_id, i, progress = 0;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !SPRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = spraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ goto out;
+
+ rg_id = vd_info->rg_id;
+
+ bgtask = (struct spraid_bgtask *)vd_info;
+ ret = spraid_get_bgtask(hdev, bgtask);
+ if (ret)
+ goto out;
+ for (i = 0; i < bgtask->task_num; i++) {
+ if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+ (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+ progress = bgtask->bgtask[i].progress;
+ }
+
+out:
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+
+static struct device_attribute *spraid_dev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_raid_state,
+ &dev_attr_raid_resync,
+ NULL,
+};
+
+static struct pci_error_handlers spraid_err_handler = {
+ .error_detected = spraid_pci_error_detected,
+ .slot_reset = spraid_pci_slot_reset,
+ .reset_done = spraid_reset_done,
+};
+
+static int spraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ int ret;
+ struct spraid_dev *hdev = shost_priv(shost);
+
+ dev_info(hdev->dev, "[%s] start sysfs host reset cmd\n", __func__);
+ ret = spraid_reset_work_sync(hdev);
+ dev_info(hdev->dev, "[%s] stop sysfs host reset cmd[%d]\n", __func__, ret);
+
+ return ret;
+}
+
static struct scsi_host_template spraid_driver_template = {
.module = THIS_MODULE,
.name = "Ramaxel Logic spraid driver",
@@ -3379,9 +3411,11 @@ static struct scsi_host_template spraid_driver_template = {
.eh_bus_reset_handler = spraid_bus_reset_handler,
.eh_host_reset_handler = spraid_shost_reset_handler,
.change_queue_depth = scsi_change_queue_depth,
- .host_tagset = 1,
+ .host_tagset = 0,
.this_id = -1,
.shost_attrs = spraid_host_attrs,
+ .sdev_attrs = spraid_dev_attrs,
+ .host_reset = spraid_sysfs_host_reset,
};
static void spraid_shutdown(struct pci_dev *pdev)
@@ -3392,11 +3426,53 @@ static void spraid_shutdown(struct pci_dev *pdev)
spraid_disable_admin_queue(hdev, true);
}
+/* bsg dispatch user command */
+static int spraid_bsg_host_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = dev_to_shost(job->dev);
+ struct spraid_dev *hdev = shost_priv(shost);
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct spraid_bsg_request *bsg_req = job->request;
+ int ret = 0;
+
+ dev_info(hdev->dev, "[%s] msgcode[%d], msglen[%d], timeout[%d], req_nsge[%d], req_len[%d]\n",
+ __func__, bsg_req->msgcode, job->request_len, rq->timeout,
+ job->request_payload.sg_cnt, job->request_payload.payload_len);
+
+ job->reply_len = 0;
+
+ switch (bsg_req->msgcode) {
+ case SPRAID_BSG_ADM:
+ ret = spraid_user_admin_cmd(hdev, job);
+ break;
+ case SPRAID_BSG_IOQ:
+ ret = spraid_user_ioq_cmd(hdev, job);
+ break;
+ default:
+ dev_info(hdev->dev, "[%s] unsupport msgcode[%d]\n", __func__, bsg_req->msgcode);
+ break;
+ }
+
+ if (ret > 0)
+ ret = ret | (ret << 8);
+
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+static inline void spraid_remove_bsg(struct spraid_dev *hdev)
+{
+ if (hdev->bsg_queue) {
+ bsg_unregister_queue(hdev->bsg_queue);
+ blk_cleanup_queue(hdev->bsg_queue);
+ }
+}
static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct spraid_dev *hdev;
struct Scsi_Host *shost;
int node, ret;
+ char bsg_name[15];
shost = scsi_host_alloc(&spraid_driver_template, sizeof(*hdev));
if (!shost) {
@@ -3421,10 +3497,10 @@ static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto put_dev;
init_rwsem(&hdev->devices_rwsem);
- INIT_WORK(&hdev->aen_work, spraid_async_event_work);
INIT_WORK(&hdev->scan_work, spraid_scan_work);
INIT_WORK(&hdev->timesyn_work, spraid_timesyn_work);
INIT_WORK(&hdev->reset_work, spraid_reset_work);
+ INIT_WORK(&hdev->fw_act_work, spraid_fw_act_work);
spin_lock_init(&hdev->state_lock);
ret = spraid_alloc_resources(hdev);
@@ -3439,17 +3515,13 @@ static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto pci_disable;
- ret = spraid_alloc_admin_tags(hdev);
- if (ret)
- goto disable_admin_q;
-
ret = spraid_init_ctrl_info(hdev);
if (ret)
- goto free_admin_tagset;
+ goto disable_admin_q;
ret = spraid_alloc_iod_ext_mem_pool(hdev);
if (ret)
- goto free_admin_tagset;
+ goto disable_admin_q;
ret = spraid_setup_io_queues(hdev);
if (ret)
@@ -3464,9 +3536,15 @@ static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto remove_io_queues;
}
- ret = spraid_create_cdev(hdev);
- if (ret)
+ snprintf(bsg_name, sizeof(bsg_name), "spraid%d", shost->host_no);
+ hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name,
+ spraid_bsg_host_dispatch, NULL,
+ spraid_cmd_size(hdev, true, false));
+ if (IS_ERR(hdev->bsg_queue)) {
+ dev_err(hdev->dev, "err, setup bsg failed\n");
+ hdev->bsg_queue = NULL;
goto remove_io_queues;
+ }
if (hdev->online_queues == SPRAID_ADMIN_QUEUE_NUM) {
dev_warn(hdev->dev, "warn only admin queue can be used\n");
@@ -3475,11 +3553,11 @@ static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev->state = SPRAID_LIVE;
- spraid_send_aen(hdev);
+ spraid_send_all_aen(hdev);
ret = spraid_dev_list_init(hdev);
if (ret)
- goto remove_cdev;
+ goto remove_bsg;
ret = spraid_configure_timestamp(hdev);
if (ret)
@@ -3487,20 +3565,18 @@ static int spraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = spraid_alloc_ioq_ptcmds(hdev);
if (ret)
- goto remove_cdev;
+ goto remove_bsg;
scsi_scan_host(hdev->shost);
return 0;
-remove_cdev:
- spraid_remove_cdev(hdev);
+remove_bsg:
+ spraid_remove_bsg(hdev);
remove_io_queues:
spraid_remove_io_queues(hdev);
free_iod_mempool:
spraid_free_iod_ext_mem_pool(hdev);
-free_admin_tagset:
- spraid_remove_admin_tagset(hdev);
disable_admin_q:
spraid_disable_admin_queue(hdev, false);
pci_disable:
@@ -3524,22 +3600,17 @@ static void spraid_remove(struct pci_dev *pdev)
dev_info(hdev->dev, "enter spraid remove\n");
spraid_change_host_state(hdev, SPRAID_DELETING);
+ flush_work(&hdev->reset_work);
- if (!pci_device_is_present(pdev)) {
- scsi_block_requests(shost);
+ if (!pci_device_is_present(pdev))
spraid_back_all_io(hdev);
- scsi_unblock_requests(shost);
- }
- flush_work(&hdev->reset_work);
+ spraid_remove_bsg(hdev);
scsi_remove_host(shost);
-
- kfree(hdev->ioq_ptcmds);
+ spraid_free_ioq_ptcmds(hdev);
kfree(hdev->devices);
- spraid_remove_cdev(hdev);
spraid_remove_io_queues(hdev);
spraid_free_iod_ext_mem_pool(hdev);
- spraid_remove_admin_tagset(hdev);
spraid_disable_admin_queue(hdev, false);
spraid_pci_disable(hdev);
spraid_free_resources(hdev);
@@ -3551,7 +3622,7 @@ static void spraid_remove(struct pci_dev *pdev)
}
static const struct pci_device_id spraid_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC, SPRAID_SERVER_DEVICE_HAB_DID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC, SPRAID_SERVER_DEVICE_HBA_DID) },
{ PCI_DEVICE(PCI_VENDOR_ID_RAMAXEL_LOGIC, SPRAID_SERVER_DEVICE_RAID_DID) },
{ 0, }
};
@@ -3563,6 +3634,7 @@ static struct pci_driver spraid_driver = {
.probe = spraid_probe,
.remove = spraid_remove,
.shutdown = spraid_shutdown,
+ .err_handler = &spraid_err_handler,
};
static int __init spraid_init(void)
@@ -3573,14 +3645,10 @@ static int __init spraid_init(void)
if (!spraid_wq)
return -ENOMEM;
- ret = alloc_chrdev_region(&spraid_chr_devt, 0, SPRAID_MINORS, "spraid");
- if (ret < 0)
- goto destroy_wq;
-
spraid_class = class_create(THIS_MODULE, "spraid");
if (IS_ERR(spraid_class)) {
ret = PTR_ERR(spraid_class);
- goto unregister_chrdev;
+ goto destroy_wq;
}
ret = pci_register_driver(&spraid_driver);
@@ -3591,8 +3659,6 @@ static int __init spraid_init(void)
destroy_class:
class_destroy(spraid_class);
-unregister_chrdev:
- unregister_chrdev_region(spraid_chr_devt, SPRAID_MINORS);
destroy_wq:
destroy_workqueue(spraid_wq);
@@ -3603,12 +3669,11 @@ static void __exit spraid_exit(void)
{
pci_unregister_driver(&spraid_driver);
class_destroy(spraid_class);
- unregister_chrdev_region(spraid_chr_devt, SPRAID_MINORS);
destroy_workqueue(spraid_wq);
ida_destroy(&spraid_instance_ida);
}
-MODULE_AUTHOR("Ramaxel Memory Technology");
+MODULE_AUTHOR("songyl(a)ramaxel.com");
MODULE_DESCRIPTION("Ramaxel Memory Technology SPraid Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SPRAID_DRV_VERSION);
--
2.27.0
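
For orientation, the bsg node registered by this patch is meant to be driven from
userspace through the generic sg_io_v4/SG_IO interface. Below is a minimal,
hypothetical sketch: the node name /dev/bsg/spraid0 follows from the bsg_name set
in the probe path above, while the request layout and the SPRAID_BSG_ADM value
are assumptions for illustration only, not the driver's actual ABI.

/* Hypothetical userspace sketch: send an admin passthrough request to the
 * spraid bsg node.  Only sg_io_v4/SG_IO is the generic kernel bsg ABI; the
 * request structure below is an illustrative stand-in. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>
#include <linux/bsg.h>

struct spraid_bsg_request_sketch {	/* assumed layout, for illustration */
	unsigned int msgcode;		/* e.g. SPRAID_BSG_ADM */
	unsigned char payload[60];
};

int main(void)
{
	struct spraid_bsg_request_sketch req = { .msgcode = 0 /* SPRAID_BSG_ADM, assumed */ };
	struct sg_io_v4 io;
	int fd = open("/dev/bsg/spraid0", O_RDWR);	/* name from bsg_setup_queue() */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request_len = sizeof(req);
	io.request = (uintptr_t)&req;
	io.timeout = 30 * 1000;				/* milliseconds */

	if (ioctl(fd, SG_IO, &io) < 0)
		perror("SG_IO");

	return 0;
}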

[PATCH openEuler-1.0-LTS v2 01/10] arm64: mm: Restore mm_cpumask (revert commit 38d96287504a ("arm64: mm: kill mm_cpumask usage"))
by Yang Yingliang 20 Dec '21
by Yang Yingliang 20 Dec '21
20 Dec '21
From: Takao Indoh <indou.takao(a)fujitsu.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4BLL0
CVE: NA
---------------------------
mm_cpumask was deleted by the commit 38d96287504a ("arm64: mm: kill
mm_cpumask usage") because it was not used at that time. Now this is needed
to find appropriate CPUs for TLB flush, so this patch reverts this commit.
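As a rough illustration of the use case (not part of this patch): once mm_cpumask()
is maintained again, a TLB shootdown can be limited to the CPUs that actually ran
the mm instead of being broadcast. The helper and callback names below are made up
for the sketch.

#include <linux/smp.h>
#include <linux/mm_types.h>

/* Hypothetical names, shown only to illustrate how the restored mask can
 * narrow a TLB flush to the CPUs recorded in it. */
static void my_tlb_flush_func(void *info)
{
	struct mm_struct *mm = info;

	/* arch-specific local TLB invalidation for this mm would go here */
	(void)mm;
}

static void my_flush_tlb_mm(struct mm_struct *mm)
{
	/* run the flush on every CPU that has used this mm, and wait */
	on_each_cpu_mask(mm_cpumask(mm), my_tlb_flush_func, mm, true);
}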
Signed-off-by: QI Fuli <qi.fuli(a)fujitsu.com>
Signed-off-by: Takao Indoh <indou.takao(a)fujitsu.com>
Signed-off-by: Cheng Jian <cj.chengjian(a)huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/arm64/kernel/smp.c | 6 ++++++
arch/arm64/mm/context.c | 2 ++
2 files changed, 8 insertions(+)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index fe562778de352..e86940d353a3e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -387,6 +387,7 @@ asmlinkage notrace void secondary_start_kernel(void)
*/
mmgrab(mm);
current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
@@ -489,6 +490,11 @@ int __cpu_disable(void)
*/
irq_migrate_all_off_this_cpu();
+ /*
+ * Remove this CPU from the vm mask set of all processes.
+ */
+ clear_tasks_mm_cpumask(cpu);
+
return 0;
}
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 2b80ceff5d6c2..27d1f3fec1cc9 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -207,6 +207,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
+ cpumask_clear(mm_cpumask(mm));
return idx2asid(asid) | generation;
}
@@ -254,6 +255,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
switch_mm_fastpath:
arm64_apply_bp_hardening();
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
/*
* Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
--
2.25.1

[PATCH openEuler-1.0-LTS 1/2] audit: improve robustness of the audit queue handling
by Yang Yingliang 20 Dec '21
by Yang Yingliang 20 Dec '21
20 Dec '21
From: Paul Moore <paul(a)paul-moore.com>
hulk inclusion
category: bugfix
bugzilla: 185904, https://gitee.com/openeuler/kernel/issues/I4NAZN
CVE: NA
Reference: https://patchwork.kernel.org/project/linux-audit/patch/163942029335.62691.7…
-------------------------------------------------------------------
If the audit daemon were ever to get stuck in a stopped state the
kernel's kauditd_thread() could get blocked attempting to send audit
records to the userspace audit daemon. With the kernel thread
blocked it is possible that the audit queue could grow unbounded as
certain audit record generating events must be exempt from the queue
limits, or else the system would enter a deadlock state.
This patch resolves this problem by lowering the kernel thread's
socket sending timeout from MAX_SCHEDULE_TIMEOUT to HZ/10 and tweaks
the kauditd_send_queue() function to better manage the various audit
queues when connection problems occur between the kernel and the
audit daemon. With this patch, the backlog may temporarily grow
beyond the defined limits when the audit daemon is stopped and the
system is under heavy audit pressure, but kauditd_thread() will
continue to make progress and drain the queues as it would for other
connection problems. For example, with the audit daemon put into a
stopped state and the system configured to audit every syscall it
was still possible to shutdown the system without a kernel panic,
deadlock, etc.; granted, the system was slow to shutdown but that is
to be expected given the extreme pressure of recording every syscall.
The timeout value of HZ/10 was chosen primarily through
experimentation and this developer's "gut feeling". There is likely
no one perfect value, but as this scenario is limited in scope (root
privileges would be needed to send SIGSTOP to the audit daemon), it
is likely not worth exposing this as a tunable at present. This can
always be done at a later date if it proves necessary.
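For readers skimming the hunks below, the resulting send loop boils down to the
following condensed sketch (simplified, not a literal copy of kernel/audit.c):

/* Sketch of kauditd_send_queue() after this change: transient send errors
 * retry the same skb up to retry_limit times; a fatal error or too many
 * failures stops sending but keeps draining the queue via err_hook. */
static int send_queue_sketch(struct sock *sk, u32 portid,
			     struct sk_buff_head *queue,
			     unsigned int retry_limit,
			     void (*err_hook)(struct sk_buff *))
{
	struct sk_buff *skb;
	unsigned int failed = 0;
	int rc = 0;

	while ((skb = skb_dequeue(queue))) {
		if (!sk) {			/* no auditd connection: just drain */
			if (err_hook)
				(*err_hook)(skb);
			continue;
		}
retry:
		skb_get(skb);			/* extra ref in case the send fails */
		rc = netlink_unicast(sk, skb, portid, 0);
		if (rc < 0) {
			if (++failed >= retry_limit ||
			    rc == -ECONNREFUSED || rc == -EPERM) {
				sk = NULL;	/* give up sending, keep draining */
				if (err_hook)
					(*err_hook)(skb);
				continue;
			}
			goto retry;		/* transient error: resend this skb */
		}
		consume_skb(skb);		/* sent: drop the extra reference */
		failed = 0;
	}

	return rc >= 0 ? 0 : rc;
}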
Cc: stable(a)vger.kernel.org
Fixes: 5b52330bbfe63 ("audit: fix auditd/kernel connection state tracking")
Reported-by: Gaosheng Cui <cuigaosheng1(a)huawei.com>
Tested-by: Gaosheng Cui <cuigaosheng1(a)huawei.com>
Reviewed-by: Richard Guy Briggs <rgb(a)redhat.com>
Signed-off-by: Paul Moore <paul(a)paul-moore.com>
Signed-off-by: Cui GaoSheng <cuigaosheng1(a)huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
kernel/audit.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 45741c3c48a47..968921d376b98 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -726,7 +726,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
{
int rc = 0;
struct sk_buff *skb;
- static unsigned int failed = 0;
+ unsigned int failed = 0;
/* NOTE: kauditd_thread takes care of all our locking, we just use
* the netlink info passed to us (e.g. sk and portid) */
@@ -743,32 +743,30 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
continue;
}
+retry:
/* grab an extra skb reference in case of error */
skb_get(skb);
rc = netlink_unicast(sk, skb, portid, 0);
if (rc < 0) {
- /* fatal failure for our queue flush attempt? */
+ /* send failed - try a few times unless fatal error */
if (++failed >= retry_limit ||
rc == -ECONNREFUSED || rc == -EPERM) {
- /* yes - error processing for the queue */
sk = NULL;
if (err_hook)
(*err_hook)(skb);
- if (!skb_hook)
- goto out;
- /* keep processing with the skb_hook */
+ if (rc == -EAGAIN)
+ rc = 0;
+ /* continue to drain the queue */
continue;
} else
- /* no - requeue to preserve ordering */
- skb_queue_head(queue, skb);
+ goto retry;
} else {
- /* it worked - drop the extra reference and continue */
+ /* skb sent - drop the extra reference and continue */
consume_skb(skb);
failed = 0;
}
}
-out:
return (rc >= 0 ? 0 : rc);
}
@@ -1557,7 +1555,8 @@ static int __net_init audit_net_init(struct net *net)
audit_panic("cannot initialize netlink socket in namespace");
return -ENOMEM;
}
- aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+ /* limit the timeout in case auditd is blocked/stopped */
+ aunet->sk->sk_sndtimeo = HZ / 10;
return 0;
}
--
2.25.1
backport psi feature and avoid kabi change
bugzilla: https://gitee.com/openeuler/kernel/issues/I47QS2
Changes since v9:
1. Third-party kernel modules should not use struct cgroup, to avoid
   kabi changes by __GENKSYMS__ (from Xie Xiuqi).
Baruch Siach (1):
psi: fix reference to kernel commandline enable
Dan Schatzberg (1):
kernel/sched/psi.c: expose pressure metrics on root cgroup
Jason Xing (1):
psi: get poll_work to run when calling poll syscall next time
Johannes Weiner (14):
mm: workingset: tell cache transitions from workingset thrashing
sched: loadavg: consolidate LOAD_INT, LOAD_FRAC, CALC_LOAD
sched: loadavg: make calc_load_n() public
sched: sched.h: make rq locking and clock functions available in
stats.h
sched: introduce this_rq_lock_irq()
psi: pressure stall information for CPU, memory, and IO
psi: cgroup support
psi: make disabling/enabling easier for vendor kernels
psi: fix aggregation idle shut-off
psi: avoid divide-by-zero crash inside virtual machines
fs: kernfs: add poll file operation
kernel: cgroup: add poll file operation
sched/psi: Fix sampling error and rare div0 crashes with cgroups and
high uptime
psi: Fix a division error in psi poll()
Josef Bacik (1):
blk-iolatency: use a percentile approache for ssd's
Liu Xinpeng (2):
psi:enable psi in config
psi:avoid kabi change
Miklos Szeredi (1):
fuse: ignore PG_workingset after stealing
Miles Chen (1):
sched/psi: Correct overly pessimistic size calculation
Odin Ugedal (1):
cgroup: fix psi monitor for root cgroup
Olof Johansson (1):
kernel/sched/psi.c: simplify cgroup_move_task()
Peter Zijlstra (1):
sched/psi: Reduce psimon FIFO priority
Suren Baghdasaryan (9):
psi: introduce state_mask to represent stalled psi states
psi: make psi_enable static
psi: rename psi fields in preparation for psi trigger addition
psi: split update_stats into parts
psi: track changed states
include/: refactor headers to allow kthread.h inclusion in psi_types.h
psi: introduce psi monitor
sched/psi: Do not require setsched permission from the
sched/psi: Fix OOB write when writing 0 bytes to PSI files
Yafang Shao (1):
mm, memcg: add workingset_restore in memory.stat
Documentation/accounting/psi.txt | 180 ++++
Documentation/admin-guide/cgroup-v2.rst | 22 +
Documentation/admin-guide/kernel-parameters.txt | 4 +
arch/arm64/configs/openeuler_defconfig | 2 +
arch/powerpc/platforms/cell/cpufreq_spudemand.c | 2 +-
arch/powerpc/platforms/cell/spufs/sched.c | 9 +-
arch/s390/appldata/appldata_os.c | 4 -
arch/x86/configs/openeuler_defconfig | 2 +
block/blk-iolatency.c | 183 +++-
drivers/cpuidle/governors/menu.c | 4 -
drivers/spi/spi-rockchip.c | 1 +
fs/fuse/dev.c | 1 +
fs/kernfs/file.c | 31 +-
fs/proc/loadavg.c | 3 -
include/linux/cgroup-defs.h | 11 +
include/linux/cgroup.h | 15 +
include/linux/kernfs.h | 8 +
include/linux/kthread.h | 4 +
include/linux/page-flags.h | 5 +
include/linux/psi.h | 65 ++
include/linux/psi_types.h | 173 +++
include/linux/sched.h | 13 +
include/linux/sched/loadavg.h | 24 +-
include/linux/swap.h | 1 +
include/trace/events/mmflags.h | 1 +
init/Kconfig | 28 +
kernel/cgroup/cgroup.c | 131 ++-
kernel/debug/kdb/kdb_main.c | 7 +-
kernel/fork.c | 4 +
kernel/kthread.c | 1 +
kernel/sched/Makefile | 1 +
kernel/sched/core.c | 16 +-
kernel/sched/loadavg.c | 139 ++-
kernel/sched/psi.c | 1292 +++++++++++++++++++++++
kernel/sched/sched.h | 178 ++--
kernel/sched/stats.h | 86 ++
kernel/workqueue.c | 23 +
kernel/workqueue_internal.h | 6 +-
mm/compaction.c | 5 +
mm/filemap.c | 20 +-
mm/huge_memory.c | 1 +
mm/migrate.c | 2 +
mm/page_alloc.c | 9 +
mm/swap_state.c | 1 +
mm/vmscan.c | 10 +
mm/workingset.c | 113 +-
46 files changed, 2562 insertions(+), 279 deletions(-)
create mode 100644 Documentation/accounting/psi.txt
create mode 100644 include/linux/psi.h
create mode 100644 include/linux/psi_types.h
create mode 100644 kernel/sched/psi.c
--
1.8.3.1
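
As a quick orientation to the interface this series backports, the sketch below
arms a PSI trigger on memory pressure the way Documentation/accounting/psi.txt
(added by the series) describes; the threshold values are illustrative and the
/proc/pressure files only exist when PSI is enabled in the kernel config.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* notify when "some" memory stall exceeds 150ms in any 1s window */
	const char trig[] = "some 150000 1000000";
	struct pollfd pfd;

	pfd.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (pfd.fd < 0)
		return 1;

	if (write(pfd.fd, trig, strlen(trig) + 1) < 0)	/* arm the trigger */
		return 1;

	pfd.events = POLLPRI;
	while (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & (POLLERR | POLLNVAL))
			break;			/* monitor torn down */
		if (pfd.revents & POLLPRI)
			printf("memory pressure threshold crossed\n");
	}

	return 0;
}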

[PATCH openEuler-1.0-LTS] block/wbt: fix negative inflight counter when remove scsi device
by Yang Yingliang 18 Dec '21
by Yang Yingliang 18 Dec '21
18 Dec '21
From: Laibin Qiu <qiulaibin(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 185857
CVE: NA
--------------------------------
wbt is now disabled by setting WBT_STATE_OFF_DEFAULT in
wbt_disable_default() when the elevator is switched to bfq. When the
scsi device is later removed, wbt is re-enabled by wbt_enable_default().
This opens a false-positive window between wbt_wait() and wbt_track()
when a write request is submitted.
The following is the scenario that triggers the problem.
T1 (switch elevator)       T2 (submit write)          T3 (remove device)
elevator_switch_mq
  bfq_init_queue
    wbt_disable_default
    <= set rwb->enable_state (OFF)
                           submit_bio
                           blk_mq_make_request
                           rq_qos_throttle
                           <= sees rwb->enable_state (OFF)
                                                      scsi_remove_device
                                                      sd_remove
                                                      del_gendisk
                                                      blk_unregister_queue
                                                      elv_unregister_queue
                                                      wbt_enable_default
                                                      <= set rwb->enable_state (ON)
                           rq_qos_track
                           <= sees rwb->enable_state (ON)
^^^^^^ this request is marked WBT_TRACKED without the matching inflight
increment, so wbt_done() later drops rqw->inflight to -1, which triggers
an IO hang.
Fix this by moving wbt_enable_default() from elv_unregister_queue() to
bfq_exit_queue(), so wbt is only re-enabled when bfq exits.
Fixes: 76a8040817b4b ("blk-wbt: make sure throttle is enabled properly")
Conflict:
block/elevator.c
Signed-off-by: Laibin Qiu <qiulaibin(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
block/bfq-iosched.c | 4 ++++
block/elevator.c | 2 --
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 9078299ffe3da..fe4255698d810 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5381,6 +5381,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
{
struct bfq_data *bfqd = e->elevator_data;
struct bfq_queue *bfqq, *n;
+ struct request_queue *q = bfqd->queue;
hrtimer_cancel(&bfqd->idle_slice_timer);
@@ -5404,6 +5405,9 @@ static void bfq_exit_queue(struct elevator_queue *e)
#endif
kfree(bfqd);
+
+ /* Re-enable throttling in case elevator disabled it */
+ wbt_enable_default(q);
}
static void bfq_init_root_group(struct bfq_group *root_group,
diff --git a/block/elevator.c b/block/elevator.c
index 70ea9f3e64d7e..9621a812319e7 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -876,8 +876,6 @@ void elv_unregister_queue(struct request_queue *q)
kobject_uevent(&e->kobj, KOBJ_REMOVE);
kobject_del(&e->kobj);
e->registered = 0;
- /* Re-enable throttling in case elevator disabled it */
- wbt_enable_default(q);
}
}
--
2.25.1

18 Dec '21
From: Ye Bin <yebin10(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: 185875
CVE: NA
-----------------------------------------------
We got issue as follows:
[ 833.786542] nbd: failed to add new device
[ 833.791613] ==================================================================
[ 833.794918] BUG: KASAN: use-after-free in blk_mq_free_rqs+0x558/0x6c0
[ 833.798108] Read of size 8 at addr ffff800109b7c288 by task kworker/0:3/113
[ 833.804216] CPU: 0 PID: 113 Comm: kworker/0:3 Kdump: loaded Not tainted 4.19.90 #1
[ 833.807635] Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015
[ 833.811798] Workqueue: events __blk_release_queue
[ 833.815035] Call trace:
[ 833.817964] dump_backtrace+0x0/0x3c0
[ 833.821070] show_stack+0x28/0x38
[ 833.824091] dump_stack+0xfc/0x154
[ 833.827042] print_address_description+0x68/0x278
[ 833.830147] kasan_report+0x204/0x330
[ 833.833121] __asan_report_load8_noabort+0x30/0x40
[ 833.836180] blk_mq_free_rqs+0x558/0x6c0
[ 833.839089] blk_mq_sched_tags_teardown+0xf4/0x1c0
[ 833.842035] blk_mq_exit_sched+0x1b8/0x260
[ 833.844878] elevator_exit+0x114/0x148
[ 833.847634] blk_exit_queue+0x68/0xe8
[ 833.850352] __blk_release_queue+0xd0/0x408
[ 833.853113] process_one_work+0x55c/0x10d0
[ 833.855864] worker_thread+0x3d4/0xe30
[ 833.858558] kthread+0x2c8/0x348
[ 833.861139] ret_from_fork+0x10/0x18
[ 833.863714]
[ 833.866000] Allocated by task 186531:
[ 833.868467] kasan_kmalloc+0xe0/0x190
[ 833.870936] kmem_cache_alloc_trace+0x104/0x218
[ 833.873483] nbd_dev_add+0x54/0x760 [nbd]
[ 833.875988] nbd_genl_connect+0x3c4/0x1348 [nbd]
[ 833.878591] genl_family_rcv_msg+0x798/0xa10
[ 833.881113] genl_rcv_msg+0xc0/0x170
[ 833.883489] netlink_rcv_skb+0x1b4/0x370
[ 833.885897] genl_rcv+0x40/0x58
[ 833.888225] netlink_unicast+0x4bc/0x660
[ 833.890661] netlink_sendmsg+0x880/0xa60
[ 833.893112] sock_sendmsg+0xb8/0x110
[ 833.895513] ____sys_sendmsg+0x570/0x698
[ 833.897927] ___sys_sendmsg+0x108/0x188
[ 833.900350] __sys_sendmsg+0xe8/0x198
[ 833.900360] __arm64_sys_sendmsg+0x78/0xa8
[ 833.906911] el0_svc_common+0x10c/0x330
[ 833.909289] el0_svc_handler+0x60/0xd0
[ 833.911660] el0_svc+0x8/0x1b0
[ 833.913963]
[ 833.916117] Freed by task 186531:
[ 833.918445] __kasan_slab_free+0x120/0x228
[ 833.920860] kasan_slab_free+0x10/0x18
[ 833.923193] kfree+0x80/0x1f0
[ 833.925392] nbd_dev_add+0xf0/0x760 [nbd]
[ 833.927686] nbd_genl_connect+0x3c4/0x1348 [nbd]
[ 833.929989] genl_family_rcv_msg+0x798/0xa10
[ 833.932231] genl_rcv_msg+0xc0/0x170
[ 833.934335] netlink_rcv_skb+0x1b4/0x370
[ 833.936444] genl_rcv+0x40/0x58
[ 833.938460] netlink_unicast+0x4bc/0x660
[ 833.940570] netlink_sendmsg+0x880/0xa60
[ 833.942682] sock_sendmsg+0xb8/0x110
[ 833.944745] ____sys_sendmsg+0x570/0x698
[ 833.946849] ___sys_sendmsg+0x108/0x188
[ 833.948924] __sys_sendmsg+0xe8/0x198
[ 833.950980] __arm64_sys_sendmsg+0x78/0xa8
[ 833.953059] el0_svc_common+0x10c/0x330
[ 833.955121] el0_svc_handler+0x60/0xd0
[ 833.957143] el0_svc+0x8/0x1b0
[ 833.959088]
[ 833.960846] The buggy address belongs to the object at ffff800109b7c280
[ 833.960846] which belongs to the cache kmalloc-512 of size 512
[ 833.965502] The buggy address is located 8 bytes inside of
[ 833.965502] 512-byte region [ffff800109b7c280, ffff800109b7c480)
[ 833.970108] The buggy address belongs to the page:
[ 833.972390] page:ffff7e000426df00 count:1 mapcount:0 mapping:ffff8000c0003800 index:0x0 compound_mapcount: 0
[ 833.975269] flags: 0x7ffff0000008100(slab|head)
[ 833.977640] raw: 07ffff0000008100 ffff7e00035d1600 0000000200000002 ffff8000c0003800
[ 833.980426] raw: 0000000000000000 00000000000c000c 00000001ffffffff 0000000000000000
[ 833.983212] page dumped because: kasan: bad access detected
[ 833.985778]
[ 833.987935] Memory state around the buggy address:
[ 833.990448] ffff800109b7c180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 833.993265] ffff800109b7c200: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[ 833.996097] >ffff800109b7c280: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 833.998930] ^
[ 834.001384] ffff800109b7c300: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 834.004329] ffff800109b7c380: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[ 834.007264] ==================================================================
Commit 98fd847a495f added a check on "disk->first_minor"; when that check
fails, the tag set is freed and the subsequent put_disk() frees the
request_queue, so the queue teardown then walks the already-freed tags:
blk_mq_free_tag_set
  blk_mq_free_map_and_requests
    blk_mq_free_rqs
put_disk:
  __blk_release_queue
    blk_exit_queue
      elevator_exit
        blk_mq_exit_sched
          blk_mq_sched_tags_teardown
            blk_mq_free_rqs  --> will trigger the UAF
To address this issue, move the 'disk->first_minor' check to the very
beginning of nbd_dev_add(), before anything is allocated.
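A quick worked example of what the relocated check rejects (values are
illustrative; part_shift is derived from the max_part module parameter):

/*
 * With part_shift = 5 (up to 32 minors per device) and
 * MINORMASK = (1U << 20) - 1 = 0xfffff:
 *
 *   index = 0x7fff   -> first_minor = 0x7fff << 5 = 0xfffe0    (valid)
 *   index = 0x8000   -> first_minor = 0x100000 > MINORMASK     (rejected)
 *   index = 1 << 26  -> first_minor = 1 << 31, negative as int,
 *                       so "first_minor < index" catches it    (rejected)
 *
 * Doing this before any allocation means a bad index can no longer reach
 * the tag-set/put_disk teardown path that caused the use-after-free above.
 */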
Fixes: 98fd847a495f ("nbd: add sanity check for first_minor")
Signed-off-by: Ye Bin <yebin10(a)huawei.com>
Reviewed-by: Yu Kuai <yukuai3(a)huawei.com>
Reviewed-by: Jason Yan <yanaijie(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/block/nbd.c | 22 ++++++++++------------
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 775cbb4c1bbcd..a35189098581b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1702,6 +1702,15 @@ static int nbd_dev_add(int index)
struct gendisk *disk;
struct request_queue *q;
int err = -ENOMEM;
+ int first_minor = index << part_shift;
+
+ /*
+ * Too big index can cause duplicate creation of sysfs files/links,
+ * because MKDEV() expect that the max first minor is MINORMASK, or
+ * index << part_shift can overflow.
+ */
+ if (first_minor < index || first_minor > MINORMASK)
+ return -EINVAL;
nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
if (!nbd)
@@ -1765,18 +1774,7 @@ static int nbd_dev_add(int index)
refcount_set(&nbd->refs, 1);
INIT_LIST_HEAD(&nbd->list);
disk->major = NBD_MAJOR;
-
- /*
- * Too big index can cause duplicate creation of sysfs files/links,
- * because MKDEV() expect that the max first minor is MINORMASK, or
- * index << part_shift can overflow.
- */
- disk->first_minor = index << part_shift;
- if (disk->first_minor < index || disk->first_minor > MINORMASK) {
- err = -EINVAL;
- goto out_free_tags;
- }
-
+ disk->first_minor = first_minor;
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
--
2.25.1

[PATCH openEuler-1.0-LTS 1/5] block, bfq: improve asymmetric scenarios detection
by Yang Yingliang 17 Dec '21
by Yang Yingliang 17 Dec '21
17 Dec '21
From: Federico Motta <federico(a)willer.it>
mainline inclusion
from mainline-v4.20-rc1
commit 2d29c9f89fcd9bf408fcdaaf515c90a169f22ecd
category: bugfix
bugzilla: 185905
CVE: NA
---------------------------
bfq defines as asymmetric a scenario where an active entity, say E
(representing either a single bfq_queue or a group of other entities),
has a higher weight than some other entities. If the entity E does sync
I/O in such a scenario, then bfq plugs the dispatch of the I/O of the
other entities in the following situation: E is in service but
temporarily has no pending I/O request. In fact, without this plugging,
all the times that E stops being temporarily idle, it may find the
internal queues of the storage device already filled with an
out-of-control number of extra requests, from other entities. So E may
have to wait for the service of these extra requests, before finally
having its own requests served. This may easily break service
guarantees, with E getting less than its fair share of the device
throughput. Usually, the end result is that E gets the same fraction of
the throughput as the other entities, instead of getting more, according
to its higher weight.
Yet there are two other more subtle cases where E, even if its weight is
actually equal to or even lower than the weight of any other active
entities, may get less than its fair share of the throughput in case the
above I/O plugging is not performed:
1. other entities issue larger requests than E;
2. other entities contain more active child entities than E (or in
general tend to have more backlog than E).
In the first case, other entities may get more service than E because
they get larger requests, than those of E, served during the temporary
idle periods of E. In the second case, other entities get more service
because, by having many child entities, they have many requests ready
for dispatching while E is temporarily idle.
This commit addresses this issue by extending the definition of
asymmetric scenario: a scenario is asymmetric when
- active entities representing bfq_queues have differentiated weights,
as in the original definition
or (inclusive)
- one or more entities representing groups of entities are active.
This broader definition makes sure that I/O plugging will be performed
in all the above cases, provided that there is at least one active
group. Of course, this definition is very coarse, so it will trigger
I/O plugging also in cases where it is not needed, such as, e.g.,
multiple active entities with just one child each, and all with the same
I/O-request size. The reason for this coarse definition is just that a
finer-grained definition would be rather heavy to compute.
On the opposite end, even this new definition does not trigger I/O
plugging in all cases where there is no active group, and all bfq_queues
have the same weight. So, in these cases some unfairness may occur if
there are asymmetries in I/O-request sizes. We made this choice because
I/O plugging may lower throughput, and probably a user that has not
created any group cares more about throughput than about perfect
fairness. At any rate, as for possible applications that may care about
service guarantees, bfq already guarantees a high responsiveness and a
low latency to soft real-time applications automatically.
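Condensed, the broadened test reads as follows (a sketch of the logic in the
hunks below, not a literal copy):

/* Sketch: the scenario counts as symmetric only when all active queues
 * share a single weight and no group entity is active. */
static bool bfq_symmetric_scenario_sketch(struct bfq_data *bfqd)
{
	bool varied_queue_weights =
		!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
		(bfqd->queue_weights_tree.rb_node->rb_left ||
		 bfqd->queue_weights_tree.rb_node->rb_right);

	return !(varied_queue_weights || bfqd->num_active_groups > 0);
}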
Signed-off-by: Federico Motta <federico(a)willer.it>
Signed-off-by: Paolo Valente <paolo.valente(a)linaro.org>
Signed-off-by: Jens Axboe <axboe(a)kernel.dk>
Signed-off-by: Yu Kuai <yukuai3(a)huawei.com>
Reviewed-by: Hou Tao <houtao1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
block/bfq-iosched.c | 223 ++++++++++++++++++++++++--------------------
block/bfq-iosched.h | 27 +++---
block/bfq-wf2q.c | 35 +++----
3 files changed, 154 insertions(+), 131 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 4686ca1e93afe..e2b5c3ccd5502 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -625,12 +625,13 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
}
/*
- * Tell whether there are active queues or groups with differentiated weights.
+ * Tell whether there are active queues with different weights or
+ * active groups.
*/
-static bool bfq_differentiated_weights(struct bfq_data *bfqd)
+static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
{
/*
- * For weights to differ, at least one of the trees must contain
+ * For queue weights to differ, queue_weights_tree must contain
* at least two nodes.
*/
return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
@@ -638,9 +639,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
) ||
- (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
- (bfqd->group_weights_tree.rb_node->rb_left ||
- bfqd->group_weights_tree.rb_node->rb_right)
+ (bfqd->num_active_groups > 0
#endif
);
}
@@ -658,26 +657,25 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
* 3) all active groups at the same level in the groups tree have the same
* number of children.
*
- * Unfortunately, keeping the necessary state for evaluating exactly the
- * above symmetry conditions would be quite complex and time-consuming.
- * Therefore this function evaluates, instead, the following stronger
- * sub-conditions, for which it is much easier to maintain the needed
- * state:
+ * Unfortunately, keeping the necessary state for evaluating exactly
+ * the last two symmetry sub-conditions above would be quite complex
+ * and time consuming. Therefore this function evaluates, instead,
+ * only the following stronger two sub-conditions, for which it is
+ * much easier to maintain the needed state:
* 1) all active queues have the same weight,
- * 2) all active groups have the same weight,
- * 3) all active groups have at most one active child each.
- * In particular, the last two conditions are always true if hierarchical
- * support and the cgroups interface are not enabled, thus no state needs
- * to be maintained in this case.
+ * 2) there are no active groups.
+ * In particular, the last condition is always true if hierarchical
+ * support or the cgroups interface are not enabled, thus no state
+ * needs to be maintained in this case.
*/
static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
{
- return !bfq_differentiated_weights(bfqd);
+ return !bfq_varied_queue_weights_or_active_groups(bfqd);
}
/*
* If the weight-counter tree passed as input contains no counter for
- * the weight of the input entity, then add that counter; otherwise just
+ * the weight of the input queue, then add that counter; otherwise just
* increment the existing counter.
*
* Note that weight-counter trees contain few nodes in mostly symmetric
@@ -688,25 +686,25 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
* In most scenarios, the rate at which nodes are created/destroyed
* should be low too.
*/
-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
+void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct rb_root *root)
{
+ struct bfq_entity *entity = &bfqq->entity;
struct rb_node **new = &(root->rb_node), *parent = NULL;
/*
- * Do not insert if the entity is already associated with a
+ * Do not insert if the queue is already associated with a
* counter, which happens if:
- * 1) the entity is associated with a queue,
- * 2) a request arrival has caused the queue to become both
+ * 1) a request arrival has caused the queue to become both
* non-weight-raised, and hence change its weight, and
* backlogged; in this respect, each of the two events
* causes an invocation of this function,
- * 3) this is the invocation of this function caused by the
+ * 2) this is the invocation of this function caused by the
* second event. This second invocation is actually useless,
* and we handle this fact by exiting immediately. More
* efficient or clearer solutions might possibly be adopted.
*/
- if (entity->weight_counter)
+ if (bfqq->weight_counter)
return;
while (*new) {
@@ -716,7 +714,7 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
parent = *new;
if (entity->weight == __counter->weight) {
- entity->weight_counter = __counter;
+ bfqq->weight_counter = __counter;
goto inc_counter;
}
if (entity->weight < __counter->weight)
@@ -725,66 +723,67 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
new = &((*new)->rb_right);
}
- entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
- GFP_ATOMIC);
+ bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
+ GFP_ATOMIC);
/*
* In the unlucky event of an allocation failure, we just
- * exit. This will cause the weight of entity to not be
- * considered in bfq_differentiated_weights, which, in its
- * turn, causes the scenario to be deemed wrongly symmetric in
- * case entity's weight would have been the only weight making
- * the scenario asymmetric. On the bright side, no unbalance
- * will however occur when entity becomes inactive again (the
- * invocation of this function is triggered by an activation
- * of entity). In fact, bfq_weights_tree_remove does nothing
- * if !entity->weight_counter.
+ * exit. This will cause the weight of queue to not be
+ * considered in bfq_varied_queue_weights_or_active_groups,
+ * which, in its turn, causes the scenario to be deemed
+ * wrongly symmetric in case bfqq's weight would have been
+ * the only weight making the scenario asymmetric. On the
+ * bright side, no unbalance will however occur when bfqq
+ * becomes inactive again (the invocation of this function
+ * is triggered by an activation of queue). In fact,
+ * bfq_weights_tree_remove does nothing if
+ * !bfqq->weight_counter.
*/
- if (unlikely(!entity->weight_counter))
+ if (unlikely(!bfqq->weight_counter))
return;
- entity->weight_counter->weight = entity->weight;
- rb_link_node(&entity->weight_counter->weights_node, parent, new);
- rb_insert_color(&entity->weight_counter->weights_node, root);
+ bfqq->weight_counter->weight = entity->weight;
+ rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
+ rb_insert_color(&bfqq->weight_counter->weights_node, root);
inc_counter:
- entity->weight_counter->num_active++;
+ bfqq->weight_counter->num_active++;
}
/*
- * Decrement the weight counter associated with the entity, and, if the
+ * Decrement the weight counter associated with the queue, and, if the
* counter reaches 0, remove the counter from the tree.
* See the comments to the function bfq_weights_tree_add() for considerations
* about overhead.
*/
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
- struct bfq_entity *entity,
+ struct bfq_queue *bfqq,
struct rb_root *root)
{
- if (!entity->weight_counter)
+ if (!bfqq->weight_counter)
return;
- entity->weight_counter->num_active--;
- if (entity->weight_counter->num_active > 0)
+ bfqq->weight_counter->num_active--;
+ if (bfqq->weight_counter->num_active > 0)
goto reset_entity_pointer;
- rb_erase(&entity->weight_counter->weights_node, root);
- kfree(entity->weight_counter);
+ rb_erase(&bfqq->weight_counter->weights_node, root);
+ kfree(bfqq->weight_counter);
reset_entity_pointer:
- entity->weight_counter = NULL;
+ bfqq->weight_counter = NULL;
}
/*
- * Invoke __bfq_weights_tree_remove on bfqq and all its inactive
- * parent entities.
+ * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
+ * of active groups for each queue's inactive parent entity.
*/
void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
struct bfq_entity *entity = bfqq->entity.parent;
- __bfq_weights_tree_remove(bfqd, &bfqq->entity,
+ __bfq_weights_tree_remove(bfqd, bfqq,
&bfqd->queue_weights_tree);
for_each_entity(entity) {
@@ -798,17 +797,13 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
* next_in_service for details on why
* in_service_entity must be checked too).
*
- * As a consequence, the weight of entity is
- * not to be removed. In addition, if entity
- * is active, then its parent entities are
- * active as well, and thus their weights are
- * not to be removed either. In the end, this
- * loop must stop here.
+ * As a consequence, its parent entities are
+ * active as well, and thus this loop must
+ * stop here.
*/
break;
}
- __bfq_weights_tree_remove(bfqd, entity,
- &bfqd->group_weights_tree);
+ bfqd->num_active_groups--;
}
}
@@ -3539,9 +3534,11 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* symmetric scenario where:
* (i) each of these processes must get the same throughput as
* the others;
- * (ii) all these processes have the same I/O pattern
- (either sequential or random).
- * In fact, in such a scenario, the drive will tend to treat
+ * (ii) the I/O of each process has the same properties, in
+ * terms of locality (sequential or random), direction
+ * (reads or writes), request sizes, greediness
+ * (from I/O-bound to sporadic), and so on.
+ * In fact, in such a scenario, the drive tends to treat
* the requests of each of these processes in about the same
* way as the requests of the others, and thus to provide
* each of these processes with about the same throughput
@@ -3550,18 +3547,50 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* certainly needed to guarantee that bfqq receives its
* assigned fraction of the device throughput (see [1] for
* details).
+ * The problem is that idling may significantly reduce
+ * throughput with certain combinations of types of I/O and
+ * devices. An important example is sync random I/O, on flash
+ * storage with command queueing. So, unless bfqq falls in the
+ * above cases where idling also boosts throughput, it would
+ * be important to check conditions (i) and (ii) accurately,
+ * so as to avoid idling when not strictly needed for service
+ * guarantees.
+ *
+ * Unfortunately, it is extremely difficult to thoroughly
+ * check condition (ii). And, in case there are active groups,
+ * it becomes very difficult to check condition (i) too. In
+ * fact, if there are active groups, then, for condition (i)
+ * to become false, it is enough that an active group contains
+ * more active processes or sub-groups than some other active
+ * group. We address this issue with the following bi-modal
+ * behavior, implemented in the function
+ * bfq_symmetric_scenario().
*
- * We address this issue by controlling, actually, only the
- * symmetry sub-condition (i), i.e., provided that
- * sub-condition (i) holds, idling is not performed,
- * regardless of whether sub-condition (ii) holds. In other
- * words, only if sub-condition (i) holds, then idling is
+ * If there are active groups, then the scenario is tagged as
+ * asymmetric, conservatively, without checking any of the
+ * conditions (i) and (ii). So the device is idled for bfqq.
+ * This behavior matches also the fact that groups are created
+ * exactly if controlling I/O (to preserve bandwidth and
+ * latency guarantees) is a primary concern.
+ *
+ * On the opposite end, if there are no active groups, then
+ * only condition (i) is actually controlled, i.e., provided
+ * that condition (i) holds, idling is not performed,
+ * regardless of whether condition (ii) holds. In other words,
+ * only if condition (i) does not hold, then idling is
* allowed, and the device tends to be prevented from queueing
- * many requests, possibly of several processes. The reason
- * for not controlling also sub-condition (ii) is that we
- * exploit preemption to preserve guarantees in case of
- * symmetric scenarios, even if (ii) does not hold, as
- * explained in the next two paragraphs.
+ * many requests, possibly of several processes. Since there
+ * are no active groups, then, to control condition (i) it is
+ * enough to check whether all active queues have the same
+ * weight.
+ *
+ * Not checking condition (ii) evidently exposes bfqq to the
+ * risk of getting less throughput than its fair share.
+ * However, for queues with the same weight, a further
+ * mechanism, preemption, mitigates or even eliminates this
+ * problem. And it does so without consequences on overall
+ * throughput. This mechanism and its benefits are explained
+ * in the next three paragraphs.
*
* Even if a queue, say Q, is expired when it remains idle, Q
* can still preempt the new in-service queue if the next
@@ -3575,11 +3604,7 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* idling allows the internal queues of the device to contain
* many requests, and thus to reorder requests, we can rather
* safely assume that the internal scheduler still preserves a
- * minimum of mid-term fairness. The motivation for using
- * preemption instead of idling is that, by not idling,
- * service guarantees are preserved without minimally
- * sacrificing throughput. In other words, both a high
- * throughput and its desired distribution are obtained.
+ * minimum of mid-term fairness.
*
* More precisely, this preemption-based, idleless approach
* provides fairness in terms of IOPS, and not sectors per
@@ -3598,27 +3623,27 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* 1024/8 times as high as the service received by the other
* queue.
*
- * On the other hand, device idling is performed, and thus
- * pure sector-domain guarantees are provided, for the
- * following queues, which are likely to need stronger
- * throughput guarantees: weight-raised queues, and queues
- * with a higher weight than other queues. When such queues
- * are active, sub-condition (i) is false, which triggers
- * device idling.
+ * The motivation for using preemption instead of idling (for
+ * queues with the same weight) is that, by not idling,
+ * service guarantees are preserved (completely or at least in
+ * part) without minimally sacrificing throughput. And, if
+ * there is no active group, then the primary expectation for
+ * this device is probably a high throughput.
*
- * According to the above considerations, the next variable is
- * true (only) if sub-condition (i) holds. To compute the
- * value of this variable, we not only use the return value of
- * the function bfq_symmetric_scenario(), but also check
- * whether bfqq is being weight-raised, because
- * bfq_symmetric_scenario() does not take into account also
- * weight-raised queues (see comments on
- * bfq_weights_tree_add()). In particular, if bfqq is being
- * weight-raised, it is important to idle only if there are
- * other, non-weight-raised queues that may steal throughput
- * to bfqq. Actually, we should be even more precise, and
- * differentiate between interactive weight raising and
- * soft real-time weight raising.
+ * We are now left only with explaining the additional
+ * compound condition that is checked below for deciding
+ * whether the scenario is asymmetric. To explain this
+ * compound condition, we need to add that the function
+ * bfq_symmetric_scenario checks the weights of only
+ * non-weight-raised queues, for efficiency reasons (see
+ * comments on bfq_weights_tree_add()). Then the fact that
+ * bfqq is weight-raised is checked explicitly here. More
+ * precisely, the compound condition below takes into account
+ * also the fact that, even if bfqq is being weight-raised,
+ * the scenario is still symmetric if all active queues happen
+ * to be weight-raised. Actually, we should be even more
+ * precise here, and differentiate between interactive weight
+ * raising and soft real-time weight raising.
*
* As a side note, it is worth considering that the above
* device-idling countermeasures may however fail in the
@@ -5408,7 +5433,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
bfqd->queue_weights_tree = RB_ROOT;
- bfqd->group_weights_tree = RB_ROOT;
+ bfqd->num_active_groups = 0;
INIT_LIST_HEAD(&bfqd->active_list);
INIT_LIST_HEAD(&bfqd->idle_list);
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index a41e9884f2dd2..e9960727cb565 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -108,15 +108,14 @@ struct bfq_sched_data {
};
/**
- * struct bfq_weight_counter - counter of the number of all active entities
+ * struct bfq_weight_counter - counter of the number of all active queues
* with a given weight.
*/
struct bfq_weight_counter {
- unsigned int weight; /* weight of the entities this counter refers to */
- unsigned int num_active; /* nr of active entities with this weight */
+ unsigned int weight; /* weight of the queues this counter refers to */
+ unsigned int num_active; /* nr of active queues with this weight */
/*
- * Weights tree member (see bfq_data's @queue_weights_tree and
- * @group_weights_tree)
+ * Weights tree member (see bfq_data's @queue_weights_tree)
*/
struct rb_node weights_node;
};
@@ -151,8 +150,6 @@ struct bfq_weight_counter {
struct bfq_entity {
/* service_tree member */
struct rb_node rb_node;
- /* pointer to the weight counter associated with this entity */
- struct bfq_weight_counter *weight_counter;
/*
* Flag, true if the entity is on a tree (either the active or
@@ -266,6 +263,9 @@ struct bfq_queue {
/* entity representing this queue in the scheduler */
struct bfq_entity entity;
+ /* pointer to the weight counter associated with this entity */
+ struct bfq_weight_counter *weight_counter;
+
/* maximum budget allowed from the feedback mechanism */
int max_budget;
/* budget expiration (in jiffies) */
@@ -449,14 +449,9 @@ struct bfq_data {
*/
struct rb_root queue_weights_tree;
/*
- * rbtree of non-queue @bfq_entity weight counters, sorted by
- * weight. Used to keep track of whether all @bfq_groups have
- * the same weight. The tree contains one counter for each
- * distinct weight associated to some active @bfq_group (see
- * the comments to the functions bfq_weights_tree_[add|remove]
- * for further details).
+ * number of groups with requests still waiting for completion
*/
- struct rb_root group_weights_tree;
+ unsigned int num_active_groups;
/*
* Number of bfq_queues containing requests (including the
@@ -854,10 +849,10 @@ struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync);
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
+void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct rb_root *root);
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
- struct bfq_entity *entity,
+ struct bfq_queue *bfqq,
struct rb_root *root);
void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq);
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index e830715fe15d6..cdd164c82398a 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -788,25 +788,29 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
new_weight = entity->orig_weight *
(bfqq ? bfqq->wr_coeff : 1);
/*
- * If the weight of the entity changes, remove the entity
- * from its old weight counter (if there is a counter
- * associated with the entity), and add it to the counter
- * associated with its new weight.
+ * If the weight of the entity changes, and the entity is a
+ * queue, remove the entity from its old weight counter (if
+ * there is a counter associated with the entity).
*/
if (prev_weight != new_weight) {
- root = bfqq ? &bfqd->queue_weights_tree :
- &bfqd->group_weights_tree;
- __bfq_weights_tree_remove(bfqd, entity, root);
+ if (bfqq) {
+ root = &bfqd->queue_weights_tree;
+ __bfq_weights_tree_remove(bfqd, bfqq, root);
+ } else
+ bfqd->num_active_groups--;
}
entity->weight = new_weight;
/*
- * Add the entity to its weights tree only if it is
- * not associated with a weight-raised queue.
+ * Add the entity, if it is not a weight-raised queue,
+ * to the counter associated with its new weight.
*/
- if (prev_weight != new_weight &&
- (bfqq ? bfqq->wr_coeff == 1 : 1))
- /* If we get here, root has been initialized. */
- bfq_weights_tree_add(bfqd, entity, root);
+ if (prev_weight != new_weight) {
+ if (bfqq && bfqq->wr_coeff == 1) {
+ /* If we get here, root has been initialized. */
+ bfq_weights_tree_add(bfqd, bfqq, root);
+ } else
+ bfqd->num_active_groups++;
+ }
new_st->wsum += entity->weight;
@@ -1014,8 +1018,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
container_of(entity, struct bfq_group, entity);
struct bfq_data *bfqd = bfqg->bfqd;
- bfq_weights_tree_add(bfqg->bfqd, entity,
- &bfqd->group_weights_tree);
+ bfqd->num_active_groups++;
}
#endif
@@ -1702,7 +1705,7 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
if (!bfqq->dispatched)
if (bfqq->wr_coeff == 1)
- bfq_weights_tree_add(bfqd, &bfqq->entity,
+ bfq_weights_tree_add(bfqd, bfqq,
&bfqd->queue_weights_tree);
if (bfqq->wr_coeff > 1)
--
2.25.1

[PATCH openEuler-1.0-LTS 01/10] arm64: mm: Restore mm_cpumask (revert commit 38d96287504a ("arm64: mm: kill mm_cpumask usage"))
by Yang Yingliang 17 Dec '21
by Yang Yingliang 17 Dec '21
17 Dec '21
From: Takao Indoh <indou.takao(a)fujitsu.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4BLL0
CVE: NA
---------------------------
mm_cpumask was deleted by the commit 38d96287504a ("arm64: mm: kill
mm_cpumask usage") because it was not used at that time. Now this is needed
to find appropriate CPUs for TLB flush, so this patch reverts this commit.
Signed-off-by: QI Fuli <qi.fuli(a)fujitsu.com>
Signed-off-by: Takao Indoh <indou.takao(a)fujitsu.com>
Signed-off-by: Cheng Jian <cj.chengjian(a)huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/arm64/kernel/smp.c | 6 ++++++
arch/arm64/mm/context.c | 2 ++
2 files changed, 8 insertions(+)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index fe562778de352..e86940d353a3e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -387,6 +387,7 @@ asmlinkage notrace void secondary_start_kernel(void)
*/
mmgrab(mm);
current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
@@ -489,6 +490,11 @@ int __cpu_disable(void)
*/
irq_migrate_all_off_this_cpu();
+ /*
+ * Remove this CPU from the vm mask set of all processes.
+ */
+ clear_tasks_mm_cpumask(cpu);
+
return 0;
}
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 2b80ceff5d6c2..27d1f3fec1cc9 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -207,6 +207,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
+ cpumask_clear(mm_cpumask(mm));
return idx2asid(asid) | generation;
}
@@ -254,6 +255,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
switch_mm_fastpath:
arm64_apply_bp_hardening();
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
/*
* Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
--
2.25.1

[PATCH openEuler-1.0-LTS] fget: check that the fd still exists after getting a ref to it
by Yang Yingliang 17 Dec '21
by Yang Yingliang 17 Dec '21
17 Dec '21
From: Linus Torvalds <torvalds(a)linux-foundation.org>
stable inclusion
from stable-4.19.220
commit 8bf31f9d9395b71af3ed33166a057cd3ec0c59da
category: bugfix
bugzilla: 185907
CVE: CVE-2021-4083
-------------------------------------------------
commit 054aa8d439b9185d4f5eb9a90282d1ce74772969 upstream.
Jann Horn points out that there is another possible race wrt Unix domain
socket garbage collection, somewhat reminiscent of the one fixed in
commit cbcf01128d0a ("af_unix: fix garbage collect vs MSG_PEEK").
See the extended comment about the garbage collection requirements added
to unix_peek_fds() by that commit for details.
The race comes from how we can locklessly look up a file descriptor just
as it is in the process of being closed, and with the right artificial
timing (Jann added a few strategic 'mdelay(500)' calls to do that), the
Unix domain socket garbage collector could see the reference count
decrement of the close() happen before fget() took its reference to the
file and the file was attached onto a new file descriptor.
This is all (intentionally) correct on the 'struct file *' side, with
RCU lookups and lockless reference counting very much part of the
design. Getting that reference count out of order isn't a problem per
se.
But the garbage collector can get confused by seeing this situation of
having seen a file not having any remaining external references and then
seeing it being attached to an fd.
In commit cbcf01128d0a ("af_unix: fix garbage collect vs MSG_PEEK") the
fix was to serialize the file descriptor install with the garbage
collector by taking and releasing the unix_gc_lock.
That's not really an option here, but since this all happens when we are
in the process of looking up a file descriptor, we can instead simply
just re-check that the file hasn't been closed in the meantime, and just
re-do the lookup if we raced with a concurrent close() of the same file
descriptor.
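In outline, the resulting lockless pattern looks like the sketch below
(illustrative, not the exact fs/file.c code, which uses __fcheck_files() and a
multi-reference helper):

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/rcupdate.h>

/* Sketch: look the fd up locklessly, take a reference, then confirm the fd
 * table still maps fd to the same file; if not, we raced with close() and
 * must retry. */
static struct file *fget_sketch(struct files_struct *files, unsigned int fd)
{
	struct file *file;

	for (;;) {
		rcu_read_lock();
		file = fcheck_files(files, fd);		/* lockless lookup */
		if (file && !get_file_rcu(file)) {
			rcu_read_unlock();
			continue;			/* lost the refcount race: retry */
		}
		if (file && fcheck_files(files, fd) != file) {
			rcu_read_unlock();
			fput(file);			/* fd was reused: drop ref, retry */
			continue;
		}
		rcu_read_unlock();
		return file;				/* may be NULL if fd is closed */
	}
}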
Reported-and-tested-by: Jann Horn <jannh(a)google.com>
Acked-by: Miklos Szeredi <mszeredi(a)redhat.com>
Signed-off-by: Linus Torvalds <torvalds(a)linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Signed-off-by: Baokun Li <libaokun1(a)huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng(a)huawei.com>
Reviewed-by: Zhang Yi <yi.zhang(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/file.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/fs/file.c b/fs/file.c
index 0caadedd3cc43..20a44d0ac400b 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -779,6 +779,10 @@ static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
file = NULL;
else if (!get_file_rcu_many(file, refs))
goto loop;
+ else if (__fcheck_files(files, fd) != file) {
+ fput_many(file, refs);
+ goto loop;
+ }
}
rcu_read_unlock();
--
2.25.1

[PATCH openEuler-1.0-LTS 1/4] Revert "Revert "ext4: Allow parallel DIO reads""
by Yang Yingliang 17 Dec '21
by Yang Yingliang 17 Dec '21
17 Dec '21
From: Zhang Xiaoxu <zhangxiaoxu5(a)huawei.com>
hulk inclusion
category: feature
bugzilla: 185871, https://gitee.com/openeuler/kernel/issues/I4MIQ0
CVE: NA
---------------------------
This reverts commit bb24e7e6309ae1bbc80062366b8f9bb2e8fcc025.
To improve direct I/O read performance, enable parallel DIO reads again
and make the feature configurable.
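For reference, the behaviour is gated behind the new Kconfig symbol added
by the diff below; a sketch of enabling it in a kernel .config:

	CONFIG_EXT4_FS=y
	CONFIG_EXT4_PARALLEL_DIO_READ=y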
Conflict:
fs/ext4/Kconfig
fs/ext4/inode.c
Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5(a)huawei.com>
Reviewed-by: Zhang Yi <yi.zhang(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
fs/ext4/Kconfig | 5 +++++
fs/ext4/inode.c | 34 ++++++++++++++++++++++++++++------
2 files changed, 33 insertions(+), 6 deletions(-)
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index a453cc87082b5..de836b75c6f2b 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -120,3 +120,8 @@ config EXT4_DEBUG
If you select Y here, then you will be able to turn on debugging
with a command such as:
echo 1 > /sys/module/ext4/parameters/mballoc_debug
+config EXT4_PARALLEL_DIO_READ
+ bool "EXT4 parallel dio reads"
+ depends on EXT4_FS
+ help
+ Enable parallel dio read.
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index fb4de707cd9ad..2f5231899d342 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3795,15 +3795,31 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
{
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+ struct inode *inode = mapping->host;
+#ifdef CONFIG_EXT4_PARALLEL_DIO_READ
+ size_t count = iov_iter_count(iter);
+#else
int unlocked = 0;
- struct inode *inode = iocb->ki_filp->f_mapping->host;
+#endif
ssize_t ret;
loff_t offset = iocb->ki_pos;
loff_t size = i_size_read(inode);
if (offset >= size)
return 0;
-
+#ifdef CONFIG_EXT4_PARALLEL_DIO_READ
+ /*
+ * Shared inode_lock is enough for us - it protects against concurrent
+ * writes & truncates and since we take care of writing back page cache,
+ * we are protected against page writeback as well.
+ */
+ inode_lock_shared(inode);
+ ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
+ iocb->ki_pos + count - 1);
+ if (ret)
+ goto out_unlock;
+#else
if (ext4_should_dioread_nolock(inode)) {
/*
* Nolock dioread optimization may be dynamically disabled
@@ -3813,18 +3829,24 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
inode_dio_begin(inode);
smp_mb();
if (unlikely(ext4_test_inode_state(inode,
- EXT4_STATE_DIOREAD_LOCK)))
+ EXT4_STATE_DIOREAD_LOCK)))
inode_dio_end(inode);
else
unlocked = 1;
}
+#endif
+
ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
- iter, ext4_dio_get_block,
- NULL, NULL,
+ iter, ext4_dio_get_block, NULL, NULL,
+#ifdef CONFIG_EXT4_PARALLEL_DIO_READ
+ 0);
+out_unlock:
+ inode_unlock_shared(inode);
+#else
unlocked ? 0 : DIO_LOCKING);
-
if (unlocked)
inode_dio_end(inode);
+#endif
return ret;
}
--
2.25.1

[PATCH openEuler-1.0-LTS 1/4] net: hns3: fix VF RSS failed problem after PF enable multi-TCs
by Yang Yingliang 16 Dec '21
by Yang Yingliang 16 Dec '21
16 Dec '21
From: Guangbin Huang <huangguangbin2(a)huawei.com>
mainline inclusion
from mainline-v5.16-rc3
commit 8d2ad993aa05c0768f00c886c9d369cd97a337ac
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4MWF6
CVE: NA
----------------------------
When the PF is configured with multiple TCs and a mapping between
priorities and TCs, the hardware activates these settings for the PF
and its VFs.
In this case, if a VF uses only one TC and its rx packets carry a priority
that is not mapped to TC0, the hardware always puts such packets into
queue 0, because the other TCs of the VF are not valid, and RSS cannot be
applied to them.
To fix this problem, set the tc mode of all unused TCs of the VF to the
TC0 setting, so that rx packets whose priority maps to an unused TC are
directed to TC0.
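As a worked illustration of the new mapping (the values below are assumed
for the example, not taken from the driver): with hw_tc_map = 0x1, i.e.
only TC0 in use, and rss_size = 8, the fixed loop marks every TC valid and
gives the unused TCs TC0's offset, so priority-tagged packets still land
in a queue range that RSS spreads:

	#include <stdio.h>

	int main(void)
	{
		unsigned int hw_tc_map = 0x1;	/* assumed: VF has only TC0 enabled */
		unsigned int rss_size = 8;	/* assumed: 8 RSS queues per TC */
		unsigned int tc_size_log = 3;	/* ilog2(roundup_pow_of_two(8)) */
		int i;

		for (i = 0; i < 8; i++) {	/* 8 TCs assumed for the example */
			unsigned int offset = (hw_tc_map & (1U << i)) ? rss_size * i : 0;

			printf("TC%d: valid=1 size=%u offset=%u\n", i, tc_size_log, offset);
		}
		return 0;
	}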
Fixes: e2cb1dec9779 ("net: hns3: Add HNS3 VF HCL(Hardware Compatibility Layer) Support")
Signed-off-by: Guangbin Huang <huangguangbin2(a)huawei.com>
Signed-off-by: Jakub Kicinski <kuba(a)kernel.org>
Signed-off-by: Yonglong Liu <liuyonglong(a)huawei.com>
Reviewed-by: li yongxin <liyongxin1(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 8e79818f149d2..ebe09a304aeda 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -686,9 +686,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
roundup_size = ilog2(roundup_size);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
+ tc_valid[i] = 1;
tc_size[i] = roundup_size;
- tc_offset[i] = rss_size * i;
+ tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
}
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
--
2.25.1

[PATCH openEuler-1.0-LTS 1/2] hugetlbfs: flush TLBs correctly after huge_pmd_unshare
by Yang Yingliang 16 Dec '21
by Yang Yingliang 16 Dec '21
16 Dec '21
From: Nadav Amit <namit(a)vmware.com>
stable inclusion
from linux-v4.19.219
commit b0313bc7f5fbb6beee327af39d818ffdc921821a
category: bugfix
bugzilla: 185854
CVE: CVE-2021-4002
-----------------------------------------------
commit a4a118f2eead1d6c49e00765de89878288d4b890 upstream.
When a call to huge_pmd_unshare() from __unmap_hugepage_range() succeeds, a TLB
flush is missing. This TLB flush must be performed before releasing the
i_mmap_rwsem, in order to prevent an unshared PMDs page from being
released and reused before the TLB flush took place.
Arguably, a comprehensive solution would use mmu_gather interface to
batch the TLB flushes and the PMDs page release, however it is not an
easy solution: (1) try_to_unmap_one() and try_to_migrate_one() also call
huge_pmd_unshare() and they cannot use the mmu_gather interface; and (2)
deferring the release of the page reference for the PMDs page until
after i_mmap_rwsem is dropped can confuse huge_pmd_unshare() into
thinking PMDs are shared when they are not.
Fix __unmap_hugepage_range() by adding the missing TLB flush, and
forcing a flush when unshare is successful.
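As a condensed, illustrative model of the bookkeeping this adds (plain
userspace C with assumed example addresses; the real helpers and callers
are in the diff below): the unshare path records the PUD-sized range and
forces one TLB flush before the lock covering the PMD page can be dropped.

	#include <stdio.h>

	struct gather {			/* toy stand-in for struct mmu_gather */
		unsigned long start, end;
		int need_flush;
	};

	static void flush_pmd_range(struct gather *tlb, unsigned long addr,
				    unsigned long size)
	{
		if (tlb->start > addr)
			tlb->start = addr;
		if (tlb->end < addr + size)
			tlb->end = addr + size;
		tlb->need_flush = 1;
	}

	int main(void)
	{
		struct gather tlb = { .start = ~0UL, .end = 0, .need_flush = 0 };

		/* assumed example: one unshared PMD page mapping a 1 GiB PUD region */
		flush_pmd_range(&tlb, 0x40000000UL, 0x40000000UL);

		if (tlb.need_flush)	/* must happen before i_mmap_rwsem is released */
			printf("flush TLB range [%#lx, %#lx)\n", tlb.start, tlb.end);
		return 0;
	}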
Fixes: 24669e58477e ("hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages") # 3.6
Signed-off-by: Nadav Amit <namit(a)vmware.com>
Reviewed-by: Mike Kravetz <mike.kravetz(a)oracle.com>
Cc: Aneesh Kumar K.V <aneesh.kumar(a)linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu(a)jp.fujitsu.com>
Cc: Andrew Morton <akpm(a)linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds(a)linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh(a)linuxfoundation.org>
Conflicts:
include/asm-generic/tlb.h
mm/mmu_gather.c
Signed-off-by: Liu Shixin <liushixin2(a)huawei.com>
Reviewed-by: tong tiangen <tongtiangen(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
arch/arm/include/asm/tlb.h | 8 ++++++++
arch/ia64/include/asm/tlb.h | 10 ++++++++++
arch/s390/include/asm/tlb.h | 16 ++++++++++++++++
arch/sh/include/asm/tlb.h | 10 ++++++++++
arch/um/include/asm/tlb.h | 12 ++++++++++++
include/asm-generic/tlb.h | 2 ++
mm/hugetlb.c | 23 +++++++++++++++++++----
mm/mmu_gather.c | 10 ++++++++++
8 files changed, 87 insertions(+), 4 deletions(-)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f854148c8d7c2..00baa13c158d7 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -280,6 +280,14 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr
tlb_add_flush(tlb, addr);
}
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+ unsigned long size)
+{
+ tlb_add_flush(tlb, address);
+ tlb_add_flush(tlb, address + size - PMD_SIZE);
+}
+
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 516355a774bfe..5d032d97c254e 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -268,6 +268,16 @@ __tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long addre
tlb->end_addr = address + PAGE_SIZE;
}
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+ unsigned long size)
+{
+ if (tlb->start_addr > address)
+ tlb->start_addr = address;
+ if (tlb->end_addr < address + size)
+ tlb->end_addr = address + size;
+}
+
#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
#define tlb_start_vma(tlb, vma) do { } while (0)
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b31c779cf5817..1df28a8e2f19e 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -116,6 +116,20 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
return tlb_remove_page(tlb, page);
}
+static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+ unsigned long address, unsigned long size)
+{
+ /*
+ * the range might exceed the original range that was provided to
+ * tlb_gather_mmu(), so we need to update it despite the fact it is
+ * usually not updated.
+ */
+ if (tlb->start > address)
+ tlb->start = address;
+ if (tlb->end < address + size)
+ tlb->end = address + size;
+}
+
/*
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
@@ -177,6 +191,8 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
#define tlb_remove_tlb_entry(tlb, ptep, addr) do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr) do { } while (0)
#define tlb_migrate_finish(mm) do { } while (0)
+#define tlb_flush_pmd_range(tlb, addr, sz) do { } while (0)
+
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
tlb_remove_tlb_entry(tlb, ptep, address)
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 77abe192fb43d..adcb0bfe238e3 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -127,6 +127,16 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
return tlb_remove_page(tlb, page);
}
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+ unsigned long size)
+{
+ if (tlb->start > address)
+ tlb->start = address;
+ if (tlb->end < address + size)
+ tlb->end = address + size;
+}
+
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
unsigned int page_size)
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index dce6db147f245..02e61f6abfcab 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -130,6 +130,18 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
return tlb_remove_page(tlb, page);
}
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+ unsigned long size)
+{
+ tlb->need_flush = 1;
+
+ if (tlb->start > address)
+ tlb->start = address;
+ if (tlb->end < address + size)
+ tlb->end = address + size;
+}
+
/**
* tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
*
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 6be86c1c5c583..cfc86e3ba4606 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -139,6 +139,8 @@ void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force);
void tlb_flush_mmu_free(struct mmu_gather *tlb);
+void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+ unsigned long size);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 2f65dad443ab3..b2ed9174bfd73 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3595,6 +3595,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
unsigned long sz = huge_page_size(h);
unsigned long mmun_start = start; /* For mmu_notifiers */
unsigned long mmun_end = end; /* For mmu_notifiers */
+ bool force_flush = false;
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
@@ -3621,10 +3622,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
ptl = huge_pte_lock(h, mm, ptep);
if (huge_pmd_unshare(mm, &address, ptep)) {
spin_unlock(ptl);
- /*
- * We just unmapped a page of PMDs by clearing a PUD.
- * The caller's TLB flush range should cover this area.
- */
+ tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+ force_flush = true;
continue;
}
@@ -3688,6 +3687,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
}
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
tlb_end_vma(tlb, vma);
+
+ /*
+ * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+ * could defer the flush until now, since by holding i_mmap_rwsem we
+ * guaranteed that the last reference would not be dropped. But we must
+ * do the flushing before we return, as otherwise i_mmap_rwsem will be
+ * dropped and the last reference to the shared PMDs page might be
+ * dropped as well.
+ *
+ * In theory we could defer the freeing of the PMD pages as well, but
+ * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+ * detect sharing, so we cannot defer the release of the page either.
+ * Instead, do flush now.
+ */
+ if (force_flush)
+ tlb_flush_mmu_tlbonly(tlb);
}
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 2a9fbc4a37d59..c147a5aacfa96 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -139,6 +139,16 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
return false;
}
+void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+ unsigned long size)
+{
+ if (tlb->page_size != 0 && tlb->page_size != PMD_SIZE)
+ tlb_flush_mmu(tlb);
+
+ tlb->page_size = PMD_SIZE;
+ tlb->start = min(tlb->start, address);
+ tlb->end = max(tlb->end, address + size);
+}
#endif /* HAVE_GENERIC_MMU_GATHER */
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
--
2.25.1

[PATCH openEuler-1.0-LTS 1/2] mm: share_pool: adjust sp_make_share_k2u behavior when coredump
by Yang Yingliang 16 Dec '21
by Yang Yingliang 16 Dec '21
16 Dec '21
From: Guo Mengqi <guomengqi3(a)huawei.com>
ascend inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I4MUV2
CVE: NA
When k2u is executed on the whole share pool group and one of the
processes is coredumping, k2u will skip that process and continue with
the remaining processes in the group.
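A minimal sketch of the resulting control flow (condensed from the diff
below; error handling and tracing trimmed for clarity):

	/* Walk every process in the group; a coredumping target is skipped
	 * instead of aborting the whole k2u operation. */
	list_for_each_entry(spg_node, &spg->procs, proc_node) {
		kc.state = K2U_NORMAL;
		ret_addr = sp_remap_kva_to_vma(kva, spa, spg_node->master->mm,
					       spg_node->prot, &kc);
		if (IS_ERR_VALUE(ret_addr)) {
			if (kc.state == K2U_COREDUMP)
				continue;	/* skip the coredumping process */
			uva = (void *)ret_addr;
			goto out;		/* any other failure still aborts */
		}
		uva = (void *)ret_addr;
	}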
Signed-off-by: Guo Mengqi <guomengqi3(a)huawei.com>
Reviewed-by: Weilong Chen <chenweilong(a)huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang(a)huawei.com>
---
mm/share_pool.c | 50 +++++++++++++++++++++++++++++++------------------
1 file changed, 32 insertions(+), 18 deletions(-)
diff --git a/mm/share_pool.c b/mm/share_pool.c
index c9079e6a40b1a..ac97a68a70264 100644
--- a/mm/share_pool.c
+++ b/mm/share_pool.c
@@ -678,8 +678,25 @@ static unsigned long sp_mmap(struct mm_struct *mm, struct file *file,
struct sp_area *spa, unsigned long *populate,
unsigned long prot);
static void sp_munmap(struct mm_struct *mm, unsigned long addr, unsigned long size);
+
+#define K2U_NORMAL 0
+#define K2U_COREDUMP 1
+
+struct sp_k2u_context {
+ unsigned long kva;
+ unsigned long kva_aligned;
+ unsigned long size;
+ unsigned long size_aligned;
+ unsigned long sp_flags;
+ int state;
+ int spg_id;
+ bool to_task;
+ struct timespec64 start;
+ struct timespec64 end;
+};
+
static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa,
- struct mm_struct *mm, unsigned long prot);
+ struct mm_struct *mm, unsigned long prot, struct sp_k2u_context *kc);
static void free_sp_group_id(int spg_id)
{
@@ -1334,7 +1351,7 @@ int mg_sp_group_add_task(int pid, unsigned long prot, int spg_id)
spin_unlock(&sp_area_lock);
if (spa->type == SPA_TYPE_K2SPG && spa->kva) {
- addr = sp_remap_kva_to_vma(spa->kva, spa, mm, prot);
+ addr = sp_remap_kva_to_vma(spa->kva, spa, mm, prot, NULL);
if (IS_ERR_VALUE(addr))
pr_warn("add group remap k2u failed %ld\n", addr);
@@ -2606,7 +2623,7 @@ static unsigned long __sp_remap_get_pfn(unsigned long kva)
/* when called by k2u to group, always make sure rw_lock of spg is down */
static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa,
- struct mm_struct *mm, unsigned long prot)
+ struct mm_struct *mm, unsigned long prot, struct sp_k2u_context *kc)
{
struct vm_area_struct *vma;
unsigned long ret_addr;
@@ -2618,6 +2635,8 @@ static unsigned long sp_remap_kva_to_vma(unsigned long kva, struct sp_area *spa,
if (unlikely(mm->core_state)) {
pr_err("k2u mmap: encountered coredump, abort\n");
ret_addr = -EBUSY;
+ if (kc)
+ kc->state = K2U_COREDUMP;
goto put_mm;
}
@@ -2703,7 +2722,7 @@ static void *sp_make_share_kva_to_task(unsigned long kva, unsigned long size, un
spa->kva = kva;
- uva = (void *)sp_remap_kva_to_vma(kva, spa, current->mm, prot);
+ uva = (void *)sp_remap_kva_to_vma(kva, spa, current->mm, prot, NULL);
__sp_area_drop(spa);
if (IS_ERR(uva))
pr_err("remap k2u to task failed %ld\n", PTR_ERR(uva));
@@ -2731,6 +2750,8 @@ static void *sp_make_share_kva_to_spg(unsigned long kva, unsigned long size,
struct mm_struct *mm;
struct sp_group_node *spg_node;
void *uva = ERR_PTR(-ENODEV);
+ struct sp_k2u_context kc;
+ unsigned long ret_addr = -ENODEV;
down_read(&spg->rw_lock);
spa = sp_alloc_area(size, sp_flags, spg, SPA_TYPE_K2SPG, current->tgid);
@@ -2745,12 +2766,17 @@ static void *sp_make_share_kva_to_spg(unsigned long kva, unsigned long size,
list_for_each_entry(spg_node, &spg->procs, proc_node) {
mm = spg_node->master->mm;
- uva = (void *)sp_remap_kva_to_vma(kva, spa, mm, spg_node->prot);
- if (IS_ERR(uva)) {
+ kc.state = K2U_NORMAL;
+ ret_addr = sp_remap_kva_to_vma(kva, spa, mm, spg_node->prot, &kc);
+ if (IS_ERR_VALUE(ret_addr)) {
+ if (kc.state == K2U_COREDUMP)
+ continue;
+ uva = (void *)ret_addr;
pr_err("remap k2u to spg failed %ld\n", PTR_ERR(uva));
__sp_free(spg, spa->va_start, spa_size(spa), mm);
goto out;
}
+ uva = (void *)ret_addr;
}
out:
@@ -2775,18 +2801,6 @@ static bool vmalloc_area_set_flag(unsigned long kva, unsigned long flags)
return false;
}
-struct sp_k2u_context {
- unsigned long kva;
- unsigned long kva_aligned;
- unsigned long size;
- unsigned long size_aligned;
- unsigned long sp_flags;
- int spg_id;
- bool to_task;
- struct timespec64 start;
- struct timespec64 end;
-};
-
static void trace_sp_k2u_begin(struct sp_k2u_context *kc)
{
if (!sysctl_sp_perf_k2u)
--
2.25.1
bugzilla: https://gitee.com/openeuler/kernel/issues/I469VQ
Tian Tao (2):
drm/hisilicon: Support i2c driver algorithms for bit-shift adapters
drm/hisilicon: Features to support reading resolutions from EDID
drivers/gpu/drm/hisilicon/hibmc/Makefile | 2 +-
.../gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 25 ++++-
.../gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c | 99 +++++++++++++++++++
.../gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 38 ++++++-
4 files changed, 158 insertions(+), 6 deletions(-)
create mode 100644 drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
--
2.20.1