[PATCH openEuler-1.0-LTS] scsi/hiraid: support new RAID feature
From: 岳智超 <yuezhichao1@h-partners.com> driver inclusion category: feature bugzilla: https://atomgit.com/openeuler/kernel/issues/8291 CVE: NA -------------------------------- Add thread irq for io queue Add stream detect Signed-off-by: 岳智超 <yuezhichao1@h-partners.com> --- drivers/scsi/hisi_raid/hiraid.h | 34 ++ drivers/scsi/hisi_raid/hiraid_main.c | 586 ++++++++++++++++++++++++++- 2 files changed, 603 insertions(+), 17 deletions(-) diff --git a/drivers/scsi/hisi_raid/hiraid.h b/drivers/scsi/hisi_raid/hiraid.h index 1ebc3dd..d107951 100644 --- a/drivers/scsi/hisi_raid/hiraid.h +++ b/drivers/scsi/hisi_raid/hiraid.h @@ -683,6 +683,7 @@ struct hiraid_queue { atomic_t inflight; void *sense_buffer_virt; dma_addr_t sense_buffer_phy; + s32 pci_irq; struct dma_pool *prp_small_pool; }; @@ -756,5 +757,38 @@ struct hiraid_sdev_hostdata { u16 pend_count; }; +enum stream_type { + TYPE_TOTAL, + TYPE_WRITE, + TYPE_READ, + TYPE_CLEAN, + TYPE_BOTTOM +}; + +struct HIRAID_STREAM_S { + /* recog-window */ + u64 stream_lba; + u32 stream_len; + u16 vd_id; + u16 type; + /* aging ctrl */ + int aging_credit; + int aging_grade; + u16 stream_id; + u16 using; +}; + +struct IO_LIST_S { + struct list_head list; + struct hiraid_scsi_io_cmd io_cmd; + struct hiraid_queue *submit_queue; + unsigned int sector_size; +}; + +struct spinlock_list_head_s { + struct list_head list; + spinlock_t lock; +}; + #endif diff --git a/drivers/scsi/hisi_raid/hiraid_main.c b/drivers/scsi/hisi_raid/hiraid_main.c index f84182f..281fe79 100644 --- a/drivers/scsi/hisi_raid/hiraid_main.c +++ b/drivers/scsi/hisi_raid/hiraid_main.c @@ -35,6 +35,10 @@ #include <scsi/scsi_transport.h> #include <scsi/scsi_dbg.h> #include <scsi/sg.h> +#include <linux/kthread.h> +#include <linux/mutex.h> +#include <linux/sched.h> +#include <linux/sched/prio.h> #include "hiraid.h" @@ -107,6 +111,13 @@ static u32 log_debug_switch; module_param(log_debug_switch, uint, 0644); MODULE_PARM_DESC(log_debug_switch, "set log state, default zero for switch 
off"); +static bool threaded_irq = false; +module_param(threaded_irq, bool, 0444); +MODULE_PARM_DESC(threaded_irq, "use threaded irq for io queue, default off"); + +static u32 poll_delay_min = 9; +static u32 poll_delay_max = 19; + static int extra_pool_num_set(const char *val, const struct kernel_param *kp) { u8 n = 0; @@ -153,7 +164,7 @@ static struct workqueue_struct *work_queue; __func__, ##__VA_ARGS__); \ } while (0) -#define HIRAID_DRV_VERSION "1.1.0.0" +#define HIRAID_DRV_VERSION "1.1.0.1" #define ADMIN_TIMEOUT (admin_tmout * HZ) #define USRCMD_TIMEOUT (180 * HZ) @@ -170,6 +181,15 @@ static struct workqueue_struct *work_queue; #define MAX_CAN_QUEUE (4096 - 1) #define MIN_CAN_QUEUE (1024 - 1) +#define MAX_DECREASE_GRADE (-8) +#define MAX_INCREASE_GRADE 8 +#define INC_GRADE 1 +#define MIN_CREDIT 0 +#define MAX_CREDIT 64 +#define CREDIT_THRES 32 +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define MAX(a, b) (((a) > (b)) ? (a) : (b)) + enum SENSE_STATE_CODE { SENSE_STATE_OK = 0, SENSE_STATE_NEED_CHECK, @@ -765,6 +785,404 @@ static int hiraid_build_sgl(struct hiraid_dev *hdev, return 0; } +#define MAX_PD_NUM (40 + 1) +#define MAX_STREAM_NUM 8 +#define PER_MB (1024 * 1024) +#define MAX_IO_NUM (200 * PER_MB) +#define STREAM_LEN (4 * PER_MB) +#define MAX_IO_NUM_ONCE 128 +#define IO_SUBMIT_TIME_OUT 100 +#define MAX_AGING_NUM 130 + +#define MIN_IO_SEND_TIME 10 +#define MAX_IO_SEND_TIME 50 + +#define MIN_WAIT_IO_SEND_TIME 10 +#define MAX_WAIT_IO_SEND_TIME 20 + +enum io_operation_type { + TYPE_DELETE_SINGLE_IO = 1, + TYPE_DELETE_SINGLE_IO_LIST, + TYPE_DELETE_ALL_IO_LIST +}; + +struct HIRAID_STREAM_S stream_array[MAX_PD_NUM][MAX_STREAM_NUM] = {0}; +struct spinlock_list_head_s io_heads_per_stream[MAX_PD_NUM * MAX_STREAM_NUM]; +spinlock_t stream_array_lock; + +u64 g_io_transport_num[MAX_PD_NUM][MAX_STREAM_NUM] = {0}; +u16 g_io_stream_num[MAX_PD_NUM][TYPE_BOTTOM] = {0}; +u16 g_io_count = 1; + +void hiraid_inc_io_transport_num(u16 disk_id, u16 streamd_id, u16 nlb) +{ + 
g_io_transport_num[disk_id][streamd_id] += nlb; +} + +void hiraid_refresh_io_transport_num(u16 disk_id, u16 streamd_id) +{ + g_io_transport_num[disk_id][streamd_id] = 0; +} + +void hiraid_inc_stream_num(u16 disk_id) +{ + spin_lock(&stream_array_lock); + g_io_stream_num[disk_id][TYPE_TOTAL]++; + spin_unlock(&stream_array_lock); +} + +void hiraid_dec_stream_num(u16 disk_id) +{ + spin_lock(&stream_array_lock); + if (g_io_stream_num[disk_id][TYPE_TOTAL] > 0) + g_io_stream_num[disk_id][TYPE_TOTAL]--; + spin_unlock(&stream_array_lock); +} + +static bool hiraid_io_recog_check_stream_exceed(u16 disk_id) +{ + bool exceed_flag; + + spin_lock(&stream_array_lock); + exceed_flag = (g_io_stream_num[disk_id][TYPE_TOTAL] >= MAX_STREAM_NUM); + spin_unlock(&stream_array_lock); + return exceed_flag; +} + +static u16 hiraid_get_stream_num(u16 disk_id) +{ + return g_io_stream_num[disk_id][TYPE_TOTAL]; +} + +static inline struct HIRAID_STREAM_S *hiraid_get_stream(u16 disk_id, + u16 stream_id) +{ + return &stream_array[disk_id][stream_id]; +} + +static inline struct spinlock_list_head_s *hiraid_get_io_head(u16 disk_id) +{ + return &(io_heads_per_stream[disk_id]); +} + +static bool hiraid_recognition_acknowledge(const struct HIRAID_STREAM_S *stream) +{ + return (stream->aging_credit >= CREDIT_THRES) ? 
true : false; +} + +void hiraid_io_recognition_init(void) +{ + u16 i; + + spin_lock_init(&stream_array_lock); + for (i = 0; i < (MAX_PD_NUM * MAX_STREAM_NUM); i++) { + INIT_LIST_HEAD(&hiraid_get_io_head(i)->list); + spin_lock_init(&hiraid_get_io_head(i)->lock); + } +} + +static void hiraid_io_recognition_iterator(struct HIRAID_STREAM_S *stream, + int direction) +{ + stream->aging_grade = stream->aging_grade + direction * INC_GRADE; + stream->aging_grade = MAX(stream->aging_grade, MAX_DECREASE_GRADE); + stream->aging_grade = MIN(stream->aging_grade, MAX_INCREASE_GRADE); + stream->aging_credit = stream->aging_credit + stream->aging_grade; + stream->aging_credit = MAX(stream->aging_credit, MIN_CREDIT); + stream->aging_credit = MIN(stream->aging_credit, MAX_CREDIT); +} + +struct HIRAID_STREAM_S *hiraid_io_pick_stream( + struct hiraid_scsi_rw_cmd *req, u16 type) +{ + struct HIRAID_STREAM_S *first_hit_stream = NULL; + struct HIRAID_STREAM_S *temp_stream = NULL; + u16 pick_flag = 0; + u8 i; + + for (i = 0; i < MAX_STREAM_NUM; i++) { + temp_stream = &stream_array[req->hdid][i]; + temp_stream->stream_id = i; + if (req->slba < temp_stream->stream_lba || + req->slba >= temp_stream->stream_lba + + temp_stream->stream_len || + temp_stream->type != type) { + continue; + } + if (!pick_flag) { + temp_stream->stream_lba = req->slba; + first_hit_stream = temp_stream; + pick_flag = 1; + continue; + } + hiraid_dec_stream_num(req->hdid); + memset(temp_stream, 0, + sizeof(struct HIRAID_STREAM_S)); // 去重影 + } + return first_hit_stream; +} + +static struct HIRAID_STREAM_S *hiraid_init_flow_stream(struct hiraid_scsi_rw_cmd *req, + u16 type) +{ + int i; + struct HIRAID_STREAM_S *stream = NULL; + + for (i = 0; i < MAX_STREAM_NUM; i++) { + stream = hiraid_get_stream(req->hdid, i); + if (!stream->using) { + stream->using = 1; + stream->stream_id = i; + break; + } + } + stream->stream_lba = req->slba; + stream->vd_id = req->hdid; + stream->type = type; + stream->aging_credit = 0; + 
stream->aging_grade = 0; + stream->stream_len = STREAM_LEN; + return stream; +} + +static struct HIRAID_STREAM_S *hiraid_stream_detect(struct hiraid_dev *hdev, + struct hiraid_scsi_rw_cmd *io_cmd) +{ + u16 type = io_cmd->opcode == HIRAID_CMD_WRITE ? TYPE_WRITE : TYPE_READ; + struct HIRAID_STREAM_S *stream = hiraid_io_pick_stream(io_cmd, type); + + if (stream != NULL) { /* 可以命中一个stream */ + return stream; + } + + if (hiraid_io_recog_check_stream_exceed(io_cmd->hdid)) + return NULL; + stream = hiraid_init_flow_stream(io_cmd, type); + hiraid_inc_stream_num(io_cmd->hdid); + return stream; +} + +u64 g_io_last_pull_time[MAX_PD_NUM] = {0}; + +static u16 hiraid_get_submit_io_stream(u16 did, struct hiraid_dev *hdev) +{ + u64 temp_num, i; + static u16 stream_num[MAX_PD_NUM] = {0}; + + if (g_io_last_pull_time[did] == 0) + g_io_last_pull_time[did] = jiffies_to_msecs(jiffies); + + for (i = 0; i < MAX_STREAM_NUM; i++) { + temp_num = g_io_transport_num[did][i]; + if (temp_num != 0) { + if ((temp_num < MAX_IO_NUM) && + ((jiffies_to_msecs(jiffies) - g_io_last_pull_time[did]) + < IO_SUBMIT_TIME_OUT)) { + stream_num[did] = i; + return i; + } + g_io_last_pull_time[did] = jiffies_to_msecs(jiffies); + hiraid_refresh_io_transport_num(did, i); + stream_num[did] = ((i+1) % MAX_STREAM_NUM); + return ((i+1) % MAX_STREAM_NUM); + } + } + g_io_last_pull_time[did] = jiffies_to_msecs(jiffies); + return ((stream_num[did]++) % MAX_STREAM_NUM); +} + +static void hiraid_submit_io_stream(u16 hdid, struct hiraid_dev *hdev) +{ + struct spinlock_list_head_s *io_slist = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct hiraid_scsi_io_cmd io_cmd = {0}; + struct hiraid_queue *submit_queue = NULL; + unsigned int sector_size = 0; + u16 submit_stream_id = hiraid_get_submit_io_stream(hdid, hdev); + + struct IO_LIST_S *temp_io_stream = NULL; + u16 count = 0; + + io_slist = hiraid_get_io_head(hdid * MAX_STREAM_NUM + submit_stream_id); + spin_lock(&io_slist->lock); + 
list_for_each_safe(node, next_node, &io_slist->list) { + temp_io_stream = list_entry(node, struct IO_LIST_S, list); + list_del_init(node); + io_cmd = temp_io_stream->io_cmd; + submit_queue = temp_io_stream->submit_queue; + sector_size = temp_io_stream->sector_size; + kfree(temp_io_stream); + temp_io_stream = NULL; + spin_unlock(&io_slist->lock); + hiraid_submit_cmd(submit_queue, &io_cmd); + hiraid_inc_io_transport_num(hdid, + submit_stream_id, io_cmd.rw.nlb * sector_size); + // 单次下发不超过MAX_IO_NUM_ONCE,避免仅发送单盘 + if (++count >= MAX_IO_NUM_ONCE) { + spin_lock(&io_slist->lock); + break; + } + spin_lock(&io_slist->lock); + } + spin_unlock(&io_slist->lock); +} + +static u8 hiraid_detect_if_aging(void) +{ + if (++g_io_count == MAX_AGING_NUM) { + g_io_count = 0; + return 1; + } + return 0; +} + +static void hiraid_aging(struct hiraid_dev *hdev) +{ + struct HIRAID_STREAM_S *temp_stream = NULL; + int i = 0; + int j = 0; + + for (i = 1; i < MAX_PD_NUM; i++) { + for (j = 0; j < MAX_STREAM_NUM; j++) { + temp_stream = hiraid_get_stream(i, j); + if (temp_stream->using) { + hiraid_io_recognition_iterator(temp_stream, -1); + if (temp_stream->aging_credit <= 0) { + hiraid_dec_stream_num(i); + memset(temp_stream, + 0, sizeof(struct HIRAID_STREAM_S)); // 老化 + } + } + } + } +} + +static u8 hiraid_io_list_operation(u32 hdid, u16 cid, u16 hwq, u8 operation) +{ + int i, j; + + struct spinlock_list_head_s *io_slist = NULL; + struct list_head *node = NULL; + struct list_head *next_node = NULL; + struct hiraid_scsi_io_cmd *io_cmd = NULL; + struct hiraid_queue *hiraidq = NULL; + struct IO_LIST_S *temp_io_stream = NULL; + + u8 max_hd_num = operation == TYPE_DELETE_ALL_IO_LIST ? 
+ MAX_PD_NUM : hdid + 1; + for (i = hdid; i < max_hd_num; i++) { + for (j = 0; j < MAX_STREAM_NUM; j++) { + io_slist = hiraid_get_io_head(i * MAX_STREAM_NUM + j); + spin_lock(&io_slist->lock); + list_for_each_safe(node, next_node, &io_slist->list) { + temp_io_stream = list_entry(node, + struct IO_LIST_S, list); + io_cmd = &(temp_io_stream->io_cmd); + hiraidq = temp_io_stream->submit_queue; + if (operation >= TYPE_DELETE_SINGLE_IO_LIST) { + list_del_init(node); + kfree(temp_io_stream); + temp_io_stream = NULL; + } else { + if ((io_cmd->rw.cmd_id == cid) && + (hiraidq->qid == hwq)) { + list_del_init(node); + spin_unlock(&io_slist->lock); + kfree(temp_io_stream); + return 1; + } + } + } + spin_unlock(&io_slist->lock); + } + } + return 0; +} + +static u8 hiraid_check_io_list(u32 hdid, u16 cid, u16 hwq) +{ + return hiraid_io_list_operation(hdid, cid, hwq, TYPE_DELETE_SINGLE_IO); +} + +static u8 hiraid_delete_single_pd_io_list(u32 hdid) +{ + return hiraid_io_list_operation(hdid, 0, 0, TYPE_DELETE_SINGLE_IO_LIST); +} + +static u8 hiraid_delete_all_io_list(void) +{ + return hiraid_io_list_operation(0, 0, 0, TYPE_DELETE_ALL_IO_LIST); +} + +static void hiraid_wait_for_io_submit(struct hiraid_dev *hdev) +{ + struct spinlock_list_head_s *io_slist = NULL; + int i = 0; + int io_flush_finished; + + do { + io_flush_finished = 1; + for (i = 0; i < (MAX_PD_NUM * MAX_STREAM_NUM); i++) { + io_slist = hiraid_get_io_head(i); + if (!list_empty(&io_slist->list)) { + io_flush_finished = 0; + break; + } + } + usleep_range(MIN_WAIT_IO_SEND_TIME, MAX_WAIT_IO_SEND_TIME); + } while (!io_flush_finished); +} + +static u8 hiraid_add_io_to_list(struct hiraid_queue *submit_queue, + struct HIRAID_STREAM_S *tmp_stream, struct hiraid_scsi_io_cmd io_cmd, + unsigned int sector_size) +{ + struct spinlock_list_head_s *io_slist = NULL; + struct IO_LIST_S *new_io_node = NULL; + + new_io_node = kmalloc(sizeof(struct IO_LIST_S), GFP_KERNEL); + if (!new_io_node) + return 0; + new_io_node->io_cmd = io_cmd; + 
new_io_node->submit_queue = submit_queue; + new_io_node->sector_size = sector_size; + io_slist = hiraid_get_io_head(io_cmd.rw.hdid * + MAX_STREAM_NUM + tmp_stream->stream_id); + spin_lock(&io_slist->lock); + INIT_LIST_HEAD(&(new_io_node->list)); + list_add_tail(&(new_io_node->list), &io_slist->list); + spin_unlock(&io_slist->lock); + return 1; +} + +static void hiraid_submit_io_threading(struct hiraid_dev *hdev) +{ + int i = 1; + + while (!kthread_should_stop()) { + for (i = 1; i < MAX_PD_NUM; i++) + hiraid_submit_io_stream(i, hdev); + usleep_range(MIN_IO_SEND_TIME, MAX_IO_SEND_TIME); + } +} + +static void hiraid_destroy_io_stream_resource(struct hiraid_dev *hdev) +{ + u16 i; + + for (i = 0; i < (MAX_PD_NUM * MAX_STREAM_NUM); i++) + list_del_init(&hiraid_get_io_head(i)->list); +} + +struct task_struct *g_hiraid_submit_task; +static void hiraid_init_io_stream(struct hiraid_dev *hdev) +{ + hiraid_io_recognition_init(); + g_hiraid_submit_task = kthread_run((void *)hiraid_submit_io_threading, + hdev, "hiraid_submit_thread"); +} + #define HIRAID_RW_FUA BIT(14) static int hiraid_setup_rw_cmd(struct hiraid_dev *hdev, @@ -866,6 +1284,30 @@ static int hiraid_setup_nonrw_cmd(struct hiraid_dev *hdev, return 0; } +static bool hiraid_disk_is_hdd(u8 attr) +{ + switch (HIRAID_DEV_DISK_TYPE(attr)) { + case HIRAID_SAS_HDD_VD: + case HIRAID_SATA_HDD_VD: + case HIRAID_SAS_HDD_PD: + case HIRAID_SATA_HDD_PD: + return true; + default: + return false; + } +} + +static bool hiraid_disk_is_hdd_rawdrive(u8 attr) +{ + switch (HIRAID_DEV_DISK_TYPE(attr)) { + case HIRAID_SAS_HDD_PD: + case HIRAID_SATA_HDD_PD: + return true; + default: + return false; + } +} + static int hiraid_setup_io_cmd(struct hiraid_dev *hdev, struct hiraid_scsi_io_cmd *io_cmd, struct scsi_cmnd *scmd) @@ -1023,6 +1465,7 @@ static int hiraid_queue_command(struct Scsi_Host *shost, struct hiraid_sdev_hostdata *hostdata; struct hiraid_scsi_io_cmd io_cmd; struct hiraid_queue *ioq; + struct HIRAID_STREAM_S *tmp_stm = NULL; u16 
hwq, cid; int ret; @@ -1085,6 +1528,23 @@ static int hiraid_queue_command(struct Scsi_Host *shost, } WRITE_ONCE(mapbuf->state, CMD_FLIGHT); + + if (hiraid_is_rw_scmd(scmd) && + hiraid_disk_is_hdd_rawdrive(hostdata->attr)) { + if (hiraid_detect_if_aging()) + hiraid_aging(hdev); + tmp_stm = hiraid_stream_detect(hdev, &(io_cmd.rw)); + if (tmp_stm != NULL) { + hiraid_io_recognition_iterator(tmp_stm, 1); + if (hiraid_recognition_acknowledge(tmp_stm) && + (hiraid_get_stream_num(io_cmd.rw.hdid) > 1)) { + if (hiraid_add_io_to_list(ioq, + tmp_stm, io_cmd, sdev->sector_size)) { + return 0; + } + } + } + } hiraid_submit_cmd(ioq, &io_cmd); return 0; @@ -1135,19 +1595,6 @@ static int hiraid_disk_qd(u8 attr) } } -static bool hiraid_disk_is_hdd(u8 attr) -{ - switch (HIRAID_DEV_DISK_TYPE(attr)) { - case HIRAID_SAS_HDD_VD: - case HIRAID_SATA_HDD_VD: - case HIRAID_SAS_HDD_PD: - case HIRAID_SATA_HDD_PD: - return true; - default: - return false; - } -} - static int hiraid_slave_alloc(struct scsi_device *sdev) { struct hiraid_sdev_hostdata *hostdata; @@ -1305,6 +1752,7 @@ static int hiraid_alloc_queue(struct hiraid_dev *hdev, u16 qid, u16 depth) hiraidq->q_depth = depth; hiraidq->qid = qid; hiraidq->cq_vector = -1; + hiraidq->pci_irq = -1; hdev->queue_count++; return 0; @@ -1593,12 +2041,38 @@ static inline bool hiraid_process_cq(struct hiraid_queue *hiraidq, u16 *start, u16 *end, int tag) { bool found = false; + *start = hiraidq->cq_head; + while (!found && hiraid_cqe_pending(hiraidq)) { + if (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].cmd_id) == tag) + found = true; + hiraid_update_cq_head(hiraidq); + } + *end = hiraidq->cq_head; + + if (*start != *end) + writel(hiraidq->cq_head, + hiraidq->q_db + hiraidq->hdev->db_stride); + + return found; +} +static inline bool hiraid_process_cq_for_thread(struct hiraid_queue *hiraidq, + u16 *start, u16 *end, + u8 *wakeup_thread_flag, int tag) +{ + bool found = false; + u16 max_io_num = hiraidq->q_depth / 4; + u16 io_count = 0; *start = 
hiraidq->cq_head; while (!found && hiraid_cqe_pending(hiraidq)) { if (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].cmd_id) == tag) found = true; hiraid_update_cq_head(hiraidq); + + if (++io_count >= max_io_num) { + *wakeup_thread_flag = 1; + break; + } } *end = hiraidq->cq_head; @@ -1646,6 +2120,55 @@ static irqreturn_t hiraid_handle_irq(int irq, void *data) return ret; } +static irqreturn_t hiraid_io_poll(int irq, void *data) +{ + struct hiraid_queue *hiraidq = data; + irqreturn_t ret = IRQ_NONE; + u16 start, end; + + do { + spin_lock(&hiraidq->cq_lock); + hiraid_process_cq(hiraidq, &start, &end, -1); + hiraidq->last_cq_head = hiraidq->cq_head; + spin_unlock(&hiraidq->cq_lock); + + if (start != end) { + hiraid_complete_cqes(hiraidq, start, end); + ret = IRQ_HANDLED; + } + usleep_range(poll_delay_min, poll_delay_max); + } while (start != end); + enable_irq(hiraidq->pci_irq); + return ret; +} + +static irqreturn_t hiraid_io_irq(int irq, void *data) +{ + struct hiraid_queue *hiraidq = data; + irqreturn_t ret = IRQ_NONE; + u16 start, end; + u8 wakeup_thread_flag = 0; + + spin_lock(&hiraidq->cq_lock); + if (hiraidq->cq_head != hiraidq->last_cq_head) + ret = IRQ_HANDLED; + + hiraid_process_cq_for_thread(hiraidq, &start, + &end, &wakeup_thread_flag, -1); + hiraidq->last_cq_head = hiraidq->cq_head; + spin_unlock(&hiraidq->cq_lock); + + if (start != end) { + hiraid_complete_cqes(hiraidq, start, end); + ret = IRQ_HANDLED; + } + if (wakeup_thread_flag) { + disable_irq_nosync(hiraidq->pci_irq); + ret = IRQ_WAKE_THREAD; + } + return ret; +} + static int hiraid_setup_admin_queue(struct hiraid_dev *hdev) { struct hiraid_queue *adminq = &hdev->queues[0]; @@ -1681,9 +2204,11 @@ static int hiraid_setup_admin_queue(struct hiraid_dev *hdev) NULL, adminq, "hiraid%d_q%d", hdev->instance, adminq->qid); if (ret) { adminq->cq_vector = -1; + adminq->pci_irq = -1; return ret; } + adminq->pci_irq = pci_irq_vector(hdev->pdev, adminq->cq_vector); hiraid_init_queue(adminq, 0); 
dev_info(hdev->dev, "setup admin queue success, queuecount[%d] online[%d] pagesize[%d]\n", @@ -1958,14 +2483,23 @@ static int hiraid_create_queue(struct hiraid_queue *hiraidq, u16 qid) goto delete_cq; hiraidq->cq_vector = cq_vector; - ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_handle_irq, NULL, - hiraidq, "hiraid%d_q%d", hdev->instance, qid); + if (threaded_irq) + ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_io_irq, + hiraid_io_poll, hiraidq, "hiraid%d_q%d", + hdev->instance, qid); + else + ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_handle_irq, + NULL, hiraidq, "hiraid%d_q%d", + hdev->instance, qid); + if (ret) { hiraidq->cq_vector = -1; + hiraidq->pci_irq = -1; dev_err(hdev->dev, "request queue[%d] irq failed\n", qid); goto delete_sq; } + hiraidq->pci_irq = pci_irq_vector(hdev->pdev, hiraidq->cq_vector); hiraid_init_queue(hiraidq, qid); return 0; @@ -2122,10 +2656,11 @@ static int hiraid_setup_io_queues(struct hiraid_dev *hdev) adminq, "hiraid%d_q%d", hdev->instance, adminq->qid); if (ret) { dev_err(hdev->dev, "request admin irq failed\n"); + adminq->pci_irq = -1; adminq->cq_vector = -1; return ret; } - + adminq->pci_irq = pci_irq_vector(hdev->pdev, adminq->cq_vector); hdev->online_queues++; for (i = hdev->queue_count; i <= hdev->max_qid; i++) { @@ -3304,6 +3839,12 @@ static int hiraid_abort(struct scsi_cmnd *scmd) cid = mapbuf->cid; hwq = mapbuf->hiraidq->qid; + if (hiraid_check_io_list(hostdata->hdid, cid, hwq)) { + dev_warn(hdev->dev, "find cid[%d] qid[%d] in host, abort succ\n", + cid, hwq); + return SUCCESS; + } + dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, send abort\n", cid, hwq); ret = hiraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid); if (ret != -ETIME) { @@ -3339,6 +3880,7 @@ static int hiraid_scsi_reset(struct scsi_cmnd *scmd, enum hiraid_rst_type rst) if ((ret == 0) || (ret == FW_EH_DEV_NONE && rst == HIRAID_RESET_TARGET)) { if (rst == HIRAID_RESET_TARGET) { + hiraid_delete_single_pd_io_list(hostdata->hdid); ret = 
wait_tgt_reset_io_done(scmd); if (ret) { dev_warn(hdev->dev, "sdev[%d:%d] target has %d peding cmd, target reset failed\n", @@ -3378,6 +3920,7 @@ static int hiraid_host_reset(struct scsi_cmnd *scmd) dev_warn(hdev->dev, "sdev[%d:%d] send host reset\n", scmd->device->channel, scmd->device->id); + hiraid_delete_all_io_list(); if (hiraid_reset_work_sync(hdev) == -EBUSY) flush_work(&hdev->reset_work); @@ -3411,6 +3954,7 @@ static pci_ers_result_t hiraid_pci_error_detected(struct pci_dev *pdev, scsi_block_requests(hdev->shost); hiraid_dev_state_trans(hdev, DEV_RESETTING); + hiraid_delete_all_io_list(); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: @@ -4044,6 +4588,7 @@ static int hiraid_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto unregist_bsg; scsi_scan_host(hdev->shost); + hiraid_init_io_stream(hdev); return 0; @@ -4075,6 +4620,13 @@ static void hiraid_remove(struct pci_dev *pdev) dev_info(hdev->dev, "enter hiraid remove\n"); + if (pci_device_is_present(pdev)) + hiraid_wait_for_io_submit(hdev); + + kthread_stop(g_hiraid_submit_task); + hiraid_delete_all_io_list(); + hiraid_destroy_io_stream_resource(hdev); + hiraid_dev_state_trans(hdev, DEV_DELETING); flush_work(&hdev->reset_work); -- 2.45.1.windows.1
反馈: 您发送到kernel@openeuler.org的补丁/补丁集,已成功转换为PR! PR链接地址: https://atomgit.com/openeuler/kernel/merge_requests/20126 邮件列表地址:https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/3FZ... FeedBack: The patch(es) which you have sent to kernel@openeuler.org mailing list has been converted to a pull request successfully! Pull request link: https://atomgit.com/openeuler/kernel/merge_requests/20126 Mailing list address: https://mailweb.openeuler.org/archives/list/kernel@openeuler.org/message/3FZ...
participants (2)
-
LinKun -
patchwork bot