Kernel

[PATCH openEuler-1.0-LTS] sched/rt: Fix double enqueue caused by rt_effective_prio
by Xia Fukun 16 Nov '23
From: Peter Zijlstra <peterz(a)infradead.org>
mainline inclusion
from mainline-v6.5-rc7
commit f558c2b834ec27e75d37b1c860c139e7b7c3a8e4
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8H4SN
CVE: NA
--------------------------------
Double enqueues in rt runqueues (list) have been reported while running
a simple test that spawns a number of threads doing a short sleep/run
pattern while being concurrently setscheduled between rt and fair class.
WARNING: CPU: 3 PID: 2825 at kernel/sched/rt.c:1294 enqueue_task_rt+0x355/0x360
CPU: 3 PID: 2825 Comm: setsched__13
RIP: 0010:enqueue_task_rt+0x355/0x360
Call Trace:
__sched_setscheduler+0x581/0x9d0
_sched_setscheduler+0x63/0xa0
do_sched_setscheduler+0xa0/0x150
__x64_sys_sched_setscheduler+0x1a/0x30
do_syscall_64+0x33/0x40
entry_SYSCALL_64_after_hwframe+0x44/0xae
list_add double add: new=ffff9867cb629b40, prev=ffff9867cb629b40,
next=ffff98679fc67ca0.
kernel BUG at lib/list_debug.c:31!
invalid opcode: 0000 [#1] PREEMPT_RT SMP PTI
CPU: 3 PID: 2825 Comm: setsched__13
RIP: 0010:__list_add_valid+0x41/0x50
Call Trace:
enqueue_task_rt+0x291/0x360
__sched_setscheduler+0x581/0x9d0
_sched_setscheduler+0x63/0xa0
do_sched_setscheduler+0xa0/0x150
__x64_sys_sched_setscheduler+0x1a/0x30
do_syscall_64+0x33/0x40
entry_SYSCALL_64_after_hwframe+0x44/0xae
__sched_setscheduler() uses rt_effective_prio() to handle proper queuing
of priority boosted tasks that are setscheduled while being boosted.
rt_effective_prio() is however called twice per each
__sched_setscheduler() call: first directly by __sched_setscheduler()
before dequeuing the task and then by __setscheduler() to actually do
the priority change. If the priority of the pi_top_task is concurrently
being changed however, it might happen that the two calls return
different results. If, for example, the first call returned the same rt
priority the task was running at and the second one a fair priority, the
task won't be removed from the rt list (on_list still set) and then
enqueued in the fair runqueue. When eventually setscheduled back to rt
it will be seen as enqueued already and the WARNING/BUG be issued.
Fix this by calling rt_effective_prio() only once and then reusing the
return value. While at it refactor code as well for clarity. Concurrent
priority inheritance handling is still safe and will eventually converge
to a new state by following the inheritance chain(s).
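To make the race easier to follow, here is a simplified sketch of the old
call pattern (illustrative only, not the exact kernel code):

  __sched_setscheduler(p, attr)
      new_effective_prio = rt_effective_prio(p, newprio);     /* call #1 */
      if (new_effective_prio == oldprio)
          queue_flags &= ~DEQUEUE_MOVE;  /* task stays on its rt list */
      dequeue_task(rq, p, queue_flags);
      __setscheduler(rq, p, attr, pi)
          p->prio = rt_effective_prio(p, normal_prio(p));     /* call #2 */
          p->sched_class = ...;          /* picked from call #2's result */
      enqueue_task(rq, p, queue_flags);

If pi_top_task changes between the two calls, call #1 may still see the old
rt priority (DEQUEUE_MOVE is cleared, the task is left on the rt list) while
call #2 returns a fair priority, so the task is enqueued in the fair runqueue
with on_list still set; the next setscheduler back to rt then double-adds it.
The fix below computes rt_effective_prio() once and passes the single result
to __setscheduler_prio().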
Fixes: 0782e63bc6fe ("sched: Handle priority boosted tasks proper in setscheduler()")
[squashed Peterz changes; added changelog]
Reported-by: Mark Simmons <msimmons(a)redhat.com>
Signed-off-by: Juri Lelli <juri.lelli(a)redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz(a)infradead.org>
Link: https://lkml.kernel.org/r/20210803104501.38333-1-juri.lelli@redhat.com
Signed-off-by: Xia Fukun <xiafukun(a)huawei.com>
---
kernel/sched/core.c | 82 +++++++++++++++++++--------------------------
1 file changed, 35 insertions(+), 47 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7825ceaae0c4..ce0a9026450d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -784,12 +784,18 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
dequeue_task(rq, p, flags);
}
-/*
- * __normal_prio - return the priority that is based on the static prio
- */
-static inline int __normal_prio(struct task_struct *p)
+static inline int __normal_prio(int policy, int rt_prio, int nice)
{
- return p->static_prio;
+ int prio;
+
+ if (dl_policy(policy))
+ prio = MAX_DL_PRIO - 1;
+ else if (rt_policy(policy))
+ prio = MAX_RT_PRIO - 1 - rt_prio;
+ else
+ prio = NICE_TO_PRIO(nice);
+
+ return prio;
}
/*
@@ -801,15 +807,7 @@ static inline int __normal_prio(struct task_struct *p)
*/
static inline int normal_prio(struct task_struct *p)
{
- int prio;
-
- if (task_has_dl_policy(p))
- prio = MAX_DL_PRIO-1;
- else if (task_has_rt_policy(p))
- prio = MAX_RT_PRIO-1 - p->rt_priority;
- else
- prio = __normal_prio(p);
- return prio;
+ return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio));
}
/*
@@ -2327,7 +2325,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
} else if (PRIO_TO_NICE(p->static_prio) < 0)
p->static_prio = NICE_TO_PRIO(0);
- p->prio = p->normal_prio = __normal_prio(p);
+ p->prio = p->normal_prio = p->static_prio;
set_load_weight(p, false);
/*
@@ -3795,6 +3793,18 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag
}
EXPORT_SYMBOL(default_wake_function);
+static void __setscheduler_prio(struct task_struct *p, int prio)
+{
+ if (dl_prio(prio))
+ p->sched_class = &dl_sched_class;
+ else if (rt_prio(prio))
+ p->sched_class = &rt_sched_class;
+ else
+ p->sched_class = &fair_sched_class;
+
+ p->prio = prio;
+}
+
#ifdef CONFIG_RT_MUTEXES
static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
@@ -3909,22 +3919,19 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
queue_flag |= ENQUEUE_REPLENISH;
} else
p->dl.dl_boosted = 0;
- p->sched_class = &dl_sched_class;
} else if (rt_prio(prio)) {
if (dl_prio(oldprio))
p->dl.dl_boosted = 0;
if (oldprio < prio)
queue_flag |= ENQUEUE_HEAD;
- p->sched_class = &rt_sched_class;
} else {
if (dl_prio(oldprio))
p->dl.dl_boosted = 0;
if (rt_prio(oldprio))
p->rt.timeout = 0;
- p->sched_class = &fair_sched_class;
}
- p->prio = prio;
+ __setscheduler_prio(p, prio);
if (queued)
enqueue_task(rq, p, queue_flag);
@@ -4158,27 +4165,6 @@ static void __setscheduler_params(struct task_struct *p,
set_load_weight(p, true);
}
-/* Actually do priority change: must hold pi & rq lock. */
-static void __setscheduler(struct rq *rq, struct task_struct *p,
- const struct sched_attr *attr, bool keep_boost)
-{
- __setscheduler_params(p, attr);
-
- /*
- * Keep a potential priority boosting if called from
- * sched_setscheduler().
- */
- p->prio = normal_prio(p);
- if (keep_boost)
- p->prio = rt_effective_prio(p, p->prio);
-
- if (dl_prio(p->prio))
- p->sched_class = &dl_sched_class;
- else if (rt_prio(p->prio))
- p->sched_class = &rt_sched_class;
- else
- p->sched_class = &fair_sched_class;
-}
/*
* Check the target process has a UID that matches the current process's:
@@ -4200,10 +4186,8 @@ static int __sched_setscheduler(struct task_struct *p,
const struct sched_attr *attr,
bool user, bool pi)
{
- int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
- MAX_RT_PRIO - 1 - attr->sched_priority;
- int retval, oldprio, oldpolicy = -1, queued, running;
- int new_effective_prio, policy = attr->sched_policy;
+ int oldpolicy = -1, policy = attr->sched_policy;
+ int retval, oldprio, newprio, queued, running;
const struct sched_class *prev_class;
struct rq_flags rf;
int reset_on_fork;
@@ -4399,6 +4383,7 @@ static int __sched_setscheduler(struct task_struct *p,
p->sched_reset_on_fork = reset_on_fork;
oldprio = p->prio;
+ newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice);
if (pi) {
/*
* Take priority boosted tasks into account. If the new
@@ -4407,8 +4392,8 @@ static int __sched_setscheduler(struct task_struct *p,
* the runqueue. This will be done when the task deboost
* itself.
*/
- new_effective_prio = rt_effective_prio(p, newprio);
- if (new_effective_prio == oldprio)
+ newprio = rt_effective_prio(p, newprio);
+ if (newprio == oldprio)
queue_flags &= ~DEQUEUE_MOVE;
}
@@ -4420,7 +4405,10 @@ static int __sched_setscheduler(struct task_struct *p,
put_prev_task(rq, p);
prev_class = p->sched_class;
- __setscheduler(rq, p, attr, pi);
+ if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
+ __setscheduler_params(p, attr);
+ __setscheduler_prio(p, newprio);
+ }
if (queued) {
/*
--
2.34.1
From: Bibo Mao <maobibo(a)loongson.cn>
LoongArch inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8H1QC
------------------------------------------
1. When a CPU is hot-plugged out, it sits in the idle state and
arch_cpu_idle_dead() is called. The timer interrupt for this processor
should be disabled, otherwise the dead CPU keeps receiving timer
interrupts. A pending timer interrupt also prevents the vCPU from being
scheduled out during the halt-polling flow when the system runs in VM mode.
This patch adds a detailed implementation of the timer shutdown interface,
so that the timer is disabled when a CPU is plugged out.
2. For a woken-up CPU the entry address is 8 bytes; the low 4 bytes must be
checked first and then the high 4 bytes.
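As a rough sketch of the resulting wake-up polling order (illustrative only;
the accessors are the same ones used in the diff below):

  do {
      __asm__ __volatile__("idle 0\n\t");
      /* the low 4 bytes are written last, so poll them first */
      addr = iocsr_read32(LOONGARCH_IOCSR_MBUF0);
  } while (addr == 0);
  /* once the low half is non-zero, the full 8-byte entry is valid */
  addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);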
Signed-off-by: Bibo Mao <maobibo(a)loongson.cn>
Signed-off-by: Hongchen Zhang <zhanghongchen(a)loongson.cn>
---
arch/loongarch/kernel/smp.c | 17 +++++++++++++++--
arch/loongarch/kernel/time.c | 25 +++++++++----------------
2 files changed, 24 insertions(+), 18 deletions(-)
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index f5a94559d441..abf6484fac70 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -310,16 +310,29 @@ void play_dead(void)
register void (*init_fn)(void);
idle_task_exit();
- local_irq_enable();
+ /*
+ * vcpu can be woken up from idle emulation in vm even with irqs disabled
+ */
+ if (!cpu_has_hypervisor)
+ local_irq_enable();
set_csr_ecfg(ECFGF_IPI);
__this_cpu_write(cpu_state, CPU_DEAD);
__smp_mb();
do {
__asm__ __volatile__("idle 0\n\t");
- addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+ /*
+ * Mailbox info is written from another CPU with the IPI send
+ * method in csr_mail_send(); only 4 bytes can be written per
+ * IPI send.
+ *
+ * For an 8-byte mail the high 4 bytes are sent first and then
+ * the low 4 bytes, so the low 4 bytes are polled here first.
+ */
+ addr = iocsr_read32(LOONGARCH_IOCSR_MBUF0);
} while (addr == 0);
+ addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
init_fn = (void *)TO_CACHE(addr);
iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index 18fa38705da7..b2e8108bee10 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -59,21 +59,6 @@ static int constant_set_state_oneshot(struct clock_event_device *evt)
return 0;
}
-static int constant_set_state_oneshot_stopped(struct clock_event_device *evt)
-{
- unsigned long timer_config;
-
- raw_spin_lock(&state_lock);
-
- timer_config = csr_read64(LOONGARCH_CSR_TCFG);
- timer_config &= ~CSR_TCFG_EN;
- csr_write64(timer_config, LOONGARCH_CSR_TCFG);
-
- raw_spin_unlock(&state_lock);
-
- return 0;
-}
-
static int constant_set_state_periodic(struct clock_event_device *evt)
{
unsigned long period;
@@ -93,6 +78,14 @@ static int constant_set_state_periodic(struct clock_event_device *evt)
static int constant_set_state_shutdown(struct clock_event_device *evt)
{
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+ timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+ timer_config &= ~CSR_TCFG_EN;
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+ raw_spin_unlock(&state_lock);
+
return 0;
}
@@ -161,7 +154,7 @@ int constant_clockevent_init(void)
cd->rating = 320;
cd->cpumask = cpumask_of(cpu);
cd->set_state_oneshot = constant_set_state_oneshot;
- cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped;
+ cd->set_state_oneshot_stopped = constant_set_state_shutdown;
cd->set_state_periodic = constant_set_state_periodic;
cd->set_state_shutdown = constant_set_state_shutdown;
cd->set_next_event = constant_timer_next_event;
--
2.33.0
The patch sets include two parts:
1. patch 1~15: Rebase smart_grid from openeuler-1.0-LTS to OLK-5.10
2. patch 16~19: introduce smart_grid zone qos and cpufreq
Since v4:
1. Place the highest level task in current domain level itself in
sched_grid_prefer_cpus
Since v3:
1. fix CI warning
Since v2:
1. static alloc sg_zone cpumask.
2. fix some warning
Hui Tang (13):
sched: Introduce smart grid scheduling strategy for cfs
sched: fix smart grid usage count
sched: fix WARN found by deadlock detect
sched: Fix possible deadlock in tg_set_dynamic_affinity_mode
sched: Fix negative count for jump label
sched: Fix timer storm for smart grid
sched: fix dereference NULL pointers
sched: Fix memory leak on error branch
sched: clear credit count in error branch
sched: Adjust few parameters range for smart grid
sched: Delete redundant updates to p->prefer_cpus
sched: Fix memory leak for smart grid
sched: Fix null pointer dereference for sd->span
Wang ShaoBo (2):
sched: smart grid: init sched_grid_qos structure on QOS purpose
config: enable CONFIG_QOS_SCHED_SMART_GRID by default
Yipeng Zou (4):
sched: introduce smart grid qos zone
smart_grid: introduce /proc/pid/smart_grid_level
smart_grid: introduce smart_grid_strategy_ctrl sysctl
smart_grid: cpufreq: introduce smart_grid cpufreq control
arch/arm64/configs/openeuler_defconfig | 1 +
drivers/cpufreq/cpufreq.c | 234 ++++++++++++
fs/proc/array.c | 13 +
fs/proc/base.c | 76 ++++
include/linux/cpufreq.h | 11 +
include/linux/sched.h | 22 ++
include/linux/sched/grid_qos.h | 135 +++++++
include/linux/sched/sysctl.h | 5 +
init/Kconfig | 13 +
kernel/fork.c | 15 +-
kernel/sched/Makefile | 1 +
kernel/sched/core.c | 160 +++++++-
kernel/sched/fair.c | 496 ++++++++++++++++++++++++-
kernel/sched/grid/Makefile | 2 +
kernel/sched/grid/internal.h | 6 +
kernel/sched/grid/power.c | 27 ++
kernel/sched/grid/qos.c | 273 ++++++++++++++
kernel/sched/grid/stat.c | 47 +++
kernel/sched/sched.h | 48 +++
kernel/sysctl.c | 22 +-
mm/mempolicy.c | 12 +-
21 files changed, 1601 insertions(+), 18 deletions(-)
create mode 100644 include/linux/sched/grid_qos.h
create mode 100644 kernel/sched/grid/Makefile
create mode 100644 kernel/sched/grid/internal.h
create mode 100644 kernel/sched/grid/power.c
create mode 100644 kernel/sched/grid/qos.c
create mode 100644 kernel/sched/grid/stat.c
--
2.34.1

16 Nov '23
driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I89D3P
CVE: NA
------------------------------------------
This commit adds support for SPxxx RAID/HBA controllers.
RAID controllers support RAID 0/1/5/6/10/50/60 modes.
HBA controllers support RAID 0/1/10 modes.
Both RAID and HBA controllers support SAS/SATA HDDs and SSDs.
Signed-off-by: zhanglei <zhanglei48(a)huawei.com>
---
Documentation/scsi/hisi_raid.rst | 84 +
MAINTAINERS | 7 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/x86/configs/openeuler_defconfig | 1 +
drivers/scsi/Kconfig | 1 +
drivers/scsi/Makefile | 1 +
drivers/scsi/hisi_raid/Kconfig | 14 +
drivers/scsi/hisi_raid/Makefile | 7 +
drivers/scsi/hisi_raid/hiraid.h | 760 +++++
drivers/scsi/hisi_raid/hiraid_main.c | 3982 ++++++++++++++++++++++++
10 files changed, 4858 insertions(+)
create mode 100644 Documentation/scsi/hisi_raid.rst
create mode 100644 drivers/scsi/hisi_raid/Kconfig
create mode 100644 drivers/scsi/hisi_raid/Makefile
create mode 100644 drivers/scsi/hisi_raid/hiraid.h
create mode 100644 drivers/scsi/hisi_raid/hiraid_main.c
diff --git a/Documentation/scsi/hisi_raid.rst b/Documentation/scsi/hisi_raid.rst
new file mode 100644
index 000000000000..523a6763a7fd
--- /dev/null
+++ b/Documentation/scsi/hisi_raid.rst
@@ -0,0 +1,84 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================================
+hisi_raid - HUAWEI SCSI RAID Controller driver
+==============================================
+
+This file describes the hisi_raid SCSI driver for HUAWEI
+RAID controllers. The hisi_raid driver is the first
+generation RAID driver.
+
+For hisi_raid controller support, enable the hisi_raid driver
+when configuring the kernel.
+
+hisi_raid specific entries in /sys
+==================================
+
+hisi_raid host attributes
+-------------------------
+ - /sys/class/scsi_host/host*/csts_pp
+ - /sys/class/scsi_host/host*/csts_shst
+ - /sys/class/scsi_host/host*/csts_cfs
+ - /sys/class/scsi_host/host*/csts_rdy
+ - /sys/class/scsi_host/host*/fw_version
+
+ The host csts_pp attribute is a read only attribute. This attribute
+ indicates whether the controller is processing commands. If this attribute
+ is set to ‘1’, then the controller is processing commands normally. If
+ this attribute is cleared to ‘0’, then the controller has temporarily stopped
+ processing commands in order to handle an event (e.g., firmware activation).
+
+ The host csts_shst attribute is a read only attribute. This attribute
+ indicates the status of shutdown processing. The shutdown status values are defined
+ as:
+ ====== ==============================
+ Value Definition
+ ====== ==============================
+ 00b Normal operation
+ 01b Shutdown processing occurring
+ 10b Shutdown processing complete
+ 11b Reserved
+ ====== ==============================
+ The host csts_cfs attribute is a read only attribute. This attribute is set to
+ ’1’ when a fatal controller error occurred that could not be communicated in the
+ appropriate Completion Queue. This bit is cleared to ‘0’ when a fatal controller
+ error has not occurred.
+
+ The host csts_rdy attribute is a read only attribute. This attribute is set to
+ ‘1’ when the controller is ready to process submission queue entries.
+
+ The fw_version attribute is read-only and will return the driver version and the
+ controller firmware version.
+
+hisi_raid scsi device attributes
+--------------------------------
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_level
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_state
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_resync
+
+ The device raid_level attribute is a read only attribute. This attribute indicates
+ the RAID level of the scsi device (will display "NA" if the scsi device is not of virtual disk type).
+
+ The device raid_state attribute is read-only and indicates the RAID status of the scsi
+ device (will display "NA" if the scsi device is not of virtual disk type).
+
+ The device raid_resync attribute is read-only and indicates the RAID rebuild progress
+ of the scsi device (will display "NA" if the scsi device is not of virtual disk type).
+
+Supported devices
+=================
+
+ =================== ======= =======================================
+ PCI ID (pci.ids) OEM Product
+ =================== ======= =======================================
+ 19E5:3858 HUAWEI SP186-M-8i(HBA:8Ports)
+ 19E5:3858 HUAWEI SP186-M-16i(HBA:16Ports)
+ 19E5:3858 HUAWEI SP186-M-32i(HBA:32Ports)
+ 19E5:3858 HUAWEI SP186-M-40i(HBA:40Ports)
+ 19E5:3758 HUAWEI SP686C-M-16i(RAID:16Ports,2G cache)
+ 19E5:3758 HUAWEI SP686C-M-16i(RAID:16Ports,4G cache)
+ 19E5:3758 HUAWEI SP686C-MH-32i(RAID:32Ports,4G cache)
+ 19E5:3758 HUAWEI SP686C-M-40i(RAID:40Ports,2G cache)
+ 19E5:3758 HUAWEI SP686C-M-40i(RAID:40Ports,4G cache)
+ =================== ======= =======================================
+
diff --git a/MAINTAINERS b/MAINTAINERS
index a7815fd1072f..8324f56a2096 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8070,6 +8070,13 @@ M: Yonglong Liu <liuyonglong(a)huawei.com>
S: Supported
F: drivers/ptp/ptp_hisi.c
+HISI_RAID SCSI RAID DRIVERS
+M: Zhang Lei <zhanglei48(a)huawei.com>
+L: linux-scsi(a)vger.kernel.org
+S: Maintained
+F: Documentation/scsi/hisi_raid.rst
+F: drivers/scsi/hisi_raid/
+
HMM - Heterogeneous Memory Management
M: Jérôme Glisse <jglisse(a)redhat.com>
L: linux-mm(a)kvack.org
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index ec758f0530c1..b9a50ef6d768 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2413,6 +2413,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_SMARTPQI=m
+CONFIG_SCSI_HISI_RAID=m
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_MYRB is not set
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 5171aa50736b..43b5294326e6 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2369,6 +2369,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_SMARTPQI=m
+CONFIG_SCSI_HISI_RAID=m
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_BUSLOGIC is not set
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a9da1b2dec4a..41ef664cf0ed 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -473,6 +473,7 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/sssraid/Kconfig"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
+source "drivers/scsi/hisi_raid/Kconfig"
source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index c2a1efa16912..8f26dbb5ee37 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
+obj-$(CONFIG_SCSI_HISI_RAID) += hisi_raid/
obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
diff --git a/drivers/scsi/hisi_raid/Kconfig b/drivers/scsi/hisi_raid/Kconfig
new file mode 100644
index 000000000000..d402dc45a7c1
--- /dev/null
+++ b/drivers/scsi/hisi_raid/Kconfig
@@ -0,0 +1,14 @@
+#
+# Kernel configuration file for the hisi_raid
+#
+
+config SCSI_HISI_RAID
+ tristate "Huawei Hisi_Raid Adapter"
+ depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
+ depends on ARM64 || X86_64
+ help
+ This driver supports the hisi_raid SPxxx series RAID controllers, which have
+ a PCI Express Gen4 host interface and support SAS/SATA HDDs/SSDs.
+ To compile this driver as a module, choose M here: the module will
+ be called hisi_raid.
diff --git a/drivers/scsi/hisi_raid/Makefile b/drivers/scsi/hisi_raid/Makefile
new file mode 100644
index 000000000000..b71a675f4190
--- /dev/null
+++ b/drivers/scsi/hisi_raid/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the hisi_raid drivers.
+#
+
+obj-$(CONFIG_SCSI_HISI_RAID) += hiraid.o
+
+hiraid-objs := hiraid_main.o
diff --git a/drivers/scsi/hisi_raid/hiraid.h b/drivers/scsi/hisi_raid/hiraid.h
new file mode 100644
index 000000000000..1ebc3dd3f2ec
--- /dev/null
+++ b/drivers/scsi/hisi_raid/hiraid.h
@@ -0,0 +1,760 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 Huawei Technologies Co., Ltd */
+
+#ifndef __HIRAID_H_
+#define __HIRAID_H_
+
+#define HIRAID_HDD_PD_QD 64
+#define HIRAID_HDD_VD_QD 256
+#define HIRAID_SSD_PD_QD 64
+#define HIRAID_SSD_VD_QD 256
+
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+#define HIRAID_DEFAULT_MAX_CHANNEL 4
+#define HIRAID_DEFAULT_MAX_ID 240
+#define HIRAID_DEFAULT_MAX_LUN_PER_HOST 8
+
+#define FUA_MASK 0x08
+
+#define HIRAID_IO_SQES 7
+#define HIRAID_IO_CQES 4
+#define PRP_ENTRY_SIZE 8
+
+#define EXTRA_POOL_SIZE 256
+#define MAX_EXTRA_POOL_NUM 16
+#define MAX_CMD_PER_DEV 64
+#define MAX_CDB_LEN 16
+
+#define HIRAID_AQ_DEPTH 128
+#define HIRAID_ASYN_COMMANDS 16
+#define HIRAID_AQ_BLK_MQ_DEPTH (HIRAID_AQ_DEPTH - HIRAID_ASYN_COMMANDS)
+#define HIRAID_AQ_MQ_TAG_DEPTH (HIRAID_AQ_BLK_MQ_DEPTH - 1)
+
+#define HIRAID_ADMIN_QUEUE_NUM 1
+#define HIRAID_PTHRU_CMDS_PERQ 1
+#define HIRAID_TOTAL_PTCMDS(qn) (HIRAID_PTHRU_CMDS_PERQ * (qn))
+
+#define HIRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
+#define HIRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
+#define HIRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
+#define HIRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
+#define HIRAID_DEV_DISK_TYPE(attr) ((attr) & 0x1e)
+
+#define HIRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
+#define HIRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+
+#define HIRAID_CAP_MQES(cap) ((cap) & 0xffff)
+#define HIRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define HIRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define HIRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define HIRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define HIRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff)
+
+#define IO_SQE_SIZE sizeof(struct hiraid_scsi_io_cmd)
+#define ADMIN_SQE_SIZE sizeof(struct hiraid_admin_command)
+#define SQE_SIZE(qid) (((qid) > 0) ? IO_SQE_SIZE : ADMIN_SQE_SIZE)
+#define CQ_SIZE(depth) ((depth) * sizeof(struct hiraid_completion))
+#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid))
+
+#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE)
+
+#define IO_6_DEFAULT_TX_LEN 256
+
+#define MAX_DEV_ENTRY_PER_PAGE_4K 340
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+#define PCI_VENDOR_ID_HUAWEI_LOGIC 0x19E5
+#define HIRAID_SERVER_DEVICE_HBA_DID 0x3858
+#define HIRAID_SERVER_DEVICE_RAID_DID 0x3758
+
+enum {
+ HIRAID_SC_SUCCESS = 0x0,
+ HIRAID_SC_INVALID_OPCODE = 0x1,
+ HIRAID_SC_INVALID_FIELD = 0x2,
+
+ HIRAID_SC_ABORT_LIMIT = 0x103,
+ HIRAID_SC_ABORT_MISSING = 0x104,
+ HIRAID_SC_ASYNC_LIMIT = 0x105,
+
+ HIRAID_SC_DNR = 0x4000,
+};
+
+enum {
+ HIRAID_REG_CAP = 0x0000,
+ HIRAID_REG_CC = 0x0014,
+ HIRAID_REG_CSTS = 0x001c,
+ HIRAID_REG_AQA = 0x0024,
+ HIRAID_REG_ASQ = 0x0028,
+ HIRAID_REG_ACQ = 0x0030,
+ HIRAID_REG_DBS = 0x1000,
+};
+
+enum {
+ HIRAID_CC_ENABLE = 1 << 0,
+ HIRAID_CC_CSS_NVM = 0 << 4,
+ HIRAID_CC_MPS_SHIFT = 7,
+ HIRAID_CC_AMS_SHIFT = 11,
+ HIRAID_CC_SHN_SHIFT = 14,
+ HIRAID_CC_IOSQES_SHIFT = 16,
+ HIRAID_CC_IOCQES_SHIFT = 20,
+ HIRAID_CC_AMS_RR = 0 << HIRAID_CC_AMS_SHIFT,
+ HIRAID_CC_SHN_NONE = 0 << HIRAID_CC_SHN_SHIFT,
+ HIRAID_CC_IOSQES = HIRAID_IO_SQES << HIRAID_CC_IOSQES_SHIFT,
+ HIRAID_CC_IOCQES = HIRAID_IO_CQES << HIRAID_CC_IOCQES_SHIFT,
+ HIRAID_CC_SHN_NORMAL = 1 << HIRAID_CC_SHN_SHIFT,
+ HIRAID_CC_SHN_MASK = 3 << HIRAID_CC_SHN_SHIFT,
+ HIRAID_CSTS_CFS_SHIFT = 1,
+ HIRAID_CSTS_SHST_SHIFT = 2,
+ HIRAID_CSTS_PP_SHIFT = 5,
+ HIRAID_CSTS_RDY = 1 << 0,
+ HIRAID_CSTS_SHST_CMPLT = 2 << 2,
+ HIRAID_CSTS_SHST_MASK = 3 << 2,
+ HIRAID_CSTS_CFS_MASK = 1 << HIRAID_CSTS_CFS_SHIFT,
+ HIRAID_CSTS_PP_MASK = 1 << HIRAID_CSTS_PP_SHIFT,
+};
+
+enum {
+ HIRAID_ADMIN_DELETE_SQ = 0x00,
+ HIRAID_ADMIN_CREATE_SQ = 0x01,
+ HIRAID_ADMIN_DELETE_CQ = 0x04,
+ HIRAID_ADMIN_CREATE_CQ = 0x05,
+ HIRAID_ADMIN_ABORT_CMD = 0x08,
+ HIRAID_ADMIN_SET_FEATURES = 0x09,
+ HIRAID_ADMIN_ASYNC_EVENT = 0x0c,
+ HIRAID_ADMIN_GET_INFO = 0xc6,
+ HIRAID_ADMIN_RESET = 0xc8,
+};
+
+enum {
+ HIRAID_GET_CTRL_INFO = 0,
+ HIRAID_GET_DEVLIST_INFO = 1,
+};
+
+enum hiraid_rst_type {
+ HIRAID_RESET_TARGET = 0,
+ HIRAID_RESET_BUS = 1,
+};
+
+enum {
+ HIRAID_ASYN_EVENT_ERROR = 0,
+ HIRAID_ASYN_EVENT_NOTICE = 2,
+ HIRAID_ASYN_EVENT_VS = 7,
+};
+
+enum {
+ HIRAID_ASYN_DEV_CHANGED = 0x00,
+ HIRAID_ASYN_FW_ACT_START = 0x01,
+ HIRAID_ASYN_HOST_PROBING = 0x10,
+};
+
+enum {
+ HIRAID_ASYN_TIMESYN = 0x00,
+ HIRAID_ASYN_FW_ACT_FINISH = 0x02,
+ HIRAID_ASYN_EVENT_MIN = 0x80,
+ HIRAID_ASYN_EVENT_MAX = 0xff,
+};
+
+enum {
+ HIRAID_CMD_WRITE = 0x01,
+ HIRAID_CMD_READ = 0x02,
+
+ HIRAID_CMD_NONRW_NONE = 0x80,
+ HIRAID_CMD_NONRW_TODEV = 0x81,
+ HIRAID_CMD_NONRW_FROMDEV = 0x82,
+};
+
+enum {
+ HIRAID_QUEUE_PHYS_CONTIG = (1 << 0),
+ HIRAID_CQ_IRQ_ENABLED = (1 << 1),
+
+ HIRAID_FEATURE_NUM_QUEUES = 0x07,
+ HIRAID_FEATURE_ASYNC_EVENT = 0x0b,
+ HIRAID_FEATURE_TIMESTAMP = 0x0e,
+};
+
+enum hiraid_dev_state {
+ DEV_NEW,
+ DEV_LIVE,
+ DEV_RESETTING,
+ DEV_DELETING,
+ DEV_DEAD,
+};
+
+enum {
+ HIRAID_CARD_HBA,
+ HIRAID_CARD_RAID,
+};
+
+enum hiraid_cmd_type {
+ HIRAID_CMD_ADMIN,
+ HIRAID_CMD_PTHRU,
+};
+
+enum {
+ SQE_FLAG_SGL_METABUF = (1 << 6),
+ SQE_FLAG_SGL_METASEG = (1 << 7),
+ SQE_FLAG_SGL_ALL = SQE_FLAG_SGL_METABUF | SQE_FLAG_SGL_METASEG,
+};
+
+enum hiraid_cmd_state {
+ CMD_IDLE = 0,
+ CMD_FLIGHT = 1,
+ CMD_COMPLETE = 2,
+ CMD_TIMEOUT = 3,
+ CMD_TMO_COMPLETE = 4,
+};
+
+enum {
+ HIRAID_BSG_ADMIN,
+ HIRAID_BSG_IOPTHRU,
+};
+
+enum {
+ HIRAID_SAS_HDD_VD = 0x04,
+ HIRAID_SATA_HDD_VD = 0x08,
+ HIRAID_SAS_SSD_VD = 0x0c,
+ HIRAID_SATA_SSD_VD = 0x10,
+ HIRAID_NVME_SSD_VD = 0x14,
+ HIRAID_SAS_HDD_PD = 0x06,
+ HIRAID_SATA_HDD_PD = 0x0a,
+ HIRAID_SAS_SSD_PD = 0x0e,
+ HIRAID_SATA_SSD_PD = 0x12,
+ HIRAID_NVME_SSD_PD = 0x16,
+};
+
+enum {
+ DISPATCH_BY_CPU,
+ DISPATCH_BY_DISK,
+};
+
+struct hiraid_completion {
+ __le32 result;
+ union {
+ struct {
+ __u8 sense_len;
+ __u8 resv[3];
+ };
+ __le32 result1;
+ };
+ __le16 sq_head;
+ __le16 sq_id;
+ __le16 cmd_id;
+ __le16 status;
+};
+
+struct hiraid_ctrl_info {
+ __le32 nd;
+ __le16 max_cmds;
+ __le16 max_channel;
+ __le32 max_tgt_id;
+ __le16 max_lun;
+ __le16 max_num_sge;
+ __le16 lun_num_boot;
+ __u8 mdts;
+ __u8 acl;
+ __u8 asynevent;
+ __u8 card_type;
+ __u8 pt_use_sgl;
+ __u8 rsvd;
+ __le32 rtd3e;
+ __u8 sn[32];
+ __u8 fw_version[16];
+ __u8 rsvd1[4020];
+};
+
+struct hiraid_dev {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct Scsi_Host *shost;
+ struct hiraid_queue *queues;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_extra_pool[MAX_EXTRA_POOL_NUM];
+ void __iomem *bar;
+ u32 max_qid;
+ u32 num_vecs;
+ u32 queue_count;
+ u32 ioq_depth;
+ u32 db_stride;
+ u32 __iomem *dbs;
+ struct rw_semaphore dev_rwsem;
+ int numa_node;
+ u32 page_size;
+ u32 ctrl_config;
+ u32 online_queues;
+ u64 cap;
+ u32 scsi_qd;
+ u32 instance;
+ struct hiraid_ctrl_info *ctrl_info;
+ struct hiraid_dev_info *dev_info;
+
+ struct hiraid_cmd *adm_cmds;
+ struct list_head adm_cmd_list;
+ spinlock_t adm_cmd_lock;
+
+ struct hiraid_cmd *io_ptcmds;
+ struct list_head io_pt_list;
+ spinlock_t io_pt_lock;
+
+ struct work_struct scan_work;
+ struct work_struct timesyn_work;
+ struct work_struct reset_work;
+ struct work_struct fwact_work;
+
+ enum hiraid_dev_state state;
+ spinlock_t state_lock;
+
+ void *sense_buffer_virt;
+ dma_addr_t sense_buffer_phy;
+ u32 last_qcnt;
+ u8 hdd_dispatch;
+
+ struct request_queue *bsg_queue;
+};
+
+struct hiraid_sgl_desc {
+ __le64 addr;
+ __le32 length;
+ __u8 rsvd[3];
+ __u8 type;
+};
+
+union hiraid_data_ptr {
+ struct {
+ __le64 prp1;
+ __le64 prp2;
+ };
+ struct hiraid_sgl_desc sgl;
+};
+
+struct hiraid_admin_com_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __le32 cdw2[4];
+ union hiraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+struct hiraid_features {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __u64 rsvd2[2];
+ union hiraid_data_ptr dptr;
+ __le32 fid;
+ __le32 dword11;
+ __le32 dword12;
+ __le32 dword13;
+ __le32 dword14;
+ __le32 dword15;
+};
+
+struct hiraid_create_cq {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 cqid;
+ __le16 qsize;
+ __le16 cq_flags;
+ __le16 irq_vector;
+ __u32 rsvd12[4];
+};
+
+struct hiraid_create_sq {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 sqid;
+ __le16 qsize;
+ __le16 sq_flags;
+ __le16 cqid;
+ __u32 rsvd12[4];
+};
+
+struct hiraid_delete_queue {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __u32 rsvd1[9];
+ __le16 qid;
+ __u16 rsvd10;
+ __u32 rsvd11[5];
+};
+
+struct hiraid_get_info {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __u32 rsvd2[4];
+ union hiraid_data_ptr dptr;
+ __u8 type;
+ __u8 rsvd10[3];
+ __le32 cdw11;
+ __u32 rsvd12[4];
+};
+
+struct hiraid_usr_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ union {
+ struct {
+ __le16 subopcode;
+ __le16 rsvd1;
+ } info_0;
+ __le32 cdw2;
+ };
+ union {
+ struct {
+ __le16 data_len;
+ __le16 param_len;
+ } info_1;
+ __le32 cdw3;
+ };
+ __u64 metadata;
+ union hiraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+struct hiraid_abort_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __le16 sqid;
+ __le16 cid;
+ __u32 rsvd11[5];
+};
+
+struct hiraid_reset_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __u8 type;
+ __u8 rsvd10[3];
+ __u32 rsvd11[5];
+};
+
+struct hiraid_admin_command {
+ union {
+ struct hiraid_admin_com_cmd common;
+ struct hiraid_features features;
+ struct hiraid_create_cq create_cq;
+ struct hiraid_create_sq create_sq;
+ struct hiraid_delete_queue delete_queue;
+ struct hiraid_get_info get_info;
+ struct hiraid_abort_cmd abort;
+ struct hiraid_reset_cmd reset;
+ struct hiraid_usr_cmd usr_cmd;
+ };
+};
+
+struct hiraid_scsi_io_com_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __le32 cdw3[3];
+ union hiraid_data_ptr dptr;
+ __le32 cdw10[6];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __le32 cdw26[6];
+};
+
+struct hiraid_scsi_rw_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union hiraid_data_ptr dptr;
+ __le64 slba;
+ __le16 nlb;
+ __le16 control;
+ __u32 rsvd13[3];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct hiraid_scsi_nonrw_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_length;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union hiraid_data_ptr dptr;
+ __u32 rsvd10[5];
+ __le32 buf_len;
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct hiraid_scsi_io_cmd {
+ union {
+ struct hiraid_scsi_io_com_cmd common;
+ struct hiraid_scsi_rw_cmd rw;
+ struct hiraid_scsi_nonrw_cmd nonrw;
+ };
+};
+
+struct hiraid_passthru_common_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 data_len;
+ __u16 param_len;
+ } info_1;
+ __u32 cdw3;
+ };
+ __u64 metadata;
+
+ __u64 addr;
+ __u64 prp2;
+
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct hiraid_passthru_io_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 res_sense_len;
+ __u8 cdb_len;
+ __u8 rsvd0;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_1;
+ __u32 cdw3;
+ };
+ union {
+ struct {
+ __u16 rsvd;
+ __u16 param_len;
+ } info_2;
+ __u32 cdw4;
+ };
+ __u32 cdw5;
+ __u64 addr;
+ __u64 prp2;
+ union {
+ struct {
+ __u16 eid;
+ __u16 sid;
+ } info_3;
+ __u32 cdw10;
+ };
+ union {
+ struct {
+ __u16 did;
+ __u8 did_flag;
+ __u8 rsvd2;
+ } info_4;
+ __u32 cdw11;
+ };
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 data_len;
+ __u32 cdw16;
+ __u32 cdw17;
+ __u32 cdw18;
+ __u32 cdw19;
+ __u32 cdw20;
+ __u32 cdw21;
+ __u32 cdw22;
+ __u32 cdw23;
+ __u64 sense_addr;
+ __u32 cdw26[4];
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct hiraid_bsg_request {
+ u32 msgcode;
+ u32 control;
+ union {
+ struct hiraid_passthru_common_cmd admcmd;
+ struct hiraid_passthru_io_cmd pthrucmd;
+ };
+};
+
+struct hiraid_cmd {
+ u16 qid;
+ u16 cid;
+ u32 result0;
+ u32 result1;
+ u16 status;
+ void *priv;
+ enum hiraid_cmd_state state;
+ struct completion cmd_done;
+ struct list_head list;
+};
+
+struct hiraid_queue {
+ struct hiraid_dev *hdev;
+ spinlock_t sq_lock;
+
+ spinlock_t cq_lock ____cacheline_aligned_in_smp;
+
+ void *sq_cmds;
+
+ struct hiraid_completion *cqes;
+
+ dma_addr_t sq_buffer_phy;
+ dma_addr_t cq_buffer_phy;
+ u32 __iomem *q_db;
+ u8 cq_phase;
+ u8 sqes;
+ u16 qid;
+ u16 sq_tail;
+ u16 cq_head;
+ u16 last_cq_head;
+ u16 q_depth;
+ s16 cq_vector;
+ atomic_t inflight;
+ void *sense_buffer_virt;
+ dma_addr_t sense_buffer_phy;
+ struct dma_pool *prp_small_pool;
+};
+
+struct hiraid_mapmange {
+ struct hiraid_queue *hiraidq;
+ enum hiraid_cmd_state state;
+ u16 cid;
+ int page_cnt;
+ u32 sge_cnt;
+ u32 len;
+ bool use_sgl;
+ dma_addr_t first_dma;
+ void *sense_buffer_virt;
+ dma_addr_t sense_buffer_phy;
+ struct scatterlist *sgl;
+ void *list[0];
+};
+
+struct hiraid_vd_info {
+ __u8 name[32];
+ __le16 id;
+ __u8 rg_id;
+ __u8 rg_level;
+ __u8 sg_num;
+ __u8 sg_disk_num;
+ __u8 vd_status;
+ __u8 vd_type;
+ __u8 rsvd1[4056];
+};
+
+struct bgtask_info {
+ __u8 type;
+ __u8 progress;
+ __u8 rate;
+ __u8 rsvd0;
+ __le16 vd_id;
+ __le16 time_left;
+ __u8 rsvd1[4];
+};
+
+struct hiraid_bgtask {
+ __u8 sw;
+ __u8 task_num;
+ __u8 rsvd[6];
+ struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
+struct hiraid_dev_info {
+ __le32 hdid;
+ __le16 target;
+ __u8 channel;
+ __u8 lun;
+ __u8 attr;
+ __u8 flag;
+ __le16 max_io_kb;
+};
+
+struct hiraid_dev_list {
+ __le32 dev_num;
+ __u32 rsvd0[3];
+ struct hiraid_dev_info devinfo[MAX_DEV_ENTRY_PER_PAGE_4K];
+};
+
+struct hiraid_sdev_hostdata {
+ u32 hdid;
+ u16 max_io_kb;
+ u8 attr;
+ u8 flag;
+ u8 rg_id;
+ u8 hwq;
+ u16 pend_count;
+};
+
+#endif
+
diff --git a/drivers/scsi/hisi_raid/hiraid_main.c b/drivers/scsi/hisi_raid/hiraid_main.c
new file mode 100644
index 000000000000..b9ffa642479c
--- /dev/null
+++ b/drivers/scsi/hisi_raid/hiraid_main.c
@@ -0,0 +1,3982 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Huawei Technologies Co., Ltd */
+
+/* Huawei Raid Series Linux Driver */
+
+#define pr_fmt(fmt) "hiraid: " fmt
+
+#include <linux/sched/signal.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/once.h>
+#include <linux/debugfs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <asm/unaligned.h>
+#include <linux/sort.h>
+#include <target/target_core_backend.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/sg.h>
+
+#include "hiraid.h"
+
+static u32 admin_tmout = 60;
+module_param(admin_tmout, uint, 0644);
+MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
+
+static u32 scmd_tmout_rawdisk = 180;
+module_param(scmd_tmout_rawdisk, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_rawdisk, "scsi commands timeout for rawdisk(seconds)");
+
+static u32 scmd_tmout_vd = 180;
+module_param(scmd_tmout_vd, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_vd, "scsi commands timeout for vd(seconds)");
+
+static bool max_io_force;
+module_param(max_io_force, bool, 0644);
+MODULE_PARM_DESC(max_io_force, "force max_hw_sectors_kb = 1024, default false(performance first)");
+
+static bool work_mode;
+module_param(work_mode, bool, 0444);
+MODULE_PARM_DESC(work_mode, "work mode switch, default false for multi hw queues");
+
+#define MAX_IO_QUEUES 128
+#define MIN_IO_QUEUES 1
+
+static int ioq_num_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0;
+ int ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < MIN_IO_QUEUES || n > MAX_IO_QUEUES)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops max_hwq_num_ops = {
+ .set = ioq_num_set,
+ .get = param_get_uint,
+};
+
+static u32 max_hwq_num = 128;
+module_param_cb(max_hwq_num, &max_hwq_num_ops, &max_hwq_num, 0444);
+MODULE_PARM_DESC(max_hwq_num, "max num of hw io queues, should >= 1, default 128");
+
+static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0;
+ int ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops io_queue_depth_ops = {
+ .set = io_queue_depth_set,
+ .get = param_get_uint,
+};
+
+static u32 io_queue_depth = 1024;
+module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
+
+static u32 log_debug_switch;
+module_param(log_debug_switch, uint, 0644);
+MODULE_PARM_DESC(log_debug_switch, "set log state, default zero for switch off");
+
+static int extra_pool_num_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+ if (n > MAX_EXTRA_POOL_NUM)
+ n = MAX_EXTRA_POOL_NUM;
+ if (n < 1)
+ n = 1;
+ *((u8 *)kp->arg) = n;
+
+ return 0;
+}
+
+static const struct kernel_param_ops small_pool_num_ops = {
+ .set = extra_pool_num_set,
+ .get = param_get_byte,
+};
+
+/* It was found that the spinlock of a single pool is heavily
+ * contended across multiple CPUs, so multiple pools are
+ * introduced to reduce the contention.
+ */
+static unsigned char extra_pool_num = 4;
+module_param_cb(extra_pool_num, &small_pool_num_ops, &extra_pool_num, 0644);
+MODULE_PARM_DESC(extra_pool_num, "set prp extra pool num, default 4, MAX 16");
+
+static void hiraid_handle_async_notice(struct hiraid_dev *hdev, u32 result);
+static void hiraid_handle_async_vs(struct hiraid_dev *hdev, u32 result, u32 result1);
+
+static struct class *hiraid_class;
+
+#define HIRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2)
+
+static struct workqueue_struct *work_queue;
+
+#define dev_log_dbg(dev, fmt, ...) do { \
+ if (unlikely(log_debug_switch)) \
+ dev_info(dev, "[%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define HIRAID_DRV_VERSION "1.1.0.0"
+
+#define ADMIN_TIMEOUT (admin_tmout * HZ)
+#define USRCMD_TIMEOUT (180 * HZ)
+#define CTL_RST_TIME (600 * HZ)
+
+#define HIRAID_WAIT_ABNL_CMD_TIMEOUT 6
+#define HIRAID_WAIT_RST_IO_TIMEOUT 10
+
+#define HIRAID_DMA_MSK_BIT_MAX 64
+
+#define IOQ_PT_DATA_LEN 4096
+#define IOQ_PT_SGL_DATA_LEN (1024 * 1024)
+
+#define MAX_CAN_QUEUE (4096 - 1)
+#define MIN_CAN_QUEUE (1024 - 1)
+
+enum SENSE_STATE_CODE {
+ SENSE_STATE_OK = 0,
+ SENSE_STATE_NEED_CHECK,
+ SENSE_STATE_ERROR,
+ SENSE_STATE_EP_PCIE_ERROR,
+ SENSE_STATE_NAC_DMA_ERROR,
+ SENSE_STATE_ABORTED,
+ SENSE_STATE_NEED_RETRY
+};
+
+enum {
+ FW_EH_OK = 0,
+ FW_EH_DEV_NONE = 0x701
+};
+
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60", "NA"};
+
+static const char * const raid_states[] = {
+ "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED", "FORMATTING", "SANITIZING",
+ "INITIALIZING", "INITIALIZE_FAIL", "DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
+static int hiraid_remap_bar(struct hiraid_dev *hdev, u32 size)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (size > pci_resource_len(pdev, 0)) {
+ dev_err(hdev->dev, "input size[%u] exceed bar0 length[%llu]\n",
+ size, pci_resource_len(pdev, 0));
+ return -ENOMEM;
+ }
+
+ if (hdev->bar)
+ iounmap(hdev->bar);
+
+ hdev->bar = ioremap(pci_resource_start(pdev, 0), size);
+ if (!hdev->bar) {
+ dev_err(hdev->dev, "ioremap for bar0 failed\n");
+ return -ENOMEM;
+ }
+ hdev->dbs = hdev->bar + HIRAID_REG_DBS;
+
+ return 0;
+}
+
+static int hiraid_dev_map(struct hiraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ ret = pci_request_mem_regions(pdev, "hiraid");
+ if (ret) {
+ dev_err(hdev->dev, "fail to request memory regions\n");
+ return ret;
+ }
+
+ ret = hiraid_remap_bar(hdev, HIRAID_REG_DBS + 4096);
+ if (ret) {
+ pci_release_mem_regions(pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hiraid_dev_unmap(struct hiraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (hdev->bar) {
+ iounmap(hdev->bar);
+ hdev->bar = NULL;
+ }
+ pci_release_mem_regions(pdev);
+}
+
+static int hiraid_pci_enable(struct hiraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = -ENOMEM;
+ u64 maskbit = HIRAID_DMA_MSK_BIT_MAX;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(hdev->dev, "enable pci device memory resources failed\n");
+ return ret;
+ }
+ pci_set_master(pdev);
+
+ if (readl(hdev->bar + HIRAID_REG_CSTS) == U32_MAX) {
+ ret = -ENODEV;
+ dev_err(hdev->dev, "read CSTS register failed\n");
+ goto disable;
+ }
+
+ hdev->cap = lo_hi_readq(hdev->bar + HIRAID_REG_CAP);
+ hdev->ioq_depth = min_t(u32, HIRAID_CAP_MQES(hdev->cap) + 1, io_queue_depth);
+ hdev->db_stride = 1 << HIRAID_CAP_STRIDE(hdev->cap);
+
+ maskbit = HIRAID_CAP_DMAMASK(hdev->cap);
+ if (maskbit < 32 || maskbit > HIRAID_DMA_MSK_BIT_MAX) {
+ dev_err(hdev->dev, "err, dma mask invalid[%llu], set to default\n", maskbit);
+ maskbit = HIRAID_DMA_MSK_BIT_MAX;
+ }
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_err(hdev->dev, "set dma mask[32] and coherent failed\n");
+ goto disable;
+ }
+ dev_info(hdev->dev, "set dma mask[32] success\n");
+ } else {
+ dev_info(hdev->dev, "set dma mask[%llu] success\n", maskbit);
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(hdev->dev, "allocate one IRQ for setup admin queue failed\n");
+ goto disable;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_save_state(pdev);
+
+ return 0;
+
+disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+
+/*
+ * The first and last prp may not cover a full page, so counting the
+ * total nprps for the io with size + page_size may be a slight
+ * overestimate.
+ *
+ * Each prp address takes 8 bytes. Each page may also hold one chained
+ * prp_list address that does not describe io data, so the divisor is
+ * PAGE_SIZE - 8; again a slight overestimate.
+ */
+static int hiraid_prp_pagenum(struct hiraid_dev *hdev)
+{
+ u32 size = 1U << ((hdev->ctrl_info->mdts) * 1U) << 12;
+ u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
+
+ return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, hdev->page_size - PRP_ENTRY_SIZE);
+}
+
+/*
+ * Calculates the number of pages needed for the SGL segments. For example a 4k
+ * page can accommodate 256 SGL descriptors.
+ */
+static int hiraid_sgl_pagenum(struct hiraid_dev *hdev)
+{
+ u32 nsge = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+
+ return DIV_ROUND_UP(nsge * sizeof(struct hiraid_sgl_desc), hdev->page_size);
+}
+
+static inline void **hiraid_mapbuf_list(struct hiraid_mapmange *mapbuf)
+{
+ return mapbuf->list;
+}
+
+static u32 hiraid_get_max_cmd_size(struct hiraid_dev *hdev)
+{
+ u32 alloc_size = sizeof(__le64 *) * max(hiraid_prp_pagenum(hdev), hiraid_sgl_pagenum(hdev));
+
+ dev_info(hdev->dev, "mapbuf size[%lu], alloc_size[%u]\n",
+ sizeof(struct hiraid_mapmange), alloc_size);
+
+ return sizeof(struct hiraid_mapmange) + alloc_size;
+}
+
+static int hiraid_build_passthru_prp(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf)
+{
+ struct scatterlist *sg = mapbuf->sgl;
+ __le64 *phy_regpage, *prior_list;
+ u64 buf_addr = sg_dma_address(sg);
+ int buf_length = sg_dma_len(sg);
+ u32 page_size = hdev->page_size;
+ int offset = buf_addr & (page_size - 1);
+ void **list = hiraid_mapbuf_list(mapbuf);
+ int maplen = mapbuf->len;
+ struct dma_pool *pool;
+ dma_addr_t buffer_phy;
+ int i;
+
+ maplen -= (page_size - offset);
+ if (maplen <= 0) {
+ mapbuf->first_dma = 0;
+ return 0;
+ }
+
+ buf_length -= (page_size - offset);
+ if (buf_length) {
+ buf_addr += (page_size - offset);
+ } else {
+ sg = sg_next(sg);
+ buf_addr = sg_dma_address(sg);
+ buf_length = sg_dma_len(sg);
+ }
+
+ if (maplen <= page_size) {
+ mapbuf->first_dma = buf_addr;
+ return 0;
+ }
+
+ pool = hdev->prp_page_pool;
+ mapbuf->page_cnt = 1;
+
+ phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!phy_regpage) {
+ dev_err_ratelimited(hdev->dev, "allocate first admin prp_list memory failed\n");
+ mapbuf->first_dma = buf_addr;
+ mapbuf->page_cnt = -1;
+ return -ENOMEM;
+ }
+ list[0] = phy_regpage;
+ mapbuf->first_dma = buffer_phy;
+ i = 0;
+ for (;;) {
+ if (i == page_size / PRP_ENTRY_SIZE) {
+ prior_list = phy_regpage;
+
+ phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!phy_regpage) {
+ dev_err_ratelimited(hdev->dev, "allocate [%d]th admin prp list memory failed\n",
+ mapbuf->page_cnt + 1);
+ return -ENOMEM;
+ }
+ list[mapbuf->page_cnt++] = phy_regpage;
+ phy_regpage[0] = prior_list[i - 1];
+ prior_list[i - 1] = cpu_to_le64(buffer_phy);
+ i = 1;
+ }
+ phy_regpage[i++] = cpu_to_le64(buf_addr);
+ buf_addr += page_size;
+ buf_length -= page_size;
+ maplen -= page_size;
+ if (maplen <= 0)
+ break;
+ if (buf_length > 0)
+ continue;
+ if (unlikely(buf_length < 0))
+ goto bad_admin_sgl;
+ sg = sg_next(sg);
+ buf_addr = sg_dma_address(sg);
+ buf_length = sg_dma_len(sg);
+ }
+
+ return 0;
+
+bad_admin_sgl:
+ dev_err(hdev->dev, "setup prps, invalid admin SGL for payload[%d] nents[%d]\n",
+ mapbuf->len, mapbuf->sge_cnt);
+ return -EIO;
+}
+
+static int hiraid_build_prp(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf)
+{
+ struct scatterlist *sg = mapbuf->sgl;
+ __le64 *phy_regpage, *prior_list;
+ u64 buf_addr = sg_dma_address(sg);
+ int buf_length = sg_dma_len(sg);
+ u32 page_size = hdev->page_size;
+ int offset = buf_addr & (page_size - 1);
+ void **list = hiraid_mapbuf_list(mapbuf);
+ int maplen = mapbuf->len;
+ struct dma_pool *pool;
+ dma_addr_t buffer_phy;
+ int nprps, i;
+
+ maplen -= (page_size - offset);
+ if (maplen <= 0) {
+ mapbuf->first_dma = 0;
+ return 0;
+ }
+
+ buf_length -= (page_size - offset);
+ if (buf_length) {
+ buf_addr += (page_size - offset);
+ } else {
+ sg = sg_next(sg);
+ buf_addr = sg_dma_address(sg);
+ buf_length = sg_dma_len(sg);
+ }
+
+ if (maplen <= page_size) {
+ mapbuf->first_dma = buf_addr;
+ return 0;
+ }
+
+ nprps = DIV_ROUND_UP(maplen, page_size);
+ if (nprps <= (EXTRA_POOL_SIZE / PRP_ENTRY_SIZE)) {
+ pool = mapbuf->hiraidq->prp_small_pool;
+ mapbuf->page_cnt = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ mapbuf->page_cnt = 1;
+ }
+
+ phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!phy_regpage) {
+ dev_err_ratelimited(hdev->dev, "allocate first prp_list memory failed\n");
+ mapbuf->first_dma = buf_addr;
+ mapbuf->page_cnt = -1;
+ return -ENOMEM;
+ }
+ list[0] = phy_regpage;
+ mapbuf->first_dma = buffer_phy;
+ i = 0;
+ for (;;) {
+ if (i == page_size / PRP_ENTRY_SIZE) {
+ prior_list = phy_regpage;
+
+ phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!phy_regpage) {
+ dev_err_ratelimited(hdev->dev, "allocate [%d]th prp list memory failed\n",
+ mapbuf->page_cnt + 1);
+ return -ENOMEM;
+ }
+ list[mapbuf->page_cnt++] = phy_regpage;
+ phy_regpage[0] = prior_list[i - 1];
+ prior_list[i - 1] = cpu_to_le64(buffer_phy);
+ i = 1;
+ }
+ phy_regpage[i++] = cpu_to_le64(buf_addr);
+ buf_addr += page_size;
+ buf_length -= page_size;
+ maplen -= page_size;
+ if (maplen <= 0)
+ break;
+ if (buf_length > 0)
+ continue;
+ if (unlikely(buf_length < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ buf_addr = sg_dma_address(sg);
+ buf_length = sg_dma_len(sg);
+ }
+
+ return 0;
+
+bad_sgl:
+ dev_err(hdev->dev, "setup prps, invalid SGL for payload[%d] nents[%d]\n",
+ mapbuf->len, mapbuf->sge_cnt);
+ return -EIO;
+}
+
+#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct hiraid_sgl_desc))
+
+static void hiraid_submit_cmd(struct hiraid_queue *hiraidq, const void *cmd)
+{
+ u32 sqes = SQE_SIZE(hiraidq->qid);
+ unsigned long flags;
+ struct hiraid_admin_com_cmd *acd = (struct hiraid_admin_com_cmd *)cmd;
+
+ spin_lock_irqsave(&hiraidq->sq_lock, flags);
+ memcpy((hiraidq->sq_cmds + sqes * hiraidq->sq_tail), cmd, sqes);
+ if (++hiraidq->sq_tail == hiraidq->q_depth)
+ hiraidq->sq_tail = 0;
+
+ writel(hiraidq->sq_tail, hiraidq->q_db);
+ spin_unlock_irqrestore(&hiraidq->sq_lock, flags);
+
+ dev_log_dbg(hiraidq->hdev->dev, "cid[%d] qid[%d] opcode[0x%x] flags[0x%x] hdid[%u]\n",
+ le16_to_cpu(acd->cmd_id), hiraidq->qid, acd->opcode, acd->flags,
+ le32_to_cpu(acd->hdid));
+}
+
+static inline bool hiraid_is_rw_scmd(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * checks if prps can be built for the IO cmd
+ */
+static bool hiraid_is_prp(struct hiraid_dev *hdev, struct scatterlist *sgl, u32 nsge)
+{
+ struct scatterlist *sg = sgl;
+ u32 page_mask = hdev->page_size - 1;
+ bool is_prp = true;
+ u32 i = 0;
+
+ for_each_sg(sgl, sg, nsge, i) {
+ /*
+ * Middle sges must have a data length that is a multiple of
+ * page_size and a page_size-aligned address.
+ */
+ if (i != 0 && i != nsge - 1) {
+ if ((sg_dma_len(sg) & page_mask) ||
+ (sg_dma_address(sg) & page_mask)) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ /*
+ * The first sge addr plus the data length meets
+ * the page_size alignment.
+ */
+ if (nsge > 1 && i == 0) {
+ if ((sg_dma_address(sg) + sg_dma_len(sg)) & page_mask) {
+ is_prp = false;
+ break;
+ }
+ }
+
+ /* The last sge addr meets the page_size alignment. */
+ if (nsge > 1 && i == (nsge - 1)) {
+ if (sg_dma_address(sg) & page_mask) {
+ is_prp = false;
+ break;
+ }
+ }
+ }
+
+ return is_prp;
+}
+
+enum {
+ HIRAID_SGL_FMT_DATA_DESC = 0x00,
+ HIRAID_SGL_FMT_SEG_DESC = 0x02,
+ HIRAID_SGL_FMT_LAST_SEG_DESC = 0x03,
+ HIRAID_KEY_SGL_FMT_DATA_DESC = 0x04,
+ HIRAID_TRANSPORT_SGL_DATA_DESC = 0x05
+};
+
+static void hiraid_sgl_set_data(struct hiraid_sgl_desc *sge, struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = HIRAID_SGL_FMT_DATA_DESC << 4;
+}
+
+static void hiraid_sgl_set_seg(struct hiraid_sgl_desc *sge, dma_addr_t buffer_phy, int entries)
+{
+ sge->addr = cpu_to_le64(buffer_phy);
+ if (entries <= SGES_PER_PAGE) {
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = HIRAID_SGL_FMT_LAST_SEG_DESC << 4;
+ } else {
+ sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->type = HIRAID_SGL_FMT_SEG_DESC << 4;
+ }
+}
+
+static int hiraid_build_passthru_sgl(struct hiraid_dev *hdev,
+ struct hiraid_admin_command *admin_cmd,
+ struct hiraid_mapmange *mapbuf)
+{
+ struct hiraid_sgl_desc *sg_list, *link, *old_sg_list;
+ struct scatterlist *sg = mapbuf->sgl;
+ void **list = hiraid_mapbuf_list(mapbuf);
+ struct dma_pool *pool;
+ int nsge = mapbuf->sge_cnt;
+ dma_addr_t buffer_phy;
+ int i = 0;
+
+ admin_cmd->common.flags |= SQE_FLAG_SGL_METABUF;
+
+ if (nsge == 1) {
+ hiraid_sgl_set_data(&admin_cmd->common.dptr.sgl, sg);
+ return 0;
+ }
+
+ pool = hdev->prp_page_pool;
+ mapbuf->page_cnt = 1;
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev, "allocate first admin sgl_list failed\n");
+ mapbuf->page_cnt = -1;
+ return -ENOMEM;
+ }
+
+ list[0] = sg_list;
+ mapbuf->first_dma = buffer_phy;
+ hiraid_sgl_set_seg(&admin_cmd->common.dptr.sgl, buffer_phy, nsge);
+ do {
+ if (i == SGES_PER_PAGE) {
+ old_sg_list = sg_list;
+ link = &old_sg_list[SGES_PER_PAGE - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev, "allocate [%d]th admin sgl_list failed\n",
+ mapbuf->page_cnt + 1);
+ return -ENOMEM;
+ }
+ list[mapbuf->page_cnt++] = sg_list;
+
+ i = 0;
+ memcpy(&sg_list[i++], link, sizeof(*link));
+ hiraid_sgl_set_seg(link, buffer_phy, nsge);
+ }
+
+ hiraid_sgl_set_data(&sg_list[i++], sg);
+ sg = sg_next(sg);
+ } while (--nsge > 0);
+
+ return 0;
+}
+
+
+static int hiraid_build_sgl(struct hiraid_dev *hdev, struct hiraid_scsi_io_cmd *io_cmd,
+ struct hiraid_mapmange *mapbuf)
+{
+ struct hiraid_sgl_desc *sg_list, *link, *old_sg_list;
+ struct scatterlist *sg = mapbuf->sgl;
+ void **list = hiraid_mapbuf_list(mapbuf);
+ struct dma_pool *pool;
+ int nsge = mapbuf->sge_cnt;
+ dma_addr_t buffer_phy;
+ int i = 0;
+
+ io_cmd->common.flags |= SQE_FLAG_SGL_METABUF;
+
+ if (nsge == 1) {
+ hiraid_sgl_set_data(&io_cmd->common.dptr.sgl, sg);
+ return 0;
+ }
+
+ if (nsge <= (EXTRA_POOL_SIZE / sizeof(struct hiraid_sgl_desc))) {
+ pool = mapbuf->hiraidq->prp_small_pool;
+ mapbuf->page_cnt = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ mapbuf->page_cnt = 1;
+ }
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev, "allocate first sgl_list failed\n");
+ mapbuf->page_cnt = -1;
+ return -ENOMEM;
+ }
+
+ list[0] = sg_list;
+ mapbuf->first_dma = buffer_phy;
+ hiraid_sgl_set_seg(&io_cmd->common.dptr.sgl, buffer_phy, nsge);
+ do {
+ if (i == SGES_PER_PAGE) {
+ old_sg_list = sg_list;
+ link = &old_sg_list[SGES_PER_PAGE - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev, "allocate [%d]th sgl_list failed\n",
+ mapbuf->page_cnt + 1);
+ return -ENOMEM;
+ }
+ list[mapbuf->page_cnt++] = sg_list;
+
+ i = 0;
+ memcpy(&sg_list[i++], link, sizeof(*link));
+ hiraid_sgl_set_seg(link, buffer_phy, nsge);
+ }
+
+ hiraid_sgl_set_data(&sg_list[i++], sg);
+ sg = sg_next(sg);
+ } while (--nsge > 0);
+
+ return 0;
+}
+
+#define HIRAID_RW_FUA BIT(14)
+
+static int hiraid_setup_rw_cmd(struct hiraid_dev *hdev,
+ struct hiraid_scsi_rw_cmd *io_cmd,
+ struct scsi_cmnd *scmd)
+{
+ u32 start_lba_lo, start_lba_hi;
+ u32 datalength = 0;
+ u16 control = 0;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ io_cmd->opcode = HIRAID_CMD_WRITE;
+ } else if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
+ io_cmd->opcode = HIRAID_CMD_READ;
+ } else {
+ dev_err(hdev->dev, "invalid RW_IO for unsupported data direction[%d]\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* 6-byte READ(0x08) or WRITE(0x0A) cdb */
+ if (scmd->cmd_len == 6) {
+ datalength = (u32)(scmd->cmnd[4] == 0 ?
+ IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
+ start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
+
+ start_lba_lo &= 0x1FFFFF;
+ }
+
+ /* 10-byte READ(0x28) or WRITE(0x2A) cdb */
+ else if (scmd->cmd_len == 10) {
+ datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= HIRAID_RW_FUA;
+ }
+
+ /* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
+ else if (scmd->cmd_len == 12) {
+ datalength = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= HIRAID_RW_FUA;
+ }
+ /* 16-byte READ(0x88) or WRITE(0x8A) cdb */
+ else if (scmd->cmd_len == 16) {
+ datalength = get_unaligned_be32(&scmd->cmnd[10]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= HIRAID_RW_FUA;
+ }
+
+ if (unlikely(datalength > U16_MAX || datalength == 0)) {
+ dev_err(hdev->dev, "invalid IO for illegal transfer data length[%u]\n", datalength);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ io_cmd->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo);
+ /* 0base for nlb */
+ io_cmd->nlb = cpu_to_le16((u16)(datalength - 1));
+ io_cmd->control = cpu_to_le16(control);
+
+ return 0;
+}
+
+static int hiraid_setup_nonrw_cmd(struct hiraid_dev *hdev,
+ struct hiraid_scsi_nonrw_cmd *io_cmd, struct scsi_cmnd *scmd)
+{
+ io_cmd->buf_len = cpu_to_le32(scsi_bufflen(scmd));
+
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ io_cmd->opcode = HIRAID_CMD_NONRW_NONE;
+ break;
+ case DMA_TO_DEVICE:
+ io_cmd->opcode = HIRAID_CMD_NONRW_TODEV;
+ break;
+ case DMA_FROM_DEVICE:
+ io_cmd->opcode = HIRAID_CMD_NONRW_FROMDEV;
+ break;
+ default:
+ dev_err(hdev->dev, "invalid NON_IO for unsupported data direction[%d]\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hiraid_setup_io_cmd(struct hiraid_dev *hdev,
+ struct hiraid_scsi_io_cmd *io_cmd, struct scsi_cmnd *scmd)
+{
+ memcpy(io_cmd->common.cdb, scmd->cmnd, scmd->cmd_len);
+ io_cmd->common.cdb_len = scmd->cmd_len;
+
+ if (hiraid_is_rw_scmd(scmd))
+ return hiraid_setup_rw_cmd(hdev, &io_cmd->rw, scmd);
+ else
+ return hiraid_setup_nonrw_cmd(hdev, &io_cmd->nonrw, scmd);
+}
+
+static inline void hiraid_init_mapbuff(struct hiraid_mapmange *mapbuf)
+{
+ mapbuf->sge_cnt = 0;
+ mapbuf->page_cnt = -1;
+ mapbuf->use_sgl = false;
+ WRITE_ONCE(mapbuf->state, CMD_IDLE);
+}
+
+static void hiraid_free_mapbuf(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf)
+{
+ const int last_prp = hdev->page_size / sizeof(__le64) - 1;
+ dma_addr_t buffer_phy, next_buffer_phy;
+ struct hiraid_sgl_desc *sg_list;
+ __le64 *prp_list;
+ void *addr;
+ int i;
+
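+	/*
+	 * Walk the chained descriptor pages: the last entry of each page is
+	 * used as the link holding the DMA address of the next page, so fetch
+	 * it before freeing the current page.
+	 */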
+ buffer_phy = mapbuf->first_dma;
+ if (mapbuf->page_cnt == 0)
+ dma_pool_free(mapbuf->hiraidq->prp_small_pool,
+ hiraid_mapbuf_list(mapbuf)[0], buffer_phy);
+
+ for (i = 0; i < mapbuf->page_cnt; i++) {
+ addr = hiraid_mapbuf_list(mapbuf)[i];
+
+ if (mapbuf->use_sgl) {
+ sg_list = addr;
+ next_buffer_phy =
+ le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+ } else {
+ prp_list = addr;
+ next_buffer_phy = le64_to_cpu(prp_list[last_prp]);
+ }
+
+ dma_pool_free(hdev->prp_page_pool, addr, buffer_phy);
+ buffer_phy = next_buffer_phy;
+ }
+
+ mapbuf->sense_buffer_virt = NULL;
+ mapbuf->page_cnt = -1;
+}
+
+static int hiraid_io_map_data(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf,
+ struct scsi_cmnd *scmd, struct hiraid_scsi_io_cmd *io_cmd)
+{
+ int ret;
+
+ ret = scsi_dma_map(scmd);
+ if (unlikely(ret < 0))
+ return ret;
+ mapbuf->sge_cnt = ret;
+
+ /* No data to DMA, it may be scsi no-rw command */
+ if (unlikely(mapbuf->sge_cnt == 0))
+ return 0;
+
+ mapbuf->len = scsi_bufflen(scmd);
+ mapbuf->sgl = scsi_sglist(scmd);
+ mapbuf->use_sgl = !hiraid_is_prp(hdev, mapbuf->sgl, mapbuf->sge_cnt);
+
+ if (mapbuf->use_sgl) {
+ ret = hiraid_build_sgl(hdev, io_cmd, mapbuf);
+ } else {
+ ret = hiraid_build_prp(hdev, mapbuf);
+ io_cmd->common.dptr.prp1 =
+ cpu_to_le64(sg_dma_address(mapbuf->sgl));
+ io_cmd->common.dptr.prp2 = cpu_to_le64(mapbuf->first_dma);
+ }
+
+ if (ret)
+ scsi_dma_unmap(scmd);
+
+ return ret;
+}
+
+static void hiraid_check_status(struct hiraid_mapmange *mapbuf, struct scsi_cmnd *scmd,
+ struct hiraid_completion *cqe)
+{
+ scsi_set_resid(scmd, 0);
+
+ switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) {
+ case SENSE_STATE_OK:
+ set_host_byte(scmd, DID_OK);
+ break;
+ case SENSE_STATE_NEED_CHECK:
+ set_host_byte(scmd, DID_OK);
+ scmd->result |= le16_to_cpu(cqe->status) >> 8;
+ if (scmd->result & SAM_STAT_CHECK_CONDITION) {
+ memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(scmd->sense_buffer,
+ mapbuf->sense_buffer_virt, SCSI_SENSE_BUFFERSIZE);
+ scmd->result = (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
+ }
+ break;
+ case SENSE_STATE_ABORTED:
+ set_host_byte(scmd, DID_ABORT);
+ break;
+ case SENSE_STATE_NEED_RETRY:
+ set_host_byte(scmd, DID_REQUEUE);
+ break;
+ default:
+ set_host_byte(scmd, DID_BAD_TARGET);
+ dev_warn_ratelimited(mapbuf->hiraidq->hdev->dev, "cid[%d] qid[%d] sdev[%d:%d] opcode[%.2x] bad status[0x%x]\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id), scmd->device->channel,
+ scmd->device->id, scmd->cmnd[0], le16_to_cpu(cqe->status));
+ break;
+ }
+}
+
+static inline void hiraid_query_scmd_tag(struct scsi_cmnd *scmd, u16 *qid, u16 *cid,
+ struct hiraid_dev *hdev, struct hiraid_sdev_hostdata *hostdata)
+{
+ u32 tag = blk_mq_unique_tag(blk_mq_rq_from_pdu((void *)scmd));
+
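+	/*
+	 * In single hw queue mode the hardware queue is chosen by the driver
+	 * (per-disk dispatch or current CPU); otherwise it is derived from the
+	 * block layer tag.
+	 */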
+ if (work_mode) {
+ if ((hdev->hdd_dispatch == DISPATCH_BY_DISK) && (hostdata->hwq != 0))
+ *qid = hostdata->hwq;
+ else
+ *qid = raw_smp_processor_id() % (hdev->online_queues - 1) + 1;
+ } else {
+ *qid = blk_mq_unique_tag_to_hwq(tag) + 1;
+ }
+ *cid = blk_mq_unique_tag_to_tag(tag);
+}
+
+static int hiraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ struct scsi_device *sdev = scmd->device;
+ struct hiraid_sdev_hostdata *hostdata;
+ struct hiraid_scsi_io_cmd io_cmd;
+ struct hiraid_queue *ioq;
+ u16 hwq, cid;
+ int ret;
+
+ if (unlikely(hdev->state == DEV_RESETTING))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (unlikely(hdev->state != DEV_LIVE)) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (log_debug_switch)
+ scsi_print_command(scmd);
+
+ hostdata = sdev->hostdata;
+ hiraid_query_scmd_tag(scmd, &hwq, &cid, hdev, hostdata);
+ ioq = &hdev->queues[hwq];
+
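+	/*
+	 * Throttle normal I/O so that HIRAID_PTHRU_CMDS_PERQ slots per queue
+	 * stay free for passthrough commands.
+	 */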
+ if (unlikely(atomic_inc_return(&ioq->inflight) >
+ (hdev->ioq_depth - HIRAID_PTHRU_CMDS_PERQ))) {
+ atomic_dec(&ioq->inflight);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ io_cmd.rw.hdid = cpu_to_le32(hostdata->hdid);
+ io_cmd.rw.cmd_id = cpu_to_le16(cid);
+
+ ret = hiraid_setup_io_cmd(hdev, &io_cmd, scmd);
+ if (unlikely(ret)) {
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ atomic_dec(&ioq->inflight);
+ return 0;
+ }
+
+ ret = cid * SCSI_SENSE_BUFFERSIZE;
+ if (work_mode) {
+ mapbuf->sense_buffer_virt = hdev->sense_buffer_virt + ret;
+ mapbuf->sense_buffer_phy = hdev->sense_buffer_phy + ret;
+ } else {
+ mapbuf->sense_buffer_virt = ioq->sense_buffer_virt + ret;
+ mapbuf->sense_buffer_phy = ioq->sense_buffer_phy + ret;
+ }
+ io_cmd.common.sense_addr = cpu_to_le64(mapbuf->sense_buffer_phy);
+ io_cmd.common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+
+ hiraid_init_mapbuff(mapbuf);
+
+ mapbuf->hiraidq = ioq;
+ mapbuf->cid = cid;
+ ret = hiraid_io_map_data(hdev, mapbuf, scmd, &io_cmd);
+ if (unlikely(ret)) {
+ dev_err(hdev->dev, "io map data err\n");
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ ret = 0;
+ goto deinit_iobuf;
+ }
+
+ WRITE_ONCE(mapbuf->state, CMD_FLIGHT);
+ hiraid_submit_cmd(ioq, &io_cmd);
+
+ return 0;
+
+deinit_iobuf:
+ atomic_dec(&ioq->inflight);
+ hiraid_free_mapbuf(hdev, mapbuf);
+ return ret;
+}
+
+static int hiraid_match_dev(struct hiraid_dev *hdev, u16 idx, struct scsi_device *sdev)
+{
+ if (HIRAID_DEV_INFO_FLAG_VALID(hdev->dev_info[idx].flag)) {
+ if (sdev->channel == hdev->dev_info[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->dev_info[idx].target) &&
+ sdev->lun < hdev->dev_info[idx].lun) {
+ dev_info(hdev->dev, "match device success, channel:target:lun[%d:%d:%d]\n",
+ hdev->dev_info[idx].channel,
+ hdev->dev_info[idx].target,
+ hdev->dev_info[idx].lun);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int hiraid_disk_qd(u8 attr)
+{
+ switch (HIRAID_DEV_DISK_TYPE(attr)) {
+ case HIRAID_SAS_HDD_VD:
+ case HIRAID_SATA_HDD_VD:
+ return HIRAID_HDD_VD_QD;
+ case HIRAID_SAS_SSD_VD:
+ case HIRAID_SATA_SSD_VD:
+ case HIRAID_NVME_SSD_VD:
+ return HIRAID_SSD_VD_QD;
+ case HIRAID_SAS_HDD_PD:
+ case HIRAID_SATA_HDD_PD:
+ return HIRAID_HDD_PD_QD;
+ case HIRAID_SAS_SSD_PD:
+ case HIRAID_SATA_SSD_PD:
+ case HIRAID_NVME_SSD_PD:
+ return HIRAID_SSD_PD_QD;
+ default:
+ return MAX_CMD_PER_DEV;
+ }
+}
+
+static bool hiraid_disk_is_hdd(u8 attr)
+{
+ switch (HIRAID_DEV_DISK_TYPE(attr)) {
+ case HIRAID_SAS_HDD_VD:
+ case HIRAID_SATA_HDD_VD:
+ case HIRAID_SAS_HDD_PD:
+ case HIRAID_SATA_HDD_PD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int hiraid_slave_alloc(struct scsi_device *sdev)
+{
+ struct hiraid_sdev_hostdata *hostdata;
+ struct hiraid_dev *hdev;
+ u16 idx;
+
+ hdev = shost_priv(sdev->host);
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+ if (!hostdata) {
+ dev_err(hdev->dev, "alloc scsi host data memory failed\n");
+ return -ENOMEM;
+ }
+
+ down_read(&hdev->dev_rwsem);
+ for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) {
+ if (hiraid_match_dev(hdev, idx, sdev))
+ goto scan_host;
+ }
+ up_read(&hdev->dev_rwsem);
+
+ kfree(hostdata);
+ return -ENXIO;
+
+scan_host:
+ hostdata->hdid = le32_to_cpu(hdev->dev_info[idx].hdid);
+ hostdata->max_io_kb = le16_to_cpu(hdev->dev_info[idx].max_io_kb);
+ hostdata->attr = hdev->dev_info[idx].attr;
+ hostdata->flag = hdev->dev_info[idx].flag;
+ hostdata->rg_id = 0xff;
+ sdev->hostdata = hostdata;
+ up_read(&hdev->dev_rwsem);
+ return 0;
+}
+
+static void hiraid_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+static int hiraid_slave_configure(struct scsi_device *sdev)
+{
+ unsigned int timeout = scmd_tmout_rawdisk * HZ;
+ struct hiraid_dev *hdev = shost_priv(sdev->host);
+ struct hiraid_sdev_hostdata *hostdata = sdev->hostdata;
+ u32 max_sec = sdev->host->max_sectors;
+ int qd = MAX_CMD_PER_DEV;
+
+ if (hostdata) {
+ if (HIRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ timeout = scmd_tmout_vd * HZ;
+ else if (HIRAID_DEV_INFO_ATTR_RAWDISK(hostdata->attr))
+ timeout = scmd_tmout_rawdisk * HZ;
+ max_sec = hostdata->max_io_kb << 1;
+ qd = hiraid_disk_qd(hostdata->attr);
+
+ if (hiraid_disk_is_hdd(hostdata->attr))
+ hostdata->hwq = hostdata->hdid % (hdev->online_queues - 1) + 1;
+ else
+ hostdata->hwq = 0;
+ } else {
+ dev_err(hdev->dev, "err, sdev->hostdata is null\n");
+ }
+
+ blk_queue_rq_timeout(sdev->request_queue, timeout);
+ sdev->eh_timeout = timeout;
+ scsi_change_queue_depth(sdev, qd);
+
+ if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
+ max_sec = sdev->host->max_sectors;
+
+ if (!max_io_force)
+ blk_queue_max_hw_sectors(sdev->request_queue, max_sec);
+
+ dev_info(hdev->dev, "sdev->channel:id:lun[%d:%d:%lld] scmd_timeout[%d]s maxsec[%d]\n",
+ sdev->channel, sdev->id, sdev->lun, timeout / HZ, max_sec);
+
+ return 0;
+}
+
+static void hiraid_shost_init(struct hiraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u8 domain, bus;
+ u32 dev_func;
+
+ domain = pci_domain_nr(pdev->bus);
+ bus = pdev->bus->number;
+ dev_func = pdev->devfn;
+
+ hdev->shost->nr_hw_queues = work_mode ? 1 : hdev->online_queues - 1;
+ hdev->shost->can_queue = hdev->scsi_qd;
+
+ hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ /* 512B per sector */
+ hdev->shost->max_sectors = (1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512;
+ hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV;
+ hdev->shost->max_channel = le16_to_cpu(hdev->ctrl_info->max_channel) - 1;
+ hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id);
+ hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun);
+
+ hdev->shost->this_id = -1;
+ hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func;
+ hdev->shost->max_cmd_len = MAX_CDB_LEN;
+ hdev->shost->hostt->cmd_size = hiraid_get_max_cmd_size(hdev);
+}
+
+static int hiraid_alloc_queue(struct hiraid_dev *hdev, u16 qid, u16 depth)
+{
+ struct hiraid_queue *hiraidq = &hdev->queues[qid];
+ int ret = 0;
+
+ if (hdev->queue_count > qid) {
+		dev_info(hdev->dev, "warn: queue[%d] already exists\n", qid);
+ return 0;
+ }
+
+ hiraidq->cqes = dma_alloc_coherent(hdev->dev, CQ_SIZE(depth),
+ &hiraidq->cq_buffer_phy, GFP_KERNEL | __GFP_ZERO);
+ if (!hiraidq->cqes)
+ return -ENOMEM;
+
+ hiraidq->sq_cmds = dma_alloc_coherent(hdev->dev, SQ_SIZE(qid, depth),
+ &hiraidq->sq_buffer_phy, GFP_KERNEL);
+ if (!hiraidq->sq_cmds) {
+ ret = -ENOMEM;
+ goto free_cqes;
+ }
+
+	/*
+	 * In single hw queue mode there is no need to allocate a sense buffer
+	 * per queue; a single shared sense buffer is allocated when the io
+	 * queues are set up.
+	 */
+ if (work_mode)
+ goto initq;
+
+ /* alloc sense buffer */
+ hiraidq->sense_buffer_virt = dma_alloc_coherent(hdev->dev, SENSE_SIZE(depth),
+ &hiraidq->sense_buffer_phy, GFP_KERNEL | __GFP_ZERO);
+ if (!hiraidq->sense_buffer_virt) {
+ ret = -ENOMEM;
+ goto free_sq_cmds;
+ }
+
+initq:
+ spin_lock_init(&hiraidq->sq_lock);
+ spin_lock_init(&hiraidq->cq_lock);
+ hiraidq->hdev = hdev;
+ hiraidq->q_depth = depth;
+ hiraidq->qid = qid;
+ hiraidq->cq_vector = -1;
+ hdev->queue_count++;
+
+ return 0;
+
+free_sq_cmds:
+ dma_free_coherent(hdev->dev, SQ_SIZE(qid, depth), (void *)hiraidq->sq_cmds,
+ hiraidq->sq_buffer_phy);
+free_cqes:
+ dma_free_coherent(hdev->dev, CQ_SIZE(depth), (void *)hiraidq->cqes,
+ hiraidq->cq_buffer_phy);
+ return ret;
+}
+
+static int hiraid_wait_control_ready(struct hiraid_dev *hdev, u64 cap, bool enabled)
+{
+ unsigned long timeout =
+ ((HIRAID_CAP_TIMEOUT(cap) + 1) * HIRAID_CAP_TIMEOUT_UNIT_MS) + jiffies;
+ u32 bit = enabled ? HIRAID_CSTS_RDY : 0;
+
+ while ((readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_RDY) != bit) {
+ usleep_range(1000, 2000);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev, "device not ready; aborting %s\n",
+ enabled ? "initialisation" : "reset");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int hiraid_shutdown_control(struct hiraid_dev *hdev)
+{
+ unsigned long timeout = le32_to_cpu(hdev->ctrl_info->rtd3e) / 1000000 * HZ + jiffies;
+
+ hdev->ctrl_config &= ~HIRAID_CC_SHN_MASK;
+ hdev->ctrl_config |= HIRAID_CC_SHN_NORMAL;
+ writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC);
+
+ while ((readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_SHST_MASK) !=
+ HIRAID_CSTS_SHST_CMPLT) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev, "device shutdown incomplete, abort shutdown\n");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int hiraid_disable_control(struct hiraid_dev *hdev)
+{
+ hdev->ctrl_config &= ~HIRAID_CC_SHN_MASK;
+ hdev->ctrl_config &= ~HIRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC);
+
+ return hiraid_wait_control_ready(hdev, hdev->cap, false);
+}
+
+static int hiraid_enable_control(struct hiraid_dev *hdev)
+{
+ u64 cap = hdev->cap;
+ u32 dev_page_min = HIRAID_CAP_MPSMIN(cap) + 12;
+ u32 page_shift = PAGE_SHIFT;
+
+ if (page_shift < dev_page_min) {
+ dev_err(hdev->dev, "minimum device page size[%u], too large for host[%u]\n",
+ 1U << dev_page_min, 1U << page_shift);
+ return -ENODEV;
+ }
+
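+	/* use the host page size, capped at the controller's maximum page size */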
+ page_shift = min_t(unsigned int, HIRAID_CAP_MPSMAX(cap) + 12, PAGE_SHIFT);
+ hdev->page_size = 1U << page_shift;
+
+ hdev->ctrl_config = HIRAID_CC_CSS_NVM;
+ hdev->ctrl_config |= (page_shift - 12) << HIRAID_CC_MPS_SHIFT;
+ hdev->ctrl_config |= HIRAID_CC_AMS_RR | HIRAID_CC_SHN_NONE;
+ hdev->ctrl_config |= HIRAID_CC_IOSQES | HIRAID_CC_IOCQES;
+ hdev->ctrl_config |= HIRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC);
+
+ return hiraid_wait_control_ready(hdev, cap, true);
+}
+
+static void hiraid_init_queue(struct hiraid_queue *hiraidq, u16 qid)
+{
+ struct hiraid_dev *hdev = hiraidq->hdev;
+
+ memset((void *)hiraidq->cqes, 0, CQ_SIZE(hiraidq->q_depth));
+
+ hiraidq->sq_tail = 0;
+ hiraidq->cq_head = 0;
+ hiraidq->cq_phase = 1;
+ hiraidq->q_db = &hdev->dbs[qid * 2 * hdev->db_stride];
+ hiraidq->prp_small_pool = hdev->prp_extra_pool[qid % extra_pool_num];
+ hdev->online_queues++;
+ atomic_set(&hiraidq->inflight, 0);
+}
+
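+/* a CQE is new when its phase bit matches the queue's current cq_phase */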
+static inline bool hiraid_cqe_pending(struct hiraid_queue *hiraidq)
+{
+ return (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].status) & 1) ==
+ hiraidq->cq_phase;
+}
+
+static void hiraid_complete_io_cmnd(struct hiraid_queue *ioq, struct hiraid_completion *cqe)
+{
+ struct hiraid_dev *hdev = ioq->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct hiraid_mapmange *mapbuf;
+ struct request *req;
+ unsigned long elapsed;
+
+ atomic_dec(&ioq->inflight);
+
+ if (work_mode)
+ tags = hdev->shost->tag_set.tags[0];
+ else
+ tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+ req = blk_mq_tag_to_rq(tags, le16_to_cpu(cqe->cmd_id));
+ if (unlikely(!req || !blk_mq_request_started(req))) {
+ dev_warn(hdev->dev, "invalid id[%d] completed on queue[%d]\n",
+ le16_to_cpu(cqe->cmd_id), ioq->qid);
+ return;
+ }
+
+ scmd = blk_mq_rq_to_pdu(req);
+ mapbuf = scsi_cmd_priv(scmd);
+
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n",
+ le16_to_cpu(cqe->cmd_id), ioq->qid, elapsed / HZ, elapsed % HZ);
+
+ if (cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_COMPLETE) != CMD_FLIGHT) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] enters abnormal handler, cost %3ld.%3ld seconds\n",
+ le16_to_cpu(cqe->cmd_id), ioq->qid, elapsed / HZ, elapsed % HZ);
+ WRITE_ONCE(mapbuf->state, CMD_TMO_COMPLETE);
+
+ if (mapbuf->sge_cnt) {
+ mapbuf->sge_cnt = 0;
+ scsi_dma_unmap(scmd);
+ }
+ hiraid_free_mapbuf(hdev, mapbuf);
+
+ return;
+ }
+
+ hiraid_check_status(mapbuf, scmd, cqe);
+ if (mapbuf->sge_cnt) {
+ mapbuf->sge_cnt = 0;
+ scsi_dma_unmap(scmd);
+ }
+ hiraid_free_mapbuf(hdev, mapbuf);
+ scmd->scsi_done(scmd);
+}
+
+static void hiraid_complete_admin_cmnd(struct hiraid_queue *adminq, struct hiraid_completion *cqe)
+{
+ struct hiraid_dev *hdev = adminq->hdev;
+ struct hiraid_cmd *adm_cmd;
+
+ adm_cmd = hdev->adm_cmds + le16_to_cpu(cqe->cmd_id);
+ if (unlikely(adm_cmd->state == CMD_IDLE)) {
+ dev_warn(adminq->hdev->dev, "invalid id[%d] completed on queue[%d]\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+ adm_cmd->result0 = le32_to_cpu(cqe->result);
+ adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&adm_cmd->cmd_done);
+}
+
+static void hiraid_send_async_event(struct hiraid_dev *hdev, u16 cid);
+
+static void hiraid_complete_async_event(struct hiraid_queue *hiraidq, struct hiraid_completion *cqe)
+{
+ struct hiraid_dev *hdev = hiraidq->hdev;
+ u32 result = le32_to_cpu(cqe->result);
+
+ dev_info(hdev->dev, "recv async event, cid[%d] status[0x%x] result[0x%x]\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->status) >> 1, result);
+
+ hiraid_send_async_event(hdev, le16_to_cpu(cqe->cmd_id));
+
+ if ((le16_to_cpu(cqe->status) >> 1) != HIRAID_SC_SUCCESS)
+ return;
+ switch (result & 0x7) {
+ case HIRAID_ASYN_EVENT_NOTICE:
+ hiraid_handle_async_notice(hdev, result);
+ break;
+ case HIRAID_ASYN_EVENT_VS:
+ hiraid_handle_async_vs(hdev, result, le32_to_cpu(cqe->result1));
+ break;
+ default:
+ dev_warn(hdev->dev, "unsupported async event type[%u]\n", result & 0x7);
+ break;
+ }
+}
+
+static void hiraid_complete_pthru_cmnd(struct hiraid_queue *ioq, struct hiraid_completion *cqe)
+{
+ struct hiraid_dev *hdev = ioq->hdev;
+ struct hiraid_cmd *ptcmd;
+
+ ptcmd = hdev->io_ptcmds + (ioq->qid - 1) * HIRAID_PTHRU_CMDS_PERQ +
+ le16_to_cpu(cqe->cmd_id) - hdev->scsi_qd;
+
+ ptcmd->status = le16_to_cpu(cqe->status) >> 1;
+ ptcmd->result0 = le32_to_cpu(cqe->result);
+ ptcmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&ptcmd->cmd_done);
+}
+
+static inline void hiraid_handle_cqe(struct hiraid_queue *hiraidq, u16 idx)
+{
+ struct hiraid_completion *cqe = &hiraidq->cqes[idx];
+ struct hiraid_dev *hdev = hiraidq->hdev;
+ u16 cid = le16_to_cpu(cqe->cmd_id);
+
+ if (unlikely(!work_mode && (cid >= hiraidq->q_depth))) {
+ dev_err(hdev->dev, "invalid command id[%d] completed on queue[%d]\n",
+			cid, le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d] result[0x%x] sqid[%d] status[0x%x]\n",
+ cid, hiraidq->qid, le32_to_cpu(cqe->result),
+ le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
+
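+	/*
+	 * Command ids above the normal ranges identify async events on the
+	 * admin queue and passthrough commands on the io queues.
+	 */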
+ if (unlikely(hiraidq->qid == 0 && cid >= HIRAID_AQ_BLK_MQ_DEPTH)) {
+ hiraid_complete_async_event(hiraidq, cqe);
+ return;
+ }
+
+ if (unlikely(hiraidq->qid && cid >= hdev->scsi_qd)) {
+ hiraid_complete_pthru_cmnd(hiraidq, cqe);
+ return;
+ }
+
+ if (hiraidq->qid)
+ hiraid_complete_io_cmnd(hiraidq, cqe);
+ else
+ hiraid_complete_admin_cmnd(hiraidq, cqe);
+}
+
+static void hiraid_complete_cqes(struct hiraid_queue *hiraidq, u16 start, u16 end)
+{
+ while (start != end) {
+ hiraid_handle_cqe(hiraidq, start);
+ if (++start == hiraidq->q_depth)
+ start = 0;
+ }
+}
+
+static inline void hiraid_update_cq_head(struct hiraid_queue *hiraidq)
+{
+ if (++hiraidq->cq_head == hiraidq->q_depth) {
+ hiraidq->cq_head = 0;
+ hiraidq->cq_phase = !hiraidq->cq_phase;
+ }
+}
+
+static inline bool hiraid_process_cq(struct hiraid_queue *hiraidq, u16 *start, u16 *end, int tag)
+{
+ bool found = false;
+
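+	/*
+	 * Consume pending CQEs from cq_head, stopping once the entry with the
+	 * requested tag has been seen (tag -1 never matches, so the irq path
+	 * drains everything); ring the cq doorbell if anything was consumed.
+	 */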
+ *start = hiraidq->cq_head;
+ while (!found && hiraid_cqe_pending(hiraidq)) {
+ if (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].cmd_id) == tag)
+ found = true;
+ hiraid_update_cq_head(hiraidq);
+ }
+ *end = hiraidq->cq_head;
+
+ if (*start != *end)
+ writel(hiraidq->cq_head, hiraidq->q_db + hiraidq->hdev->db_stride);
+
+ return found;
+}
+
+static bool hiraid_poll_cq(struct hiraid_queue *hiraidq, int cid)
+{
+ u16 start, end;
+ bool found;
+
+ if (!hiraid_cqe_pending(hiraidq))
+		return false;
+
+ spin_lock_irq(&hiraidq->cq_lock);
+ found = hiraid_process_cq(hiraidq, &start, &end, cid);
+ spin_unlock_irq(&hiraidq->cq_lock);
+
+ hiraid_complete_cqes(hiraidq, start, end);
+ return found;
+}
+
+static irqreturn_t hiraid_handle_irq(int irq, void *data)
+{
+ struct hiraid_queue *hiraidq = data;
+ irqreturn_t ret = IRQ_NONE;
+ u16 start, end;
+
+ spin_lock(&hiraidq->cq_lock);
+ if (hiraidq->cq_head != hiraidq->last_cq_head)
+ ret = IRQ_HANDLED;
+
+ hiraid_process_cq(hiraidq, &start, &end, -1);
+ hiraidq->last_cq_head = hiraidq->cq_head;
+ spin_unlock(&hiraidq->cq_lock);
+
+ if (start != end) {
+ hiraid_complete_cqes(hiraidq, start, end);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+static int hiraid_setup_admin_queue(struct hiraid_dev *hdev)
+{
+ struct hiraid_queue *adminq = &hdev->queues[0];
+ u32 aqa;
+ int ret;
+
+ dev_info(hdev->dev, "start disable controller\n");
+
+ ret = hiraid_disable_control(hdev);
+ if (ret)
+ return ret;
+
+ ret = hiraid_alloc_queue(hdev, 0, HIRAID_AQ_DEPTH);
+ if (ret)
+ return ret;
+
+ aqa = adminq->q_depth - 1;
+ aqa |= aqa << 16;
+ writel(aqa, hdev->bar + HIRAID_REG_AQA);
+ lo_hi_writeq(adminq->sq_buffer_phy, hdev->bar + HIRAID_REG_ASQ);
+ lo_hi_writeq(adminq->cq_buffer_phy, hdev->bar + HIRAID_REG_ACQ);
+
+ dev_info(hdev->dev, "start enable controller\n");
+
+ ret = hiraid_enable_control(hdev);
+ if (ret) {
+ ret = -ENODEV;
+ return ret;
+ }
+
+ adminq->cq_vector = 0;
+ ret = pci_request_irq(hdev->pdev, adminq->cq_vector, hiraid_handle_irq, NULL,
+ adminq, "hiraid%d_q%d", hdev->instance, adminq->qid);
+ if (ret) {
+ adminq->cq_vector = -1;
+ return ret;
+ }
+
+ hiraid_init_queue(adminq, 0);
+
+ dev_info(hdev->dev, "setup admin queue success, queuecount[%d] online[%d] pagesize[%d]\n",
+ hdev->queue_count, hdev->online_queues, hdev->page_size);
+
+ return 0;
+}
+
+static u32 hiraid_get_bar_size(struct hiraid_dev *hdev, u32 nr_ioqs)
+{
+ return (HIRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride));
+}
+
+static int hiraid_create_admin_cmds(struct hiraid_dev *hdev)
+{
+ u16 i;
+
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+ spin_lock_init(&hdev->adm_cmd_lock);
+
+ hdev->adm_cmds = kcalloc_node(HIRAID_AQ_BLK_MQ_DEPTH, sizeof(struct hiraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->adm_cmds) {
+ dev_err(hdev->dev, "alloc admin cmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < HIRAID_AQ_BLK_MQ_DEPTH; i++) {
+ hdev->adm_cmds[i].qid = 0;
+ hdev->adm_cmds[i].cid = i;
+ list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
+ }
+
+ dev_info(hdev->dev, "alloc admin cmds success, num[%d]\n", HIRAID_AQ_BLK_MQ_DEPTH);
+
+ return 0;
+}
+
+static void hiraid_free_admin_cmds(struct hiraid_dev *hdev)
+{
+ kfree(hdev->adm_cmds);
+ hdev->adm_cmds = NULL;
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
+static struct hiraid_cmd *hiraid_get_cmd(struct hiraid_dev *hdev, enum hiraid_cmd_type type)
+{
+ struct hiraid_cmd *cmd = NULL;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == HIRAID_CMD_PTHRU) {
+ head = &hdev->io_pt_list;
+ slock = &hdev->io_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(slock, flags);
+ dev_err(hdev->dev, "err, cmd[%d] list empty\n", type);
+ return NULL;
+ }
+ cmd = list_entry(head->next, struct hiraid_cmd, list);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(slock, flags);
+
+ WRITE_ONCE(cmd->state, CMD_FLIGHT);
+
+ return cmd;
+}
+
+static void hiraid_put_cmd(struct hiraid_dev *hdev, struct hiraid_cmd *cmd,
+ enum hiraid_cmd_type type)
+{
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == HIRAID_CMD_PTHRU) {
+ head = &hdev->io_pt_list;
+ slock = &hdev->io_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ WRITE_ONCE(cmd->state, CMD_IDLE);
+ list_add_tail(&cmd->list, head);
+ spin_unlock_irqrestore(slock, flags);
+}
+
+static bool hiraid_admin_need_reset(struct hiraid_admin_command *cmd)
+{
+ switch (cmd->common.opcode) {
+ case HIRAID_ADMIN_DELETE_SQ:
+ case HIRAID_ADMIN_CREATE_SQ:
+ case HIRAID_ADMIN_DELETE_CQ:
+ case HIRAID_ADMIN_CREATE_CQ:
+ case HIRAID_ADMIN_SET_FEATURES:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int hiraid_reset_work_sync(struct hiraid_dev *hdev);
+static inline void hiraid_admin_timeout(struct hiraid_dev *hdev, struct hiraid_cmd *cmd)
+{
+	/* the command may already have been completed by a controller reset */
+ if (READ_ONCE(cmd->state) == CMD_COMPLETE)
+ return;
+ if (hiraid_reset_work_sync(hdev) == -EBUSY)
+ flush_work(&hdev->reset_work);
+}
+
+static int hiraid_put_admin_sync_request(struct hiraid_dev *hdev, struct hiraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout)
+{
+ struct hiraid_cmd *adm_cmd = hiraid_get_cmd(hdev, HIRAID_CMD_ADMIN);
+
+ if (!adm_cmd) {
+ dev_err(hdev->dev, "err, get admin cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&adm_cmd->cmd_done);
+
+ cmd->common.cmd_id = cpu_to_le16(adm_cmd->cid);
+ hiraid_submit_cmd(&hdev->queues[0], cmd);
+
+ if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ adm_cmd->cid, adm_cmd->qid, cmd->usr_cmd.opcode,
+ cmd->usr_cmd.info_0.subopcode);
+
+ /* reset controller if admin timeout */
+ if (hiraid_admin_need_reset(cmd))
+ hiraid_admin_timeout(hdev, adm_cmd);
+
+ hiraid_put_cmd(hdev, adm_cmd, HIRAID_CMD_ADMIN);
+ return -ETIME;
+ }
+
+ if (result0)
+ *result0 = adm_cmd->result0;
+ if (result1)
+ *result1 = adm_cmd->result1;
+
+ hiraid_put_cmd(hdev, adm_cmd, HIRAID_CMD_ADMIN);
+
+ return adm_cmd->status;
+}
+
+/**
+ * hiraid_create_complete_queue - send cmd to controller to create a completion queue
+ */
+static int hiraid_create_complete_queue(struct hiraid_dev *hdev, u16 qid,
+ struct hiraid_queue *hiraidq, u16 cq_vector)
+{
+ struct hiraid_admin_command admin_cmd;
+ int flags = HIRAID_QUEUE_PHYS_CONTIG | HIRAID_CQ_IRQ_ENABLED;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_cq.opcode = HIRAID_ADMIN_CREATE_CQ;
+ admin_cmd.create_cq.prp1 = cpu_to_le64(hiraidq->cq_buffer_phy);
+ admin_cmd.create_cq.cqid = cpu_to_le16(qid);
+ admin_cmd.create_cq.qsize = cpu_to_le16(hiraidq->q_depth - 1);
+ admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
+ admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector);
+
+ return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/**
+ * hiraid_create_send_queue - send cmd to controller to create a submission queue
+ */
+static int hiraid_create_send_queue(struct hiraid_dev *hdev, u16 qid,
+ struct hiraid_queue *hiraidq)
+{
+ struct hiraid_admin_command admin_cmd;
+ int flags = HIRAID_QUEUE_PHYS_CONTIG;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_sq.opcode = HIRAID_ADMIN_CREATE_SQ;
+ admin_cmd.create_sq.prp1 = cpu_to_le64(hiraidq->sq_buffer_phy);
+ admin_cmd.create_sq.sqid = cpu_to_le16(qid);
+ admin_cmd.create_sq.qsize = cpu_to_le16(hiraidq->q_depth - 1);
+ admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
+ admin_cmd.create_sq.cqid = cpu_to_le16(qid);
+
+ return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static void hiraid_free_all_queues(struct hiraid_dev *hdev)
+{
+ int i;
+ struct hiraid_queue *hq;
+
+ for (i = 0; i < hdev->queue_count; i++) {
+ hq = &hdev->queues[i];
+ dma_free_coherent(hdev->dev, CQ_SIZE(hq->q_depth),
+ (void *)hq->cqes, hq->cq_buffer_phy);
+ dma_free_coherent(hdev->dev, SQ_SIZE(hq->qid, hq->q_depth),
+ hq->sq_cmds, hq->sq_buffer_phy);
+ if (!work_mode)
+ dma_free_coherent(hdev->dev, SENSE_SIZE(hq->q_depth),
+ hq->sense_buffer_virt, hq->sense_buffer_phy);
+ }
+
+ hdev->queue_count = 0;
+}
+
+static void hiraid_free_sense_buffer(struct hiraid_dev *hdev)
+{
+ if (hdev->sense_buffer_virt) {
+ dma_free_coherent(hdev->dev,
+ SENSE_SIZE(hdev->scsi_qd + max_hwq_num * HIRAID_PTHRU_CMDS_PERQ),
+ hdev->sense_buffer_virt, hdev->sense_buffer_phy);
+ hdev->sense_buffer_virt = NULL;
+ }
+}
+
+static int hiraid_delete_queue(struct hiraid_dev *hdev, u8 opcode, u16 qid)
+{
+ struct hiraid_admin_command admin_cmd;
+ int ret;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.delete_queue.opcode = opcode;
+ admin_cmd.delete_queue.qid = cpu_to_le16(qid);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (ret)
+ dev_err(hdev->dev, "delete %s:[%d] failed\n",
+ (opcode == HIRAID_ADMIN_DELETE_CQ) ? "cq" : "sq", qid);
+
+ return ret;
+}
+
+static int hiraid_delete_complete_queue(struct hiraid_dev *hdev, u16 cqid)
+{
+ return hiraid_delete_queue(hdev, HIRAID_ADMIN_DELETE_CQ, cqid);
+}
+
+static int hiraid_delete_send_queue(struct hiraid_dev *hdev, u16 sqid)
+{
+ return hiraid_delete_queue(hdev, HIRAID_ADMIN_DELETE_SQ, sqid);
+}
+
+static int hiraid_create_queue(struct hiraid_queue *hiraidq, u16 qid)
+{
+ struct hiraid_dev *hdev = hiraidq->hdev;
+ u16 cq_vector;
+ int ret;
+
+ cq_vector = (hdev->num_vecs == 1) ? 0 : qid;
+ ret = hiraid_create_complete_queue(hdev, qid, hiraidq, cq_vector);
+ if (ret)
+ return ret;
+
+ ret = hiraid_create_send_queue(hdev, qid, hiraidq);
+ if (ret)
+ goto delete_cq;
+
+ hiraidq->cq_vector = cq_vector;
+ ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_handle_irq, NULL,
+ hiraidq, "hiraid%d_q%d", hdev->instance, qid);
+ if (ret) {
+ hiraidq->cq_vector = -1;
+ dev_err(hdev->dev, "request queue[%d] irq failed\n", qid);
+ goto delete_sq;
+ }
+
+ hiraid_init_queue(hiraidq, qid);
+
+ return 0;
+
+delete_sq:
+ hiraid_delete_send_queue(hdev, qid);
+delete_cq:
+ hiraid_delete_complete_queue(hdev, qid);
+
+ return ret;
+}
+
+static int hiraid_create_io_queues(struct hiraid_dev *hdev)
+{
+ u32 i, max;
+ int ret = 0;
+
+ max = min(hdev->max_qid, hdev->queue_count - 1);
+ for (i = hdev->online_queues; i <= max; i++) {
+ ret = hiraid_create_queue(&hdev->queues[i], i);
+ if (ret) {
+ dev_err(hdev->dev, "create queue[%d] failed\n", i);
+ break;
+ }
+ }
+
+ if (!hdev->last_qcnt)
+ hdev->last_qcnt = hdev->online_queues;
+
+	dev_info(hdev->dev, "queue_count[%d] online_queue[%d] last_online[%d]\n",
+ hdev->queue_count, hdev->online_queues, hdev->last_qcnt);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+static int hiraid_set_features(struct hiraid_dev *hdev, u32 fid, u32 dword11, void *buffer,
+ size_t buflen, u32 *result)
+{
+ struct hiraid_admin_command admin_cmd;
+ int ret;
+ u8 *data_ptr = NULL;
+ dma_addr_t buffer_phy = 0;
+
+ if (buffer && buflen) {
+ data_ptr = dma_alloc_coherent(hdev->dev, buflen, &buffer_phy, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memcpy(data_ptr, buffer, buflen);
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.features.opcode = HIRAID_ADMIN_SET_FEATURES;
+ admin_cmd.features.fid = cpu_to_le32(fid);
+ admin_cmd.features.dword11 = cpu_to_le32(dword11);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, result, NULL, 0);
+
+ if (data_ptr)
+ dma_free_coherent(hdev->dev, buflen, data_ptr, buffer_phy);
+
+ return ret;
+}
+
+static int hiraid_configure_timestamp(struct hiraid_dev *hdev)
+{
+ __le64 timestamp;
+ int ret;
+
+ timestamp = cpu_to_le64(ktime_to_ms(ktime_get_real()));
+ ret = hiraid_set_features(hdev, HIRAID_FEATURE_TIMESTAMP, 0,
+				  &timestamp, sizeof(timestamp), NULL);
+
+ if (ret)
+ dev_err(hdev->dev, "set timestamp failed[%d]\n", ret);
+ return ret;
+}
+
+static int hiraid_get_queue_cnt(struct hiraid_dev *hdev, u32 *cnt)
+{
+ u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16);
+ u32 nr_ioqs, result;
+ int status;
+
+ status = hiraid_set_features(hdev, HIRAID_FEATURE_NUM_QUEUES, q_cnt, NULL, 0, &result);
+ if (status) {
+ dev_err(hdev->dev, "set queue count failed, status[%d]\n",
+ status);
+ return -EIO;
+ }
+
+ nr_ioqs = min(result & 0xffff, result >> 16) + 1;
+ *cnt = min(*cnt, nr_ioqs);
+ if (*cnt == 0) {
+ dev_err(hdev->dev, "illegal qcount: zero, nr_ioqs[%d], cnt[%d]\n", nr_ioqs, *cnt);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int hiraid_setup_io_queues(struct hiraid_dev *hdev)
+{
+ struct hiraid_queue *adminq = &hdev->queues[0];
+ struct pci_dev *pdev = hdev->pdev;
+ u32 i, size, nr_ioqs;
+ int ret;
+
+ struct irq_affinity affd = {
+ .pre_vectors = 1
+ };
+
+ /* alloc IO sense buffer for single hw queue mode */
+ if (work_mode && !hdev->sense_buffer_virt) {
+ hdev->sense_buffer_virt = dma_alloc_coherent(hdev->dev,
+ SENSE_SIZE(hdev->scsi_qd + max_hwq_num * HIRAID_PTHRU_CMDS_PERQ),
+ &hdev->sense_buffer_phy, GFP_KERNEL | __GFP_ZERO);
+ if (!hdev->sense_buffer_virt)
+ return -ENOMEM;
+ }
+
+ nr_ioqs = min(num_online_cpus(), max_hwq_num);
+ ret = hiraid_get_queue_cnt(hdev, &nr_ioqs);
+ if (ret < 0)
+ return ret;
+
+ size = hiraid_get_bar_size(hdev, nr_ioqs);
+ ret = hiraid_remap_bar(hdev, size);
+ if (ret)
+ return -ENOMEM;
+
+ adminq->q_db = hdev->dbs;
+
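+	/*
+	 * Drop the admin irq and all vectors before re-allocating them with
+	 * affinity spread over the io queues; the admin vector is re-requested
+	 * below.
+	 */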
+ pci_free_irq(pdev, 0, adminq);
+ pci_free_irq_vectors(pdev);
+ hdev->online_queues--;
+
+ ret = pci_alloc_irq_vectors_affinity(pdev, 1, (nr_ioqs + 1),
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ if (ret <= 0)
+ return -EIO;
+
+ hdev->num_vecs = ret;
+ hdev->max_qid = max(ret - 1, 1);
+
+ ret = pci_request_irq(pdev, adminq->cq_vector, hiraid_handle_irq, NULL,
+ adminq, "hiraid%d_q%d", hdev->instance, adminq->qid);
+ if (ret) {
+ dev_err(hdev->dev, "request admin irq failed\n");
+ adminq->cq_vector = -1;
+ return ret;
+ }
+
+ hdev->online_queues++;
+
+ for (i = hdev->queue_count; i <= hdev->max_qid; i++) {
+ ret = hiraid_alloc_queue(hdev, i, hdev->ioq_depth);
+ if (ret)
+ break;
+ }
+ dev_info(hdev->dev, "max_qid[%d] queuecount[%d] onlinequeue[%d] ioqdepth[%d]\n",
+ hdev->max_qid, hdev->queue_count, hdev->online_queues, hdev->ioq_depth);
+
+ return hiraid_create_io_queues(hdev);
+}
+
+static void hiraid_delete_io_queues(struct hiraid_dev *hdev)
+{
+ u16 queues = hdev->online_queues - 1;
+ u8 opcode = HIRAID_ADMIN_DELETE_SQ;
+ u16 i, pass;
+
+ if (!pci_device_is_present(hdev->pdev)) {
+ dev_err(hdev->dev, "pci_device is not present, skip disable io queues\n");
+ return;
+ }
+
+ if (hdev->online_queues < 2) {
+		dev_err(hdev->dev, "err, io queues have already been deleted\n");
+ return;
+ }
+
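+	/* first pass deletes the submission queues, second pass the completion queues */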
+ for (pass = 0; pass < 2; pass++) {
+ for (i = queues; i > 0; i--)
+ if (hiraid_delete_queue(hdev, opcode, i))
+ break;
+
+ opcode = HIRAID_ADMIN_DELETE_CQ;
+ }
+}
+
+static void hiraid_pci_disable(struct hiraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u32 i;
+
+ for (i = 0; i < hdev->online_queues; i++)
+ pci_free_irq(pdev, hdev->queues[i].cq_vector, &hdev->queues[i]);
+ pci_free_irq_vectors(pdev);
+ if (pci_is_enabled(pdev)) {
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
+ hdev->online_queues = 0;
+}
+
+static void hiraid_disable_admin_queue(struct hiraid_dev *hdev, bool shutdown)
+{
+ struct hiraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ hiraid_shutdown_control(hdev);
+ else
+ hiraid_disable_control(hdev);
+ }
+
+ if (hdev->queue_count == 0) {
+		dev_err(hdev->dev, "err, admin queue has already been deleted\n");
+ return;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ hiraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+ hiraid_complete_cqes(adminq, start, end);
+}
+
+static int hiraid_create_prp_pools(struct hiraid_dev *hdev)
+{
+ int i;
+ char poolname[20] = { 0 };
+
+ hdev->prp_page_pool = dma_pool_create("prp list page", hdev->dev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+
+ if (!hdev->prp_page_pool) {
+ dev_err(hdev->dev, "create prp_page_pool failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < extra_pool_num; i++) {
+ sprintf(poolname, "prp_list_256_%d", i);
+ hdev->prp_extra_pool[i] = dma_pool_create(poolname, hdev->dev, EXTRA_POOL_SIZE,
+ EXTRA_POOL_SIZE, 0);
+
+ if (!hdev->prp_extra_pool[i]) {
+ dev_err(hdev->dev, "create prp extra pool[%d] failed\n", i);
+ goto destroy_prp_extra_pool;
+ }
+ }
+
+ return 0;
+
+destroy_prp_extra_pool:
+ while (i > 0)
+ dma_pool_destroy(hdev->prp_extra_pool[--i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+
+ return -ENOMEM;
+}
+
+static void hiraid_free_prp_pools(struct hiraid_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < extra_pool_num; i++)
+ dma_pool_destroy(hdev->prp_extra_pool[i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+}
+
+static int hiraid_request_devices(struct hiraid_dev *hdev, struct hiraid_dev_info *dev)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ struct hiraid_admin_command admin_cmd;
+ struct hiraid_dev_list *list_buf;
+ dma_addr_t buffer_phy = 0;
+ u32 i, idx, hdid, ndev;
+ int ret = 0;
+
+ list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+ if (!list_buf)
+ return -ENOMEM;
+
+ for (idx = 0; idx < nd;) {
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = HIRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = HIRAID_GET_DEVLIST_INFO;
+ admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (ret) {
+ dev_err(hdev->dev, "get device list failed, nd[%u] idx[%u] ret[%d]\n",
+ nd, idx, ret);
+ goto out;
+ }
+ ndev = le32_to_cpu(list_buf->dev_num);
+
+ dev_info(hdev->dev, "get dev list ndev num[%u]\n", ndev);
+
+ for (i = 0; i < ndev; i++) {
+ hdid = le32_to_cpu(list_buf->devinfo[i].hdid);
+ dev_info(hdev->dev, "devices[%d], hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+ i, hdid, le16_to_cpu(list_buf->devinfo[i].target),
+ list_buf->devinfo[i].channel,
+ list_buf->devinfo[i].lun,
+ list_buf->devinfo[i].attr);
+ if (hdid > nd || hdid == 0) {
+ dev_err(hdev->dev, "err, hdid[%d] invalid\n", hdid);
+ continue;
+ }
+ memcpy(&dev[hdid - 1], &list_buf->devinfo[i],
+ sizeof(struct hiraid_dev_info));
+ }
+ idx += ndev;
+
+ if (ndev < MAX_DEV_ENTRY_PER_PAGE_4K)
+ break;
+ }
+
+out:
+ dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, buffer_phy);
+ return ret;
+}
+
+static void hiraid_send_async_event(struct hiraid_dev *hdev, u16 cid)
+{
+ struct hiraid_queue *adminq = &hdev->queues[0];
+ struct hiraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = HIRAID_ADMIN_ASYNC_EVENT;
+ admin_cmd.common.cmd_id = cpu_to_le16(cid);
+
+ hiraid_submit_cmd(adminq, &admin_cmd);
+ dev_info(hdev->dev, "send async event to controller, cid[%d]\n", cid);
+}
+
+static inline void hiraid_init_async_event(struct hiraid_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->ctrl_info->asynevent; i++)
+ hiraid_send_async_event(hdev, i + HIRAID_AQ_BLK_MQ_DEPTH);
+}
+
+static int hiraid_add_device(struct hiraid_dev *hdev, struct hiraid_dev_info *devinfo)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "add device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+ le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target),
+ devinfo->channel, devinfo->lun, devinfo->attr);
+
+ sdev = scsi_device_lookup(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ if (sdev) {
+		dev_warn(hdev->dev, "device already exists, channel[%d] targetid[%d] lun[%d]\n",
+ devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ scsi_device_put(sdev);
+ return -EEXIST;
+ }
+ scsi_add_device(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ return 0;
+}
+
+static int hiraid_rescan_device(struct hiraid_dev *hdev, struct hiraid_dev_info *devinfo)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "rescan device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+ le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target),
+ devinfo->channel, devinfo->lun, devinfo->attr);
+
+ sdev = scsi_device_lookup(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ if (!sdev) {
+		dev_warn(hdev->dev, "device does not exist, cannot rescan it, channel[%d] target_id[%d] lun[%d]\n",
+ devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int hiraid_delete_device(struct hiraid_dev *hdev, struct hiraid_dev_info *devinfo)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "remove device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+ le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target),
+ devinfo->channel, devinfo->lun, devinfo->attr);
+
+ sdev = scsi_device_lookup(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ if (!sdev) {
+		dev_warn(hdev->dev, "device does not exist, cannot remove it, channel[%d] target_id[%d] lun[%d]\n",
+ devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int hiraid_dev_list_init(struct hiraid_dev *hdev)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+
+ hdev->dev_info = kzalloc_node(nd * sizeof(struct hiraid_dev_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->dev_info)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int hiraid_luntarget_sort(const void *l, const void *r)
+{
+ const struct hiraid_dev_info *ln = l;
+ const struct hiraid_dev_info *rn = r;
+ int l_attr = HIRAID_DEV_INFO_ATTR_BOOT(ln->attr);
+ int r_attr = HIRAID_DEV_INFO_ATTR_BOOT(rn->attr);
+
+ /* boot first */
+ if (l_attr != r_attr)
+ return (r_attr - l_attr);
+
+ if (ln->channel == rn->channel)
+ return le16_to_cpu(ln->target) - le16_to_cpu(rn->target);
+
+ return ln->channel - rn->channel;
+}
+
+static void hiraid_scan_work(struct work_struct *work)
+{
+ struct hiraid_dev *hdev =
+ container_of(work, struct hiraid_dev, scan_work);
+ struct hiraid_dev_info *dev, *old_dev, *new_dev;
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ u8 flag, org_flag;
+ int i, ret;
+ int count = 0;
+
+ dev = kcalloc(nd, sizeof(struct hiraid_dev_info), GFP_KERNEL);
+ if (!dev)
+ return;
+
+ new_dev = kcalloc(nd, sizeof(struct hiraid_dev_info), GFP_KERNEL);
+ if (!new_dev)
+ goto free_list;
+
+ ret = hiraid_request_devices(hdev, dev);
+ if (ret)
+ goto free_all;
+ old_dev = hdev->dev_info;
+ for (i = 0; i < nd; i++) {
+ org_flag = old_dev[i].flag;
+ flag = dev[i].flag;
+
+ dev_log_dbg(hdev->dev, "i[%d] org_flag[0x%x] flag[0x%x]\n", i, org_flag, flag);
+
+ if (HIRAID_DEV_INFO_FLAG_VALID(flag)) {
+ if (!HIRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->dev_rwsem);
+ memcpy(&old_dev[i], &dev[i],
+ sizeof(struct hiraid_dev_info));
+ memcpy(&new_dev[count++], &dev[i],
+ sizeof(struct hiraid_dev_info));
+ up_write(&hdev->dev_rwsem);
+ } else if (HIRAID_DEV_INFO_FLAG_CHANGE(flag)) {
+ hiraid_rescan_device(hdev, &dev[i]);
+ }
+ } else {
+ if (HIRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->dev_rwsem);
+ old_dev[i].flag &= 0xfe;
+ up_write(&hdev->dev_rwsem);
+ hiraid_delete_device(hdev, &old_dev[i]);
+ }
+ }
+ }
+
+ dev_info(hdev->dev, "scan work add device num[%d]\n", count);
+
+ sort(new_dev, count, sizeof(new_dev[0]), hiraid_luntarget_sort, NULL);
+
+ for (i = 0; i < count; i++)
+ hiraid_add_device(hdev, &new_dev[i]);
+
+free_all:
+ kfree(new_dev);
+free_list:
+ kfree(dev);
+}
+
+static void hiraid_timesyn_work(struct work_struct *work)
+{
+ struct hiraid_dev *hdev =
+ container_of(work, struct hiraid_dev, timesyn_work);
+
+ hiraid_configure_timestamp(hdev);
+}
+
+static int hiraid_init_control_info(struct hiraid_dev *hdev);
+static void hiraid_fwactive_work(struct work_struct *work)
+{
+ struct hiraid_dev *hdev = container_of(work, struct hiraid_dev, fwact_work);
+
+ if (hiraid_init_control_info(hdev))
+ dev_err(hdev->dev, "get controller info failed after fw activation\n");
+}
+
+static void hiraid_queue_scan(struct hiraid_dev *hdev)
+{
+ queue_work(work_queue, &hdev->scan_work);
+}
+
+static void hiraid_handle_async_notice(struct hiraid_dev *hdev, u32 result)
+{
+ switch ((result & 0xff00) >> 8) {
+ case HIRAID_ASYN_DEV_CHANGED:
+ hiraid_queue_scan(hdev);
+ break;
+ case HIRAID_ASYN_FW_ACT_START:
+ dev_info(hdev->dev, "fw activation starting\n");
+ break;
+ case HIRAID_ASYN_HOST_PROBING:
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result[%08x]\n", result);
+ }
+}
+
+static void hiraid_handle_async_vs(struct hiraid_dev *hdev, u32 result, u32 result1)
+{
+ switch ((result & 0xff00) >> 8) {
+ case HIRAID_ASYN_TIMESYN:
+ queue_work(work_queue, &hdev->timesyn_work);
+ break;
+ case HIRAID_ASYN_FW_ACT_FINISH:
+ dev_info(hdev->dev, "fw activation finish\n");
+ queue_work(work_queue, &hdev->fwact_work);
+ break;
+ case HIRAID_ASYN_EVENT_MIN ... HIRAID_ASYN_EVENT_MAX:
+ dev_info(hdev->dev, "recv card event[%d] param1[0x%x] param2[0x%x]\n",
+ (result & 0xff00) >> 8, result, result1);
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result[0x%x]\n", result);
+ }
+}
+
+static int hiraid_alloc_resources(struct hiraid_dev *hdev)
+{
+ int ret, nqueue;
+
+ hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info), GFP_KERNEL, hdev->numa_node);
+ if (!hdev->ctrl_info)
+ return -ENOMEM;
+
+ ret = hiraid_create_prp_pools(hdev);
+ if (ret)
+ goto free_ctrl_info;
+ nqueue = min(num_possible_cpus(), max_hwq_num) + 1;
+ hdev->queues = kcalloc_node(nqueue, sizeof(struct hiraid_queue),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->queues) {
+ ret = -ENOMEM;
+ goto destroy_dma_pools;
+ }
+
+ ret = hiraid_create_admin_cmds(hdev);
+ if (ret)
+ goto free_queues;
+
+ dev_info(hdev->dev, "total queues num[%d]\n", nqueue);
+
+ return 0;
+
+free_queues:
+ kfree(hdev->queues);
+destroy_dma_pools:
+ hiraid_free_prp_pools(hdev);
+free_ctrl_info:
+ kfree(hdev->ctrl_info);
+
+ return ret;
+}
+
+static void hiraid_free_resources(struct hiraid_dev *hdev)
+{
+ hiraid_free_admin_cmds(hdev);
+ kfree(hdev->queues);
+ hiraid_free_prp_pools(hdev);
+ kfree(hdev->ctrl_info);
+}
+
+static void hiraid_bsg_buf_unmap(struct hiraid_dev *hdev, struct bsg_job *job)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct hiraid_mapmange *mapbuf = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ if (mapbuf->sge_cnt)
+ dma_unmap_sg(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, dma_dir);
+
+ hiraid_free_mapbuf(hdev, mapbuf);
+}
+
+static int hiraid_bsg_buf_map(struct hiraid_dev *hdev, struct bsg_job *job,
+ struct hiraid_admin_command *cmd)
+{
+ struct hiraid_bsg_request *bsg_req = job->request;
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct hiraid_mapmange *mapbuf = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+ /* No data to DMA, it may be scsi no-rw command */
+ mapbuf->sge_cnt = job->request_payload.sg_cnt;
+ mapbuf->sgl = job->request_payload.sg_list;
+ mapbuf->len = job->request_payload.payload_len;
+ mapbuf->page_cnt = -1;
+ if (unlikely(mapbuf->sge_cnt == 0))
+ goto out;
+
+ mapbuf->use_sgl = !hiraid_is_prp(hdev, mapbuf->sgl, mapbuf->sge_cnt);
+
+ ret = dma_map_sg_attrs(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, dma_dir, DMA_ATTR_NO_WARN);
+ if (!ret)
+ goto out;
+
+	if (mapbuf->use_sgl && (bsg_req->msgcode == HIRAID_BSG_IOPTHRU) &&
+	    hdev->ctrl_info->pt_use_sgl) {
+ ret = hiraid_build_passthru_sgl(hdev, cmd, mapbuf);
+ } else {
+ mapbuf->use_sgl = false;
+
+ ret = hiraid_build_passthru_prp(hdev, mapbuf);
+ cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(mapbuf->sgl));
+ cmd->common.dptr.prp2 = cpu_to_le64(mapbuf->first_dma);
+ }
+
+ if (ret)
+ goto unmap;
+
+ return 0;
+
+unmap:
+ dma_unmap_sg(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, dma_dir);
+out:
+ return ret;
+}
+
+static int hiraid_get_control_info(struct hiraid_dev *hdev, struct hiraid_ctrl_info *ctrl_info)
+{
+ struct hiraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t buffer_phy = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = HIRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = HIRAID_GET_CTRL_INFO;
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(ctrl_info, data_ptr, sizeof(struct hiraid_ctrl_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy);
+
+ return ret;
+}
+
+static int hiraid_init_control_info(struct hiraid_dev *hdev)
+{
+ int ret;
+
+ hdev->ctrl_info->nd = cpu_to_le32(240);
+ hdev->ctrl_info->mdts = 8;
+ hdev->ctrl_info->max_cmds = cpu_to_le16(4096);
+ hdev->ctrl_info->max_num_sge = cpu_to_le16(128);
+ hdev->ctrl_info->max_channel = cpu_to_le16(4);
+ hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239);
+ hdev->ctrl_info->max_lun = cpu_to_le16(2);
+
+ ret = hiraid_get_control_info(hdev, hdev->ctrl_info);
+ if (ret)
+ dev_err(hdev->dev, "get controller info failed[%d]\n", ret);
+
+	dev_info(hdev->dev, "device_num = %d\n", le32_to_cpu(hdev->ctrl_info->nd));
+	dev_info(hdev->dev, "max_cmd = %d\n", le16_to_cpu(hdev->ctrl_info->max_cmds));
+	dev_info(hdev->dev, "max_channel = %d\n", le16_to_cpu(hdev->ctrl_info->max_channel));
+	dev_info(hdev->dev, "max_tgt_id = %d\n", le32_to_cpu(hdev->ctrl_info->max_tgt_id));
+	dev_info(hdev->dev, "max_lun = %d\n", le16_to_cpu(hdev->ctrl_info->max_lun));
+	dev_info(hdev->dev, "max_num_sge = %d\n", le16_to_cpu(hdev->ctrl_info->max_num_sge));
+	dev_info(hdev->dev, "lun_num_boot = %d\n", hdev->ctrl_info->lun_num_boot);
+	dev_info(hdev->dev, "max_data_transfer_size = %d\n", hdev->ctrl_info->mdts);
+	dev_info(hdev->dev, "abort_cmd_limit = %d\n", hdev->ctrl_info->acl);
+	dev_info(hdev->dev, "asyn_event_num = %d\n", hdev->ctrl_info->asynevent);
+	dev_info(hdev->dev, "card_type = %d\n", hdev->ctrl_info->card_type);
+	dev_info(hdev->dev, "pt_use_sgl = %d\n", hdev->ctrl_info->pt_use_sgl);
+	dev_info(hdev->dev, "rtd3e = %d\n", le32_to_cpu(hdev->ctrl_info->rtd3e));
+	dev_info(hdev->dev, "serial_num = %s\n", hdev->ctrl_info->sn);
+	dev_info(hdev->dev, "fw_version = %s\n", hdev->ctrl_info->fw_version);
+
+ if (!hdev->ctrl_info->asynevent)
+ hdev->ctrl_info->asynevent = 1;
+ if (hdev->ctrl_info->asynevent > HIRAID_ASYN_COMMANDS)
+ hdev->ctrl_info->asynevent = HIRAID_ASYN_COMMANDS;
+
+ hdev->scsi_qd = work_mode ?
+ le16_to_cpu(hdev->ctrl_info->max_cmds) : (hdev->ioq_depth - HIRAID_PTHRU_CMDS_PERQ);
+
+ return 0;
+}
+
+static int hiraid_user_send_admcmd(struct hiraid_dev *hdev, struct bsg_job *job)
+{
+ struct hiraid_bsg_request *bsg_req = job->request;
+ struct hiraid_passthru_common_cmd *ptcmd = &(bsg_req->admcmd);
+ struct hiraid_admin_command admin_cmd;
+ u32 timeout = msecs_to_jiffies(ptcmd->timeout_ms);
+ u32 result[2] = {0};
+ int status;
+
+ if (hdev->state >= DEV_RESETTING) {
+ dev_err(hdev->dev, "err, host state[%d] is not right\n",
+ hdev->state);
+ return -EBUSY;
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = ptcmd->opcode;
+ admin_cmd.common.flags = ptcmd->flags;
+ admin_cmd.common.hdid = cpu_to_le32(ptcmd->nsid);
+ admin_cmd.common.cdw2[0] = cpu_to_le32(ptcmd->cdw2);
+ admin_cmd.common.cdw2[1] = cpu_to_le32(ptcmd->cdw3);
+ admin_cmd.common.cdw10 = cpu_to_le32(ptcmd->cdw10);
+ admin_cmd.common.cdw11 = cpu_to_le32(ptcmd->cdw11);
+ admin_cmd.common.cdw12 = cpu_to_le32(ptcmd->cdw12);
+ admin_cmd.common.cdw13 = cpu_to_le32(ptcmd->cdw13);
+ admin_cmd.common.cdw14 = cpu_to_le32(ptcmd->cdw14);
+ admin_cmd.common.cdw15 = cpu_to_le32(ptcmd->cdw15);
+
+ status = hiraid_bsg_buf_map(hdev, job, &admin_cmd);
+ if (status) {
+ dev_err(hdev->dev, "err, map data failed\n");
+ return status;
+ }
+
+ status = hiraid_put_admin_sync_request(hdev, &admin_cmd, &result[0], &result[1], timeout);
+ if (status >= 0) {
+ job->reply_len = sizeof(result);
+ memcpy(job->reply, result, sizeof(result));
+ }
+ if (status)
+		dev_info(hdev->dev, "opcode[0x%x] subopcode[0x%x] status[0x%x] result0[0x%x] result1[0x%x]\n",
+			ptcmd->opcode, ptcmd->info_0.subopcode, status, result[0], result[1]);
+
+ hiraid_bsg_buf_unmap(hdev, job);
+
+ return status;
+}
+
+static int hiraid_alloc_io_ptcmds(struct hiraid_dev *hdev)
+{
+ u32 i;
+ u32 ptnum = HIRAID_TOTAL_PTCMDS(hdev->online_queues - 1);
+
+ INIT_LIST_HEAD(&hdev->io_pt_list);
+ spin_lock_init(&hdev->io_pt_lock);
+
+ hdev->io_ptcmds = kcalloc_node(ptnum, sizeof(struct hiraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->io_ptcmds) {
+		dev_err(hdev->dev, "alloc io pthru cmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ptnum; i++) {
+ hdev->io_ptcmds[i].qid = i / HIRAID_PTHRU_CMDS_PERQ + 1;
+ hdev->io_ptcmds[i].cid = i % HIRAID_PTHRU_CMDS_PERQ + hdev->scsi_qd;
+ list_add_tail(&(hdev->io_ptcmds[i].list), &hdev->io_pt_list);
+ }
+
+ dev_info(hdev->dev, "alloc io pthru cmd success, pthrunum[%d]\n", ptnum);
+
+ return 0;
+}
+
+static void hiraid_free_io_ptcmds(struct hiraid_dev *hdev)
+{
+ kfree(hdev->io_ptcmds);
+ hdev->io_ptcmds = NULL;
+
+ INIT_LIST_HEAD(&hdev->io_pt_list);
+}
+
+static int hiraid_put_io_sync_request(struct hiraid_dev *hdev, struct hiraid_scsi_io_cmd *io_cmd,
+ u32 *result, u32 *reslen, u32 timeout)
+{
+ int ret;
+ dma_addr_t buffer_phy;
+ struct hiraid_queue *ioq;
+ void *sense_addr = NULL;
+ struct hiraid_cmd *pt_cmd = hiraid_get_cmd(hdev, HIRAID_CMD_PTHRU);
+
+ if (!pt_cmd) {
+ dev_err(hdev->dev, "err, get ioq cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&pt_cmd->cmd_done);
+
+ ioq = &hdev->queues[pt_cmd->qid];
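+	/* locate this command's slot in the shared (single hw queue) or per-queue sense buffer */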
+ if (work_mode) {
+ ret = ((pt_cmd->qid - 1) * HIRAID_PTHRU_CMDS_PERQ + pt_cmd->cid) *
+ SCSI_SENSE_BUFFERSIZE;
+ sense_addr = hdev->sense_buffer_virt + ret;
+ buffer_phy = hdev->sense_buffer_phy + ret;
+ } else {
+ ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
+ sense_addr = ioq->sense_buffer_virt + ret;
+ buffer_phy = ioq->sense_buffer_phy + ret;
+ }
+
+ io_cmd->common.sense_addr = cpu_to_le64(buffer_phy);
+ io_cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+ io_cmd->common.cmd_id = cpu_to_le16(pt_cmd->cid);
+
+ hiraid_submit_cmd(ioq, io_cmd);
+
+ if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ pt_cmd->cid, pt_cmd->qid, io_cmd->common.opcode,
+ (le32_to_cpu(io_cmd->common.cdw3[0]) & 0xffff));
+
+ hiraid_admin_timeout(hdev, pt_cmd);
+
+ hiraid_put_cmd(hdev, pt_cmd, HIRAID_CMD_PTHRU);
+ return -ETIME;
+ }
+
+ if (result && reslen) {
+ if ((pt_cmd->status & 0x17f) == 0x101) {
+ memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+ *reslen = SCSI_SENSE_BUFFERSIZE;
+ }
+ }
+
+ hiraid_put_cmd(hdev, pt_cmd, HIRAID_CMD_PTHRU);
+
+ return pt_cmd->status;
+}
+
+static int hiraid_user_send_ptcmd(struct hiraid_dev *hdev, struct bsg_job *job)
+{
+ struct hiraid_bsg_request *bsg_req = (struct hiraid_bsg_request *)(job->request);
+ struct hiraid_passthru_io_cmd *cmd = &(bsg_req->pthrucmd);
+ struct hiraid_scsi_io_cmd pthru_cmd;
+ int status = 0;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+ /* passthrough data length is limited to 4KB with PRP, or 1MB when SGL is used */
+ u32 io_pt_data_len = hdev->ctrl_info->pt_use_sgl ?
+ IOQ_PT_SGL_DATA_LEN : IOQ_PT_DATA_LEN;
+
+ if (cmd->data_len > io_pt_data_len) {
+ dev_err(hdev->dev, "data len bigger than %d\n", io_pt_data_len);
+ return -EFAULT;
+ }
+
+ if (hdev->state != DEV_LIVE) {
+ dev_err(hdev->dev, "err, host state[%d] is not live\n", hdev->state);
+ return -EBUSY;
+ }
+
+ memset(&pthru_cmd, 0, sizeof(pthru_cmd));
+ pthru_cmd.common.opcode = cmd->opcode;
+ pthru_cmd.common.flags = cmd->flags;
+ pthru_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ pthru_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+ pthru_cmd.common.cdb_len = cmd->info_0.cdb_len;
+ pthru_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+ pthru_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+ pthru_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+ pthru_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+ pthru_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+ pthru_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+ pthru_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+ pthru_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+ pthru_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+ pthru_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+ memcpy(pthru_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+ pthru_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+ pthru_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+ pthru_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+ pthru_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+ status = hiraid_bsg_buf_map(hdev, job, (struct hiraid_admin_command *)&pthru_cmd);
+ if (status) {
+ dev_err(hdev->dev, "err, map data failed\n");
+ return status;
+ }
+
+ status = hiraid_put_io_sync_request(hdev, &pthru_cmd, job->reply, &job->reply_len, timeout);
+
+ if (status)
+ dev_info(hdev->dev, "opcode[0x%x] subopcode[0x%x] status[0x%x] replylen[%d]\n",
+ cmd->opcode, cmd->info_1.subopcode, status, job->reply_len);
+
+ hiraid_bsg_buf_unmap(hdev, job);
+
+ return status;
+}
+
+static bool hiraid_check_scmd_finished(struct scsi_cmnd *scmd)
+{
+ struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+ struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+ struct hiraid_queue *hiraidq;
+
+ hiraidq = mapbuf->hiraidq;
+ if (!hiraidq)
+ return false;
+ if (READ_ONCE(mapbuf->state) == CMD_COMPLETE || hiraid_poll_cq(hiraidq, mapbuf->cid)) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n",
+ mapbuf->cid, hiraidq->qid);
+ return true;
+ }
+ return false;
+}
+
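+/*
+ * SCSI timeout callback: if the command has actually completed (e.g. a lost
+ * interrupt recovered by polling the completion queue), restart the block
+ * layer timer; once the full timeout has elapsed, mark the command
+ * CMD_TIMEOUT and let the SCSI error handler take over.
+ */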
+static enum blk_eh_timer_return hiraid_timed_out(struct scsi_cmnd *scmd)
+{
+ struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+ unsigned int timeout = scmd->device->request_queue->rq_timeout;
+
+ if (hiraid_check_scmd_finished(scmd))
+ goto out;
+
+ if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) {
+ if (cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_TIMEOUT) == CMD_FLIGHT)
+ return BLK_EH_DONE;
+ }
+out:
+ return BLK_EH_RESET_TIMER;
+}
+
+/* temporarily send the abort command via the admin queue */
+static int hiraid_send_abort_cmd(struct hiraid_dev *hdev, u32 hdid, u16 qid, u16 cid)
+{
+ struct hiraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.abort.opcode = HIRAID_ADMIN_ABORT_CMD;
+ admin_cmd.abort.hdid = cpu_to_le32(hdid);
+ admin_cmd.abort.sqid = cpu_to_le16(qid);
+ admin_cmd.abort.cid = cpu_to_le16(cid);
+
+ return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/* temporarily send the reset command via the admin queue */
+static int hiraid_send_reset_cmd(struct hiraid_dev *hdev, u8 type, u32 hdid)
+{
+ struct hiraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.reset.opcode = HIRAID_ADMIN_RESET;
+ admin_cmd.reset.hdid = cpu_to_le32(hdid);
+ admin_cmd.reset.type = type;
+
+ return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
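+/*
+ * Validate and apply a controller state transition under state_lock
+ * (NEW/RESETTING -> LIVE, LIVE -> RESETTING, any state -> DELETING,
+ * NEW/LIVE/RESETTING -> DEAD); returns true if the transition was accepted.
+ */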
+static bool hiraid_dev_state_trans(struct hiraid_dev *hdev, enum hiraid_dev_state new_state)
+{
+ unsigned long flags;
+ enum hiraid_dev_state old_state;
+ bool change = false;
+
+ spin_lock_irqsave(&hdev->state_lock, flags);
+
+ old_state = hdev->state;
+ switch (new_state) {
+ case DEV_LIVE:
+ switch (old_state) {
+ case DEV_NEW:
+ case DEV_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case DEV_RESETTING:
+ switch (old_state) {
+ case DEV_LIVE:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case DEV_DELETING:
+ if (old_state != DEV_DELETING)
+ change = true;
+ break;
+ case DEV_DEAD:
+ switch (old_state) {
+ case DEV_NEW:
+ case DEV_LIVE:
+ case DEV_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (change)
+ hdev->state = new_state;
+ spin_unlock_irqrestore(&hdev->state_lock, flags);
+
+ dev_info(hdev->dev, "oldstate[%d]->newstate[%d], change[%d]\n",
+ old_state, new_state, change);
+
+ return change;
+}
+
+static void hiraid_drain_pending_ios(struct hiraid_dev *hdev);
+
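+/*
+ * Fail back all outstanding SCSI I/O and complete every in-flight admin and
+ * passthrough slot with a synthetic 0xFFFF status so that waiters are
+ * released during reset or removal.
+ */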
+static void hiraid_flush_running_cmds(struct hiraid_dev *hdev)
+{
+ int i, j;
+
+ scsi_block_requests(hdev->shost);
+ hiraid_drain_pending_ios(hdev);
+ scsi_unblock_requests(hdev->shost);
+
+ j = HIRAID_AQ_BLK_MQ_DEPTH;
+ for (i = 0; i < j; i++) {
+ if (READ_ONCE(hdev->adm_cmds[i].state) == CMD_FLIGHT) {
+ dev_info(hdev->dev, "flush admin, cid[%d]\n", i);
+ hdev->adm_cmds[i].status = 0xFFFF;
+ WRITE_ONCE(hdev->adm_cmds[i].state, CMD_COMPLETE);
+ complete(&(hdev->adm_cmds[i].cmd_done));
+ }
+ }
+
+ j = HIRAID_TOTAL_PTCMDS(hdev->online_queues - 1);
+ for (i = 0; i < j; i++) {
+ if (READ_ONCE(hdev->io_ptcmds[i].state) == CMD_FLIGHT) {
+ hdev->io_ptcmds[i].status = 0xFFFF;
+ WRITE_ONCE(hdev->io_ptcmds[i].state, CMD_COMPLETE);
+ complete(&(hdev->io_ptcmds[i].cmd_done));
+ }
+ }
+}
+
+static int hiraid_dev_disable(struct hiraid_dev *hdev, bool shutdown)
+{
+ int ret = -ENODEV;
+ struct hiraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ hiraid_shutdown_control(hdev);
+ else
+ ret = hiraid_disable_control(hdev);
+ }
+
+ if (hdev->queue_count == 0) {
+ dev_err(hdev->dev, "warn: queue has been delete\n");
+ return ret;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ hiraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+ hiraid_complete_cqes(adminq, start, end);
+
+ hiraid_pci_disable(hdev);
+
+ hiraid_flush_running_cmds(hdev);
+
+ return ret;
+}
+
+static void hiraid_reset_work(struct work_struct *work)
+{
+ int ret = 0;
+ struct hiraid_dev *hdev = container_of(work, struct hiraid_dev, reset_work);
+
+ if (hdev->state != DEV_RESETTING) {
+ dev_err(hdev->dev, "err, host is not reset state\n");
+ return;
+ }
+
+ dev_info(hdev->dev, "enter host reset\n");
+
+ if (hdev->ctrl_config & HIRAID_CC_ENABLE) {
+ dev_info(hdev->dev, "start dev_disable\n");
+ ret = hiraid_dev_disable(hdev, false);
+ }
+
+ if (ret)
+ goto out;
+
+ ret = hiraid_pci_enable(hdev);
+ if (ret)
+ goto out;
+
+ ret = hiraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = hiraid_setup_io_queues(hdev);
+ if (ret || hdev->online_queues != hdev->last_qcnt)
+ goto pci_disable;
+
+ hiraid_dev_state_trans(hdev, DEV_LIVE);
+
+ hiraid_init_async_event(hdev);
+
+ hiraid_queue_scan(hdev);
+
+ return;
+
+pci_disable:
+ hiraid_pci_disable(hdev);
+out:
+ hiraid_dev_state_trans(hdev, DEV_DEAD);
+ dev_err(hdev->dev, "err, host reset failed\n");
+}
+
+static int hiraid_reset_work_sync(struct hiraid_dev *hdev)
+{
+ if (!hiraid_dev_state_trans(hdev, DEV_RESETTING)) {
+ dev_info(hdev->dev, "can't change to reset state\n");
+ return -EBUSY;
+ }
+
+ if (!queue_work(work_queue, &hdev->reset_work)) {
+ dev_err(hdev->dev, "err, host is already in reset state\n");
+ return -EBUSY;
+ }
+
+ flush_work(&hdev->reset_work);
+ if (hdev->state != DEV_LIVE)
+ return -ENODEV;
+
+ return 0;
+}
+
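+/*
+ * Poll for the timed-out command to reach CMD_TMO_COMPLETE in 500ms steps;
+ * returns 0 once it completes, -ETIMEDOUT if the wait window expires.
+ */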
+static int hiraid_wait_io_completion(struct hiraid_mapmange *mapbuf)
+{
+ u16 times = 0;
+
+ do {
+ if (READ_ONCE(mapbuf->state) == CMD_TMO_COMPLETE)
+ break;
+ msleep(500);
+ times++;
+ } while (times <= HIRAID_WAIT_ABNL_CMD_TIMEOUT);
+
+ /* the command still has not completed within the wait window after abort/reset */
+ if (times >= HIRAID_WAIT_ABNL_CMD_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static bool hiraid_tgt_rst_pending_io_count(struct request *rq, void *data, bool reserved)
+{
+ unsigned int id = *(unsigned int *)data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct hiraid_mapmange *mapbuf;
+ struct hiraid_sdev_hostdata *hostdata;
+
+ if (scmd) {
+ mapbuf = scsi_cmd_priv(scmd);
+ if ((mapbuf->state == CMD_FLIGHT) || (mapbuf->state == CMD_TIMEOUT)) {
+ if ((scmd->device) && (scmd->device->id == id)) {
+ hostdata = scmd->device->hostdata;
+ hostdata->pend_count++;
+ }
+ }
+ }
+ return true;
+}
+
+static bool hiraid_clean_pending_io(struct request *rq, void *data, bool reserved)
+{
+ struct hiraid_dev *hdev = data;
+ struct scsi_cmnd *scmd;
+ struct hiraid_mapmange *mapbuf;
+
+ if (unlikely(!rq || !blk_mq_request_started(rq)))
+ return true;
+
+ scmd = blk_mq_rq_to_pdu(rq);
+ mapbuf = scsi_cmd_priv(scmd);
+
+ if ((cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_COMPLETE) != CMD_FLIGHT) &&
+ (cmpxchg(&mapbuf->state, CMD_TIMEOUT, CMD_COMPLETE) != CMD_TIMEOUT))
+ return true;
+
+ set_host_byte(scmd, DID_NO_CONNECT);
+ if (mapbuf->sge_cnt)
+ scsi_dma_unmap(scmd);
+ hiraid_free_mapbuf(hdev, mapbuf);
+ dev_warn_ratelimited(hdev->dev, "back unfinished CQE, cid[%d] qid[%d]\n",
+ mapbuf->cid, mapbuf->hiraidq->qid);
+ scmd->scsi_done(scmd);
+
+ return true;
+}
+
+static void hiraid_drain_pending_ios(struct hiraid_dev *hdev)
+{
+ blk_mq_tagset_busy_iter(&hdev->shost->tag_set, hiraid_clean_pending_io, (void *)(hdev));
+}
+
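+/*
+ * After a target reset, repeatedly count the commands still in flight for
+ * this target and wait in 500ms steps until none remain or the wait window
+ * expires.
+ */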
+static int wait_tgt_reset_io_done(struct scsi_cmnd *scmd)
+{
+ u16 timeout = 0;
+ struct hiraid_sdev_hostdata *hostdata;
+ struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+
+ hostdata = scmd->device->hostdata;
+
+ do {
+ hostdata->pend_count = 0;
+ blk_mq_tagset_busy_iter(&hdev->shost->tag_set, hiraid_tgt_rst_pending_io_count,
+ (void *)(&scmd->device->id));
+
+ if (!hostdata->pend_count)
+ return 0;
+
+ msleep(500);
+ timeout++;
+ } while (timeout <= HIRAID_WAIT_RST_IO_TIMEOUT);
+
+ return -ETIMEDOUT;
+}
+
+static int hiraid_abort(struct scsi_cmnd *scmd)
+{
+ struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+ struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+ struct hiraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != DEV_LIVE || !hiraid_wait_io_completion(mapbuf) ||
+ hiraid_check_scmd_finished(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ cid = mapbuf->cid;
+ hwq = mapbuf->hiraidq->qid;
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, send abort\n", cid, hwq);
+ ret = hiraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
+ if (ret != -ETIME) {
+ ret = hiraid_wait_io_completion(mapbuf);
+ if (ret) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, not found\n", cid, hwq);
+ return FAILED;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort succ\n", cid, hwq);
+ return SUCCESS;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, timeout\n", cid, hwq);
+ return FAILED;
+}
+
+static int hiraid_scsi_reset(struct scsi_cmnd *scmd, enum hiraid_rst_type rst)
+{
+ struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+ struct hiraid_sdev_hostdata *hostdata;
+ int ret;
+
+ if (hdev->state != DEV_LIVE)
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+
+ dev_warn(hdev->dev, "sdev[%d:%d] send %s reset\n", scmd->device->channel, scmd->device->id,
+ rst ? "bus" : "target");
+ ret = hiraid_send_reset_cmd(hdev, rst, hostdata->hdid);
+ if ((ret == 0) || (ret == FW_EH_DEV_NONE && rst == HIRAID_RESET_TARGET)) {
+ if (rst == HIRAID_RESET_TARGET) {
+ ret = wait_tgt_reset_io_done(scmd);
+ if (ret) {
+ dev_warn(hdev->dev, "sdev[%d:%d] target has %d peding cmd, target reset failed\n",
+ scmd->device->channel, scmd->device->id,
+ hostdata->pend_count);
+ return FAILED;
+ }
+ }
+ dev_warn(hdev->dev, "sdev[%d:%d] %s reset success\n",
+ scmd->device->channel, scmd->device->id, rst ? "bus" : "target");
+ return SUCCESS;
+ }
+
+ dev_warn(hdev->dev, "sdev[%d:%d] %s reset failed\n",
+ scmd->device->channel, scmd->device->id, rst ? "bus" : "target");
+ return FAILED;
+}
+
+static int hiraid_target_reset(struct scsi_cmnd *scmd)
+{
+ return hiraid_scsi_reset(scmd, HIRAID_RESET_TARGET);
+}
+
+static int hiraid_bus_reset(struct scsi_cmnd *scmd)
+{
+ return hiraid_scsi_reset(scmd, HIRAID_RESET_BUS);
+}
+
+static int hiraid_host_reset(struct scsi_cmnd *scmd)
+{
+ struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+
+ if (hdev->state != DEV_LIVE)
+ return SUCCESS;
+
+ dev_warn(hdev->dev, "sdev[%d:%d] send host reset\n",
+ scmd->device->channel, scmd->device->id);
+ if (hiraid_reset_work_sync(hdev) == -EBUSY)
+ flush_work(&hdev->reset_work);
+
+ if (hdev->state != DEV_LIVE) {
+ dev_warn(hdev->dev, "sdev[%d:%d] host reset failed\n",
+ scmd->device->channel, scmd->device->id);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "sdev[%d:%d] host reset success\n",
+ scmd->device->channel, scmd->device->id);
+
+ return SUCCESS;
+}
+
+static pci_ers_result_t hiraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "pci error detected, state[%d]\n", state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ dev_warn(hdev->dev, "channel is normal, do nothing\n");
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ dev_warn(hdev->dev, "channel io frozen, need reset controller\n");
+
+ scsi_block_requests(hdev->shost);
+
+ hiraid_dev_state_trans(hdev, DEV_RESETTING);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_warn(hdev->dev, "channel io failure, disconnect\n");
+
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t hiraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "restart after slot reset\n");
+
+ pci_restore_state(pdev);
+
+ if (!queue_work(work_queue, &hdev->reset_work)) {
+ dev_err(hdev->dev, "err, the device is resetting state\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ flush_work(&hdev->reset_work);
+
+ scsi_unblock_requests(hdev->shost);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void hiraid_reset_pci_finish(struct pci_dev *pdev)
+{
+ struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter hiraid reset finish\n");
+}
+
+static ssize_t csts_pp_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_PP_MASK);
+ ret >>= HIRAID_CSTS_PP_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_shst_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_SHST_MASK);
+ ret >>= HIRAID_CSTS_SHST_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_cfs_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_CFS_MASK);
+ ret >>= HIRAID_CSTS_CFS_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_rdy_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev))
+ ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_RDY);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t fw_version_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fw_version);
+}
+
+static ssize_t hdd_dispatch_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int val = 0;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+ if (val < DISPATCH_BY_CPU || val > DISPATCH_BY_DISK)
+ return -EINVAL;
+ hdev->hdd_dispatch = val;
+
+ return strlen(buf);
+}
+
+static ssize_t hdd_dispatch_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hdev->hdd_dispatch);
+}
+
+static DEVICE_ATTR_RO(csts_pp);
+static DEVICE_ATTR_RO(csts_shst);
+static DEVICE_ATTR_RO(csts_cfs);
+static DEVICE_ATTR_RO(csts_rdy);
+static DEVICE_ATTR_RO(fw_version);
+static DEVICE_ATTR_RW(hdd_dispatch);
+
+static struct device_attribute *hiraid_host_attrs[] = {
+ &dev_attr_csts_rdy,
+ &dev_attr_csts_pp,
+ &dev_attr_csts_cfs,
+ &dev_attr_fw_version,
+ &dev_attr_csts_shst,
+ &dev_attr_hdd_dispatch,
+ NULL,
+};
+
+static int hiraid_get_vd_info(struct hiraid_dev *hdev, struct hiraid_vd_info *vd_info, u16 vid)
+{
+ struct hiraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t buffer_phy = 0;
+ int ret;
+
+ if (hdev->state >= DEV_RESETTING) {
+ dev_err(hdev->dev, "err, host state[%d] is not right\n", hdev->state);
+ return -EBUSY;
+ }
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+ admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, USRCMD_TIMEOUT);
+ if (!ret)
+ memcpy(vd_info, data_ptr, sizeof(struct hiraid_vd_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy);
+
+ return ret;
+}
+
+static int hiraid_get_bgtask(struct hiraid_dev *hdev, struct hiraid_bgtask *bgtask)
+{
+ struct hiraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t buffer_phy = 0;
+ int ret;
+
+ if (hdev->state >= DEV_RESETTING) {
+ dev_err(hdev->dev, "err, host state[%d] is not right\n", hdev->state);
+ return -EBUSY;
+ }
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, USRCMD_TIMEOUT);
+ if (!ret)
+ memcpy(bgtask, data_ptr, sizeof(struct hiraid_bgtask));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy);
+
+ return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct hiraid_dev *hdev;
+ struct hiraid_vd_info *vd_info;
+ struct hiraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ if (!HIRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info)
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = hiraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+ ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+ vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct hiraid_dev *hdev;
+ struct hiraid_vd_info *vd_info;
+ struct hiraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ if (!HIRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info)
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = hiraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret) {
+ vd_info->vd_status = 0;
+ vd_info->rg_id = 0xff;
+ }
+
+ ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ? vd_info->vd_status : 0;
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct hiraid_dev *hdev;
+ struct hiraid_vd_info *vd_info;
+ struct hiraid_bgtask *bgtask;
+ struct hiraid_sdev_hostdata *hostdata;
+ u8 rg_id, i, progress = 0;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ if (!HIRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info)
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = hiraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ goto out;
+
+ rg_id = vd_info->rg_id;
+
+ bgtask = (struct hiraid_bgtask *)vd_info;
+ ret = hiraid_get_bgtask(hdev, bgtask);
+ if (ret)
+ goto out;
+ for (i = 0; i < bgtask->task_num; i++) {
+ if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+ (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+ progress = bgtask->bgtask[i].progress;
+ }
+
+out:
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static ssize_t dispatch_hwq_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hiraid_sdev_hostdata *hostdata;
+
+ hostdata = to_scsi_device(dev)->hostdata;
+ return snprintf(buf, PAGE_SIZE, "%d\n", hostdata->hwq);
+}
+
+static ssize_t dispatch_hwq_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int val;
+ struct hiraid_dev *hdev;
+ struct scsi_device *sdev;
+ struct hiraid_sdev_hostdata *hostdata;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+ if (val <= 0 || val >= hdev->online_queues)
+ return -EINVAL;
+ if (!hiraid_disk_is_hdd(hostdata->attr))
+ return -EINVAL;
+
+ hostdata->hwq = val;
+ return strlen(buf);
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+static DEVICE_ATTR_RW(dispatch_hwq);
+
+static struct device_attribute *hiraid_dev_attrs[] = {
+ &dev_attr_raid_state,
+ &dev_attr_raid_level,
+ &dev_attr_raid_resync,
+ &dev_attr_dispatch_hwq,
+ NULL,
+};
+
+static struct pci_error_handlers hiraid_err_handler = {
+ .error_detected = hiraid_pci_error_detected,
+ .slot_reset = hiraid_pci_slot_reset,
+ .reset_done = hiraid_reset_pci_finish,
+};
+
+static int hiraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ int ret;
+ struct hiraid_dev *hdev = shost_priv(shost);
+
+ dev_info(hdev->dev, "start sysfs host reset cmd\n");
+ ret = hiraid_reset_work_sync(hdev);
+ dev_info(hdev->dev, "stop sysfs host reset cmd[%d]\n", ret);
+
+ return ret;
+}
+
+static int hiraid_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct hiraid_dev *hdev = shost_priv(shost);
+
+ hiraid_scan_work(&hdev->scan_work);
+
+ return 1;
+}
+
+static struct scsi_host_template hiraid_driver_template = {
+ .module = THIS_MODULE,
+ .name = "hiraid",
+ .proc_name = "hiraid",
+ .queuecommand = hiraid_queue_command,
+ .slave_alloc = hiraid_slave_alloc,
+ .slave_destroy = hiraid_slave_destroy,
+ .slave_configure = hiraid_slave_configure,
+ .scan_finished = hiraid_scan_finished,
+ .eh_timed_out = hiraid_timed_out,
+ .eh_abort_handler = hiraid_abort,
+ .eh_target_reset_handler = hiraid_target_reset,
+ .eh_bus_reset_handler = hiraid_bus_reset,
+ .eh_host_reset_handler = hiraid_host_reset,
+ .change_queue_depth = scsi_change_queue_depth,
+ .this_id = -1,
+ .unchecked_isa_dma = 0,
+ .shost_attrs = hiraid_host_attrs,
+ .sdev_attrs = hiraid_dev_attrs,
+ .host_reset = hiraid_sysfs_host_reset,
+};
+
+static void hiraid_shutdown(struct pci_dev *pdev)
+{
+ struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+ hiraid_delete_io_queues(hdev);
+ hiraid_disable_admin_queue(hdev, true);
+}
+
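+/*
+ * Sanity check a BSG request: request length, supported message code and a
+ * user timeout that (plus controller reset time) still fits within the block
+ * request timeout.
+ */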
+static bool hiraid_bsg_is_valid(struct bsg_job *job)
+{
+ u64 timeout = 0;
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct hiraid_bsg_request *bsg_req = job->request;
+ struct hiraid_dev *hdev = shost_priv(dev_to_shost(job->dev));
+
+ if (bsg_req == NULL || job->request_len != sizeof(struct hiraid_bsg_request))
+ return false;
+
+ switch (bsg_req->msgcode) {
+ case HIRAID_BSG_ADMIN:
+ timeout = msecs_to_jiffies(bsg_req->admcmd.timeout_ms);
+ break;
+ case HIRAID_BSG_IOPTHRU:
+ timeout = msecs_to_jiffies(bsg_req->pthrucmd.timeout_ms);
+ break;
+ default:
+ dev_info(hdev->dev, "bsg unsupport msgcode[%d]\n", bsg_req->msgcode);
+ return false;
+ }
+
+ if ((timeout + CTL_RST_TIME) > rq->timeout) {
+ dev_err(hdev->dev, "bsg invalid time\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* bsg dispatch user command */
+static int hiraid_bsg_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = dev_to_shost(job->dev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct hiraid_bsg_request *bsg_req = job->request;
+ int ret = -ENOMSG;
+
+ job->reply_len = 0;
+
+ if (!hiraid_bsg_is_valid(job)) {
+ bsg_job_done(job, ret, 0);
+ return 0;
+ }
+
+ dev_log_dbg(hdev->dev, "bsg msgcode[%d] msglen[%d] timeout[%d];"
+ "reqnsge[%d], reqlen[%d]\n",
+ bsg_req->msgcode, job->request_len, rq->timeout,
+ job->request_payload.sg_cnt, job->request_payload.payload_len);
+
+ switch (bsg_req->msgcode) {
+ case HIRAID_BSG_ADMIN:
+ ret = hiraid_user_send_admcmd(hdev, job);
+ break;
+ case HIRAID_BSG_IOPTHRU:
+ ret = hiraid_user_send_ptcmd(hdev, job);
+ break;
+ default:
+ break;
+ }
+
+ if (ret > 0)
+ ret = ret | (ret << 8);
+
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+static inline void hiraid_unregist_bsg(struct hiraid_dev *hdev)
+{
+ if (hdev->bsg_queue) {
+ bsg_unregister_queue(hdev->bsg_queue);
+ blk_cleanup_queue(hdev->bsg_queue);
+ }
+}
+
+static int hiraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hiraid_dev *hdev;
+ struct Scsi_Host *shost;
+ int node, ret;
+ char bsg_name[15];
+
+ shost = scsi_host_alloc(&hiraid_driver_template, sizeof(*hdev));
+ if (!shost) {
+ dev_err(&pdev->dev, "failed to allocate scsi host\n");
+ return -ENOMEM;
+ }
+ hdev = shost_priv(shost);
+ hdev->pdev = pdev;
+ hdev->dev = get_device(&pdev->dev);
+
+ node = dev_to_node(hdev->dev);
+ if (node == NUMA_NO_NODE) {
+ node = first_memory_node;
+ set_dev_node(hdev->dev, node);
+ }
+ hdev->numa_node = node;
+ hdev->shost = shost;
+ hdev->instance = shost->host_no;
+ pci_set_drvdata(pdev, hdev);
+
+ ret = hiraid_dev_map(hdev);
+ if (ret)
+ goto put_dev;
+
+ init_rwsem(&hdev->dev_rwsem);
+ INIT_WORK(&hdev->scan_work, hiraid_scan_work);
+ INIT_WORK(&hdev->timesyn_work, hiraid_timesyn_work);
+ INIT_WORK(&hdev->reset_work, hiraid_reset_work);
+ INIT_WORK(&hdev->fwact_work, hiraid_fwactive_work);
+ spin_lock_init(&hdev->state_lock);
+
+ ret = hiraid_alloc_resources(hdev);
+ if (ret)
+ goto dev_unmap;
+
+ ret = hiraid_pci_enable(hdev);
+ if (ret)
+ goto resources_free;
+
+ ret = hiraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = hiraid_init_control_info(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ ret = hiraid_setup_io_queues(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ hiraid_shost_init(hdev);
+
+ ret = scsi_add_host(hdev->shost, hdev->dev);
+ if (ret) {
+ dev_err(hdev->dev, "add shost to system failed, ret[%d]\n", ret);
+ goto remove_io_queues;
+ }
+
+ snprintf(bsg_name, sizeof(bsg_name), "hiraid%d", shost->host_no);
+ hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name, hiraid_bsg_dispatch,
+ NULL, hiraid_get_max_cmd_size(hdev));
+ if (IS_ERR(hdev->bsg_queue)) {
+ dev_err(hdev->dev, "err, setup bsg failed\n");
+ hdev->bsg_queue = NULL;
+ goto remove_io_queues;
+ }
+
+ if (hdev->online_queues == HIRAID_ADMIN_QUEUE_NUM) {
+ dev_warn(hdev->dev, "warn: only admin queue can be used\n");
+ return 0;
+ }
+
+ hdev->state = DEV_LIVE;
+
+ hiraid_init_async_event(hdev);
+
+ ret = hiraid_dev_list_init(hdev);
+ if (ret)
+ goto unregist_bsg;
+
+ ret = hiraid_configure_timestamp(hdev);
+ if (ret)
+ dev_warn(hdev->dev, "time synchronization failed\n");
+
+ ret = hiraid_alloc_io_ptcmds(hdev);
+ if (ret)
+ goto unregist_bsg;
+
+ scsi_scan_host(hdev->shost);
+
+ return 0;
+
+unregist_bsg:
+ hiraid_unregist_bsg(hdev);
+remove_io_queues:
+ hiraid_delete_io_queues(hdev);
+disable_admin_q:
+ hiraid_free_sense_buffer(hdev);
+ hiraid_disable_admin_queue(hdev, false);
+pci_disable:
+ hiraid_free_all_queues(hdev);
+ hiraid_pci_disable(hdev);
+resources_free:
+ hiraid_free_resources(hdev);
+dev_unmap:
+ hiraid_dev_unmap(hdev);
+put_dev:
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ return -ENODEV;
+}
+
+static void hiraid_remove(struct pci_dev *pdev)
+{
+ struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = hdev->shost;
+
+ dev_info(hdev->dev, "enter hiraid remove\n");
+
+ hiraid_dev_state_trans(hdev, DEV_DELETING);
+ flush_work(&hdev->reset_work);
+
+ if (!pci_device_is_present(pdev))
+ hiraid_flush_running_cmds(hdev);
+
+ hiraid_unregist_bsg(hdev);
+ scsi_remove_host(shost);
+ hiraid_free_io_ptcmds(hdev);
+ kfree(hdev->dev_info);
+ hiraid_delete_io_queues(hdev);
+ hiraid_free_sense_buffer(hdev);
+ hiraid_disable_admin_queue(hdev, false);
+ hiraid_free_all_queues(hdev);
+ hiraid_pci_disable(hdev);
+ hiraid_free_resources(hdev);
+ hiraid_dev_unmap(hdev);
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ dev_info(hdev->dev, "exit hiraid remove\n");
+}
+
+static const struct pci_device_id hiraid_hw_card_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_LOGIC, HIRAID_SERVER_DEVICE_HBA_DID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_LOGIC, HIRAID_SERVER_DEVICE_RAID_DID) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, hiraid_hw_card_ids);
+
+static struct pci_driver hiraid_driver = {
+ .name = "hiraid",
+ .id_table = hiraid_hw_card_ids,
+ .probe = hiraid_probe,
+ .remove = hiraid_remove,
+ .shutdown = hiraid_shutdown,
+ .err_handler = &hiraid_err_handler,
+};
+
+static int __init hiraid_init(void)
+{
+ int ret;
+
+ work_queue = alloc_workqueue("hiraid-wq", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!work_queue)
+ return -ENOMEM;
+
+ hiraid_class = class_create(THIS_MODULE, "hiraid");
+ if (IS_ERR(hiraid_class)) {
+ ret = PTR_ERR(hiraid_class);
+ goto destroy_wq;
+ }
+
+ ret = pci_register_driver(&hiraid_driver);
+ if (ret < 0)
+ goto destroy_class;
+
+ return 0;
+
+destroy_class:
+ class_destroy(hiraid_class);
+destroy_wq:
+ destroy_workqueue(work_queue);
+
+ return ret;
+}
+
+static void __exit hiraid_exit(void)
+{
+ pci_unregister_driver(&hiraid_driver);
+ class_destroy(hiraid_class);
+ destroy_workqueue(work_queue);
+}
+
+MODULE_AUTHOR("Huawei Technologies CO., Ltd");
+MODULE_DESCRIPTION("Huawei RAID driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(HIRAID_DRV_VERSION);
+module_init(hiraid_init);
+module_exit(hiraid_exit);
--
2.22.0.windows.1

[PATCH openEuler-1.0-LTS] Revert "tcp: fix delayed ACKs for MSS boundary condition"
by Dong Chenchen 16 Nov '23
16 Nov '23
From: dongchenchen <dongchenchen2(a)huawei.com>
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8GYWB
CVE: NA
--------------------------------
This reverts commit 389055ab28760dd7b25c6996c6647b0a37e0a34e.
Signed-off-by: dongchenchen <dongchenchen2(a)huawei.com>
---
net/ipv4/tcp_input.c | 13 -------------
1 file changed, 13 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f8b1ace50f7a..a12598dabb80 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -172,19 +172,6 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
if (unlikely(len > icsk->icsk_ack.rcv_mss +
MAX_TCP_OPTION_SPACE))
tcp_gro_dev_warn(sk, skb, len);
- /* If the skb has a len of exactly 1*MSS and has the PSH bit
- * set then it is likely the end of an application write. So
- * more data may not be arriving soon, and yet the data sender
- * may be waiting for an ACK if cwnd-bound or using TX zero
- * copy. So we set ICSK_ACK_PUSHED here so that
- * tcp_cleanup_rbuf() will send an ACK immediately if the app
- * reads all of the data and is not ping-pong. If len > MSS
- * then this logic does not matter (and does not hurt) because
- * tcp_cleanup_rbuf() will always ACK immediately if the app
- * reads data and there is more than an MSS of unACKed data.
- */
- if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
- icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
} else {
/* Otherwise, we make more careful check taking into account,
* that SACKs block is variable.
--
2.25.1

16 Nov '23
driver inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I89D3P
CVE: NA
------------------------------------------
This commit adds support for SPxxx RAID/HBA controllers.
RAID controllers support RAID 0/1/5/6/10/50/60 modes.
HBA controllers support RAID 0/1/10 modes.
Both RAID and HBA controllers support SAS/SATA HDDs and SSDs.
Signed-off-by: z00848923 <zhanglei48(a)huawei.com>
---
Documentation/scsi/hisi_raid.rst | 84 +
MAINTAINERS | 7 +
arch/arm64/configs/openeuler_defconfig | 1 +
arch/x86/configs/openeuler_defconfig | 1 +
drivers/scsi/Kconfig | 1 +
drivers/scsi/Makefile | 1 +
drivers/scsi/hisi_raid/Kconfig | 14 +
drivers/scsi/hisi_raid/Makefile | 7 +
drivers/scsi/hisi_raid/hiraid.h | 760 +++++
drivers/scsi/hisi_raid/hiraid_main.c | 3982 ++++++++++++++++++++++++
10 files changed, 4858 insertions(+)
create mode 100644 Documentation/scsi/hisi_raid.rst
create mode 100644 drivers/scsi/hisi_raid/Kconfig
create mode 100644 drivers/scsi/hisi_raid/Makefile
create mode 100644 drivers/scsi/hisi_raid/hiraid.h
create mode 100644 drivers/scsi/hisi_raid/hiraid_main.c
diff --git a/Documentation/scsi/hisi_raid.rst b/Documentation/scsi/hisi_raid.rst
new file mode 100644
index 000000000000..523a6763a7fd
--- /dev/null
+++ b/Documentation/scsi/hisi_raid.rst
@@ -0,0 +1,84 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================================
+hisi_raid - HUAWEI SCSI RAID Controller driver
+==============================================
+
+This file describes the hisi_raid SCSI driver for HUAWEI
+RAID controllers. The hisi_raid driver is the first
+generation RAID driver.
+
+For hisi_raid controller support, enable the hisi_raid driver
+when configuring the kernel.
+
+hisi_raid specific entries in /sys
+==================================
+
+hisi_raid host attributes
+-------------------------
+
+ - /sys/class/scsi_host/host*/csts_pp
+ - /sys/class/scsi_host/host*/csts_shst
+ - /sys/class/scsi_host/host*/csts_cfs
+ - /sys/class/scsi_host/host*/csts_rdy
+ - /sys/class/scsi_host/host*/fw_version
+
+ The host csts_pp attribute is a read only attribute. This attribute
+ indicates whether the controller is processing commands. If this attribute
+ is set to ‘1’, then the controller is processing commands normally. If
+ this attribute is cleared to ‘0’, then the controller has temporarily stopped
+ processing commands in order to handle an event (e.g., firmware activation).
+
+ The host csts_shst attribute is a read only attribute. This attribute
+ indicates status of shutdown processing.The shutdown status values are defined
+ as:
+ ====== ==============================
+ Value Definition
+ ====== ==============================
+ 00b Normal operation
+ 01b Shutdown processing occurring
+ 10b Shutdown processing complete
+ 11b Reserved
+ ====== ==============================
+
+ The host csts_cfs attribute is a read only attribute. This attribute is set to
+ ’1’ when a fatal controller error occurred that could not be communicated in the
+ appropriate Completion Queue. This bit is cleared to ‘0’ when a fatal controller
+ error has not occurred.
+
+ The host csts_rdy attribute is a read only attribute. This attribute is set to
+ ‘1’ when the controller is ready to process submission queue entries.
+
+ The fw_version attribute is read-only and will return the driver version and the
+ controller firmware version.
+
+hisi_raid scsi device attributes
+--------------------------------
+
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_level
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_state
+ - /sys/class/scsi_device/X\:X\:X\:X/device/raid_resync
+
+ The device raid_level attribute is a read only attribute. This attribute indicates
+ the RAID level of the scsi device (will display "NA" if the scsi device is not a virtual disk type).
+
+ The device raid_state attribute is read-only and indicates the RAID status of the
+ scsi device (will display "NA" if the scsi device is not a virtual disk type).
+
+ The device raid_resync attribute is read-only and indicates the RAID rebuild progress
+ of the scsi device (will display "NA" if the scsi device is not a virtual disk type).
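+
+ As a usage sketch (illustrative only, not part of the driver), any of the
+ attributes above can be read from user space like a regular sysfs file. The
+ host number "host0" below is an assumption and depends on the actual system::
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            char buf[64];
+            /* any host or device attribute path listed above can be used here */
+            FILE *f = fopen("/sys/class/scsi_host/host0/csts_rdy", "r");
+
+            if (!f)
+                    return 1;
+            if (fgets(buf, sizeof(buf), f))
+                    printf("csts_rdy: %s", buf);
+            fclose(f);
+            return 0;
+    }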
+
+Supported devices
+=================
+
+ =================== ======= =======================================
+ PCI ID (pci.ids) OEM Product
+ =================== ======= =======================================
+ 19E5:3858 HUAWEI SP186-M-8i(HBA:8Ports)
+ 19E5:3858 HUAWEI SP186-M-16i(HBA:16Ports)
+ 19E5:3858 HUAWEI SP186-M-32i(HBA:32Ports)
+ 19E5:3858 HUAWEI SP186-M-40i(HBA:40Ports)
+ 19E5:3758 HUAWEI SP686C-M-16i(RAID:16Ports,2G cache)
+ 19E5:3758 HUAWEI SP686C-M-16i(RAID:16Ports,4G cache)
+ 19E5:3758 HUAWEI SP686C-MH-32i(RAID:32Ports,4G cache)
+ 19E5:3758 HUAWEI SP686C-M-40i(RAID:40Ports,2G cache)
+ 19E5:3758 HUAWEI SP686C-M-40i(RAID:40Ports,4G cache)
+ =================== ======= =======================================
+
diff --git a/MAINTAINERS b/MAINTAINERS
index a7815fd1072f..8324f56a2096 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8070,6 +8070,13 @@ M: Yonglong Liu <liuyonglong(a)huawei.com>
S: Supported
F: drivers/ptp/ptp_hisi.c
+HISI_RAID SCSI RAID DRIVERS
+M: Zhang Lei <zhanglei48(a)huawei.com>
+L: linux-scsi(a)vger.kernel.org
+S: Maintained
+F: Documentation/scsi/hisi_raid.rst
+F: drivers/scsi/hisi_raid/
+
HMM - Heterogeneous Memory Management
M: Jérôme Glisse <jglisse(a)redhat.com>
L: linux-mm(a)kvack.org
diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index ec758f0530c1..b9a50ef6d768 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -2413,6 +2413,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_SMARTPQI=m
+CONFIG_SCSI_HISI_RAID=m
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_MYRB is not set
diff --git a/arch/x86/configs/openeuler_defconfig b/arch/x86/configs/openeuler_defconfig
index 5171aa50736b..43b5294326e6 100644
--- a/arch/x86/configs/openeuler_defconfig
+++ b/arch/x86/configs/openeuler_defconfig
@@ -2369,6 +2369,7 @@ CONFIG_SCSI_MPT2SAS_MAX_SGE=128
CONFIG_SCSI_MPT3SAS_MAX_SGE=128
CONFIG_SCSI_MPT2SAS=m
CONFIG_SCSI_SMARTPQI=m
+CONFIG_SCSI_HISI_RAID=m
# CONFIG_SCSI_UFSHCD is not set
# CONFIG_SCSI_HPTIOP is not set
# CONFIG_SCSI_BUSLOGIC is not set
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index a9da1b2dec4a..41ef664cf0ed 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -473,6 +473,7 @@ source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/sssraid/Kconfig"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
+source "drivers/scsi/hisi_raid/Kconfig"
source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index c2a1efa16912..8f26dbb5ee37 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
+obj-$(CONFIG_SCSI_HISI_RAID) += hisi_raid/
obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
diff --git a/drivers/scsi/hisi_raid/Kconfig b/drivers/scsi/hisi_raid/Kconfig
new file mode 100644
index 000000000000..d402dc45a7c1
--- /dev/null
+++ b/drivers/scsi/hisi_raid/Kconfig
@@ -0,0 +1,14 @@
+#
+# Kernel configuration file for the hisi_raid
+#
+
+config SCSI_HISI_RAID
+ tristate "Huawei Hisi_Raid Adapter"
+ depends on PCI && SCSI
+ select BLK_DEV_BSGLIB
+ depends on ARM64 || X86_64
+ help
+ This driver supports the hisi_raid SPxxx series RAID controllers, which have
+ a PCI Express Gen4 host interface and support SAS/SATA HDDs/SSDs.
+ To compile this driver as a module, choose M here: the module will
+ be called hisi_raid.
diff --git a/drivers/scsi/hisi_raid/Makefile b/drivers/scsi/hisi_raid/Makefile
new file mode 100644
index 000000000000..b71a675f4190
--- /dev/null
+++ b/drivers/scsi/hisi_raid/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the hisi_raid drivers.
+#
+
+obj-$(CONFIG_SCSI_HISI_RAID) += hiraid.o
+
+hiraid-objs := hiraid_main.o
diff --git a/drivers/scsi/hisi_raid/hiraid.h b/drivers/scsi/hisi_raid/hiraid.h
new file mode 100644
index 000000000000..1ebc3dd3f2ec
--- /dev/null
+++ b/drivers/scsi/hisi_raid/hiraid.h
@@ -0,0 +1,760 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022 Huawei Technologies Co., Ltd */
+
+#ifndef __HIRAID_H_
+#define __HIRAID_H_
+
+#define HIRAID_HDD_PD_QD 64
+#define HIRAID_HDD_VD_QD 256
+#define HIRAID_SSD_PD_QD 64
+#define HIRAID_SSD_VD_QD 256
+
+#define BGTASK_TYPE_REBUILD 4
+#define USR_CMD_READ 0xc2
+#define USR_CMD_RDLEN 0x1000
+#define USR_CMD_VDINFO 0x704
+#define USR_CMD_BGTASK 0x504
+#define VDINFO_PARAM_LEN 0x04
+
+#define HIRAID_DEFAULT_MAX_CHANNEL 4
+#define HIRAID_DEFAULT_MAX_ID 240
+#define HIRAID_DEFAULT_MAX_LUN_PER_HOST 8
+
+#define FUA_MASK 0x08
+
+#define HIRAID_IO_SQES 7
+#define HIRAID_IO_CQES 4
+#define PRP_ENTRY_SIZE 8
+
+#define EXTRA_POOL_SIZE 256
+#define MAX_EXTRA_POOL_NUM 16
+#define MAX_CMD_PER_DEV 64
+#define MAX_CDB_LEN 16
+
+#define HIRAID_AQ_DEPTH 128
+#define HIRAID_ASYN_COMMANDS 16
+#define HIRAID_AQ_BLK_MQ_DEPTH (HIRAID_AQ_DEPTH - HIRAID_ASYN_COMMANDS)
+#define HIRAID_AQ_MQ_TAG_DEPTH (HIRAID_AQ_BLK_MQ_DEPTH - 1)
+
+#define HIRAID_ADMIN_QUEUE_NUM 1
+#define HIRAID_PTHRU_CMDS_PERQ 1
+#define HIRAID_TOTAL_PTCMDS(qn) (HIRAID_PTHRU_CMDS_PERQ * (qn))
+
+#define HIRAID_DEV_INFO_ATTR_BOOT(attr) ((attr) & 0x01)
+#define HIRAID_DEV_INFO_ATTR_VD(attr) (((attr) & 0x02) == 0x0)
+#define HIRAID_DEV_INFO_ATTR_PT(attr) (((attr) & 0x22) == 0x02)
+#define HIRAID_DEV_INFO_ATTR_RAWDISK(attr) ((attr) & 0x20)
+#define HIRAID_DEV_DISK_TYPE(attr) ((attr) & 0x1e)
+
+#define HIRAID_DEV_INFO_FLAG_VALID(flag) ((flag) & 0x01)
+#define HIRAID_DEV_INFO_FLAG_CHANGE(flag) ((flag) & 0x02)
+
+#define HIRAID_CAP_MQES(cap) ((cap) & 0xffff)
+#define HIRAID_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
+#define HIRAID_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
+#define HIRAID_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
+#define HIRAID_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
+#define HIRAID_CAP_DMAMASK(cap) (((cap) >> 37) & 0xff)
+
+#define IO_SQE_SIZE sizeof(struct hiraid_scsi_io_cmd)
+#define ADMIN_SQE_SIZE sizeof(struct hiraid_admin_command)
+#define SQE_SIZE(qid) (((qid) > 0) ? IO_SQE_SIZE : ADMIN_SQE_SIZE)
+#define CQ_SIZE(depth) ((depth) * sizeof(struct hiraid_completion))
+#define SQ_SIZE(qid, depth) ((depth) * SQE_SIZE(qid))
+
+#define SENSE_SIZE(depth) ((depth) * SCSI_SENSE_BUFFERSIZE)
+
+#define IO_6_DEFAULT_TX_LEN 256
+
+#define MAX_DEV_ENTRY_PER_PAGE_4K 340
+
+#define MAX_REALTIME_BGTASK_NUM 32
+
+#define PCI_VENDOR_ID_HUAWEI_LOGIC 0x19E5
+#define HIRAID_SERVER_DEVICE_HBA_DID 0x3858
+#define HIRAID_SERVER_DEVICE_RAID_DID 0x3758
+
+enum {
+ HIRAID_SC_SUCCESS = 0x0,
+ HIRAID_SC_INVALID_OPCODE = 0x1,
+ HIRAID_SC_INVALID_FIELD = 0x2,
+
+ HIRAID_SC_ABORT_LIMIT = 0x103,
+ HIRAID_SC_ABORT_MISSING = 0x104,
+ HIRAID_SC_ASYNC_LIMIT = 0x105,
+
+ HIRAID_SC_DNR = 0x4000,
+};
+
+enum {
+ HIRAID_REG_CAP = 0x0000,
+ HIRAID_REG_CC = 0x0014,
+ HIRAID_REG_CSTS = 0x001c,
+ HIRAID_REG_AQA = 0x0024,
+ HIRAID_REG_ASQ = 0x0028,
+ HIRAID_REG_ACQ = 0x0030,
+ HIRAID_REG_DBS = 0x1000,
+};
+
+enum {
+ HIRAID_CC_ENABLE = 1 << 0,
+ HIRAID_CC_CSS_NVM = 0 << 4,
+ HIRAID_CC_MPS_SHIFT = 7,
+ HIRAID_CC_AMS_SHIFT = 11,
+ HIRAID_CC_SHN_SHIFT = 14,
+ HIRAID_CC_IOSQES_SHIFT = 16,
+ HIRAID_CC_IOCQES_SHIFT = 20,
+ HIRAID_CC_AMS_RR = 0 << HIRAID_CC_AMS_SHIFT,
+ HIRAID_CC_SHN_NONE = 0 << HIRAID_CC_SHN_SHIFT,
+ HIRAID_CC_IOSQES = HIRAID_IO_SQES << HIRAID_CC_IOSQES_SHIFT,
+ HIRAID_CC_IOCQES = HIRAID_IO_CQES << HIRAID_CC_IOCQES_SHIFT,
+ HIRAID_CC_SHN_NORMAL = 1 << HIRAID_CC_SHN_SHIFT,
+ HIRAID_CC_SHN_MASK = 3 << HIRAID_CC_SHN_SHIFT,
+ HIRAID_CSTS_CFS_SHIFT = 1,
+ HIRAID_CSTS_SHST_SHIFT = 2,
+ HIRAID_CSTS_PP_SHIFT = 5,
+ HIRAID_CSTS_RDY = 1 << 0,
+ HIRAID_CSTS_SHST_CMPLT = 2 << 2,
+ HIRAID_CSTS_SHST_MASK = 3 << 2,
+ HIRAID_CSTS_CFS_MASK = 1 << HIRAID_CSTS_CFS_SHIFT,
+ HIRAID_CSTS_PP_MASK = 1 << HIRAID_CSTS_PP_SHIFT,
+};
+
+enum {
+ HIRAID_ADMIN_DELETE_SQ = 0x00,
+ HIRAID_ADMIN_CREATE_SQ = 0x01,
+ HIRAID_ADMIN_DELETE_CQ = 0x04,
+ HIRAID_ADMIN_CREATE_CQ = 0x05,
+ HIRAID_ADMIN_ABORT_CMD = 0x08,
+ HIRAID_ADMIN_SET_FEATURES = 0x09,
+ HIRAID_ADMIN_ASYNC_EVENT = 0x0c,
+ HIRAID_ADMIN_GET_INFO = 0xc6,
+ HIRAID_ADMIN_RESET = 0xc8,
+};
+
+enum {
+ HIRAID_GET_CTRL_INFO = 0,
+ HIRAID_GET_DEVLIST_INFO = 1,
+};
+
+enum hiraid_rst_type {
+ HIRAID_RESET_TARGET = 0,
+ HIRAID_RESET_BUS = 1,
+};
+
+enum {
+ HIRAID_ASYN_EVENT_ERROR = 0,
+ HIRAID_ASYN_EVENT_NOTICE = 2,
+ HIRAID_ASYN_EVENT_VS = 7,
+};
+
+enum {
+ HIRAID_ASYN_DEV_CHANGED = 0x00,
+ HIRAID_ASYN_FW_ACT_START = 0x01,
+ HIRAID_ASYN_HOST_PROBING = 0x10,
+};
+
+enum {
+ HIRAID_ASYN_TIMESYN = 0x00,
+ HIRAID_ASYN_FW_ACT_FINISH = 0x02,
+ HIRAID_ASYN_EVENT_MIN = 0x80,
+ HIRAID_ASYN_EVENT_MAX = 0xff,
+};
+
+enum {
+ HIRAID_CMD_WRITE = 0x01,
+ HIRAID_CMD_READ = 0x02,
+
+ HIRAID_CMD_NONRW_NONE = 0x80,
+ HIRAID_CMD_NONRW_TODEV = 0x81,
+ HIRAID_CMD_NONRW_FROMDEV = 0x82,
+};
+
+enum {
+ HIRAID_QUEUE_PHYS_CONTIG = (1 << 0),
+ HIRAID_CQ_IRQ_ENABLED = (1 << 1),
+
+ HIRAID_FEATURE_NUM_QUEUES = 0x07,
+ HIRAID_FEATURE_ASYNC_EVENT = 0x0b,
+ HIRAID_FEATURE_TIMESTAMP = 0x0e,
+};
+
+enum hiraid_dev_state {
+ DEV_NEW,
+ DEV_LIVE,
+ DEV_RESETTING,
+ DEV_DELETING,
+ DEV_DEAD,
+};
+
+enum {
+ HIRAID_CARD_HBA,
+ HIRAID_CARD_RAID,
+};
+
+enum hiraid_cmd_type {
+ HIRAID_CMD_ADMIN,
+ HIRAID_CMD_PTHRU,
+};
+
+enum {
+ SQE_FLAG_SGL_METABUF = (1 << 6),
+ SQE_FLAG_SGL_METASEG = (1 << 7),
+ SQE_FLAG_SGL_ALL = SQE_FLAG_SGL_METABUF | SQE_FLAG_SGL_METASEG,
+};
+
+enum hiraid_cmd_state {
+ CMD_IDLE = 0,
+ CMD_FLIGHT = 1,
+ CMD_COMPLETE = 2,
+ CMD_TIMEOUT = 3,
+ CMD_TMO_COMPLETE = 4,
+};
+
+enum {
+ HIRAID_BSG_ADMIN,
+ HIRAID_BSG_IOPTHRU,
+};
+
+enum {
+ HIRAID_SAS_HDD_VD = 0x04,
+ HIRAID_SATA_HDD_VD = 0x08,
+ HIRAID_SAS_SSD_VD = 0x0c,
+ HIRAID_SATA_SSD_VD = 0x10,
+ HIRAID_NVME_SSD_VD = 0x14,
+ HIRAID_SAS_HDD_PD = 0x06,
+ HIRAID_SATA_HDD_PD = 0x0a,
+ HIRAID_SAS_SSD_PD = 0x0e,
+ HIRAID_SATA_SSD_PD = 0x12,
+ HIRAID_NVME_SSD_PD = 0x16,
+};
+
+enum {
+ DISPATCH_BY_CPU,
+ DISPATCH_BY_DISK,
+};
+
+struct hiraid_completion {
+ __le32 result;
+ union {
+ struct {
+ __u8 sense_len;
+ __u8 resv[3];
+ };
+ __le32 result1;
+ };
+ __le16 sq_head;
+ __le16 sq_id;
+ __le16 cmd_id;
+ __le16 status;
+};
+
+struct hiraid_ctrl_info {
+ __le32 nd;
+ __le16 max_cmds;
+ __le16 max_channel;
+ __le32 max_tgt_id;
+ __le16 max_lun;
+ __le16 max_num_sge;
+ __le16 lun_num_boot;
+ __u8 mdts;
+ __u8 acl;
+ __u8 asynevent;
+ __u8 card_type;
+ __u8 pt_use_sgl;
+ __u8 rsvd;
+ __le32 rtd3e;
+ __u8 sn[32];
+ __u8 fw_version[16];
+ __u8 rsvd1[4020];
+};
+
+struct hiraid_dev {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct Scsi_Host *shost;
+ struct hiraid_queue *queues;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_extra_pool[MAX_EXTRA_POOL_NUM];
+ void __iomem *bar;
+ u32 max_qid;
+ u32 num_vecs;
+ u32 queue_count;
+ u32 ioq_depth;
+ u32 db_stride;
+ u32 __iomem *dbs;
+ struct rw_semaphore dev_rwsem;
+ int numa_node;
+ u32 page_size;
+ u32 ctrl_config;
+ u32 online_queues;
+ u64 cap;
+ u32 scsi_qd;
+ u32 instance;
+ struct hiraid_ctrl_info *ctrl_info;
+ struct hiraid_dev_info *dev_info;
+
+ struct hiraid_cmd *adm_cmds;
+ struct list_head adm_cmd_list;
+ spinlock_t adm_cmd_lock;
+
+ struct hiraid_cmd *io_ptcmds;
+ struct list_head io_pt_list;
+ spinlock_t io_pt_lock;
+
+ struct work_struct scan_work;
+ struct work_struct timesyn_work;
+ struct work_struct reset_work;
+ struct work_struct fwact_work;
+
+ enum hiraid_dev_state state;
+ spinlock_t state_lock;
+
+ void *sense_buffer_virt;
+ dma_addr_t sense_buffer_phy;
+ u32 last_qcnt;
+ u8 hdd_dispatch;
+
+ struct request_queue *bsg_queue;
+};
+
+struct hiraid_sgl_desc {
+ __le64 addr;
+ __le32 length;
+ __u8 rsvd[3];
+ __u8 type;
+};
+
+union hiraid_data_ptr {
+ struct {
+ __le64 prp1;
+ __le64 prp2;
+ };
+ struct hiraid_sgl_desc sgl;
+};
+
+struct hiraid_admin_com_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __le32 cdw2[4];
+ union hiraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+struct hiraid_features {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __u64 rsvd2[2];
+ union hiraid_data_ptr dptr;
+ __le32 fid;
+ __le32 dword11;
+ __le32 dword12;
+ __le32 dword13;
+ __le32 dword14;
+ __le32 dword15;
+};
+
+struct hiraid_create_cq {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 cqid;
+ __le16 qsize;
+ __le16 cq_flags;
+ __le16 irq_vector;
+ __u32 rsvd12[4];
+};
+
+struct hiraid_create_sq {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __u64 rsvd8;
+ __le16 sqid;
+ __le16 qsize;
+ __le16 sq_flags;
+ __le16 cqid;
+ __u32 rsvd12[4];
+};
+
+struct hiraid_delete_queue {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __u32 rsvd1[9];
+ __le16 qid;
+ __u16 rsvd10;
+ __u32 rsvd11[5];
+};
+
+struct hiraid_get_info {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __u32 rsvd2[4];
+ union hiraid_data_ptr dptr;
+ __u8 type;
+ __u8 rsvd10[3];
+ __le32 cdw11;
+ __u32 rsvd12[4];
+};
+
+struct hiraid_usr_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ union {
+ struct {
+ __le16 subopcode;
+ __le16 rsvd1;
+ } info_0;
+ __le32 cdw2;
+ };
+ union {
+ struct {
+ __le16 data_len;
+ __le16 param_len;
+ } info_1;
+ __le32 cdw3;
+ };
+ __u64 metadata;
+ union hiraid_data_ptr dptr;
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
+};
+
+struct hiraid_abort_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __le16 sqid;
+ __le16 cid;
+ __u32 rsvd11[5];
+};
+
+struct hiraid_reset_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __u64 rsvd2[4];
+ __u8 type;
+ __u8 rsvd10[3];
+ __u32 rsvd11[5];
+};
+
+struct hiraid_admin_command {
+ union {
+ struct hiraid_admin_com_cmd common;
+ struct hiraid_features features;
+ struct hiraid_create_cq create_cq;
+ struct hiraid_create_sq create_sq;
+ struct hiraid_delete_queue delete_queue;
+ struct hiraid_get_info get_info;
+ struct hiraid_abort_cmd abort;
+ struct hiraid_reset_cmd reset;
+ struct hiraid_usr_cmd usr_cmd;
+ };
+};
+
+struct hiraid_scsi_io_com_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __le32 cdw3[3];
+ union hiraid_data_ptr dptr;
+ __le32 cdw10[6];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __le32 cdw26[6];
+};
+
+struct hiraid_scsi_rw_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_len;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union hiraid_data_ptr dptr;
+ __le64 slba;
+ __le16 nlb;
+ __le16 control;
+ __u32 rsvd13[3];
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct hiraid_scsi_nonrw_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __le16 cmd_id;
+ __le32 hdid;
+ __le16 sense_len;
+ __u8 cdb_length;
+ __u8 rsvd2;
+ __u32 rsvd3[3];
+ union hiraid_data_ptr dptr;
+ __u32 rsvd10[5];
+ __le32 buf_len;
+ __u8 cdb[32];
+ __le64 sense_addr;
+ __u32 rsvd26[6];
+};
+
+struct hiraid_scsi_io_cmd {
+ union {
+ struct hiraid_scsi_io_com_cmd common;
+ struct hiraid_scsi_rw_cmd rw;
+ struct hiraid_scsi_nonrw_cmd nonrw;
+ };
+};
+
+struct hiraid_passthru_common_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 data_len;
+ __u16 param_len;
+ } info_1;
+ __u32 cdw3;
+ };
+ __u64 metadata;
+
+ __u64 addr;
+ __u64 prp2;
+
+ __u32 cdw10;
+ __u32 cdw11;
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 cdw15;
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct hiraid_passthru_io_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 rsvd0;
+ __u32 nsid;
+ union {
+ struct {
+ __u16 res_sense_len;
+ __u8 cdb_len;
+ __u8 rsvd0;
+ } info_0;
+ __u32 cdw2;
+ };
+ union {
+ struct {
+ __u16 subopcode;
+ __u16 rsvd1;
+ } info_1;
+ __u32 cdw3;
+ };
+ union {
+ struct {
+ __u16 rsvd;
+ __u16 param_len;
+ } info_2;
+ __u32 cdw4;
+ };
+ __u32 cdw5;
+ __u64 addr;
+ __u64 prp2;
+ union {
+ struct {
+ __u16 eid;
+ __u16 sid;
+ } info_3;
+ __u32 cdw10;
+ };
+ union {
+ struct {
+ __u16 did;
+ __u8 did_flag;
+ __u8 rsvd2;
+ } info_4;
+ __u32 cdw11;
+ };
+ __u32 cdw12;
+ __u32 cdw13;
+ __u32 cdw14;
+ __u32 data_len;
+ __u32 cdw16;
+ __u32 cdw17;
+ __u32 cdw18;
+ __u32 cdw19;
+ __u32 cdw20;
+ __u32 cdw21;
+ __u32 cdw22;
+ __u32 cdw23;
+ __u64 sense_addr;
+ __u32 cdw26[4];
+ __u32 timeout_ms;
+ __u32 result0;
+ __u32 result1;
+};
+
+struct hiraid_bsg_request {
+ u32 msgcode;
+ u32 control;
+ union {
+ struct hiraid_passthru_common_cmd admcmd;
+ struct hiraid_passthru_io_cmd pthrucmd;
+ };
+};
+
+struct hiraid_cmd {
+ u16 qid;
+ u16 cid;
+ u32 result0;
+ u32 result1;
+ u16 status;
+ void *priv;
+ enum hiraid_cmd_state state;
+ struct completion cmd_done;
+ struct list_head list;
+};
+
+struct hiraid_queue {
+ struct hiraid_dev *hdev;
+ spinlock_t sq_lock;
+
+ spinlock_t cq_lock ____cacheline_aligned_in_smp;
+
+ void *sq_cmds;
+
+ struct hiraid_completion *cqes;
+
+ dma_addr_t sq_buffer_phy;
+ dma_addr_t cq_buffer_phy;
+ u32 __iomem *q_db;
+ u8 cq_phase;
+ u8 sqes;
+ u16 qid;
+ u16 sq_tail;
+ u16 cq_head;
+ u16 last_cq_head;
+ u16 q_depth;
+ s16 cq_vector;
+ atomic_t inflight;
+ void *sense_buffer_virt;
+ dma_addr_t sense_buffer_phy;
+ struct dma_pool *prp_small_pool;
+};
+
+struct hiraid_mapmange {
+ struct hiraid_queue *hiraidq;
+ enum hiraid_cmd_state state;
+ u16 cid;
+ int page_cnt;
+ u32 sge_cnt;
+ u32 len;
+ bool use_sgl;
+ dma_addr_t first_dma;
+ void *sense_buffer_virt;
+ dma_addr_t sense_buffer_phy;
+ struct scatterlist *sgl;
+ void *list[0];
+};
+
+struct hiraid_vd_info {
+ __u8 name[32];
+ __le16 id;
+ __u8 rg_id;
+ __u8 rg_level;
+ __u8 sg_num;
+ __u8 sg_disk_num;
+ __u8 vd_status;
+ __u8 vd_type;
+ __u8 rsvd1[4056];
+};
+
+struct bgtask_info {
+ __u8 type;
+ __u8 progress;
+ __u8 rate;
+ __u8 rsvd0;
+ __le16 vd_id;
+ __le16 time_left;
+ __u8 rsvd1[4];
+};
+
+struct hiraid_bgtask {
+ __u8 sw;
+ __u8 task_num;
+ __u8 rsvd[6];
+ struct bgtask_info bgtask[MAX_REALTIME_BGTASK_NUM];
+};
+
+struct hiraid_dev_info {
+ __le32 hdid;
+ __le16 target;
+ __u8 channel;
+ __u8 lun;
+ __u8 attr;
+ __u8 flag;
+ __le16 max_io_kb;
+};
+
+struct hiraid_dev_list {
+ __le32 dev_num;
+ __u32 rsvd0[3];
+ struct hiraid_dev_info devinfo[MAX_DEV_ENTRY_PER_PAGE_4K];
+};
+
+struct hiraid_sdev_hostdata {
+ u32 hdid;
+ u16 max_io_kb;
+ u8 attr;
+ u8 flag;
+ u8 rg_id;
+ u8 hwq;
+ u16 pend_count;
+};
+
+#endif
+
diff --git a/drivers/scsi/hisi_raid/hiraid_main.c b/drivers/scsi/hisi_raid/hiraid_main.c
new file mode 100644
index 000000000000..b9ffa642479c
--- /dev/null
+++ b/drivers/scsi/hisi_raid/hiraid_main.c
@@ -0,0 +1,3982 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2022 Huawei Technologies Co., Ltd */
+
+/* Huawei Raid Series Linux Driver */
+
+#define pr_fmt(fmt) "hiraid: " fmt
+
+#include <linux/sched/signal.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/sysfs.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+#include <linux/once.h>
+#include <linux/debugfs.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/blkdev.h>
+#include <linux/bsg-lib.h>
+#include <asm/unaligned.h>
+#include <linux/sort.h>
+#include <target/target_core_backend.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/sg.h>
+
+#include "hiraid.h"
+
+static u32 admin_tmout = 60;
+module_param(admin_tmout, uint, 0644);
+MODULE_PARM_DESC(admin_tmout, "admin commands timeout (seconds)");
+
+static u32 scmd_tmout_rawdisk = 180;
+module_param(scmd_tmout_rawdisk, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_rawdisk, "scsi commands timeout for rawdisk (seconds)");
+
+static u32 scmd_tmout_vd = 180;
+module_param(scmd_tmout_vd, uint, 0644);
+MODULE_PARM_DESC(scmd_tmout_vd, "scsi commands timeout for vd (seconds)");
+
+static bool max_io_force;
+module_param(max_io_force, bool, 0644);
+MODULE_PARM_DESC(max_io_force, "force max_hw_sectors_kb = 1024, default false (performance first)");
+
+static bool work_mode;
+module_param(work_mode, bool, 0444);
+MODULE_PARM_DESC(work_mode, "work mode switch, default false for multi hw queues");
+
+#define MAX_IO_QUEUES 128
+#define MIN_IO_QUEUES 1
+
+static int ioq_num_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0;
+ int ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < MIN_IO_QUEUES || n > MAX_IO_QUEUES)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops max_hwq_num_ops = {
+ .set = ioq_num_set,
+ .get = param_get_uint,
+};
+
+static u32 max_hwq_num = 128;
+module_param_cb(max_hwq_num, &max_hwq_num_ops, &max_hwq_num, 0444);
+MODULE_PARM_DESC(max_hwq_num, "max number of hw io queues, should be >= 1, default 128");
+
+static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0;
+ int ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret != 0 || n < 2)
+ return -EINVAL;
+
+ return param_set_int(val, kp);
+}
+
+static const struct kernel_param_ops io_queue_depth_ops = {
+ .set = io_queue_depth_set,
+ .get = param_get_uint,
+};
+
+static u32 io_queue_depth = 1024;
+module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
+MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");
+
+static u32 log_debug_switch;
+module_param(log_debug_switch, uint, 0644);
+MODULE_PARM_DESC(log_debug_switch, "set log state, default zero for switch off");
+
+static int extra_pool_num_set(const char *val, const struct kernel_param *kp)
+{
+ u8 n = 0;
+ int ret;
+
+ ret = kstrtou8(val, 10, &n);
+ if (ret != 0)
+ return -EINVAL;
+ if (n > MAX_EXTRA_POOL_NUM)
+ n = MAX_EXTRA_POOL_NUM;
+ if (n < 1)
+ n = 1;
+ *((u8 *)kp->arg) = n;
+
+ return 0;
+}
+
+static const struct kernel_param_ops small_pool_num_ops = {
+ .set = extra_pool_num_set,
+ .get = param_get_byte,
+};
+
+/*
+ * The spinlock of a single pool was found to be heavily contended
+ * across multiple CPUs, so multiple pools are introduced to reduce
+ * the contention.
+ */
+static unsigned char extra_pool_num = 4;
+module_param_cb(extra_pool_num, &small_pool_num_ops, &extra_pool_num, 0644);
+MODULE_PARM_DESC(extra_pool_num, "set prp extra pool num, default 4, MAX 16");
+
+static void hiraid_handle_async_notice(struct hiraid_dev *hdev, u32 result);
+static void hiraid_handle_async_vs(struct hiraid_dev *hdev, u32 result, u32 result1);
+
+static struct class *hiraid_class;
+
+#define HIRAID_CAP_TIMEOUT_UNIT_MS (HZ / 2)
+
+static struct workqueue_struct *work_queue;
+
+#define dev_log_dbg(dev, fmt, ...) do { \
+ if (unlikely(log_debug_switch)) \
+ dev_info(dev, "[%s] " fmt, \
+ __func__, ##__VA_ARGS__); \
+} while (0)
+
+#define HIRAID_DRV_VERSION "1.1.0.0"
+
+#define ADMIN_TIMEOUT (admin_tmout * HZ)
+#define USRCMD_TIMEOUT (180 * HZ)
+#define CTL_RST_TIME (600 * HZ)
+
+#define HIRAID_WAIT_ABNL_CMD_TIMEOUT 6
+#define HIRAID_WAIT_RST_IO_TIMEOUT 10
+
+#define HIRAID_DMA_MSK_BIT_MAX 64
+
+#define IOQ_PT_DATA_LEN 4096
+#define IOQ_PT_SGL_DATA_LEN (1024 * 1024)
+
+#define MAX_CAN_QUEUE (4096 - 1)
+#define MIN_CAN_QUEUE (1024 - 1)
+
+enum SENSE_STATE_CODE {
+ SENSE_STATE_OK = 0,
+ SENSE_STATE_NEED_CHECK,
+ SENSE_STATE_ERROR,
+ SENSE_STATE_EP_PCIE_ERROR,
+ SENSE_STATE_NAC_DMA_ERROR,
+ SENSE_STATE_ABORTED,
+ SENSE_STATE_NEED_RETRY
+};
+
+enum {
+ FW_EH_OK = 0,
+ FW_EH_DEV_NONE = 0x701
+};
+
+static const char * const raid_levels[] = {"0", "1", "5", "6", "10", "50", "60", "NA"};
+
+static const char * const raid_states[] = {
+ "NA", "NORMAL", "FAULT", "DEGRADE", "NOT_FORMATTED", "FORMATTING", "SANITIZING",
+ "INITIALIZING", "INITIALIZE_FAIL", "DELETING", "DELETE_FAIL", "WRITE_PROTECT"
+};
+
+static int hiraid_remap_bar(struct hiraid_dev *hdev, u32 size)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (size > pci_resource_len(pdev, 0)) {
+		dev_err(hdev->dev, "input size[%u] exceeds bar0 length[%llu]\n",
+ size, pci_resource_len(pdev, 0));
+ return -ENOMEM;
+ }
+
+ if (hdev->bar)
+ iounmap(hdev->bar);
+
+ hdev->bar = ioremap(pci_resource_start(pdev, 0), size);
+ if (!hdev->bar) {
+ dev_err(hdev->dev, "ioremap for bar0 failed\n");
+ return -ENOMEM;
+ }
+ hdev->dbs = hdev->bar + HIRAID_REG_DBS;
+
+ return 0;
+}
+
+static int hiraid_dev_map(struct hiraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ ret = pci_request_mem_regions(pdev, "hiraid");
+ if (ret) {
+ dev_err(hdev->dev, "fail to request memory regions\n");
+ return ret;
+ }
+
+ ret = hiraid_remap_bar(hdev, HIRAID_REG_DBS + 4096);
+ if (ret) {
+ pci_release_mem_regions(pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hiraid_dev_unmap(struct hiraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ if (hdev->bar) {
+ iounmap(hdev->bar);
+ hdev->bar = NULL;
+ }
+ pci_release_mem_regions(pdev);
+}
+
+static int hiraid_pci_enable(struct hiraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = -ENOMEM;
+ u64 maskbit = HIRAID_DMA_MSK_BIT_MAX;
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(hdev->dev, "enable pci device memory resources failed\n");
+ return ret;
+ }
+ pci_set_master(pdev);
+
+ if (readl(hdev->bar + HIRAID_REG_CSTS) == U32_MAX) {
+ ret = -ENODEV;
+ dev_err(hdev->dev, "read CSTS register failed\n");
+ goto disable;
+ }
+
+ hdev->cap = lo_hi_readq(hdev->bar + HIRAID_REG_CAP);
+ hdev->ioq_depth = min_t(u32, HIRAID_CAP_MQES(hdev->cap) + 1, io_queue_depth);
+ hdev->db_stride = 1 << HIRAID_CAP_STRIDE(hdev->cap);
+
+ maskbit = HIRAID_CAP_DMAMASK(hdev->cap);
+ if (maskbit < 32 || maskbit > HIRAID_DMA_MSK_BIT_MAX) {
+ dev_err(hdev->dev, "err, dma mask invalid[%llu], set to default\n", maskbit);
+ maskbit = HIRAID_DMA_MSK_BIT_MAX;
+ }
+
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(maskbit))) {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_err(hdev->dev, "set dma mask[32] and coherent failed\n");
+ goto disable;
+ }
+ dev_info(hdev->dev, "set dma mask[32] success\n");
+ } else {
+ dev_info(hdev->dev, "set dma mask[%llu] success\n", maskbit);
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+		dev_err(hdev->dev, "allocate one IRQ for admin queue setup failed\n");
+ goto disable;
+ }
+
+ pci_enable_pcie_error_reporting(pdev);
+ pci_save_state(pdev);
+
+ return 0;
+
+disable:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+
+/*
+ * The first and last PRPs may not cover a full page, so counting the
+ * total number of PRPs for the I/O from size + page_size may be a
+ * slight overestimate.
+ *
+ * Each PRP entry takes 8 bytes. One entry per page may chain to the
+ * next prp_list page and does not describe I/O data, so dividing by
+ * PAGE_SIZE - 8 may also be a slight overestimate.
+ */
+static int hiraid_prp_pagenum(struct hiraid_dev *hdev)
+{
+ u32 size = 1U << ((hdev->ctrl_info->mdts) * 1U) << 12;
+ u32 nprps = DIV_ROUND_UP(size + hdev->page_size, hdev->page_size);
+
+ return DIV_ROUND_UP(PRP_ENTRY_SIZE * nprps, hdev->page_size - PRP_ENTRY_SIZE);
+}
+
+/*
+ * Calculates the number of pages needed for the SGL segments. For example a 4k
+ * page can accommodate 256 SGL descriptors.
+ */
+static int hiraid_sgl_pagenum(struct hiraid_dev *hdev)
+{
+ u32 nsge = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+
+ return DIV_ROUND_UP(nsge * sizeof(struct hiraid_sgl_desc), hdev->page_size);
+}
+
+static inline void **hiraid_mapbuf_list(struct hiraid_mapmange *mapbuf)
+{
+ return mapbuf->list;
+}
+
+static u32 hiraid_get_max_cmd_size(struct hiraid_dev *hdev)
+{
+ u32 alloc_size = sizeof(__le64 *) * max(hiraid_prp_pagenum(hdev), hiraid_sgl_pagenum(hdev));
+
+ dev_info(hdev->dev, "mapbuf size[%lu], alloc_size[%u]\n",
+ sizeof(struct hiraid_mapmange), alloc_size);
+
+ return sizeof(struct hiraid_mapmange) + alloc_size;
+}
+
+static int hiraid_build_passthru_prp(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf)
+{
+ struct scatterlist *sg = mapbuf->sgl;
+ __le64 *phy_regpage, *prior_list;
+ u64 buf_addr = sg_dma_address(sg);
+ int buf_length = sg_dma_len(sg);
+ u32 page_size = hdev->page_size;
+ int offset = buf_addr & (page_size - 1);
+ void **list = hiraid_mapbuf_list(mapbuf);
+ int maplen = mapbuf->len;
+ struct dma_pool *pool;
+ dma_addr_t buffer_phy;
+ int i;
+
+ maplen -= (page_size - offset);
+ if (maplen <= 0) {
+ mapbuf->first_dma = 0;
+ return 0;
+ }
+
+ buf_length -= (page_size - offset);
+ if (buf_length) {
+ buf_addr += (page_size - offset);
+ } else {
+ sg = sg_next(sg);
+ buf_addr = sg_dma_address(sg);
+ buf_length = sg_dma_len(sg);
+ }
+
+ if (maplen <= page_size) {
+ mapbuf->first_dma = buf_addr;
+ return 0;
+ }
+
+ pool = hdev->prp_page_pool;
+ mapbuf->page_cnt = 1;
+
+ phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!phy_regpage) {
+ dev_err_ratelimited(hdev->dev, "allocate first admin prp_list memory failed\n");
+ mapbuf->first_dma = buf_addr;
+ mapbuf->page_cnt = -1;
+ return -ENOMEM;
+ }
+ list[0] = phy_regpage;
+ mapbuf->first_dma = buffer_phy;
+ i = 0;
+ for (;;) {
+ if (i == page_size / PRP_ENTRY_SIZE) {
+ prior_list = phy_regpage;
+
+ phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!phy_regpage) {
+ dev_err_ratelimited(hdev->dev, "allocate [%d]th admin prp list memory failed\n",
+ mapbuf->page_cnt + 1);
+ return -ENOMEM;
+ }
+ list[mapbuf->page_cnt++] = phy_regpage;
+ phy_regpage[0] = prior_list[i - 1];
+ prior_list[i - 1] = cpu_to_le64(buffer_phy);
+ i = 1;
+ }
+ phy_regpage[i++] = cpu_to_le64(buf_addr);
+ buf_addr += page_size;
+ buf_length -= page_size;
+ maplen -= page_size;
+ if (maplen <= 0)
+ break;
+ if (buf_length > 0)
+ continue;
+ if (unlikely(buf_length < 0))
+ goto bad_admin_sgl;
+ sg = sg_next(sg);
+ buf_addr = sg_dma_address(sg);
+ buf_length = sg_dma_len(sg);
+ }
+
+ return 0;
+
+bad_admin_sgl:
+ dev_err(hdev->dev, "setup prps, invalid admin SGL for payload[%d] nents[%d]\n",
+ mapbuf->len, mapbuf->sge_cnt);
+ return -EIO;
+}
+
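+/*
+ * Build a PRP list for the I/O scatterlist; small lists are allocated
+ * from the per-queue extra pool.
+ */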
+static int hiraid_build_prp(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf)
+{
+ struct scatterlist *sg = mapbuf->sgl;
+ __le64 *phy_regpage, *prior_list;
+ u64 buf_addr = sg_dma_address(sg);
+ int buf_length = sg_dma_len(sg);
+ u32 page_size = hdev->page_size;
+ int offset = buf_addr & (page_size - 1);
+ void **list = hiraid_mapbuf_list(mapbuf);
+ int maplen = mapbuf->len;
+ struct dma_pool *pool;
+ dma_addr_t buffer_phy;
+ int nprps, i;
+
+ maplen -= (page_size - offset);
+ if (maplen <= 0) {
+ mapbuf->first_dma = 0;
+ return 0;
+ }
+
+ buf_length -= (page_size - offset);
+ if (buf_length) {
+ buf_addr += (page_size - offset);
+ } else {
+ sg = sg_next(sg);
+ buf_addr = sg_dma_address(sg);
+ buf_length = sg_dma_len(sg);
+ }
+
+ if (maplen <= page_size) {
+ mapbuf->first_dma = buf_addr;
+ return 0;
+ }
+
+ nprps = DIV_ROUND_UP(maplen, page_size);
+ if (nprps <= (EXTRA_POOL_SIZE / PRP_ENTRY_SIZE)) {
+ pool = mapbuf->hiraidq->prp_small_pool;
+ mapbuf->page_cnt = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ mapbuf->page_cnt = 1;
+ }
+
+ phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!phy_regpage) {
+ dev_err_ratelimited(hdev->dev, "allocate first prp_list memory failed\n");
+ mapbuf->first_dma = buf_addr;
+ mapbuf->page_cnt = -1;
+ return -ENOMEM;
+ }
+ list[0] = phy_regpage;
+ mapbuf->first_dma = buffer_phy;
+ i = 0;
+ for (;;) {
+ if (i == page_size / PRP_ENTRY_SIZE) {
+ prior_list = phy_regpage;
+
+ phy_regpage = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!phy_regpage) {
+ dev_err_ratelimited(hdev->dev, "allocate [%d]th prp list memory failed\n",
+ mapbuf->page_cnt + 1);
+ return -ENOMEM;
+ }
+ list[mapbuf->page_cnt++] = phy_regpage;
+ phy_regpage[0] = prior_list[i - 1];
+ prior_list[i - 1] = cpu_to_le64(buffer_phy);
+ i = 1;
+ }
+ phy_regpage[i++] = cpu_to_le64(buf_addr);
+ buf_addr += page_size;
+ buf_length -= page_size;
+ maplen -= page_size;
+ if (maplen <= 0)
+ break;
+ if (buf_length > 0)
+ continue;
+ if (unlikely(buf_length < 0))
+ goto bad_sgl;
+ sg = sg_next(sg);
+ buf_addr = sg_dma_address(sg);
+ buf_length = sg_dma_len(sg);
+ }
+
+ return 0;
+
+bad_sgl:
+ dev_err(hdev->dev, "setup prps, invalid SGL for payload[%d] nents[%d]\n",
+ mapbuf->len, mapbuf->sge_cnt);
+ return -EIO;
+}
+
+#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct hiraid_sgl_desc))
+
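+/* Copy one SQE to the submission queue tail and ring the SQ doorbell. */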
+static void hiraid_submit_cmd(struct hiraid_queue *hiraidq, const void *cmd)
+{
+ u32 sqes = SQE_SIZE(hiraidq->qid);
+ unsigned long flags;
+ struct hiraid_admin_com_cmd *acd = (struct hiraid_admin_com_cmd *)cmd;
+
+ spin_lock_irqsave(&hiraidq->sq_lock, flags);
+ memcpy((hiraidq->sq_cmds + sqes * hiraidq->sq_tail), cmd, sqes);
+ if (++hiraidq->sq_tail == hiraidq->q_depth)
+ hiraidq->sq_tail = 0;
+
+ writel(hiraidq->sq_tail, hiraidq->q_db);
+ spin_unlock_irqrestore(&hiraidq->sq_lock, flags);
+
+ dev_log_dbg(hiraidq->hdev->dev, "cid[%d] qid[%d] opcode[0x%x] flags[0x%x] hdid[%u]\n",
+ le16_to_cpu(acd->cmd_id), hiraidq->qid, acd->opcode, acd->flags,
+ le32_to_cpu(acd->hdid));
+}
+
+static inline bool hiraid_is_rw_scmd(struct scsi_cmnd *scmd)
+{
+ switch (scmd->cmnd[0]) {
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * checks if prps can be built for the IO cmd
+ */
+static bool hiraid_is_prp(struct hiraid_dev *hdev, struct scatterlist *sgl, u32 nsge)
+{
+ struct scatterlist *sg = sgl;
+ u32 page_mask = hdev->page_size - 1;
+ bool is_prp = true;
+ u32 i = 0;
+
+ for_each_sg(sgl, sg, nsge, i) {
+		/*
+		 * A middle sge must have a data length that is a multiple of
+		 * page_size and a page_size aligned address.
+		 */
+ if (i != 0 && i != nsge - 1) {
+ if ((sg_dma_len(sg) & page_mask) ||
+ (sg_dma_address(sg) & page_mask)) {
+ is_prp = false;
+ break;
+ }
+ }
+
+		/*
+		 * The first sge address plus its data length must be
+		 * page_size aligned.
+		 */
+ if (nsge > 1 && i == 0) {
+ if ((sg_dma_address(sg) + sg_dma_len(sg)) & page_mask) {
+ is_prp = false;
+ break;
+ }
+ }
+
+		/* The last sge address must be page_size aligned. */
+ if (nsge > 1 && i == (nsge - 1)) {
+ if (sg_dma_address(sg) & page_mask) {
+ is_prp = false;
+ break;
+ }
+ }
+ }
+
+ return is_prp;
+}
+
+enum {
+ HIRAID_SGL_FMT_DATA_DESC = 0x00,
+ HIRAID_SGL_FMT_SEG_DESC = 0x02,
+ HIRAID_SGL_FMT_LAST_SEG_DESC = 0x03,
+ HIRAID_KEY_SGL_FMT_DATA_DESC = 0x04,
+ HIRAID_TRANSPORT_SGL_DATA_DESC = 0x05
+};
+
+static void hiraid_sgl_set_data(struct hiraid_sgl_desc *sge, struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = HIRAID_SGL_FMT_DATA_DESC << 4;
+}
+
+static void hiraid_sgl_set_seg(struct hiraid_sgl_desc *sge, dma_addr_t buffer_phy, int entries)
+{
+ sge->addr = cpu_to_le64(buffer_phy);
+ if (entries <= SGES_PER_PAGE) {
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = HIRAID_SGL_FMT_LAST_SEG_DESC << 4;
+ } else {
+ sge->length = cpu_to_le32(PAGE_SIZE);
+ sge->type = HIRAID_SGL_FMT_SEG_DESC << 4;
+ }
+}
+
+static int hiraid_build_passthru_sgl(struct hiraid_dev *hdev,
+ struct hiraid_admin_command *admin_cmd,
+ struct hiraid_mapmange *mapbuf)
+{
+ struct hiraid_sgl_desc *sg_list, *link, *old_sg_list;
+ struct scatterlist *sg = mapbuf->sgl;
+ void **list = hiraid_mapbuf_list(mapbuf);
+ struct dma_pool *pool;
+ int nsge = mapbuf->sge_cnt;
+ dma_addr_t buffer_phy;
+ int i = 0;
+
+ admin_cmd->common.flags |= SQE_FLAG_SGL_METABUF;
+
+ if (nsge == 1) {
+ hiraid_sgl_set_data(&admin_cmd->common.dptr.sgl, sg);
+ return 0;
+ }
+
+ pool = hdev->prp_page_pool;
+ mapbuf->page_cnt = 1;
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev, "allocate first admin sgl_list failed\n");
+ mapbuf->page_cnt = -1;
+ return -ENOMEM;
+ }
+
+ list[0] = sg_list;
+ mapbuf->first_dma = buffer_phy;
+ hiraid_sgl_set_seg(&admin_cmd->common.dptr.sgl, buffer_phy, nsge);
+ do {
+ if (i == SGES_PER_PAGE) {
+ old_sg_list = sg_list;
+ link = &old_sg_list[SGES_PER_PAGE - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev, "allocate [%d]th admin sgl_list failed\n",
+ mapbuf->page_cnt + 1);
+ return -ENOMEM;
+ }
+ list[mapbuf->page_cnt++] = sg_list;
+
+ i = 0;
+ memcpy(&sg_list[i++], link, sizeof(*link));
+ hiraid_sgl_set_seg(link, buffer_phy, nsge);
+ }
+
+ hiraid_sgl_set_data(&sg_list[i++], sg);
+ sg = sg_next(sg);
+ } while (--nsge > 0);
+
+ return 0;
+}
+
+
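+/* Build a chained SGL describing the I/O scatterlist. */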
+static int hiraid_build_sgl(struct hiraid_dev *hdev, struct hiraid_scsi_io_cmd *io_cmd,
+ struct hiraid_mapmange *mapbuf)
+{
+ struct hiraid_sgl_desc *sg_list, *link, *old_sg_list;
+ struct scatterlist *sg = mapbuf->sgl;
+ void **list = hiraid_mapbuf_list(mapbuf);
+ struct dma_pool *pool;
+ int nsge = mapbuf->sge_cnt;
+ dma_addr_t buffer_phy;
+ int i = 0;
+
+ io_cmd->common.flags |= SQE_FLAG_SGL_METABUF;
+
+ if (nsge == 1) {
+ hiraid_sgl_set_data(&io_cmd->common.dptr.sgl, sg);
+ return 0;
+ }
+
+ if (nsge <= (EXTRA_POOL_SIZE / sizeof(struct hiraid_sgl_desc))) {
+ pool = mapbuf->hiraidq->prp_small_pool;
+ mapbuf->page_cnt = 0;
+ } else {
+ pool = hdev->prp_page_pool;
+ mapbuf->page_cnt = 1;
+ }
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev, "allocate first sgl_list failed\n");
+ mapbuf->page_cnt = -1;
+ return -ENOMEM;
+ }
+
+ list[0] = sg_list;
+ mapbuf->first_dma = buffer_phy;
+ hiraid_sgl_set_seg(&io_cmd->common.dptr.sgl, buffer_phy, nsge);
+ do {
+ if (i == SGES_PER_PAGE) {
+ old_sg_list = sg_list;
+ link = &old_sg_list[SGES_PER_PAGE - 1];
+
+ sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &buffer_phy);
+ if (!sg_list) {
+ dev_err_ratelimited(hdev->dev, "allocate [%d]th sgl_list failed\n",
+ mapbuf->page_cnt + 1);
+ return -ENOMEM;
+ }
+ list[mapbuf->page_cnt++] = sg_list;
+
+ i = 0;
+ memcpy(&sg_list[i++], link, sizeof(*link));
+ hiraid_sgl_set_seg(link, buffer_phy, nsge);
+ }
+
+ hiraid_sgl_set_data(&sg_list[i++], sg);
+ sg = sg_next(sg);
+ } while (--nsge > 0);
+
+ return 0;
+}
+
+#define HIRAID_RW_FUA BIT(14)
+
+static int hiraid_setup_rw_cmd(struct hiraid_dev *hdev,
+ struct hiraid_scsi_rw_cmd *io_cmd,
+ struct scsi_cmnd *scmd)
+{
+ u32 start_lba_lo, start_lba_hi;
+ u32 datalength = 0;
+ u16 control = 0;
+
+ start_lba_lo = 0;
+ start_lba_hi = 0;
+
+ if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+ io_cmd->opcode = HIRAID_CMD_WRITE;
+ } else if (scmd->sc_data_direction == DMA_FROM_DEVICE) {
+ io_cmd->opcode = HIRAID_CMD_READ;
+ } else {
+ dev_err(hdev->dev, "invalid RW_IO for unsupported data direction[%d]\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* 6-byte READ(0x08) or WRITE(0x0A) cdb */
+ if (scmd->cmd_len == 6) {
+ datalength = (u32)(scmd->cmnd[4] == 0 ?
+ IO_6_DEFAULT_TX_LEN : scmd->cmnd[4]);
+ start_lba_lo = (u32)get_unaligned_be24(&scmd->cmnd[1]);
+
+ start_lba_lo &= 0x1FFFFF;
+ }
+
+ /* 10-byte READ(0x28) or WRITE(0x2A) cdb */
+ else if (scmd->cmd_len == 10) {
+ datalength = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= HIRAID_RW_FUA;
+ }
+
+ /* 12-byte READ(0xA8) or WRITE(0xAA) cdb */
+ else if (scmd->cmd_len == 12) {
+ datalength = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= HIRAID_RW_FUA;
+ }
+ /* 16-byte READ(0x88) or WRITE(0x8A) cdb */
+ else if (scmd->cmd_len == 16) {
+ datalength = get_unaligned_be32(&scmd->cmnd[10]);
+ start_lba_lo = get_unaligned_be32(&scmd->cmnd[6]);
+ start_lba_hi = get_unaligned_be32(&scmd->cmnd[2]);
+
+ if (scmd->cmnd[1] & FUA_MASK)
+ control |= HIRAID_RW_FUA;
+ }
+
+ if (unlikely(datalength > U16_MAX || datalength == 0)) {
+ dev_err(hdev->dev, "invalid IO for illegal transfer data length[%u]\n", datalength);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ io_cmd->slba = cpu_to_le64(((u64)start_lba_hi << 32) | start_lba_lo);
+ /* 0base for nlb */
+ io_cmd->nlb = cpu_to_le16((u16)(datalength - 1));
+ io_cmd->control = cpu_to_le16(control);
+
+ return 0;
+}
+
+static int hiraid_setup_nonrw_cmd(struct hiraid_dev *hdev,
+ struct hiraid_scsi_nonrw_cmd *io_cmd, struct scsi_cmnd *scmd)
+{
+ io_cmd->buf_len = cpu_to_le32(scsi_bufflen(scmd));
+
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ io_cmd->opcode = HIRAID_CMD_NONRW_NONE;
+ break;
+ case DMA_TO_DEVICE:
+ io_cmd->opcode = HIRAID_CMD_NONRW_TODEV;
+ break;
+ case DMA_FROM_DEVICE:
+ io_cmd->opcode = HIRAID_CMD_NONRW_FROMDEV;
+ break;
+ default:
+ dev_err(hdev->dev, "invalid NON_IO for unsupported data direction[%d]\n",
+ scmd->sc_data_direction);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hiraid_setup_io_cmd(struct hiraid_dev *hdev,
+ struct hiraid_scsi_io_cmd *io_cmd, struct scsi_cmnd *scmd)
+{
+ memcpy(io_cmd->common.cdb, scmd->cmnd, scmd->cmd_len);
+ io_cmd->common.cdb_len = scmd->cmd_len;
+
+ if (hiraid_is_rw_scmd(scmd))
+ return hiraid_setup_rw_cmd(hdev, &io_cmd->rw, scmd);
+ else
+ return hiraid_setup_nonrw_cmd(hdev, &io_cmd->nonrw, scmd);
+}
+
+static inline void hiraid_init_mapbuff(struct hiraid_mapmange *mapbuf)
+{
+ mapbuf->sge_cnt = 0;
+ mapbuf->page_cnt = -1;
+ mapbuf->use_sgl = false;
+ WRITE_ONCE(mapbuf->state, CMD_IDLE);
+}
+
+static void hiraid_free_mapbuf(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf)
+{
+ const int last_prp = hdev->page_size / sizeof(__le64) - 1;
+ dma_addr_t buffer_phy, next_buffer_phy;
+ struct hiraid_sgl_desc *sg_list;
+ __le64 *prp_list;
+ void *addr;
+ int i;
+
+ buffer_phy = mapbuf->first_dma;
+ if (mapbuf->page_cnt == 0)
+ dma_pool_free(mapbuf->hiraidq->prp_small_pool,
+ hiraid_mapbuf_list(mapbuf)[0], buffer_phy);
+
+ for (i = 0; i < mapbuf->page_cnt; i++) {
+ addr = hiraid_mapbuf_list(mapbuf)[i];
+
+ if (mapbuf->use_sgl) {
+ sg_list = addr;
+ next_buffer_phy =
+ le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
+ } else {
+ prp_list = addr;
+ next_buffer_phy = le64_to_cpu(prp_list[last_prp]);
+ }
+
+ dma_pool_free(hdev->prp_page_pool, addr, buffer_phy);
+ buffer_phy = next_buffer_phy;
+ }
+
+ mapbuf->sense_buffer_virt = NULL;
+ mapbuf->page_cnt = -1;
+}
+
+static int hiraid_io_map_data(struct hiraid_dev *hdev, struct hiraid_mapmange *mapbuf,
+ struct scsi_cmnd *scmd, struct hiraid_scsi_io_cmd *io_cmd)
+{
+ int ret;
+
+ ret = scsi_dma_map(scmd);
+ if (unlikely(ret < 0))
+ return ret;
+ mapbuf->sge_cnt = ret;
+
+	/* No data to DMA, it may be a SCSI non-rw command */
+ if (unlikely(mapbuf->sge_cnt == 0))
+ return 0;
+
+ mapbuf->len = scsi_bufflen(scmd);
+ mapbuf->sgl = scsi_sglist(scmd);
+ mapbuf->use_sgl = !hiraid_is_prp(hdev, mapbuf->sgl, mapbuf->sge_cnt);
+
+ if (mapbuf->use_sgl) {
+ ret = hiraid_build_sgl(hdev, io_cmd, mapbuf);
+ } else {
+ ret = hiraid_build_prp(hdev, mapbuf);
+ io_cmd->common.dptr.prp1 =
+ cpu_to_le64(sg_dma_address(mapbuf->sgl));
+ io_cmd->common.dptr.prp2 = cpu_to_le64(mapbuf->first_dma);
+ }
+
+ if (ret)
+ scsi_dma_unmap(scmd);
+
+ return ret;
+}
+
+static void hiraid_check_status(struct hiraid_mapmange *mapbuf, struct scsi_cmnd *scmd,
+ struct hiraid_completion *cqe)
+{
+ scsi_set_resid(scmd, 0);
+
+ switch ((le16_to_cpu(cqe->status) >> 1) & 0x7f) {
+ case SENSE_STATE_OK:
+ set_host_byte(scmd, DID_OK);
+ break;
+ case SENSE_STATE_NEED_CHECK:
+ set_host_byte(scmd, DID_OK);
+ scmd->result |= le16_to_cpu(cqe->status) >> 8;
+ if (scmd->result & SAM_STAT_CHECK_CONDITION) {
+ memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ memcpy(scmd->sense_buffer,
+ mapbuf->sense_buffer_virt, SCSI_SENSE_BUFFERSIZE);
+ scmd->result = (scmd->result & 0x00ffffff) | (DRIVER_SENSE << 24);
+ }
+ break;
+ case SENSE_STATE_ABORTED:
+ set_host_byte(scmd, DID_ABORT);
+ break;
+ case SENSE_STATE_NEED_RETRY:
+ set_host_byte(scmd, DID_REQUEUE);
+ break;
+ default:
+ set_host_byte(scmd, DID_BAD_TARGET);
+ dev_warn_ratelimited(mapbuf->hiraidq->hdev->dev, "cid[%d] qid[%d] sdev[%d:%d] opcode[%.2x] bad status[0x%x]\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id), scmd->device->channel,
+ scmd->device->id, scmd->cmnd[0], le16_to_cpu(cqe->status));
+ break;
+ }
+}
+
+static inline void hiraid_query_scmd_tag(struct scsi_cmnd *scmd, u16 *qid, u16 *cid,
+ struct hiraid_dev *hdev, struct hiraid_sdev_hostdata *hostdata)
+{
+ u32 tag = blk_mq_unique_tag(blk_mq_rq_from_pdu((void *)scmd));
+
+ if (work_mode) {
+ if ((hdev->hdd_dispatch == DISPATCH_BY_DISK) && (hostdata->hwq != 0))
+ *qid = hostdata->hwq;
+ else
+ *qid = raw_smp_processor_id() % (hdev->online_queues - 1) + 1;
+ } else {
+ *qid = blk_mq_unique_tag_to_hwq(tag) + 1;
+ }
+ *cid = blk_mq_unique_tag_to_tag(tag);
+}
+
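+/*
+ * SCSI queuecommand entry: build the I/O SQE, map the data buffer and
+ * submit it to the selected hardware queue.
+ */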
+static int hiraid_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+{
+ struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ struct scsi_device *sdev = scmd->device;
+ struct hiraid_sdev_hostdata *hostdata;
+ struct hiraid_scsi_io_cmd io_cmd;
+ struct hiraid_queue *ioq;
+ u16 hwq, cid;
+ int ret;
+
+ if (unlikely(hdev->state == DEV_RESETTING))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (unlikely(hdev->state != DEV_LIVE)) {
+ set_host_byte(scmd, DID_NO_CONNECT);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (log_debug_switch)
+ scsi_print_command(scmd);
+
+ hostdata = sdev->hostdata;
+ hiraid_query_scmd_tag(scmd, &hwq, &cid, hdev, hostdata);
+ ioq = &hdev->queues[hwq];
+
+ if (unlikely(atomic_inc_return(&ioq->inflight) >
+ (hdev->ioq_depth - HIRAID_PTHRU_CMDS_PERQ))) {
+ atomic_dec(&ioq->inflight);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ io_cmd.rw.hdid = cpu_to_le32(hostdata->hdid);
+ io_cmd.rw.cmd_id = cpu_to_le16(cid);
+
+ ret = hiraid_setup_io_cmd(hdev, &io_cmd, scmd);
+ if (unlikely(ret)) {
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ atomic_dec(&ioq->inflight);
+ return 0;
+ }
+
+ ret = cid * SCSI_SENSE_BUFFERSIZE;
+ if (work_mode) {
+ mapbuf->sense_buffer_virt = hdev->sense_buffer_virt + ret;
+ mapbuf->sense_buffer_phy = hdev->sense_buffer_phy + ret;
+ } else {
+ mapbuf->sense_buffer_virt = ioq->sense_buffer_virt + ret;
+ mapbuf->sense_buffer_phy = ioq->sense_buffer_phy + ret;
+ }
+ io_cmd.common.sense_addr = cpu_to_le64(mapbuf->sense_buffer_phy);
+ io_cmd.common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+
+ hiraid_init_mapbuff(mapbuf);
+
+ mapbuf->hiraidq = ioq;
+ mapbuf->cid = cid;
+ ret = hiraid_io_map_data(hdev, mapbuf, scmd, &io_cmd);
+ if (unlikely(ret)) {
+ dev_err(hdev->dev, "io map data err\n");
+ set_host_byte(scmd, DID_ERROR);
+ scmd->scsi_done(scmd);
+ ret = 0;
+ goto deinit_iobuf;
+ }
+
+ WRITE_ONCE(mapbuf->state, CMD_FLIGHT);
+ hiraid_submit_cmd(ioq, &io_cmd);
+
+ return 0;
+
+deinit_iobuf:
+ atomic_dec(&ioq->inflight);
+ hiraid_free_mapbuf(hdev, mapbuf);
+ return ret;
+}
+
+static int hiraid_match_dev(struct hiraid_dev *hdev, u16 idx, struct scsi_device *sdev)
+{
+ if (HIRAID_DEV_INFO_FLAG_VALID(hdev->dev_info[idx].flag)) {
+ if (sdev->channel == hdev->dev_info[idx].channel &&
+ sdev->id == le16_to_cpu(hdev->dev_info[idx].target) &&
+ sdev->lun < hdev->dev_info[idx].lun) {
+ dev_info(hdev->dev, "match device success, channel:target:lun[%d:%d:%d]\n",
+ hdev->dev_info[idx].channel,
+ hdev->dev_info[idx].target,
+ hdev->dev_info[idx].lun);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int hiraid_disk_qd(u8 attr)
+{
+ switch (HIRAID_DEV_DISK_TYPE(attr)) {
+ case HIRAID_SAS_HDD_VD:
+ case HIRAID_SATA_HDD_VD:
+ return HIRAID_HDD_VD_QD;
+ case HIRAID_SAS_SSD_VD:
+ case HIRAID_SATA_SSD_VD:
+ case HIRAID_NVME_SSD_VD:
+ return HIRAID_SSD_VD_QD;
+ case HIRAID_SAS_HDD_PD:
+ case HIRAID_SATA_HDD_PD:
+ return HIRAID_HDD_PD_QD;
+ case HIRAID_SAS_SSD_PD:
+ case HIRAID_SATA_SSD_PD:
+ case HIRAID_NVME_SSD_PD:
+ return HIRAID_SSD_PD_QD;
+ default:
+ return MAX_CMD_PER_DEV;
+ }
+}
+
+static bool hiraid_disk_is_hdd(u8 attr)
+{
+ switch (HIRAID_DEV_DISK_TYPE(attr)) {
+ case HIRAID_SAS_HDD_VD:
+ case HIRAID_SATA_HDD_VD:
+ case HIRAID_SAS_HDD_PD:
+ case HIRAID_SATA_HDD_PD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int hiraid_slave_alloc(struct scsi_device *sdev)
+{
+ struct hiraid_sdev_hostdata *hostdata;
+ struct hiraid_dev *hdev;
+ u16 idx;
+
+ hdev = shost_priv(sdev->host);
+ hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
+ if (!hostdata) {
+ dev_err(hdev->dev, "alloc scsi host data memory failed\n");
+ return -ENOMEM;
+ }
+
+ down_read(&hdev->dev_rwsem);
+ for (idx = 0; idx < le32_to_cpu(hdev->ctrl_info->nd); idx++) {
+ if (hiraid_match_dev(hdev, idx, sdev))
+ goto scan_host;
+ }
+ up_read(&hdev->dev_rwsem);
+
+ kfree(hostdata);
+ return -ENXIO;
+
+scan_host:
+ hostdata->hdid = le32_to_cpu(hdev->dev_info[idx].hdid);
+ hostdata->max_io_kb = le16_to_cpu(hdev->dev_info[idx].max_io_kb);
+ hostdata->attr = hdev->dev_info[idx].attr;
+ hostdata->flag = hdev->dev_info[idx].flag;
+ hostdata->rg_id = 0xff;
+ sdev->hostdata = hostdata;
+ up_read(&hdev->dev_rwsem);
+ return 0;
+}
+
+static void hiraid_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+static int hiraid_slave_configure(struct scsi_device *sdev)
+{
+ unsigned int timeout = scmd_tmout_rawdisk * HZ;
+ struct hiraid_dev *hdev = shost_priv(sdev->host);
+ struct hiraid_sdev_hostdata *hostdata = sdev->hostdata;
+ u32 max_sec = sdev->host->max_sectors;
+ int qd = MAX_CMD_PER_DEV;
+
+ if (hostdata) {
+ if (HIRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ timeout = scmd_tmout_vd * HZ;
+ else if (HIRAID_DEV_INFO_ATTR_RAWDISK(hostdata->attr))
+ timeout = scmd_tmout_rawdisk * HZ;
+ max_sec = hostdata->max_io_kb << 1;
+ qd = hiraid_disk_qd(hostdata->attr);
+
+ if (hiraid_disk_is_hdd(hostdata->attr))
+ hostdata->hwq = hostdata->hdid % (hdev->online_queues - 1) + 1;
+ else
+ hostdata->hwq = 0;
+ } else {
+ dev_err(hdev->dev, "err, sdev->hostdata is null\n");
+ }
+
+ blk_queue_rq_timeout(sdev->request_queue, timeout);
+ sdev->eh_timeout = timeout;
+ scsi_change_queue_depth(sdev, qd);
+
+ if ((max_sec == 0) || (max_sec > sdev->host->max_sectors))
+ max_sec = sdev->host->max_sectors;
+
+ if (!max_io_force)
+ blk_queue_max_hw_sectors(sdev->request_queue, max_sec);
+
+ dev_info(hdev->dev, "sdev->channel:id:lun[%d:%d:%lld] scmd_timeout[%d]s maxsec[%d]\n",
+ sdev->channel, sdev->id, sdev->lun, timeout / HZ, max_sec);
+
+ return 0;
+}
+
+static void hiraid_shost_init(struct hiraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u8 domain, bus;
+ u32 dev_func;
+
+ domain = pci_domain_nr(pdev->bus);
+ bus = pdev->bus->number;
+ dev_func = pdev->devfn;
+
+ hdev->shost->nr_hw_queues = work_mode ? 1 : hdev->online_queues - 1;
+ hdev->shost->can_queue = hdev->scsi_qd;
+
+ hdev->shost->sg_tablesize = le16_to_cpu(hdev->ctrl_info->max_num_sge);
+ /* 512B per sector */
+ hdev->shost->max_sectors = (1U << ((hdev->ctrl_info->mdts) * 1U) << 12) / 512;
+ hdev->shost->cmd_per_lun = MAX_CMD_PER_DEV;
+ hdev->shost->max_channel = le16_to_cpu(hdev->ctrl_info->max_channel) - 1;
+ hdev->shost->max_id = le32_to_cpu(hdev->ctrl_info->max_tgt_id);
+ hdev->shost->max_lun = le16_to_cpu(hdev->ctrl_info->max_lun);
+
+ hdev->shost->this_id = -1;
+ hdev->shost->unique_id = (domain << 16) | (bus << 8) | dev_func;
+ hdev->shost->max_cmd_len = MAX_CDB_LEN;
+ hdev->shost->hostt->cmd_size = hiraid_get_max_cmd_size(hdev);
+}
+
+static int hiraid_alloc_queue(struct hiraid_dev *hdev, u16 qid, u16 depth)
+{
+ struct hiraid_queue *hiraidq = &hdev->queues[qid];
+ int ret = 0;
+
+ if (hdev->queue_count > qid) {
+		dev_info(hdev->dev, "warn: queue[%d] already exists\n", qid);
+ return 0;
+ }
+
+ hiraidq->cqes = dma_alloc_coherent(hdev->dev, CQ_SIZE(depth),
+ &hiraidq->cq_buffer_phy, GFP_KERNEL | __GFP_ZERO);
+ if (!hiraidq->cqes)
+ return -ENOMEM;
+
+ hiraidq->sq_cmds = dma_alloc_coherent(hdev->dev, SQ_SIZE(qid, depth),
+ &hiraidq->sq_buffer_phy, GFP_KERNEL);
+ if (!hiraidq->sq_cmds) {
+ ret = -ENOMEM;
+ goto free_cqes;
+ }
+
+	/*
+	 * In single hw queue mode there is no need to allocate a sense buffer
+	 * for every queue; it has all been allocated in hiraid_alloc_resources.
+	 */
+ if (work_mode)
+ goto initq;
+
+ /* alloc sense buffer */
+ hiraidq->sense_buffer_virt = dma_alloc_coherent(hdev->dev, SENSE_SIZE(depth),
+ &hiraidq->sense_buffer_phy, GFP_KERNEL | __GFP_ZERO);
+ if (!hiraidq->sense_buffer_virt) {
+ ret = -ENOMEM;
+ goto free_sq_cmds;
+ }
+
+initq:
+ spin_lock_init(&hiraidq->sq_lock);
+ spin_lock_init(&hiraidq->cq_lock);
+ hiraidq->hdev = hdev;
+ hiraidq->q_depth = depth;
+ hiraidq->qid = qid;
+ hiraidq->cq_vector = -1;
+ hdev->queue_count++;
+
+ return 0;
+
+free_sq_cmds:
+ dma_free_coherent(hdev->dev, SQ_SIZE(qid, depth), (void *)hiraidq->sq_cmds,
+ hiraidq->sq_buffer_phy);
+free_cqes:
+ dma_free_coherent(hdev->dev, CQ_SIZE(depth), (void *)hiraidq->cqes,
+ hiraidq->cq_buffer_phy);
+ return ret;
+}
+
+static int hiraid_wait_control_ready(struct hiraid_dev *hdev, u64 cap, bool enabled)
+{
+ unsigned long timeout =
+ ((HIRAID_CAP_TIMEOUT(cap) + 1) * HIRAID_CAP_TIMEOUT_UNIT_MS) + jiffies;
+ u32 bit = enabled ? HIRAID_CSTS_RDY : 0;
+
+ while ((readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_RDY) != bit) {
+ usleep_range(1000, 2000);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev, "device not ready; aborting %s\n",
+ enabled ? "initialisation" : "reset");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int hiraid_shutdown_control(struct hiraid_dev *hdev)
+{
+ unsigned long timeout = le32_to_cpu(hdev->ctrl_info->rtd3e) / 1000000 * HZ + jiffies;
+
+ hdev->ctrl_config &= ~HIRAID_CC_SHN_MASK;
+ hdev->ctrl_config |= HIRAID_CC_SHN_NORMAL;
+ writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC);
+
+ while ((readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_SHST_MASK) !=
+ HIRAID_CSTS_SHST_CMPLT) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(hdev->dev, "device shutdown incomplete, abort shutdown\n");
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+static int hiraid_disable_control(struct hiraid_dev *hdev)
+{
+ hdev->ctrl_config &= ~HIRAID_CC_SHN_MASK;
+ hdev->ctrl_config &= ~HIRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC);
+
+ return hiraid_wait_control_ready(hdev, hdev->cap, false);
+}
+
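+/* Program the controller configuration register and wait for CSTS.RDY. */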
+static int hiraid_enable_control(struct hiraid_dev *hdev)
+{
+ u64 cap = hdev->cap;
+ u32 dev_page_min = HIRAID_CAP_MPSMIN(cap) + 12;
+ u32 page_shift = PAGE_SHIFT;
+
+ if (page_shift < dev_page_min) {
+ dev_err(hdev->dev, "minimum device page size[%u], too large for host[%u]\n",
+ 1U << dev_page_min, 1U << page_shift);
+ return -ENODEV;
+ }
+
+ page_shift = min_t(unsigned int, HIRAID_CAP_MPSMAX(cap) + 12, PAGE_SHIFT);
+ hdev->page_size = 1U << page_shift;
+
+ hdev->ctrl_config = HIRAID_CC_CSS_NVM;
+ hdev->ctrl_config |= (page_shift - 12) << HIRAID_CC_MPS_SHIFT;
+ hdev->ctrl_config |= HIRAID_CC_AMS_RR | HIRAID_CC_SHN_NONE;
+ hdev->ctrl_config |= HIRAID_CC_IOSQES | HIRAID_CC_IOCQES;
+ hdev->ctrl_config |= HIRAID_CC_ENABLE;
+ writel(hdev->ctrl_config, hdev->bar + HIRAID_REG_CC);
+
+ return hiraid_wait_control_ready(hdev, cap, true);
+}
+
+static void hiraid_init_queue(struct hiraid_queue *hiraidq, u16 qid)
+{
+ struct hiraid_dev *hdev = hiraidq->hdev;
+
+ memset((void *)hiraidq->cqes, 0, CQ_SIZE(hiraidq->q_depth));
+
+ hiraidq->sq_tail = 0;
+ hiraidq->cq_head = 0;
+ hiraidq->cq_phase = 1;
+ hiraidq->q_db = &hdev->dbs[qid * 2 * hdev->db_stride];
+ hiraidq->prp_small_pool = hdev->prp_extra_pool[qid % extra_pool_num];
+ hdev->online_queues++;
+ atomic_set(&hiraidq->inflight, 0);
+}
+
+static inline bool hiraid_cqe_pending(struct hiraid_queue *hiraidq)
+{
+ return (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].status) & 1) ==
+ hiraidq->cq_phase;
+}
+
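+/*
+ * Complete a normal I/O command: look up the request by cmd_id, unmap the
+ * data buffer and finish the scsi_cmnd; a command that already timed out
+ * is only cleaned up.
+ */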
+static void hiraid_complete_io_cmnd(struct hiraid_queue *ioq, struct hiraid_completion *cqe)
+{
+ struct hiraid_dev *hdev = ioq->hdev;
+ struct blk_mq_tags *tags;
+ struct scsi_cmnd *scmd;
+ struct hiraid_mapmange *mapbuf;
+ struct request *req;
+ unsigned long elapsed;
+
+ atomic_dec(&ioq->inflight);
+
+ if (work_mode)
+ tags = hdev->shost->tag_set.tags[0];
+ else
+ tags = hdev->shost->tag_set.tags[ioq->qid - 1];
+ req = blk_mq_tag_to_rq(tags, le16_to_cpu(cqe->cmd_id));
+ if (unlikely(!req || !blk_mq_request_started(req))) {
+ dev_warn(hdev->dev, "invalid id[%d] completed on queue[%d]\n",
+ le16_to_cpu(cqe->cmd_id), ioq->qid);
+ return;
+ }
+
+ scmd = blk_mq_rq_to_pdu(req);
+ mapbuf = scsi_cmd_priv(scmd);
+
+ elapsed = jiffies - scmd->jiffies_at_alloc;
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d] finish IO cost %3ld.%3ld seconds\n",
+ le16_to_cpu(cqe->cmd_id), ioq->qid, elapsed / HZ, elapsed % HZ);
+
+ if (cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_COMPLETE) != CMD_FLIGHT) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] enters abnormal handler, cost %3ld.%3ld seconds\n",
+ le16_to_cpu(cqe->cmd_id), ioq->qid, elapsed / HZ, elapsed % HZ);
+ WRITE_ONCE(mapbuf->state, CMD_TMO_COMPLETE);
+
+ if (mapbuf->sge_cnt) {
+ mapbuf->sge_cnt = 0;
+ scsi_dma_unmap(scmd);
+ }
+ hiraid_free_mapbuf(hdev, mapbuf);
+
+ return;
+ }
+
+ hiraid_check_status(mapbuf, scmd, cqe);
+ if (mapbuf->sge_cnt) {
+ mapbuf->sge_cnt = 0;
+ scsi_dma_unmap(scmd);
+ }
+ hiraid_free_mapbuf(hdev, mapbuf);
+ scmd->scsi_done(scmd);
+}
+
+static void hiraid_complete_admin_cmnd(struct hiraid_queue *adminq, struct hiraid_completion *cqe)
+{
+ struct hiraid_dev *hdev = adminq->hdev;
+ struct hiraid_cmd *adm_cmd;
+
+ adm_cmd = hdev->adm_cmds + le16_to_cpu(cqe->cmd_id);
+ if (unlikely(adm_cmd->state == CMD_IDLE)) {
+ dev_warn(adminq->hdev->dev, "invalid id[%d] completed on queue[%d]\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->sq_id));
+ return;
+ }
+
+ adm_cmd->status = le16_to_cpu(cqe->status) >> 1;
+ adm_cmd->result0 = le32_to_cpu(cqe->result);
+ adm_cmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&adm_cmd->cmd_done);
+}
+
+static void hiraid_send_async_event(struct hiraid_dev *hdev, u16 cid);
+
+static void hiraid_complete_async_event(struct hiraid_queue *hiraidq, struct hiraid_completion *cqe)
+{
+ struct hiraid_dev *hdev = hiraidq->hdev;
+ u32 result = le32_to_cpu(cqe->result);
+
+ dev_info(hdev->dev, "recv async event, cid[%d] status[0x%x] result[0x%x]\n",
+ le16_to_cpu(cqe->cmd_id), le16_to_cpu(cqe->status) >> 1, result);
+
+ hiraid_send_async_event(hdev, le16_to_cpu(cqe->cmd_id));
+
+ if ((le16_to_cpu(cqe->status) >> 1) != HIRAID_SC_SUCCESS)
+ return;
+ switch (result & 0x7) {
+ case HIRAID_ASYN_EVENT_NOTICE:
+ hiraid_handle_async_notice(hdev, result);
+ break;
+ case HIRAID_ASYN_EVENT_VS:
+ hiraid_handle_async_vs(hdev, result, le32_to_cpu(cqe->result1));
+ break;
+ default:
+ dev_warn(hdev->dev, "unsupported async event type[%u]\n", result & 0x7);
+ break;
+ }
+}
+
+static void hiraid_complete_pthru_cmnd(struct hiraid_queue *ioq, struct hiraid_completion *cqe)
+{
+ struct hiraid_dev *hdev = ioq->hdev;
+ struct hiraid_cmd *ptcmd;
+
+ ptcmd = hdev->io_ptcmds + (ioq->qid - 1) * HIRAID_PTHRU_CMDS_PERQ +
+ le16_to_cpu(cqe->cmd_id) - hdev->scsi_qd;
+
+ ptcmd->status = le16_to_cpu(cqe->status) >> 1;
+ ptcmd->result0 = le32_to_cpu(cqe->result);
+ ptcmd->result1 = le32_to_cpu(cqe->result1);
+
+ complete(&ptcmd->cmd_done);
+}
+
+static inline void hiraid_handle_cqe(struct hiraid_queue *hiraidq, u16 idx)
+{
+ struct hiraid_completion *cqe = &hiraidq->cqes[idx];
+ struct hiraid_dev *hdev = hiraidq->hdev;
+ u16 cid = le16_to_cpu(cqe->cmd_id);
+
+ if (unlikely(!work_mode && (cid >= hiraidq->q_depth))) {
+ dev_err(hdev->dev, "invalid command id[%d] completed on queue[%d]\n",
+ cid, cqe->sq_id);
+ return;
+ }
+
+ dev_log_dbg(hdev->dev, "cid[%d] qid[%d] result[0x%x] sqid[%d] status[0x%x]\n",
+ cid, hiraidq->qid, le32_to_cpu(cqe->result),
+ le16_to_cpu(cqe->sq_id), le16_to_cpu(cqe->status));
+
+ if (unlikely(hiraidq->qid == 0 && cid >= HIRAID_AQ_BLK_MQ_DEPTH)) {
+ hiraid_complete_async_event(hiraidq, cqe);
+ return;
+ }
+
+ if (unlikely(hiraidq->qid && cid >= hdev->scsi_qd)) {
+ hiraid_complete_pthru_cmnd(hiraidq, cqe);
+ return;
+ }
+
+ if (hiraidq->qid)
+ hiraid_complete_io_cmnd(hiraidq, cqe);
+ else
+ hiraid_complete_admin_cmnd(hiraidq, cqe);
+}
+
+static void hiraid_complete_cqes(struct hiraid_queue *hiraidq, u16 start, u16 end)
+{
+ while (start != end) {
+ hiraid_handle_cqe(hiraidq, start);
+ if (++start == hiraidq->q_depth)
+ start = 0;
+ }
+}
+
+static inline void hiraid_update_cq_head(struct hiraid_queue *hiraidq)
+{
+ if (++hiraidq->cq_head == hiraidq->q_depth) {
+ hiraidq->cq_head = 0;
+ hiraidq->cq_phase = !hiraidq->cq_phase;
+ }
+}
+
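+/*
+ * Consume pending CQEs starting at cq_head and update the CQ doorbell;
+ * returns true if an entry matching @tag was seen.
+ */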
+static inline bool hiraid_process_cq(struct hiraid_queue *hiraidq, u16 *start, u16 *end, int tag)
+{
+ bool found = false;
+
+ *start = hiraidq->cq_head;
+ while (!found && hiraid_cqe_pending(hiraidq)) {
+ if (le16_to_cpu(hiraidq->cqes[hiraidq->cq_head].cmd_id) == tag)
+ found = true;
+ hiraid_update_cq_head(hiraidq);
+ }
+ *end = hiraidq->cq_head;
+
+ if (*start != *end)
+ writel(hiraidq->cq_head, hiraidq->q_db + hiraidq->hdev->db_stride);
+
+ return found;
+}
+
+static bool hiraid_poll_cq(struct hiraid_queue *hiraidq, int cid)
+{
+ u16 start, end;
+ bool found;
+
+ if (!hiraid_cqe_pending(hiraidq))
+ return 0;
+
+ spin_lock_irq(&hiraidq->cq_lock);
+ found = hiraid_process_cq(hiraidq, &start, &end, cid);
+ spin_unlock_irq(&hiraidq->cq_lock);
+
+ hiraid_complete_cqes(hiraidq, start, end);
+ return found;
+}
+
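+/* Interrupt handler: drain the completion queue and complete the finished commands. */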
+static irqreturn_t hiraid_handle_irq(int irq, void *data)
+{
+ struct hiraid_queue *hiraidq = data;
+ irqreturn_t ret = IRQ_NONE;
+ u16 start, end;
+
+ spin_lock(&hiraidq->cq_lock);
+ if (hiraidq->cq_head != hiraidq->last_cq_head)
+ ret = IRQ_HANDLED;
+
+ hiraid_process_cq(hiraidq, &start, &end, -1);
+ hiraidq->last_cq_head = hiraidq->cq_head;
+ spin_unlock(&hiraidq->cq_lock);
+
+ if (start != end) {
+ hiraid_complete_cqes(hiraidq, start, end);
+ ret = IRQ_HANDLED;
+ }
+ return ret;
+}
+
+static int hiraid_setup_admin_queue(struct hiraid_dev *hdev)
+{
+ struct hiraid_queue *adminq = &hdev->queues[0];
+ u32 aqa;
+ int ret;
+
+ dev_info(hdev->dev, "start disable controller\n");
+
+ ret = hiraid_disable_control(hdev);
+ if (ret)
+ return ret;
+
+ ret = hiraid_alloc_queue(hdev, 0, HIRAID_AQ_DEPTH);
+ if (ret)
+ return ret;
+
+ aqa = adminq->q_depth - 1;
+ aqa |= aqa << 16;
+ writel(aqa, hdev->bar + HIRAID_REG_AQA);
+ lo_hi_writeq(adminq->sq_buffer_phy, hdev->bar + HIRAID_REG_ASQ);
+ lo_hi_writeq(adminq->cq_buffer_phy, hdev->bar + HIRAID_REG_ACQ);
+
+ dev_info(hdev->dev, "start enable controller\n");
+
+ ret = hiraid_enable_control(hdev);
+ if (ret) {
+ ret = -ENODEV;
+ return ret;
+ }
+
+ adminq->cq_vector = 0;
+ ret = pci_request_irq(hdev->pdev, adminq->cq_vector, hiraid_handle_irq, NULL,
+ adminq, "hiraid%d_q%d", hdev->instance, adminq->qid);
+ if (ret) {
+ adminq->cq_vector = -1;
+ return ret;
+ }
+
+ hiraid_init_queue(adminq, 0);
+
+ dev_info(hdev->dev, "setup admin queue success, queuecount[%d] online[%d] pagesize[%d]\n",
+ hdev->queue_count, hdev->online_queues, hdev->page_size);
+
+ return 0;
+}
+
+static u32 hiraid_get_bar_size(struct hiraid_dev *hdev, u32 nr_ioqs)
+{
+ return (HIRAID_REG_DBS + ((nr_ioqs + 1) * 8 * hdev->db_stride));
+}
+
+static int hiraid_create_admin_cmds(struct hiraid_dev *hdev)
+{
+ u16 i;
+
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+ spin_lock_init(&hdev->adm_cmd_lock);
+
+ hdev->adm_cmds = kcalloc_node(HIRAID_AQ_BLK_MQ_DEPTH, sizeof(struct hiraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->adm_cmds) {
+ dev_err(hdev->dev, "alloc admin cmds failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < HIRAID_AQ_BLK_MQ_DEPTH; i++) {
+ hdev->adm_cmds[i].qid = 0;
+ hdev->adm_cmds[i].cid = i;
+ list_add_tail(&(hdev->adm_cmds[i].list), &hdev->adm_cmd_list);
+ }
+
+ dev_info(hdev->dev, "alloc admin cmds success, num[%d]\n", HIRAID_AQ_BLK_MQ_DEPTH);
+
+ return 0;
+}
+
+static void hiraid_free_admin_cmds(struct hiraid_dev *hdev)
+{
+ kfree(hdev->adm_cmds);
+ hdev->adm_cmds = NULL;
+ INIT_LIST_HEAD(&hdev->adm_cmd_list);
+}
+
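+/* Take a preallocated command slot from the admin or passthrough free list. */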
+static struct hiraid_cmd *hiraid_get_cmd(struct hiraid_dev *hdev, enum hiraid_cmd_type type)
+{
+ struct hiraid_cmd *cmd = NULL;
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == HIRAID_CMD_PTHRU) {
+ head = &hdev->io_pt_list;
+ slock = &hdev->io_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(slock, flags);
+ dev_err(hdev->dev, "err, cmd[%d] list empty\n", type);
+ return NULL;
+ }
+ cmd = list_entry(head->next, struct hiraid_cmd, list);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(slock, flags);
+
+ WRITE_ONCE(cmd->state, CMD_FLIGHT);
+
+ return cmd;
+}
+
+static void hiraid_put_cmd(struct hiraid_dev *hdev, struct hiraid_cmd *cmd,
+ enum hiraid_cmd_type type)
+{
+ unsigned long flags;
+ struct list_head *head = &hdev->adm_cmd_list;
+ spinlock_t *slock = &hdev->adm_cmd_lock;
+
+ if (type == HIRAID_CMD_PTHRU) {
+ head = &hdev->io_pt_list;
+ slock = &hdev->io_pt_lock;
+ }
+
+ spin_lock_irqsave(slock, flags);
+ WRITE_ONCE(cmd->state, CMD_IDLE);
+ list_add_tail(&cmd->list, head);
+ spin_unlock_irqrestore(slock, flags);
+}
+
+static bool hiraid_admin_need_reset(struct hiraid_admin_command *cmd)
+{
+ switch (cmd->common.opcode) {
+ case HIRAID_ADMIN_DELETE_SQ:
+ case HIRAID_ADMIN_CREATE_SQ:
+ case HIRAID_ADMIN_DELETE_CQ:
+ case HIRAID_ADMIN_CREATE_CQ:
+ case HIRAID_ADMIN_SET_FEATURES:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int hiraid_reset_work_sync(struct hiraid_dev *hdev);
+static inline void hiraid_admin_timeout(struct hiraid_dev *hdev, struct hiraid_cmd *cmd)
+{
+	/* the command may already have completed because of a controller reset */
+ if (READ_ONCE(cmd->state) == CMD_COMPLETE)
+ return;
+ if (hiraid_reset_work_sync(hdev) == -EBUSY)
+ flush_work(&hdev->reset_work);
+}
+
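+/*
+ * Submit an admin command and wait synchronously for its completion;
+ * a timeout may trigger a controller reset.
+ */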
+static int hiraid_put_admin_sync_request(struct hiraid_dev *hdev, struct hiraid_admin_command *cmd,
+ u32 *result0, u32 *result1, u32 timeout)
+{
+ struct hiraid_cmd *adm_cmd = hiraid_get_cmd(hdev, HIRAID_CMD_ADMIN);
+
+ if (!adm_cmd) {
+ dev_err(hdev->dev, "err, get admin cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&adm_cmd->cmd_done);
+
+ cmd->common.cmd_id = cpu_to_le16(adm_cmd->cid);
+ hiraid_submit_cmd(&hdev->queues[0], cmd);
+
+ if (!wait_for_completion_timeout(&adm_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ adm_cmd->cid, adm_cmd->qid, cmd->usr_cmd.opcode,
+ cmd->usr_cmd.info_0.subopcode);
+
+ /* reset controller if admin timeout */
+ if (hiraid_admin_need_reset(cmd))
+ hiraid_admin_timeout(hdev, adm_cmd);
+
+ hiraid_put_cmd(hdev, adm_cmd, HIRAID_CMD_ADMIN);
+ return -ETIME;
+ }
+
+ if (result0)
+ *result0 = adm_cmd->result0;
+ if (result1)
+ *result1 = adm_cmd->result1;
+
+ hiraid_put_cmd(hdev, adm_cmd, HIRAID_CMD_ADMIN);
+
+ return adm_cmd->status;
+}
+
+/**
+ * hiraid_create_complete_queue - send a command to the controller to create a completion queue
+ */
+static int hiraid_create_complete_queue(struct hiraid_dev *hdev, u16 qid,
+ struct hiraid_queue *hiraidq, u16 cq_vector)
+{
+ struct hiraid_admin_command admin_cmd;
+ int flags = HIRAID_QUEUE_PHYS_CONTIG | HIRAID_CQ_IRQ_ENABLED;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_cq.opcode = HIRAID_ADMIN_CREATE_CQ;
+ admin_cmd.create_cq.prp1 = cpu_to_le64(hiraidq->cq_buffer_phy);
+ admin_cmd.create_cq.cqid = cpu_to_le16(qid);
+ admin_cmd.create_cq.qsize = cpu_to_le16(hiraidq->q_depth - 1);
+ admin_cmd.create_cq.cq_flags = cpu_to_le16(flags);
+ admin_cmd.create_cq.irq_vector = cpu_to_le16(cq_vector);
+
+ return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/**
+ * hiraid_create_send_queue - send a command to the controller to create a submission queue
+ */
+static int hiraid_create_send_queue(struct hiraid_dev *hdev, u16 qid,
+ struct hiraid_queue *hiraidq)
+{
+ struct hiraid_admin_command admin_cmd;
+ int flags = HIRAID_QUEUE_PHYS_CONTIG;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.create_sq.opcode = HIRAID_ADMIN_CREATE_SQ;
+ admin_cmd.create_sq.prp1 = cpu_to_le64(hiraidq->sq_buffer_phy);
+ admin_cmd.create_sq.sqid = cpu_to_le16(qid);
+ admin_cmd.create_sq.qsize = cpu_to_le16(hiraidq->q_depth - 1);
+ admin_cmd.create_sq.sq_flags = cpu_to_le16(flags);
+ admin_cmd.create_sq.cqid = cpu_to_le16(qid);
+
+ return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+static void hiraid_free_all_queues(struct hiraid_dev *hdev)
+{
+ int i;
+ struct hiraid_queue *hq;
+
+ for (i = 0; i < hdev->queue_count; i++) {
+ hq = &hdev->queues[i];
+ dma_free_coherent(hdev->dev, CQ_SIZE(hq->q_depth),
+ (void *)hq->cqes, hq->cq_buffer_phy);
+ dma_free_coherent(hdev->dev, SQ_SIZE(hq->qid, hq->q_depth),
+ hq->sq_cmds, hq->sq_buffer_phy);
+ if (!work_mode)
+ dma_free_coherent(hdev->dev, SENSE_SIZE(hq->q_depth),
+ hq->sense_buffer_virt, hq->sense_buffer_phy);
+ }
+
+ hdev->queue_count = 0;
+}
+
+static void hiraid_free_sense_buffer(struct hiraid_dev *hdev)
+{
+ if (hdev->sense_buffer_virt) {
+ dma_free_coherent(hdev->dev,
+ SENSE_SIZE(hdev->scsi_qd + max_hwq_num * HIRAID_PTHRU_CMDS_PERQ),
+ hdev->sense_buffer_virt, hdev->sense_buffer_phy);
+ hdev->sense_buffer_virt = NULL;
+ }
+}
+
+static int hiraid_delete_queue(struct hiraid_dev *hdev, u8 opcode, u16 qid)
+{
+ struct hiraid_admin_command admin_cmd;
+ int ret;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.delete_queue.opcode = opcode;
+ admin_cmd.delete_queue.qid = cpu_to_le16(qid);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (ret)
+ dev_err(hdev->dev, "delete %s:[%d] failed\n",
+ (opcode == HIRAID_ADMIN_DELETE_CQ) ? "cq" : "sq", qid);
+
+ return ret;
+}
+
+static int hiraid_delete_complete_queue(struct hiraid_dev *hdev, u16 cqid)
+{
+ return hiraid_delete_queue(hdev, HIRAID_ADMIN_DELETE_CQ, cqid);
+}
+
+static int hiraid_delete_send_queue(struct hiraid_dev *hdev, u16 sqid)
+{
+ return hiraid_delete_queue(hdev, HIRAID_ADMIN_DELETE_SQ, sqid);
+}
+
+static int hiraid_create_queue(struct hiraid_queue *hiraidq, u16 qid)
+{
+ struct hiraid_dev *hdev = hiraidq->hdev;
+ u16 cq_vector;
+ int ret;
+
+ cq_vector = (hdev->num_vecs == 1) ? 0 : qid;
+ ret = hiraid_create_complete_queue(hdev, qid, hiraidq, cq_vector);
+ if (ret)
+ return ret;
+
+ ret = hiraid_create_send_queue(hdev, qid, hiraidq);
+ if (ret)
+ goto delete_cq;
+
+ hiraidq->cq_vector = cq_vector;
+ ret = pci_request_irq(hdev->pdev, cq_vector, hiraid_handle_irq, NULL,
+ hiraidq, "hiraid%d_q%d", hdev->instance, qid);
+ if (ret) {
+ hiraidq->cq_vector = -1;
+ dev_err(hdev->dev, "request queue[%d] irq failed\n", qid);
+ goto delete_sq;
+ }
+
+ hiraid_init_queue(hiraidq, qid);
+
+ return 0;
+
+delete_sq:
+ hiraid_delete_send_queue(hdev, qid);
+delete_cq:
+ hiraid_delete_complete_queue(hdev, qid);
+
+ return ret;
+}
+
+static int hiraid_create_io_queues(struct hiraid_dev *hdev)
+{
+ u32 i, max;
+ int ret = 0;
+
+ max = min(hdev->max_qid, hdev->queue_count - 1);
+ for (i = hdev->online_queues; i <= max; i++) {
+ ret = hiraid_create_queue(&hdev->queues[i], i);
+ if (ret) {
+ dev_err(hdev->dev, "create queue[%d] failed\n", i);
+ break;
+ }
+ }
+
+ if (!hdev->last_qcnt)
+ hdev->last_qcnt = hdev->online_queues;
+
+	dev_info(hdev->dev, "queue_count[%d] online_queue[%d] last_online[%d]\n",
+ hdev->queue_count, hdev->online_queues, hdev->last_qcnt);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+static int hiraid_set_features(struct hiraid_dev *hdev, u32 fid, u32 dword11, void *buffer,
+ size_t buflen, u32 *result)
+{
+ struct hiraid_admin_command admin_cmd;
+ int ret;
+ u8 *data_ptr = NULL;
+ dma_addr_t buffer_phy = 0;
+
+ if (buffer && buflen) {
+ data_ptr = dma_alloc_coherent(hdev->dev, buflen, &buffer_phy, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memcpy(data_ptr, buffer, buflen);
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.features.opcode = HIRAID_ADMIN_SET_FEATURES;
+ admin_cmd.features.fid = cpu_to_le32(fid);
+ admin_cmd.features.dword11 = cpu_to_le32(dword11);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, result, NULL, 0);
+
+ if (data_ptr)
+ dma_free_coherent(hdev->dev, buflen, data_ptr, buffer_phy);
+
+ return ret;
+}
+
+static int hiraid_configure_timestamp(struct hiraid_dev *hdev)
+{
+ __le64 timestamp;
+ int ret;
+
+ timestamp = cpu_to_le64(ktime_to_ms(ktime_get_real()));
+ ret = hiraid_set_features(hdev, HIRAID_FEATURE_TIMESTAMP, 0,
+ ×tamp, sizeof(timestamp), NULL);
+
+ if (ret)
+ dev_err(hdev->dev, "set timestamp failed[%d]\n", ret);
+ return ret;
+}
+
+static int hiraid_get_queue_cnt(struct hiraid_dev *hdev, u32 *cnt)
+{
+ u32 q_cnt = (*cnt - 1) | ((*cnt - 1) << 16);
+ u32 nr_ioqs, result;
+ int status;
+
+ status = hiraid_set_features(hdev, HIRAID_FEATURE_NUM_QUEUES, q_cnt, NULL, 0, &result);
+ if (status) {
+ dev_err(hdev->dev, "set queue count failed, status[%d]\n",
+ status);
+ return -EIO;
+ }
+
+ nr_ioqs = min(result & 0xffff, result >> 16) + 1;
+ *cnt = min(*cnt, nr_ioqs);
+ if (*cnt == 0) {
+ dev_err(hdev->dev, "illegal qcount: zero, nr_ioqs[%d], cnt[%d]\n", nr_ioqs, *cnt);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int hiraid_setup_io_queues(struct hiraid_dev *hdev)
+{
+ struct hiraid_queue *adminq = &hdev->queues[0];
+ struct pci_dev *pdev = hdev->pdev;
+ u32 i, size, nr_ioqs;
+ int ret;
+
+ struct irq_affinity affd = {
+ .pre_vectors = 1
+ };
+
+ /* alloc IO sense buffer for single hw queue mode */
+ if (work_mode && !hdev->sense_buffer_virt) {
+ hdev->sense_buffer_virt = dma_alloc_coherent(hdev->dev,
+ SENSE_SIZE(hdev->scsi_qd + max_hwq_num * HIRAID_PTHRU_CMDS_PERQ),
+ &hdev->sense_buffer_phy, GFP_KERNEL | __GFP_ZERO);
+ if (!hdev->sense_buffer_virt)
+ return -ENOMEM;
+ }
+
+ nr_ioqs = min(num_online_cpus(), max_hwq_num);
+ ret = hiraid_get_queue_cnt(hdev, &nr_ioqs);
+ if (ret < 0)
+ return ret;
+
+ size = hiraid_get_bar_size(hdev, nr_ioqs);
+ ret = hiraid_remap_bar(hdev, size);
+ if (ret)
+ return -ENOMEM;
+
+ adminq->q_db = hdev->dbs;
+
+ pci_free_irq(pdev, 0, adminq);
+ pci_free_irq_vectors(pdev);
+ hdev->online_queues--;
+
+ ret = pci_alloc_irq_vectors_affinity(pdev, 1, (nr_ioqs + 1),
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+ if (ret <= 0)
+ return -EIO;
+
+ hdev->num_vecs = ret;
+ hdev->max_qid = max(ret - 1, 1);
+
+ ret = pci_request_irq(pdev, adminq->cq_vector, hiraid_handle_irq, NULL,
+ adminq, "hiraid%d_q%d", hdev->instance, adminq->qid);
+ if (ret) {
+ dev_err(hdev->dev, "request admin irq failed\n");
+ adminq->cq_vector = -1;
+ return ret;
+ }
+
+ hdev->online_queues++;
+
+ for (i = hdev->queue_count; i <= hdev->max_qid; i++) {
+ ret = hiraid_alloc_queue(hdev, i, hdev->ioq_depth);
+ if (ret)
+ break;
+ }
+ dev_info(hdev->dev, "max_qid[%d] queuecount[%d] onlinequeue[%d] ioqdepth[%d]\n",
+ hdev->max_qid, hdev->queue_count, hdev->online_queues, hdev->ioq_depth);
+
+ return hiraid_create_io_queues(hdev);
+}
+
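+/*
+ * Tear down the I/O queues in two passes: all submission queues are deleted
+ * first, then the matching completion queues.
+ */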
+static void hiraid_delete_io_queues(struct hiraid_dev *hdev)
+{
+ u16 queues = hdev->online_queues - 1;
+ u8 opcode = HIRAID_ADMIN_DELETE_SQ;
+ u16 i, pass;
+
+ if (!pci_device_is_present(hdev->pdev)) {
+		dev_err(hdev->dev, "pci device is not present, skip disabling io queues\n");
+ return;
+ }
+
+ if (hdev->online_queues < 2) {
+		dev_err(hdev->dev, "err, io queues have already been deleted\n");
+ return;
+ }
+
+ for (pass = 0; pass < 2; pass++) {
+ for (i = queues; i > 0; i--)
+ if (hiraid_delete_queue(hdev, opcode, i))
+ break;
+
+ opcode = HIRAID_ADMIN_DELETE_CQ;
+ }
+}
+
+static void hiraid_pci_disable(struct hiraid_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ u32 i;
+
+ for (i = 0; i < hdev->online_queues; i++)
+ pci_free_irq(pdev, hdev->queues[i].cq_vector, &hdev->queues[i]);
+ pci_free_irq_vectors(pdev);
+ if (pci_is_enabled(pdev)) {
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
+ hdev->online_queues = 0;
+}
+
+static void hiraid_disable_admin_queue(struct hiraid_dev *hdev, bool shutdown)
+{
+ struct hiraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ hiraid_shutdown_control(hdev);
+ else
+ hiraid_disable_control(hdev);
+ }
+
+ if (hdev->queue_count == 0) {
+		dev_err(hdev->dev, "err, admin queue has already been deleted\n");
+ return;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ hiraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+ hiraid_complete_cqes(adminq, start, end);
+}
+
+static int hiraid_create_prp_pools(struct hiraid_dev *hdev)
+{
+ int i;
+ char poolname[20] = { 0 };
+
+ hdev->prp_page_pool = dma_pool_create("prp list page", hdev->dev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+
+ if (!hdev->prp_page_pool) {
+ dev_err(hdev->dev, "create prp_page_pool failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < extra_pool_num; i++) {
+ sprintf(poolname, "prp_list_256_%d", i);
+ hdev->prp_extra_pool[i] = dma_pool_create(poolname, hdev->dev, EXTRA_POOL_SIZE,
+ EXTRA_POOL_SIZE, 0);
+
+ if (!hdev->prp_extra_pool[i]) {
+ dev_err(hdev->dev, "create prp extra pool[%d] failed\n", i);
+ goto destroy_prp_extra_pool;
+ }
+ }
+
+ return 0;
+
+destroy_prp_extra_pool:
+ while (i > 0)
+ dma_pool_destroy(hdev->prp_extra_pool[--i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+
+ return -ENOMEM;
+}
+
+static void hiraid_free_prp_pools(struct hiraid_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < extra_pool_num; i++)
+ dma_pool_destroy(hdev->prp_extra_pool[i]);
+ dma_pool_destroy(hdev->prp_page_pool);
+}
+
+static int hiraid_request_devices(struct hiraid_dev *hdev, struct hiraid_dev_info *dev)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ struct hiraid_admin_command admin_cmd;
+ struct hiraid_dev_list *list_buf;
+ dma_addr_t buffer_phy = 0;
+ u32 i, idx, hdid, ndev;
+ int ret = 0;
+
+ list_buf = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+ if (!list_buf)
+ return -ENOMEM;
+
+ for (idx = 0; idx < nd;) {
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = HIRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = HIRAID_GET_DEVLIST_INFO;
+ admin_cmd.get_info.cdw11 = cpu_to_le32(idx);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+
+ if (ret) {
+ dev_err(hdev->dev, "get device list failed, nd[%u] idx[%u] ret[%d]\n",
+ nd, idx, ret);
+ goto out;
+ }
+ ndev = le32_to_cpu(list_buf->dev_num);
+
+ dev_info(hdev->dev, "get dev list ndev num[%u]\n", ndev);
+
+ for (i = 0; i < ndev; i++) {
+ hdid = le32_to_cpu(list_buf->devinfo[i].hdid);
+ dev_info(hdev->dev, "devices[%d], hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+ i, hdid, le16_to_cpu(list_buf->devinfo[i].target),
+ list_buf->devinfo[i].channel,
+ list_buf->devinfo[i].lun,
+ list_buf->devinfo[i].attr);
+ if (hdid > nd || hdid == 0) {
+ dev_err(hdev->dev, "err, hdid[%d] invalid\n", hdid);
+ continue;
+ }
+ memcpy(&dev[hdid - 1], &list_buf->devinfo[i],
+ sizeof(struct hiraid_dev_info));
+ }
+ idx += ndev;
+
+ if (ndev < MAX_DEV_ENTRY_PER_PAGE_4K)
+ break;
+ }
+
+out:
+ dma_free_coherent(hdev->dev, PAGE_SIZE, list_buf, buffer_phy);
+ return ret;
+}
+
+static void hiraid_send_async_event(struct hiraid_dev *hdev, u16 cid)
+{
+ struct hiraid_queue *adminq = &hdev->queues[0];
+ struct hiraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = HIRAID_ADMIN_ASYNC_EVENT;
+ admin_cmd.common.cmd_id = cpu_to_le16(cid);
+
+ hiraid_submit_cmd(adminq, &admin_cmd);
+ dev_info(hdev->dev, "send async event to controller, cid[%d]\n", cid);
+}
+
+static inline void hiraid_init_async_event(struct hiraid_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->ctrl_info->asynevent; i++)
+ hiraid_send_async_event(hdev, i + HIRAID_AQ_BLK_MQ_DEPTH);
+}
+
+static int hiraid_add_device(struct hiraid_dev *hdev, struct hiraid_dev_info *devinfo)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "add device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+ le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target),
+ devinfo->channel, devinfo->lun, devinfo->attr);
+
+ sdev = scsi_device_lookup(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ if (sdev) {
+		dev_warn(hdev->dev, "device already exists, channel[%d] targetid[%d] lun[%d]\n",
+ devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ scsi_device_put(sdev);
+ return -EEXIST;
+ }
+ scsi_add_device(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ return 0;
+}
+
+static int hiraid_rescan_device(struct hiraid_dev *hdev, struct hiraid_dev_info *devinfo)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "rescan device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+ le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target),
+ devinfo->channel, devinfo->lun, devinfo->attr);
+
+ sdev = scsi_device_lookup(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ if (!sdev) {
+		dev_warn(hdev->dev, "device does not exist, can not rescan it, channel[%d] target_id[%d] lun[%d]\n",
+ devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int hiraid_delete_device(struct hiraid_dev *hdev, struct hiraid_dev_info *devinfo)
+{
+ struct Scsi_Host *shost = hdev->shost;
+ struct scsi_device *sdev;
+
+ dev_info(hdev->dev, "remove device, hdid[%u] target[%d] channel[%d] lun[%d] attr[0x%x]\n",
+ le32_to_cpu(devinfo->hdid), le16_to_cpu(devinfo->target),
+ devinfo->channel, devinfo->lun, devinfo->attr);
+
+ sdev = scsi_device_lookup(shost, devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ if (!sdev) {
+		dev_warn(hdev->dev, "device does not exist, can not remove it, channel[%d] target_id[%d] lun[%d]\n",
+ devinfo->channel, le16_to_cpu(devinfo->target), 0);
+ return -ENODEV;
+ }
+
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ return 0;
+}
+
+static int hiraid_dev_list_init(struct hiraid_dev *hdev)
+{
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+
+ hdev->dev_info = kzalloc_node(nd * sizeof(struct hiraid_dev_info),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->dev_info)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int hiraid_luntarget_sort(const void *l, const void *r)
+{
+ const struct hiraid_dev_info *ln = l;
+ const struct hiraid_dev_info *rn = r;
+ int l_attr = HIRAID_DEV_INFO_ATTR_BOOT(ln->attr);
+ int r_attr = HIRAID_DEV_INFO_ATTR_BOOT(rn->attr);
+
+ /* boot first */
+ if (l_attr != r_attr)
+ return (r_attr - l_attr);
+
+ if (ln->channel == rn->channel)
+ return le16_to_cpu(ln->target) - le16_to_cpu(rn->target);
+
+ return ln->channel - rn->channel;
+}
+
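+/*
+ * Compare the device list reported by the controller with the cached copy:
+ * newly valid entries are added (boot devices first, then ordered by channel
+ * and target id), changed entries are rescanned, and entries that became
+ * invalid are removed from the SCSI midlayer.
+ */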
+static void hiraid_scan_work(struct work_struct *work)
+{
+ struct hiraid_dev *hdev =
+ container_of(work, struct hiraid_dev, scan_work);
+ struct hiraid_dev_info *dev, *old_dev, *new_dev;
+ u32 nd = le32_to_cpu(hdev->ctrl_info->nd);
+ u8 flag, org_flag;
+ int i, ret;
+ int count = 0;
+
+ dev = kcalloc(nd, sizeof(struct hiraid_dev_info), GFP_KERNEL);
+ if (!dev)
+ return;
+
+ new_dev = kcalloc(nd, sizeof(struct hiraid_dev_info), GFP_KERNEL);
+ if (!new_dev)
+ goto free_list;
+
+ ret = hiraid_request_devices(hdev, dev);
+ if (ret)
+ goto free_all;
+ old_dev = hdev->dev_info;
+ for (i = 0; i < nd; i++) {
+ org_flag = old_dev[i].flag;
+ flag = dev[i].flag;
+
+ dev_log_dbg(hdev->dev, "i[%d] org_flag[0x%x] flag[0x%x]\n", i, org_flag, flag);
+
+ if (HIRAID_DEV_INFO_FLAG_VALID(flag)) {
+ if (!HIRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->dev_rwsem);
+ memcpy(&old_dev[i], &dev[i],
+ sizeof(struct hiraid_dev_info));
+ memcpy(&new_dev[count++], &dev[i],
+ sizeof(struct hiraid_dev_info));
+ up_write(&hdev->dev_rwsem);
+ } else if (HIRAID_DEV_INFO_FLAG_CHANGE(flag)) {
+ hiraid_rescan_device(hdev, &dev[i]);
+ }
+ } else {
+ if (HIRAID_DEV_INFO_FLAG_VALID(org_flag)) {
+ down_write(&hdev->dev_rwsem);
+ old_dev[i].flag &= 0xfe;
+ up_write(&hdev->dev_rwsem);
+ hiraid_delete_device(hdev, &old_dev[i]);
+ }
+ }
+ }
+
+ dev_info(hdev->dev, "scan work add device num[%d]\n", count);
+
+ sort(new_dev, count, sizeof(new_dev[0]), hiraid_luntarget_sort, NULL);
+
+ for (i = 0; i < count; i++)
+ hiraid_add_device(hdev, &new_dev[i]);
+
+free_all:
+ kfree(new_dev);
+free_list:
+ kfree(dev);
+}
+
+static void hiraid_timesyn_work(struct work_struct *work)
+{
+ struct hiraid_dev *hdev =
+ container_of(work, struct hiraid_dev, timesyn_work);
+
+ hiraid_configure_timestamp(hdev);
+}
+
+static int hiraid_init_control_info(struct hiraid_dev *hdev);
+static void hiraid_fwactive_work(struct work_struct *work)
+{
+ struct hiraid_dev *hdev = container_of(work, struct hiraid_dev, fwact_work);
+
+ if (hiraid_init_control_info(hdev))
+ dev_err(hdev->dev, "get controller info failed after fw activation\n");
+}
+
+static void hiraid_queue_scan(struct hiraid_dev *hdev)
+{
+ queue_work(work_queue, &hdev->scan_work);
+}
+
+static void hiraid_handle_async_notice(struct hiraid_dev *hdev, u32 result)
+{
+ switch ((result & 0xff00) >> 8) {
+ case HIRAID_ASYN_DEV_CHANGED:
+ hiraid_queue_scan(hdev);
+ break;
+ case HIRAID_ASYN_FW_ACT_START:
+ dev_info(hdev->dev, "fw activation starting\n");
+ break;
+ case HIRAID_ASYN_HOST_PROBING:
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result[%08x]\n", result);
+ }
+}
+
+static void hiraid_handle_async_vs(struct hiraid_dev *hdev, u32 result, u32 result1)
+{
+ switch ((result & 0xff00) >> 8) {
+ case HIRAID_ASYN_TIMESYN:
+ queue_work(work_queue, &hdev->timesyn_work);
+ break;
+ case HIRAID_ASYN_FW_ACT_FINISH:
+ dev_info(hdev->dev, "fw activation finish\n");
+ queue_work(work_queue, &hdev->fwact_work);
+ break;
+ case HIRAID_ASYN_EVENT_MIN ... HIRAID_ASYN_EVENT_MAX:
+ dev_info(hdev->dev, "recv card event[%d] param1[0x%x] param2[0x%x]\n",
+ (result & 0xff00) >> 8, result, result1);
+ break;
+ default:
+ dev_warn(hdev->dev, "async event result[0x%x]\n", result);
+ }
+}
+
+static int hiraid_alloc_resources(struct hiraid_dev *hdev)
+{
+ int ret, nqueue;
+
+ hdev->ctrl_info = kzalloc_node(sizeof(*hdev->ctrl_info), GFP_KERNEL, hdev->numa_node);
+ if (!hdev->ctrl_info)
+ return -ENOMEM;
+
+ ret = hiraid_create_prp_pools(hdev);
+ if (ret)
+ goto free_ctrl_info;
+ nqueue = min(num_possible_cpus(), max_hwq_num) + 1;
+ hdev->queues = kcalloc_node(nqueue, sizeof(struct hiraid_queue),
+ GFP_KERNEL, hdev->numa_node);
+ if (!hdev->queues) {
+ ret = -ENOMEM;
+ goto destroy_dma_pools;
+ }
+
+ ret = hiraid_create_admin_cmds(hdev);
+ if (ret)
+ goto free_queues;
+
+ dev_info(hdev->dev, "total queues num[%d]\n", nqueue);
+
+ return 0;
+
+free_queues:
+ kfree(hdev->queues);
+destroy_dma_pools:
+ hiraid_free_prp_pools(hdev);
+free_ctrl_info:
+ kfree(hdev->ctrl_info);
+
+ return ret;
+}
+
+static void hiraid_free_resources(struct hiraid_dev *hdev)
+{
+ hiraid_free_admin_cmds(hdev);
+ kfree(hdev->queues);
+ hiraid_free_prp_pools(hdev);
+ kfree(hdev->ctrl_info);
+}
+
+static void hiraid_bsg_buf_unmap(struct hiraid_dev *hdev, struct bsg_job *job)
+{
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct hiraid_mapmange *mapbuf = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ if (mapbuf->sge_cnt)
+ dma_unmap_sg(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, dma_dir);
+
+ hiraid_free_mapbuf(hdev, mapbuf);
+}
+
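+/*
+ * Map the bsg payload for DMA and attach it to the command, using an SGL
+ * for I/O pass-through when the controller supports it and a PRP list
+ * otherwise.
+ */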
+static int hiraid_bsg_buf_map(struct hiraid_dev *hdev, struct bsg_job *job,
+ struct hiraid_admin_command *cmd)
+{
+ struct hiraid_bsg_request *bsg_req = job->request;
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct hiraid_mapmange *mapbuf = job->dd_data;
+ enum dma_data_direction dma_dir = rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ int ret = 0;
+
+	/* no data to DMA; it may be a SCSI non-rw command */
+ mapbuf->sge_cnt = job->request_payload.sg_cnt;
+ mapbuf->sgl = job->request_payload.sg_list;
+ mapbuf->len = job->request_payload.payload_len;
+ mapbuf->page_cnt = -1;
+ if (unlikely(mapbuf->sge_cnt == 0))
+ goto out;
+
+ mapbuf->use_sgl = !hiraid_is_prp(hdev, mapbuf->sgl, mapbuf->sge_cnt);
+
+ ret = dma_map_sg_attrs(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, dma_dir, DMA_ATTR_NO_WARN);
+ if (!ret)
+ goto out;
+
+ if ((mapbuf->use_sgl == (bool)true) && (bsg_req->msgcode == HIRAID_BSG_IOPTHRU) &&
+ (hdev->ctrl_info->pt_use_sgl != (bool)false)) {
+ ret = hiraid_build_passthru_sgl(hdev, cmd, mapbuf);
+ } else {
+ mapbuf->use_sgl = false;
+
+ ret = hiraid_build_passthru_prp(hdev, mapbuf);
+ cmd->common.dptr.prp1 = cpu_to_le64(sg_dma_address(mapbuf->sgl));
+ cmd->common.dptr.prp2 = cpu_to_le64(mapbuf->first_dma);
+ }
+
+ if (ret)
+ goto unmap;
+
+ return 0;
+
+unmap:
+ dma_unmap_sg(hdev->dev, mapbuf->sgl, mapbuf->sge_cnt, dma_dir);
+out:
+ return ret;
+}
+
+static int hiraid_get_control_info(struct hiraid_dev *hdev, struct hiraid_ctrl_info *ctrl_info)
+{
+ struct hiraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t buffer_phy = 0;
+ int ret;
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.get_info.opcode = HIRAID_ADMIN_GET_INFO;
+ admin_cmd.get_info.type = HIRAID_GET_CTRL_INFO;
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+ if (!ret)
+ memcpy(ctrl_info, data_ptr, sizeof(struct hiraid_ctrl_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy);
+
+ return ret;
+}
+
+static int hiraid_init_control_info(struct hiraid_dev *hdev)
+{
+ int ret;
+
+ hdev->ctrl_info->nd = cpu_to_le32(240);
+ hdev->ctrl_info->mdts = 8;
+ hdev->ctrl_info->max_cmds = cpu_to_le16(4096);
+ hdev->ctrl_info->max_num_sge = cpu_to_le16(128);
+ hdev->ctrl_info->max_channel = cpu_to_le16(4);
+ hdev->ctrl_info->max_tgt_id = cpu_to_le32(3239);
+ hdev->ctrl_info->max_lun = cpu_to_le16(2);
+
+ ret = hiraid_get_control_info(hdev, hdev->ctrl_info);
+ if (ret)
+ dev_err(hdev->dev, "get controller info failed[%d]\n", ret);
+
+ dev_info(hdev->dev, "device_num = %d\n", hdev->ctrl_info->nd);
+ dev_info(hdev->dev, "max_cmd = %d\n", hdev->ctrl_info->max_cmds);
+ dev_info(hdev->dev, "max_channel = %d\n", hdev->ctrl_info->max_channel);
+ dev_info(hdev->dev, "max_tgt_id = %d\n", hdev->ctrl_info->max_tgt_id);
+ dev_info(hdev->dev, "max_lun = %d\n", hdev->ctrl_info->max_lun);
+ dev_info(hdev->dev, "max_num_sge = %d\n", hdev->ctrl_info->max_num_sge);
+ dev_info(hdev->dev, "lun_num_boot = %d\n", hdev->ctrl_info->lun_num_boot);
+ dev_info(hdev->dev, "max_data_transfer_size = %d\n", hdev->ctrl_info->mdts);
+ dev_info(hdev->dev, "abort_cmd_limit = %d\n", hdev->ctrl_info->acl);
+ dev_info(hdev->dev, "asyn_event_num = %d\n", hdev->ctrl_info->asynevent);
+ dev_info(hdev->dev, "card_type = %d\n", hdev->ctrl_info->card_type);
+ dev_info(hdev->dev, "pt_use_sgl = %d\n", hdev->ctrl_info->pt_use_sgl);
+ dev_info(hdev->dev, "rtd3e = %d\n", hdev->ctrl_info->rtd3e);
+ dev_info(hdev->dev, "serial_num = %s\n", hdev->ctrl_info->sn);
+	dev_info(hdev->dev, "fw_version = %s\n", hdev->ctrl_info->fw_version);
+
+ if (!hdev->ctrl_info->asynevent)
+ hdev->ctrl_info->asynevent = 1;
+ if (hdev->ctrl_info->asynevent > HIRAID_ASYN_COMMANDS)
+ hdev->ctrl_info->asynevent = HIRAID_ASYN_COMMANDS;
+
+ hdev->scsi_qd = work_mode ?
+ le16_to_cpu(hdev->ctrl_info->max_cmds) : (hdev->ioq_depth - HIRAID_PTHRU_CMDS_PERQ);
+
+ return 0;
+}
+
+static int hiraid_user_send_admcmd(struct hiraid_dev *hdev, struct bsg_job *job)
+{
+ struct hiraid_bsg_request *bsg_req = job->request;
+ struct hiraid_passthru_common_cmd *ptcmd = &(bsg_req->admcmd);
+ struct hiraid_admin_command admin_cmd;
+ u32 timeout = msecs_to_jiffies(ptcmd->timeout_ms);
+ u32 result[2] = {0};
+ int status;
+
+ if (hdev->state >= DEV_RESETTING) {
+ dev_err(hdev->dev, "err, host state[%d] is not right\n",
+ hdev->state);
+ return -EBUSY;
+ }
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.common.opcode = ptcmd->opcode;
+ admin_cmd.common.flags = ptcmd->flags;
+ admin_cmd.common.hdid = cpu_to_le32(ptcmd->nsid);
+ admin_cmd.common.cdw2[0] = cpu_to_le32(ptcmd->cdw2);
+ admin_cmd.common.cdw2[1] = cpu_to_le32(ptcmd->cdw3);
+ admin_cmd.common.cdw10 = cpu_to_le32(ptcmd->cdw10);
+ admin_cmd.common.cdw11 = cpu_to_le32(ptcmd->cdw11);
+ admin_cmd.common.cdw12 = cpu_to_le32(ptcmd->cdw12);
+ admin_cmd.common.cdw13 = cpu_to_le32(ptcmd->cdw13);
+ admin_cmd.common.cdw14 = cpu_to_le32(ptcmd->cdw14);
+ admin_cmd.common.cdw15 = cpu_to_le32(ptcmd->cdw15);
+
+ status = hiraid_bsg_buf_map(hdev, job, &admin_cmd);
+ if (status) {
+ dev_err(hdev->dev, "err, map data failed\n");
+ return status;
+ }
+
+ status = hiraid_put_admin_sync_request(hdev, &admin_cmd, &result[0], &result[1], timeout);
+ if (status >= 0) {
+ job->reply_len = sizeof(result);
+ memcpy(job->reply, result, sizeof(result));
+ }
+ if (status)
+ dev_info(hdev->dev, "opcode[0x%x] subopcode[0x%x] status[0x%x] result0[0x%x];"
+ "result1[0x%x]\n", ptcmd->opcode, ptcmd->info_0.subopcode, status,
+ result[0], result[1]);
+
+ hiraid_bsg_buf_unmap(hdev, job);
+
+ return status;
+}
+
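+/*
+ * Reserve HIRAID_PTHRU_CMDS_PERQ command slots on every I/O queue for BSG
+ * pass-through requests; their command ids start right after the normal
+ * SCSI queue depth.
+ */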
+static int hiraid_alloc_io_ptcmds(struct hiraid_dev *hdev)
+{
+ u32 i;
+ u32 ptnum = HIRAID_TOTAL_PTCMDS(hdev->online_queues - 1);
+
+ INIT_LIST_HEAD(&hdev->io_pt_list);
+ spin_lock_init(&hdev->io_pt_lock);
+
+ hdev->io_ptcmds = kcalloc_node(ptnum, sizeof(struct hiraid_cmd),
+ GFP_KERNEL, hdev->numa_node);
+
+ if (!hdev->io_ptcmds) {
+ dev_err(hdev->dev, "alloc io pthrunum failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ptnum; i++) {
+ hdev->io_ptcmds[i].qid = i / HIRAID_PTHRU_CMDS_PERQ + 1;
+ hdev->io_ptcmds[i].cid = i % HIRAID_PTHRU_CMDS_PERQ + hdev->scsi_qd;
+ list_add_tail(&(hdev->io_ptcmds[i].list), &hdev->io_pt_list);
+ }
+
+ dev_info(hdev->dev, "alloc io pthru cmd success, pthrunum[%d]\n", ptnum);
+
+ return 0;
+}
+
+static void hiraid_free_io_ptcmds(struct hiraid_dev *hdev)
+{
+ kfree(hdev->io_ptcmds);
+ hdev->io_ptcmds = NULL;
+
+ INIT_LIST_HEAD(&hdev->io_pt_list);
+}
+
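+/*
+ * Send a synchronous pass-through command on an I/O queue using one of the
+ * reserved slots and wait for its completion; when the completion status
+ * indicates sense data, it is copied from the slot's sense buffer into
+ * 'result'.
+ */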
+static int hiraid_put_io_sync_request(struct hiraid_dev *hdev, struct hiraid_scsi_io_cmd *io_cmd,
+ u32 *result, u32 *reslen, u32 timeout)
+{
+ int ret;
+ dma_addr_t buffer_phy;
+ struct hiraid_queue *ioq;
+ void *sense_addr = NULL;
+ struct hiraid_cmd *pt_cmd = hiraid_get_cmd(hdev, HIRAID_CMD_PTHRU);
+
+ if (!pt_cmd) {
+ dev_err(hdev->dev, "err, get ioq cmd failed\n");
+ return -EFAULT;
+ }
+
+ timeout = timeout ? timeout : ADMIN_TIMEOUT;
+
+ init_completion(&pt_cmd->cmd_done);
+
+ ioq = &hdev->queues[pt_cmd->qid];
+ if (work_mode) {
+ ret = ((pt_cmd->qid - 1) * HIRAID_PTHRU_CMDS_PERQ + pt_cmd->cid) *
+ SCSI_SENSE_BUFFERSIZE;
+ sense_addr = hdev->sense_buffer_virt + ret;
+ buffer_phy = hdev->sense_buffer_phy + ret;
+ } else {
+ ret = pt_cmd->cid * SCSI_SENSE_BUFFERSIZE;
+ sense_addr = ioq->sense_buffer_virt + ret;
+ buffer_phy = ioq->sense_buffer_phy + ret;
+ }
+
+ io_cmd->common.sense_addr = cpu_to_le64(buffer_phy);
+ io_cmd->common.sense_len = cpu_to_le16(SCSI_SENSE_BUFFERSIZE);
+ io_cmd->common.cmd_id = cpu_to_le16(pt_cmd->cid);
+
+ hiraid_submit_cmd(ioq, io_cmd);
+
+ if (!wait_for_completion_timeout(&pt_cmd->cmd_done, timeout)) {
+ dev_err(hdev->dev, "cid[%d] qid[%d] timeout, opcode[0x%x] subopcode[0x%x]\n",
+ pt_cmd->cid, pt_cmd->qid, io_cmd->common.opcode,
+ (le32_to_cpu(io_cmd->common.cdw3[0]) & 0xffff));
+
+ hiraid_admin_timeout(hdev, pt_cmd);
+
+ hiraid_put_cmd(hdev, pt_cmd, HIRAID_CMD_PTHRU);
+ return -ETIME;
+ }
+
+ if (result && reslen) {
+ if ((pt_cmd->status & 0x17f) == 0x101) {
+ memcpy(result, sense_addr, SCSI_SENSE_BUFFERSIZE);
+ *reslen = SCSI_SENSE_BUFFERSIZE;
+ }
+ }
+
+ hiraid_put_cmd(hdev, pt_cmd, HIRAID_CMD_PTHRU);
+
+ return pt_cmd->status;
+}
+
+static int hiraid_user_send_ptcmd(struct hiraid_dev *hdev, struct bsg_job *job)
+{
+ struct hiraid_bsg_request *bsg_req = (struct hiraid_bsg_request *)(job->request);
+ struct hiraid_passthru_io_cmd *cmd = &(bsg_req->pthrucmd);
+ struct hiraid_scsi_io_cmd pthru_cmd;
+ int status = 0;
+ u32 timeout = msecs_to_jiffies(cmd->timeout_ms);
+	// data len was limited to 4K before SGL support; with SGL it is 1M
+ u32 io_pt_data_len = (hdev->ctrl_info->pt_use_sgl == (bool)true) ?
+ IOQ_PT_SGL_DATA_LEN : IOQ_PT_DATA_LEN;
+
+ if (cmd->data_len > io_pt_data_len) {
+ dev_err(hdev->dev, "data len bigger than %d\n", io_pt_data_len);
+ return -EFAULT;
+ }
+
+ if (hdev->state != DEV_LIVE) {
+ dev_err(hdev->dev, "err, host state[%d] is not live\n", hdev->state);
+ return -EBUSY;
+ }
+
+ memset(&pthru_cmd, 0, sizeof(pthru_cmd));
+ pthru_cmd.common.opcode = cmd->opcode;
+ pthru_cmd.common.flags = cmd->flags;
+ pthru_cmd.common.hdid = cpu_to_le32(cmd->nsid);
+ pthru_cmd.common.sense_len = cpu_to_le16(cmd->info_0.res_sense_len);
+ pthru_cmd.common.cdb_len = cmd->info_0.cdb_len;
+ pthru_cmd.common.rsvd2 = cmd->info_0.rsvd0;
+ pthru_cmd.common.cdw3[0] = cpu_to_le32(cmd->cdw3);
+ pthru_cmd.common.cdw3[1] = cpu_to_le32(cmd->cdw4);
+ pthru_cmd.common.cdw3[2] = cpu_to_le32(cmd->cdw5);
+
+ pthru_cmd.common.cdw10[0] = cpu_to_le32(cmd->cdw10);
+ pthru_cmd.common.cdw10[1] = cpu_to_le32(cmd->cdw11);
+ pthru_cmd.common.cdw10[2] = cpu_to_le32(cmd->cdw12);
+ pthru_cmd.common.cdw10[3] = cpu_to_le32(cmd->cdw13);
+ pthru_cmd.common.cdw10[4] = cpu_to_le32(cmd->cdw14);
+ pthru_cmd.common.cdw10[5] = cpu_to_le32(cmd->data_len);
+
+ memcpy(pthru_cmd.common.cdb, &cmd->cdw16, cmd->info_0.cdb_len);
+
+ pthru_cmd.common.cdw26[0] = cpu_to_le32(cmd->cdw26[0]);
+ pthru_cmd.common.cdw26[1] = cpu_to_le32(cmd->cdw26[1]);
+ pthru_cmd.common.cdw26[2] = cpu_to_le32(cmd->cdw26[2]);
+ pthru_cmd.common.cdw26[3] = cpu_to_le32(cmd->cdw26[3]);
+
+ status = hiraid_bsg_buf_map(hdev, job, (struct hiraid_admin_command *)&pthru_cmd);
+ if (status) {
+ dev_err(hdev->dev, "err, map data failed\n");
+ return status;
+ }
+
+ status = hiraid_put_io_sync_request(hdev, &pthru_cmd, job->reply, &job->reply_len, timeout);
+
+ if (status)
+ dev_info(hdev->dev, "opcode[0x%x] subopcode[0x%x] status[0x%x] replylen[%d]\n",
+ cmd->opcode, cmd->info_1.subopcode, status, job->reply_len);
+
+ hiraid_bsg_buf_unmap(hdev, job);
+
+ return status;
+}
+
+static bool hiraid_check_scmd_finished(struct scsi_cmnd *scmd)
+{
+ struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+ struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+ struct hiraid_queue *hiraidq;
+
+ hiraidq = mapbuf->hiraidq;
+ if (!hiraidq)
+ return false;
+ if (READ_ONCE(mapbuf->state) == CMD_COMPLETE || hiraid_poll_cq(hiraidq, mapbuf->cid)) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] has been completed\n",
+ mapbuf->cid, hiraidq->qid);
+ return true;
+ }
+ return false;
+}
+
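+/*
+ * Block layer timeout hook: restart the timer if the command has actually
+ * completed, otherwise mark it CMD_TIMEOUT once the full timeout has
+ * elapsed so that the error handler takes over.
+ */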
+static enum blk_eh_timer_return hiraid_timed_out(struct scsi_cmnd *scmd)
+{
+ struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+ unsigned int timeout = scmd->device->request_queue->rq_timeout;
+
+ if (hiraid_check_scmd_finished(scmd))
+ goto out;
+
+ if (time_after(jiffies, scmd->jiffies_at_alloc + timeout)) {
+ if (cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_TIMEOUT) == CMD_FLIGHT)
+ return BLK_EH_DONE;
+ }
+out:
+ return BLK_EH_RESET_TIMER;
+}
+
+/* send abort command via admin queue for now */
+static int hiraid_send_abort_cmd(struct hiraid_dev *hdev, u32 hdid, u16 qid, u16 cid)
+{
+ struct hiraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.abort.opcode = HIRAID_ADMIN_ABORT_CMD;
+ admin_cmd.abort.hdid = cpu_to_le32(hdid);
+ admin_cmd.abort.sqid = cpu_to_le16(qid);
+ admin_cmd.abort.cid = cpu_to_le16(cid);
+
+ return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
+/* send reset command via admin queue for now */
+static int hiraid_send_reset_cmd(struct hiraid_dev *hdev, u8 type, u32 hdid)
+{
+ struct hiraid_admin_command admin_cmd;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.reset.opcode = HIRAID_ADMIN_RESET;
+ admin_cmd.reset.hdid = cpu_to_le32(hdid);
+ admin_cmd.reset.type = type;
+
+ return hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, 0);
+}
+
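+/*
+ * Controller state machine: only the transitions handled below are allowed
+ * (NEW/RESETTING -> LIVE, LIVE -> RESETTING, any other state -> DELETING,
+ * NEW/LIVE/RESETTING -> DEAD); returns true when the state was changed.
+ */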
+static bool hiraid_dev_state_trans(struct hiraid_dev *hdev, enum hiraid_dev_state new_state)
+{
+ unsigned long flags;
+ enum hiraid_dev_state old_state;
+ bool change = false;
+
+ spin_lock_irqsave(&hdev->state_lock, flags);
+
+ old_state = hdev->state;
+ switch (new_state) {
+ case DEV_LIVE:
+ switch (old_state) {
+ case DEV_NEW:
+ case DEV_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case DEV_RESETTING:
+ switch (old_state) {
+ case DEV_LIVE:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case DEV_DELETING:
+ if (old_state != DEV_DELETING)
+ change = true;
+ break;
+ case DEV_DEAD:
+ switch (old_state) {
+ case DEV_NEW:
+ case DEV_LIVE:
+ case DEV_RESETTING:
+ change = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ if (change)
+ hdev->state = new_state;
+ spin_unlock_irqrestore(&hdev->state_lock, flags);
+
+ dev_info(hdev->dev, "oldstate[%d]->newstate[%d], change[%d]\n",
+ old_state, new_state, change);
+
+ return change;
+}
+
+static void hiraid_drain_pending_ios(struct hiraid_dev *hdev);
+
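+/*
+ * Fail every in-flight SCSI, admin and pass-through command so that their
+ * waiters are woken up; called when the controller is disabled or removed.
+ */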
+static void hiraid_flush_running_cmds(struct hiraid_dev *hdev)
+{
+ int i, j;
+
+ scsi_block_requests(hdev->shost);
+ hiraid_drain_pending_ios(hdev);
+ scsi_unblock_requests(hdev->shost);
+
+ j = HIRAID_AQ_BLK_MQ_DEPTH;
+ for (i = 0; i < j; i++) {
+ if (READ_ONCE(hdev->adm_cmds[i].state) == CMD_FLIGHT) {
+ dev_info(hdev->dev, "flush admin, cid[%d]\n", i);
+ hdev->adm_cmds[i].status = 0xFFFF;
+ WRITE_ONCE(hdev->adm_cmds[i].state, CMD_COMPLETE);
+ complete(&(hdev->adm_cmds[i].cmd_done));
+ }
+ }
+
+ j = HIRAID_TOTAL_PTCMDS(hdev->online_queues - 1);
+ for (i = 0; i < j; i++) {
+ if (READ_ONCE(hdev->io_ptcmds[i].state) == CMD_FLIGHT) {
+ hdev->io_ptcmds[i].status = 0xFFFF;
+ WRITE_ONCE(hdev->io_ptcmds[i].state, CMD_COMPLETE);
+ complete(&(hdev->io_ptcmds[i].cmd_done));
+ }
+ }
+}
+
+static int hiraid_dev_disable(struct hiraid_dev *hdev, bool shutdown)
+{
+ int ret = -ENODEV;
+ struct hiraid_queue *adminq = &hdev->queues[0];
+ u16 start, end;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ if (shutdown)
+ hiraid_shutdown_control(hdev);
+ else
+ ret = hiraid_disable_control(hdev);
+ }
+
+ if (hdev->queue_count == 0) {
+		dev_err(hdev->dev, "warn: queue has already been deleted\n");
+ return ret;
+ }
+
+ spin_lock_irq(&adminq->cq_lock);
+ hiraid_process_cq(adminq, &start, &end, -1);
+ spin_unlock_irq(&adminq->cq_lock);
+ hiraid_complete_cqes(adminq, start, end);
+
+ hiraid_pci_disable(hdev);
+
+ hiraid_flush_running_cmds(hdev);
+
+ return ret;
+}
+
+static void hiraid_reset_work(struct work_struct *work)
+{
+ int ret = 0;
+ struct hiraid_dev *hdev = container_of(work, struct hiraid_dev, reset_work);
+
+ if (hdev->state != DEV_RESETTING) {
+		dev_err(hdev->dev, "err, host is not in reset state\n");
+ return;
+ }
+
+ dev_info(hdev->dev, "enter host reset\n");
+
+ if (hdev->ctrl_config & HIRAID_CC_ENABLE) {
+ dev_info(hdev->dev, "start dev_disable\n");
+ ret = hiraid_dev_disable(hdev, false);
+ }
+
+ if (ret)
+ goto out;
+
+ ret = hiraid_pci_enable(hdev);
+ if (ret)
+ goto out;
+
+ ret = hiraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = hiraid_setup_io_queues(hdev);
+ if (ret || hdev->online_queues != hdev->last_qcnt)
+ goto pci_disable;
+
+ hiraid_dev_state_trans(hdev, DEV_LIVE);
+
+ hiraid_init_async_event(hdev);
+
+ hiraid_queue_scan(hdev);
+
+ return;
+
+pci_disable:
+ hiraid_pci_disable(hdev);
+out:
+ hiraid_dev_state_trans(hdev, DEV_DEAD);
+ dev_err(hdev->dev, "err, host reset failed\n");
+}
+
+static int hiraid_reset_work_sync(struct hiraid_dev *hdev)
+{
+ if (!hiraid_dev_state_trans(hdev, DEV_RESETTING)) {
+ dev_info(hdev->dev, "can't change to reset state\n");
+ return -EBUSY;
+ }
+
+ if (!queue_work(work_queue, &hdev->reset_work)) {
+ dev_err(hdev->dev, "err, host is already in reset state\n");
+ return -EBUSY;
+ }
+
+ flush_work(&hdev->reset_work);
+ if (hdev->state != DEV_LIVE)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int hiraid_wait_io_completion(struct hiraid_mapmange *mapbuf)
+{
+ u16 times = 0;
+
+ do {
+ if (READ_ONCE(mapbuf->state) == CMD_TMO_COMPLETE)
+ break;
+ msleep(500);
+ times++;
+ } while (times <= HIRAID_WAIT_ABNL_CMD_TIMEOUT);
+
+	/* waiting for command completion timed out even though abort/reset succeeded */
+ if (times >= HIRAID_WAIT_ABNL_CMD_TIMEOUT)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static bool hiraid_tgt_rst_pending_io_count(struct request *rq, void *data, bool reserved)
+{
+ unsigned int id = *(unsigned int *)data;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct hiraid_mapmange *mapbuf;
+ struct hiraid_sdev_hostdata *hostdata;
+
+ if (scmd) {
+ mapbuf = scsi_cmd_priv(scmd);
+ if ((mapbuf->state == CMD_FLIGHT) || (mapbuf->state == CMD_TIMEOUT)) {
+ if ((scmd->device) && (scmd->device->id == id)) {
+ hostdata = scmd->device->hostdata;
+ hostdata->pend_count++;
+ }
+ }
+ }
+ return true;
+}
+
+static bool hiraid_clean_pending_io(struct request *rq, void *data, bool reserved)
+{
+ struct hiraid_dev *hdev = data;
+ struct scsi_cmnd *scmd;
+ struct hiraid_mapmange *mapbuf;
+
+ if (unlikely(!rq || !blk_mq_request_started(rq)))
+ return true;
+
+ scmd = blk_mq_rq_to_pdu(rq);
+ mapbuf = scsi_cmd_priv(scmd);
+
+ if ((cmpxchg(&mapbuf->state, CMD_FLIGHT, CMD_COMPLETE) != CMD_FLIGHT) &&
+ (cmpxchg(&mapbuf->state, CMD_TIMEOUT, CMD_COMPLETE) != CMD_TIMEOUT))
+ return true;
+
+ set_host_byte(scmd, DID_NO_CONNECT);
+ if (mapbuf->sge_cnt)
+ scsi_dma_unmap(scmd);
+ hiraid_free_mapbuf(hdev, mapbuf);
+ dev_warn_ratelimited(hdev->dev, "back unfinished CQE, cid[%d] qid[%d]\n",
+ mapbuf->cid, mapbuf->hiraidq->qid);
+ scmd->scsi_done(scmd);
+
+ return true;
+}
+
+static void hiraid_drain_pending_ios(struct hiraid_dev *hdev)
+{
+ blk_mq_tagset_busy_iter(&hdev->shost->tag_set, hiraid_clean_pending_io, (void *)(hdev));
+}
+
+static int wait_tgt_reset_io_done(struct scsi_cmnd *scmd)
+{
+ u16 timeout = 0;
+ struct hiraid_sdev_hostdata *hostdata;
+ struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+
+ hostdata = scmd->device->hostdata;
+
+ do {
+ hostdata->pend_count = 0;
+ blk_mq_tagset_busy_iter(&hdev->shost->tag_set, hiraid_tgt_rst_pending_io_count,
+ (void *)(&scmd->device->id));
+
+ if (!hostdata->pend_count)
+ return 0;
+
+ msleep(500);
+ timeout++;
+ } while (timeout <= HIRAID_WAIT_RST_IO_TIMEOUT);
+
+ return -ETIMEDOUT;
+}
+
+static int hiraid_abort(struct scsi_cmnd *scmd)
+{
+ struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+ struct hiraid_mapmange *mapbuf = scsi_cmd_priv(scmd);
+ struct hiraid_sdev_hostdata *hostdata;
+ u16 hwq, cid;
+ int ret;
+
+ scsi_print_command(scmd);
+
+ if (hdev->state != DEV_LIVE || !hiraid_wait_io_completion(mapbuf) ||
+ hiraid_check_scmd_finished(scmd))
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+ cid = mapbuf->cid;
+ hwq = mapbuf->hiraidq->qid;
+
+ dev_warn(hdev->dev, "cid[%d] qid[%d] timeout, send abort\n", cid, hwq);
+ ret = hiraid_send_abort_cmd(hdev, hostdata->hdid, hwq, cid);
+ if (ret != -ETIME) {
+ ret = hiraid_wait_io_completion(mapbuf);
+ if (ret) {
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, not found\n", cid, hwq);
+ return FAILED;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort succ\n", cid, hwq);
+ return SUCCESS;
+ }
+ dev_warn(hdev->dev, "cid[%d] qid[%d] abort failed, timeout\n", cid, hwq);
+ return FAILED;
+}
+
+static int hiraid_scsi_reset(struct scsi_cmnd *scmd, enum hiraid_rst_type rst)
+{
+ struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+ struct hiraid_sdev_hostdata *hostdata;
+ int ret;
+
+ if (hdev->state != DEV_LIVE)
+ return SUCCESS;
+
+ hostdata = scmd->device->hostdata;
+
+ dev_warn(hdev->dev, "sdev[%d:%d] send %s reset\n", scmd->device->channel, scmd->device->id,
+ rst ? "bus" : "target");
+ ret = hiraid_send_reset_cmd(hdev, rst, hostdata->hdid);
+ if ((ret == 0) || (ret == FW_EH_DEV_NONE && rst == HIRAID_RESET_TARGET)) {
+ if (rst == HIRAID_RESET_TARGET) {
+ ret = wait_tgt_reset_io_done(scmd);
+ if (ret) {
+				dev_warn(hdev->dev, "sdev[%d:%d] target has %d pending cmds, target reset failed\n",
+ scmd->device->channel, scmd->device->id,
+ hostdata->pend_count);
+ return FAILED;
+ }
+ }
+ dev_warn(hdev->dev, "sdev[%d:%d] %s reset success\n",
+ scmd->device->channel, scmd->device->id, rst ? "bus" : "target");
+ return SUCCESS;
+ }
+
+ dev_warn(hdev->dev, "sdev[%d:%d] %s reset failed\n",
+ scmd->device->channel, scmd->device->id, rst ? "bus" : "target");
+ return FAILED;
+}
+
+static int hiraid_target_reset(struct scsi_cmnd *scmd)
+{
+ return hiraid_scsi_reset(scmd, HIRAID_RESET_TARGET);
+}
+
+static int hiraid_bus_reset(struct scsi_cmnd *scmd)
+{
+ return hiraid_scsi_reset(scmd, HIRAID_RESET_BUS);
+}
+
+static int hiraid_host_reset(struct scsi_cmnd *scmd)
+{
+ struct hiraid_dev *hdev = shost_priv(scmd->device->host);
+
+ if (hdev->state != DEV_LIVE)
+ return SUCCESS;
+
+ dev_warn(hdev->dev, "sdev[%d:%d] send host reset\n",
+ scmd->device->channel, scmd->device->id);
+ if (hiraid_reset_work_sync(hdev) == -EBUSY)
+ flush_work(&hdev->reset_work);
+
+ if (hdev->state != DEV_LIVE) {
+ dev_warn(hdev->dev, "sdev[%d:%d] host reset failed\n",
+ scmd->device->channel, scmd->device->id);
+ return FAILED;
+ }
+
+ dev_warn(hdev->dev, "sdev[%d:%d] host reset success\n",
+ scmd->device->channel, scmd->device->id);
+
+ return SUCCESS;
+}
+
+static pci_ers_result_t hiraid_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "pci error detected, state[%d]\n", state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ dev_warn(hdev->dev, "channel is normal, do nothing\n");
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ dev_warn(hdev->dev, "channel io frozen, need reset controller\n");
+
+ scsi_block_requests(hdev->shost);
+
+ hiraid_dev_state_trans(hdev, DEV_RESETTING);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ dev_warn(hdev->dev, "channel io failure, disconnect\n");
+
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t hiraid_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "restart after slot reset\n");
+
+ pci_restore_state(pdev);
+
+ if (!queue_work(work_queue, &hdev->reset_work)) {
+		dev_err(hdev->dev, "err, the device is in resetting state\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ flush_work(&hdev->reset_work);
+
+ scsi_unblock_requests(hdev->shost);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void hiraid_reset_pci_finish(struct pci_dev *pdev)
+{
+ struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+ dev_info(hdev->dev, "enter hiraid reset finish\n");
+}
+
+static ssize_t csts_pp_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_PP_MASK);
+ ret >>= HIRAID_CSTS_PP_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_shst_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_SHST_MASK);
+ ret >>= HIRAID_CSTS_SHST_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_cfs_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev)) {
+ ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_CFS_MASK);
+ ret >>= HIRAID_CSTS_CFS_SHIFT;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t csts_rdy_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ int ret = -1;
+
+ if (pci_device_is_present(hdev->pdev))
+ ret = (readl(hdev->bar + HIRAID_REG_CSTS) & HIRAID_CSTS_RDY);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ret);
+}
+
+static ssize_t fw_version_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", hdev->ctrl_info->fw_version);
+}
+
+static ssize_t hdd_dispatch_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int val = 0;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+ if (val < DISPATCH_BY_CPU || val > DISPATCH_BY_DISK)
+ return -EINVAL;
+ hdev->hdd_dispatch = val;
+
+ return strlen(buf);
+}
+
+static ssize_t hdd_dispatch_show(struct device *cdev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hdev->hdd_dispatch);
+}
+
+static DEVICE_ATTR_RO(csts_pp);
+static DEVICE_ATTR_RO(csts_shst);
+static DEVICE_ATTR_RO(csts_cfs);
+static DEVICE_ATTR_RO(csts_rdy);
+static DEVICE_ATTR_RO(fw_version);
+static DEVICE_ATTR_RW(hdd_dispatch);
+
+static struct device_attribute *hiraid_host_attrs[] = {
+ &dev_attr_csts_rdy,
+ &dev_attr_csts_pp,
+ &dev_attr_csts_cfs,
+ &dev_attr_fw_version,
+ &dev_attr_csts_shst,
+ &dev_attr_hdd_dispatch,
+ NULL,
+};
+
+static int hiraid_get_vd_info(struct hiraid_dev *hdev, struct hiraid_vd_info *vd_info, u16 vid)
+{
+ struct hiraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t buffer_phy = 0;
+ int ret;
+
+ if (hdev->state >= DEV_RESETTING) {
+ dev_err(hdev->dev, "err, host state[%d] is not right\n", hdev->state);
+ return -EBUSY;
+ }
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_VDINFO);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.usr_cmd.info_1.param_len = cpu_to_le16(VDINFO_PARAM_LEN);
+ admin_cmd.usr_cmd.cdw10 = cpu_to_le32(vid);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, USRCMD_TIMEOUT);
+ if (!ret)
+ memcpy(vd_info, data_ptr, sizeof(struct hiraid_vd_info));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy);
+
+ return ret;
+}
+
+static int hiraid_get_bgtask(struct hiraid_dev *hdev, struct hiraid_bgtask *bgtask)
+{
+ struct hiraid_admin_command admin_cmd;
+ u8 *data_ptr = NULL;
+ dma_addr_t buffer_phy = 0;
+ int ret;
+
+ if (hdev->state >= DEV_RESETTING) {
+ dev_err(hdev->dev, "err, host state[%d] is not right\n", hdev->state);
+ return -EBUSY;
+ }
+
+ data_ptr = dma_alloc_coherent(hdev->dev, PAGE_SIZE, &buffer_phy, GFP_KERNEL);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ memset(&admin_cmd, 0, sizeof(admin_cmd));
+ admin_cmd.usr_cmd.opcode = USR_CMD_READ;
+ admin_cmd.usr_cmd.info_0.subopcode = cpu_to_le16(USR_CMD_BGTASK);
+ admin_cmd.usr_cmd.info_1.data_len = cpu_to_le16(USR_CMD_RDLEN);
+ admin_cmd.common.dptr.prp1 = cpu_to_le64(buffer_phy);
+
+ ret = hiraid_put_admin_sync_request(hdev, &admin_cmd, NULL, NULL, USRCMD_TIMEOUT);
+ if (!ret)
+ memcpy(bgtask, data_ptr, sizeof(struct hiraid_bgtask));
+
+ dma_free_coherent(hdev->dev, PAGE_SIZE, data_ptr, buffer_phy);
+
+ return ret;
+}
+
+static ssize_t raid_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct hiraid_dev *hdev;
+ struct hiraid_vd_info *vd_info;
+ struct hiraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !HIRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = hiraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ vd_info->rg_level = ARRAY_SIZE(raid_levels) - 1;
+
+ ret = (vd_info->rg_level < ARRAY_SIZE(raid_levels)) ?
+ vd_info->rg_level : (ARRAY_SIZE(raid_levels) - 1);
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "RAID-%s\n", raid_levels[ret]);
+}
+
+static ssize_t raid_state_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct hiraid_dev *hdev;
+ struct hiraid_vd_info *vd_info;
+ struct hiraid_sdev_hostdata *hostdata;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !HIRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = hiraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret) {
+ vd_info->vd_status = 0;
+ vd_info->rg_id = 0xff;
+ }
+
+ ret = (vd_info->vd_status < ARRAY_SIZE(raid_states)) ? vd_info->vd_status : 0;
+
+ kfree(vd_info);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", raid_states[ret]);
+}
+
+static ssize_t raid_resync_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev;
+ struct hiraid_dev *hdev;
+ struct hiraid_vd_info *vd_info;
+ struct hiraid_bgtask *bgtask;
+ struct hiraid_sdev_hostdata *hostdata;
+ u8 rg_id, i, progress = 0;
+ int ret;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ vd_info = kmalloc(sizeof(*vd_info), GFP_KERNEL);
+ if (!vd_info || !HIRAID_DEV_INFO_ATTR_VD(hostdata->attr))
+ return snprintf(buf, PAGE_SIZE, "NA\n");
+
+ ret = hiraid_get_vd_info(hdev, vd_info, sdev->id);
+ if (ret)
+ goto out;
+
+ rg_id = vd_info->rg_id;
+
+ bgtask = (struct hiraid_bgtask *)vd_info;
+ ret = hiraid_get_bgtask(hdev, bgtask);
+ if (ret)
+ goto out;
+ for (i = 0; i < bgtask->task_num; i++) {
+ if ((bgtask->bgtask[i].type == BGTASK_TYPE_REBUILD) &&
+ (le16_to_cpu(bgtask->bgtask[i].vd_id) == rg_id))
+ progress = bgtask->bgtask[i].progress;
+ }
+
+out:
+ kfree(vd_info);
+ return snprintf(buf, PAGE_SIZE, "%d\n", progress);
+}
+
+static ssize_t dispatch_hwq_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hiraid_sdev_hostdata *hostdata;
+
+ hostdata = to_scsi_device(dev)->hostdata;
+ return snprintf(buf, PAGE_SIZE, "%d\n", hostdata->hwq);
+}
+
+static ssize_t dispatch_hwq_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int val;
+ struct hiraid_dev *hdev;
+ struct scsi_device *sdev;
+ struct hiraid_sdev_hostdata *hostdata;
+
+ sdev = to_scsi_device(dev);
+ hdev = shost_priv(sdev->host);
+ hostdata = sdev->hostdata;
+
+ if (kstrtoint(buf, 0, &val) != 0)
+ return -EINVAL;
+ if (val <= 0 || val >= hdev->online_queues)
+ return -EINVAL;
+ if (!hiraid_disk_is_hdd(hostdata->attr))
+ return -EINVAL;
+
+ hostdata->hwq = val;
+ return strlen(buf);
+}
+
+static DEVICE_ATTR_RO(raid_level);
+static DEVICE_ATTR_RO(raid_state);
+static DEVICE_ATTR_RO(raid_resync);
+static DEVICE_ATTR_RW(dispatch_hwq);
+
+static struct device_attribute *hiraid_dev_attrs[] = {
+ &dev_attr_raid_state,
+ &dev_attr_raid_level,
+ &dev_attr_raid_resync,
+ &dev_attr_dispatch_hwq,
+ NULL,
+};
+
+static struct pci_error_handlers hiraid_err_handler = {
+ .error_detected = hiraid_pci_error_detected,
+ .slot_reset = hiraid_pci_slot_reset,
+ .reset_done = hiraid_reset_pci_finish,
+};
+
+static int hiraid_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
+{
+ int ret;
+ struct hiraid_dev *hdev = shost_priv(shost);
+
+ dev_info(hdev->dev, "start sysfs host reset cmd\n");
+ ret = hiraid_reset_work_sync(hdev);
+ dev_info(hdev->dev, "stop sysfs host reset cmd[%d]\n", ret);
+
+ return ret;
+}
+
+static int hiraid_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct hiraid_dev *hdev = shost_priv(shost);
+
+ hiraid_scan_work(&hdev->scan_work);
+
+ return 1;
+}
+
+static struct scsi_host_template hiraid_driver_template = {
+ .module = THIS_MODULE,
+ .name = "hiraid",
+ .proc_name = "hiraid",
+ .queuecommand = hiraid_queue_command,
+ .slave_alloc = hiraid_slave_alloc,
+ .slave_destroy = hiraid_slave_destroy,
+ .slave_configure = hiraid_slave_configure,
+ .scan_finished = hiraid_scan_finished,
+ .eh_timed_out = hiraid_timed_out,
+ .eh_abort_handler = hiraid_abort,
+ .eh_target_reset_handler = hiraid_target_reset,
+ .eh_bus_reset_handler = hiraid_bus_reset,
+ .eh_host_reset_handler = hiraid_host_reset,
+ .change_queue_depth = scsi_change_queue_depth,
+ .this_id = -1,
+ .unchecked_isa_dma = 0,
+ .shost_attrs = hiraid_host_attrs,
+ .sdev_attrs = hiraid_dev_attrs,
+ .host_reset = hiraid_sysfs_host_reset,
+};
+
+static void hiraid_shutdown(struct pci_dev *pdev)
+{
+ struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+
+ hiraid_delete_io_queues(hdev);
+ hiraid_disable_admin_queue(hdev, true);
+}
+
+static bool hiraid_bsg_is_valid(struct bsg_job *job)
+{
+ u64 timeout = 0;
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct hiraid_bsg_request *bsg_req = job->request;
+ struct hiraid_dev *hdev = shost_priv(dev_to_shost(job->dev));
+
+ if (bsg_req == NULL || job->request_len != sizeof(struct hiraid_bsg_request))
+ return false;
+
+ switch (bsg_req->msgcode) {
+ case HIRAID_BSG_ADMIN:
+ timeout = msecs_to_jiffies(bsg_req->admcmd.timeout_ms);
+ break;
+ case HIRAID_BSG_IOPTHRU:
+ timeout = msecs_to_jiffies(bsg_req->pthrucmd.timeout_ms);
+ break;
+ default:
+		dev_info(hdev->dev, "bsg unsupported msgcode[%d]\n", bsg_req->msgcode);
+ return false;
+ }
+
+ if ((timeout + CTL_RST_TIME) > rq->timeout) {
+ dev_err(hdev->dev, "bsg invalid time\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* bsg dispatch user command */
+static int hiraid_bsg_dispatch(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = dev_to_shost(job->dev);
+ struct hiraid_dev *hdev = shost_priv(shost);
+ struct request *rq = blk_mq_rq_from_pdu(job);
+ struct hiraid_bsg_request *bsg_req = job->request;
+ int ret = -ENOMSG;
+
+ job->reply_len = 0;
+
+ if (!hiraid_bsg_is_valid(job)) {
+ bsg_job_done(job, ret, 0);
+ return 0;
+ }
+
+ dev_log_dbg(hdev->dev, "bsg msgcode[%d] msglen[%d] timeout[%d];"
+ "reqnsge[%d], reqlen[%d]\n",
+ bsg_req->msgcode, job->request_len, rq->timeout,
+ job->request_payload.sg_cnt, job->request_payload.payload_len);
+
+ switch (bsg_req->msgcode) {
+ case HIRAID_BSG_ADMIN:
+ ret = hiraid_user_send_admcmd(hdev, job);
+ break;
+ case HIRAID_BSG_IOPTHRU:
+ ret = hiraid_user_send_ptcmd(hdev, job);
+ break;
+ default:
+ break;
+ }
+
+ if (ret > 0)
+ ret = ret | (ret << 8);
+
+ bsg_job_done(job, ret, 0);
+ return 0;
+}
+
+static inline void hiraid_unregist_bsg(struct hiraid_dev *hdev)
+{
+ if (hdev->bsg_queue) {
+ bsg_unregister_queue(hdev->bsg_queue);
+ blk_cleanup_queue(hdev->bsg_queue);
+ }
+}
+
+static int hiraid_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct hiraid_dev *hdev;
+ struct Scsi_Host *shost;
+ int node, ret;
+ char bsg_name[15];
+
+ shost = scsi_host_alloc(&hiraid_driver_template, sizeof(*hdev));
+ if (!shost) {
+ dev_err(&pdev->dev, "failed to allocate scsi host\n");
+ return -ENOMEM;
+ }
+ hdev = shost_priv(shost);
+ hdev->pdev = pdev;
+ hdev->dev = get_device(&pdev->dev);
+
+ node = dev_to_node(hdev->dev);
+ if (node == NUMA_NO_NODE) {
+ node = first_memory_node;
+ set_dev_node(hdev->dev, node);
+ }
+ hdev->numa_node = node;
+ hdev->shost = shost;
+ hdev->instance = shost->host_no;
+ pci_set_drvdata(pdev, hdev);
+
+ ret = hiraid_dev_map(hdev);
+ if (ret)
+ goto put_dev;
+
+ init_rwsem(&hdev->dev_rwsem);
+ INIT_WORK(&hdev->scan_work, hiraid_scan_work);
+ INIT_WORK(&hdev->timesyn_work, hiraid_timesyn_work);
+ INIT_WORK(&hdev->reset_work, hiraid_reset_work);
+ INIT_WORK(&hdev->fwact_work, hiraid_fwactive_work);
+ spin_lock_init(&hdev->state_lock);
+
+ ret = hiraid_alloc_resources(hdev);
+ if (ret)
+ goto dev_unmap;
+
+ ret = hiraid_pci_enable(hdev);
+ if (ret)
+ goto resources_free;
+
+ ret = hiraid_setup_admin_queue(hdev);
+ if (ret)
+ goto pci_disable;
+
+ ret = hiraid_init_control_info(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ ret = hiraid_setup_io_queues(hdev);
+ if (ret)
+ goto disable_admin_q;
+
+ hiraid_shost_init(hdev);
+
+ ret = scsi_add_host(hdev->shost, hdev->dev);
+ if (ret) {
+ dev_err(hdev->dev, "add shost to system failed, ret[%d]\n", ret);
+ goto remove_io_queues;
+ }
+
+ snprintf(bsg_name, sizeof(bsg_name), "hiraid%d", shost->host_no);
+ hdev->bsg_queue = bsg_setup_queue(&shost->shost_gendev, bsg_name, hiraid_bsg_dispatch,
+ NULL, hiraid_get_max_cmd_size(hdev));
+ if (IS_ERR(hdev->bsg_queue)) {
+ dev_err(hdev->dev, "err, setup bsg failed\n");
+ hdev->bsg_queue = NULL;
+ goto remove_io_queues;
+ }
+
+ if (hdev->online_queues == HIRAID_ADMIN_QUEUE_NUM) {
+ dev_warn(hdev->dev, "warn: only admin queue can be used\n");
+ return 0;
+ }
+
+ hdev->state = DEV_LIVE;
+
+ hiraid_init_async_event(hdev);
+
+ ret = hiraid_dev_list_init(hdev);
+ if (ret)
+ goto unregist_bsg;
+
+ ret = hiraid_configure_timestamp(hdev);
+ if (ret)
+ dev_warn(hdev->dev, "time synchronization failed\n");
+
+ ret = hiraid_alloc_io_ptcmds(hdev);
+ if (ret)
+ goto unregist_bsg;
+
+ scsi_scan_host(hdev->shost);
+
+ return 0;
+
+unregist_bsg:
+ hiraid_unregist_bsg(hdev);
+remove_io_queues:
+ hiraid_delete_io_queues(hdev);
+disable_admin_q:
+ hiraid_free_sense_buffer(hdev);
+ hiraid_disable_admin_queue(hdev, false);
+pci_disable:
+ hiraid_free_all_queues(hdev);
+ hiraid_pci_disable(hdev);
+resources_free:
+ hiraid_free_resources(hdev);
+dev_unmap:
+ hiraid_dev_unmap(hdev);
+put_dev:
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ return -ENODEV;
+}
+
+static void hiraid_remove(struct pci_dev *pdev)
+{
+ struct hiraid_dev *hdev = pci_get_drvdata(pdev);
+ struct Scsi_Host *shost = hdev->shost;
+
+ dev_info(hdev->dev, "enter hiraid remove\n");
+
+ hiraid_dev_state_trans(hdev, DEV_DELETING);
+ flush_work(&hdev->reset_work);
+
+ if (!pci_device_is_present(pdev))
+ hiraid_flush_running_cmds(hdev);
+
+ hiraid_unregist_bsg(hdev);
+ scsi_remove_host(shost);
+ hiraid_free_io_ptcmds(hdev);
+ kfree(hdev->dev_info);
+ hiraid_delete_io_queues(hdev);
+ hiraid_free_sense_buffer(hdev);
+ hiraid_disable_admin_queue(hdev, false);
+ hiraid_free_all_queues(hdev);
+ hiraid_pci_disable(hdev);
+ hiraid_free_resources(hdev);
+ hiraid_dev_unmap(hdev);
+ put_device(hdev->dev);
+ scsi_host_put(shost);
+
+ dev_info(hdev->dev, "exit hiraid remove\n");
+}
+
+static const struct pci_device_id hiraid_hw_card_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_LOGIC, HIRAID_SERVER_DEVICE_HBA_DID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI_LOGIC, HIRAID_SERVER_DEVICE_RAID_DID) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, hiraid_hw_card_ids);
+
+static struct pci_driver hiraid_driver = {
+ .name = "hiraid",
+ .id_table = hiraid_hw_card_ids,
+ .probe = hiraid_probe,
+ .remove = hiraid_remove,
+ .shutdown = hiraid_shutdown,
+ .err_handler = &hiraid_err_handler,
+};
+
+static int __init hiraid_init(void)
+{
+ int ret;
+
+ work_queue = alloc_workqueue("hiraid-wq", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!work_queue)
+ return -ENOMEM;
+
+ hiraid_class = class_create(THIS_MODULE, "hiraid");
+ if (IS_ERR(hiraid_class)) {
+ ret = PTR_ERR(hiraid_class);
+ goto destroy_wq;
+ }
+
+ ret = pci_register_driver(&hiraid_driver);
+ if (ret < 0)
+ goto destroy_class;
+
+ return 0;
+
+destroy_class:
+ class_destroy(hiraid_class);
+destroy_wq:
+ destroy_workqueue(work_queue);
+
+ return ret;
+}
+
+static void __exit hiraid_exit(void)
+{
+ pci_unregister_driver(&hiraid_driver);
+ class_destroy(hiraid_class);
+ destroy_workqueue(work_queue);
+}
+
+MODULE_AUTHOR("Huawei Technologies CO., Ltd");
+MODULE_DESCRIPTION("Huawei RAID driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(HIRAID_DRV_VERSION);
+module_init(hiraid_init);
+module_exit(hiraid_exit);
--
2.22.0.windows.1

[PATCH openEuler-1.0-LTS v2] netfilter: conntrack: dccp: copy entire header to stack buffer, not just basic one
by Zhengchao Shao 16 Nov '23
16 Nov '23
From: Florian Westphal <fw(a)strlen.de>
mainline inclusion
from mainline-v6.5-rc1
commit ff0a3a7d52ff7282dbd183e7fc29a1fe386b0c30
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/I8EZSO
CVE: CVE-2023-39197
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?…
--------------------------------
Eric Dumazet says:
nf_conntrack_dccp_packet() has a unique:
dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
And nothing more is 'pulled' from the packet, depending on the content
(dh->dccph_doff, and/or dh->dccph_x ...).
So dccp_ack_seq() is happily reading stuff past the _dh buffer.
BUG: KASAN: stack-out-of-bounds in nf_conntrack_dccp_packet+0x1134/0x11c0
Read of size 4 at addr ffff000128f66e0c by task syz-executor.2/29371
[..]
Fix this by increasing the stack buffer to also include room for
the extra sequence numbers and all the known dccp packet type headers,
then pull again after the initial validation of the basic header.
While at it, mark packets invalid that lack 48bit sequence bit but
where RFC says the type MUST use them.
Compile tested only.
v2: first skb_header_pointer() now needs to adjust the size to
only pull the generic header. (Eric)
Heads-up: I intend to remove dccp conntrack support later this year.
Fixes: 2bc780499aa3 ("[NETFILTER]: nf_conntrack: add DCCP protocol support")
Reported-by: Eric Dumazet <edumazet(a)google.com>
Signed-off-by: Florian Westphal <fw(a)strlen.de>
Reviewed-by: Eric Dumazet <edumazet(a)google.com>
Signed-off-by: Pablo Neira Ayuso <pablo(a)netfilter.org>
Conflicts:
net/netfilter/nf_conntrack_proto_dccp.c
Signed-off-by: Zhengchao Shao <shaozhengchao(a)huawei.com>
---
net/netfilter/nf_conntrack_proto_dccp.c | 50 ++++++++++++++++++++++++-
1 file changed, 48 insertions(+), 2 deletions(-)
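For context, a minimal sketch of the call pattern this fix establishes (simplified and illustrative only, not part of the patch; dccp_header_pointer() is the helper introduced below): skb_header_pointer() only guarantees the bytes that were explicitly requested, so the type-dependent fields must be covered by a second, larger pull before they are read.
	struct nf_conntrack_dccp_buf _dh;
	struct dccp_hdr *dh;
	/* first pull: only the fixed-size generic DCCP header is valid */
	dh = skb_header_pointer(skb, dataoff, sizeof(_dh.dh), &_dh.dh);
	if (!dh)
		return NF_DROP;
	/* second pull: __dccp_hdr_len(dh) also covers the optional 48-bit
	 * sequence numbers and the type-specific part, so later accesses
	 * such as dccp_ack_seq() stay within the stack buffer
	 */
	dh = dccp_header_pointer(skb, dataoff, dh, &_dh);
	if (!dh)
		return NF_DROP;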
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 3ba1f4d9934f..cd7a51a0f100 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -433,17 +433,47 @@ static u64 dccp_ack_seq(const struct dccp_hdr *dh)
 		     ntohl(dhack->dccph_ack_nr_low);
 }
+struct nf_conntrack_dccp_buf {
+	struct dccp_hdr dh;	 /* generic header part */
+	struct dccp_hdr_ext ext; /* optional depending dh->dccph_x */
+	union {			 /* depends on header type */
+		struct dccp_hdr_ack_bits ack;
+		struct dccp_hdr_request req;
+		struct dccp_hdr_response response;
+		struct dccp_hdr_reset rst;
+	} u;
+};
+
+static struct dccp_hdr *
+dccp_header_pointer(const struct sk_buff *skb, int offset, const struct dccp_hdr *dh,
+		    struct nf_conntrack_dccp_buf *buf)
+{
+	unsigned int hdrlen = __dccp_hdr_len(dh);
+
+	if (hdrlen > sizeof(*buf))
+		return NULL;
+
+	return skb_header_pointer(skb, offset, hdrlen, buf);
+}
+
 static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
 		       unsigned int dataoff, enum ip_conntrack_info ctinfo)
 {
 	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-	struct dccp_hdr _dh, *dh;
+	struct nf_conntrack_dccp_buf _dh;
 	u_int8_t type, old_state, new_state;
 	enum ct_dccp_roles role;
 	unsigned int *timeouts;
+	struct dccp_hdr *dh;
 	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
 	BUG_ON(dh == NULL);
+
+	/* pull again, including possible 48 bit sequences and subtype header */
+	dh = dccp_header_pointer(skb, dataoff, dh, &_dh);
+	if (!dh)
+		return NF_DROP;
+
 	type = dh->dccph_type;
 	if (type == DCCP_PKT_RESET &&
@@ -526,10 +556,20 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
 		      struct sk_buff *skb, unsigned int dataoff,
 		      u_int8_t pf, unsigned int hooknum)
 {
+	static const unsigned long require_seq48 = 1 << DCCP_PKT_REQUEST |
+						   1 << DCCP_PKT_RESPONSE |
+						   1 << DCCP_PKT_CLOSEREQ |
+						   1 << DCCP_PKT_CLOSE |
+						   1 << DCCP_PKT_RESET |
+						   1 << DCCP_PKT_SYNC |
+						   1 << DCCP_PKT_SYNCACK;
 	struct dccp_hdr _dh, *dh;
 	unsigned int dccp_len = skb->len - dataoff;
 	unsigned int cscov;
 	const char *msg;
+	u8 type;
+
+	BUILD_BUG_ON(DCCP_PKT_INVALID >= BITS_PER_LONG);
 	dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
 	if (dh == NULL) {
@@ -559,11 +599,17 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
 		goto out_invalid;
 	}
-	if (dh->dccph_type >= DCCP_PKT_INVALID) {
+	type = dh->dccph_type;
+	if (type >= DCCP_PKT_INVALID) {
 		msg = "nf_ct_dccp: reserved packet type ";
 		goto out_invalid;
 	}
+	if (test_bit(type, &require_seq48) && !dh->dccph_x) {
+		msg = "nf_ct_dccp: type lacks 48bit sequence numbers";
+		goto out_invalid;
+	}
+
 	return NF_ACCEPT;
 out_invalid:
--
2.34.1
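[Editor's note: the require_seq48 check in the patch above packs the DCCP
packet types that must carry 48-bit sequence numbers into one unsigned
long and tests it with test_bit(). The following minimal, self-contained
userspace sketch illustrates the same technique. It is not part of the
patch: the enum values follow RFC 4340 and the kernel's enum
dccp_pkt_type, and test_bit_ul() is a plain stand-in for the kernel's
test_bit().]

/* Editor-provided sketch of the type-bitmask validation technique. */
#include <stdio.h>

enum dccp_pkt_type {
	DCCP_PKT_REQUEST = 0,
	DCCP_PKT_RESPONSE,
	DCCP_PKT_DATA,
	DCCP_PKT_ACK,
	DCCP_PKT_DATAACK,
	DCCP_PKT_CLOSEREQ,
	DCCP_PKT_CLOSE,
	DCCP_PKT_RESET,
	DCCP_PKT_SYNC,
	DCCP_PKT_SYNCACK,
	DCCP_PKT_INVALID,
};

/* stand-in for the kernel's test_bit() on a single unsigned long */
static int test_bit_ul(unsigned int nr, unsigned long mask)
{
	return (mask >> nr) & 1UL;
}

int main(void)
{
	/* types that MUST carry 48-bit (extended) sequence numbers */
	const unsigned long require_seq48 = 1UL << DCCP_PKT_REQUEST |
					    1UL << DCCP_PKT_RESPONSE |
					    1UL << DCCP_PKT_CLOSEREQ |
					    1UL << DCCP_PKT_CLOSE |
					    1UL << DCCP_PKT_RESET |
					    1UL << DCCP_PKT_SYNC |
					    1UL << DCCP_PKT_SYNCACK;
	unsigned int type;

	for (type = 0; type < DCCP_PKT_INVALID; type++)
		printf("type %u %s 48-bit sequence numbers\n", type,
		       test_bit_ul(type, require_seq48) ? "requires" : "may omit");
	return 0;
}

Only DCCP-Data, DCCP-Ack and DCCP-DataAck are allowed to use short
sequence numbers, which is exactly what the mask above encodes.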
From: Juan Zhou <zhoujuan51(a)h-partners.com>
The first patch refactors the hns RoCE debugfs code, and the second
patch adds support for querying the SRQ context via debugfs (an
illustrative sketch of the usual debugfs wiring follows the diffstat
below).
Junxian Huang (1):
RDMA/hns: Refactor hns RoCE debugfs
wenglianfa (1):
RDMA/hns: Support SRQ Context tracing by debugfs
drivers/infiniband/hw/hns/hns_roce_debugfs.c | 318 +++++++++----------
drivers/infiniband/hw/hns/hns_roce_debugfs.h | 57 ++++
drivers/infiniband/hw/hns/hns_roce_device.h | 7 +-
drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 26 +-
drivers/infiniband/hw/hns/hns_roce_main.c | 1 -
drivers/infiniband/hw/hns/hns_roce_srq.c | 12 +
6 files changed, 242 insertions(+), 179 deletions(-)
--
2.30.0
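[Editor's note: the cover letter does not show the new debugfs layout
itself. As an illustration only, the sketch below shows the usual
seq_file-based wiring a driver uses to expose a read-only debugfs entry
such as an SRQ context dump. All names here (demo_roce, srq_context,
demo_srq_context_show) are hypothetical; only the debugfs and seq_file
calls are standard kernel APIs, and the real series may structure this
differently.]

/* Editor-provided sketch: a read-only debugfs file backed by seq_file. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *demo_root;

/* hypothetical dump callback standing in for an SRQ context dump */
static int demo_srq_context_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "srqn: %u\nstate: %u\n", 0, 0);
	return 0;
}

static int demo_srq_context_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_srq_context_show, inode->i_private);
}

static const struct file_operations demo_srq_context_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_srq_context_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	demo_root = debugfs_create_dir("demo_roce", NULL);
	debugfs_create_file("srq_context", 0400, demo_root, NULL,
			    &demo_srq_context_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With such wiring, "cat /sys/kernel/debug/demo_roce/srq_context" would
print whatever the show callback emits; the actual hns file names and
fields are defined by the patches themselves.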
The patch set includes two parts:
1. Patches 1~15: rebase smart_grid from openEuler-1.0-LTS to OLK-5.10.
2. Patches 16~19: introduce smart_grid zone QoS and cpufreq control (an
illustrative sysctl-wiring sketch appears after the diffstat below).
Since v4:
1. Place the highest-level task in the current domain level itself in
sched_grid_prefer_cpus.
Since v3:
1. Fix a CI warning.
Since v2:
1. Statically allocate the sg_zone cpumask.
2. Fix some warnings.
Hui Tang (13):
sched: Introduce smart grid scheduling strategy for cfs
sched: fix smart grid usage count
sched: fix WARN found by deadlock detect
sched: Fix possible deadlock in tg_set_dynamic_affinity_mode
sched: Fix negative count for jump label
sched: Fix timer storm for smart grid
sched: fix dereference NULL pointers
sched: Fix memory leak on error branch
sched: clear credit count in error branch
sched: Adjust few parameters range for smart grid
sched: Delete redundant updates to p->prefer_cpus
sched: Fix memory leak for smart grid
sched: Fix null pointer derefrence for sd->span
Wang ShaoBo (2):
sched: smart grid: init sched_grid_qos structure on QOS purpose
config: enable CONFIG_QOS_SCHED_SMART_GRID by default
Yipeng Zou (4):
sched: introduce smart grid qos zone
smart_grid: introduce /proc/pid/smart_grid_level
smart_grid: introduce smart_grid_strategy_ctrl sysctl
smart_grid: cpufreq: introduce smart_grid cpufreq control
arch/arm64/configs/openeuler_defconfig | 1 +
drivers/cpufreq/cpufreq.c | 234 ++++++++++++
fs/proc/array.c | 13 +
fs/proc/base.c | 76 ++++
include/linux/cpufreq.h | 11 +
include/linux/sched.h | 22 ++
include/linux/sched/grid_qos.h | 135 +++++++
include/linux/sched/sysctl.h | 5 +
init/Kconfig | 13 +
kernel/fork.c | 15 +-
kernel/sched/Makefile | 1 +
kernel/sched/core.c | 160 +++++++-
kernel/sched/fair.c | 496 ++++++++++++++++++++++++-
kernel/sched/grid/Makefile | 2 +
kernel/sched/grid/internal.h | 6 +
kernel/sched/grid/power.c | 27 ++
kernel/sched/grid/qos.c | 273 ++++++++++++++
kernel/sched/grid/stat.c | 47 +++
kernel/sched/sched.h | 48 +++
kernel/sysctl.c | 22 +-
mm/mempolicy.c | 12 +-
21 files changed, 1601 insertions(+), 18 deletions(-)
create mode 100644 include/linux/sched/grid_qos.h
create mode 100644 kernel/sched/grid/Makefile
create mode 100644 kernel/sched/grid/internal.h
create mode 100644 kernel/sched/grid/power.c
create mode 100644 kernel/sched/grid/qos.c
create mode 100644 kernel/sched/grid/stat.c
--
2.34.1
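[Editor's note: the series adds a smart_grid_strategy_ctrl sysctl. As an
illustration only, the sketch below shows how a boolean control of that
kind is typically wired up. The variable names, range and table layout
are hypothetical and the real patches may do this differently; only
ctl_table, proc_douintvec_minmax and register_sysctl are standard kernel
APIs.]

/* Editor-provided sketch: registering a 0/1 sysctl under /proc/sys/kernel. */
#include <linux/module.h>
#include <linux/sysctl.h>

static unsigned int demo_strategy_ctrl;	/* 0 = off, 1 = on (assumed semantics) */
static unsigned int demo_min;		/* 0 */
static unsigned int demo_max = 1;

static struct ctl_table demo_table[] = {
	{
		.procname	= "smart_grid_strategy_ctrl",
		.data		= &demo_strategy_ctrl,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= &demo_min,
		.extra2		= &demo_max,
	},
	{ }
};

static struct ctl_table_header *demo_header;

static int __init demo_sysctl_init(void)
{
	demo_header = register_sysctl("kernel", demo_table);
	return demo_header ? 0 : -ENOMEM;
}

static void __exit demo_sysctl_exit(void)
{
	unregister_sysctl_table(demo_header);
}

module_init(demo_sysctl_init);
module_exit(demo_sysctl_exit);
MODULE_LICENSE("GPL");

Writing "echo 1 > /proc/sys/kernel/smart_grid_strategy_ctrl" would then
flip the (hypothetical) flag; how the scheduler consumes it is defined
by the patches themselves.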
